Django community: RSS
This page, updated regularly, aggregates Django Q&A from the Django community.
-
Overriding Save but database is not updated
I have a model, containing a save overide, that uses a function to set whether a sync is required based on a comparison of dates. The save is called by update_or_create which is in turn called by a management command For the purposes of debugging this issue sync_required is set to True regardless. class Site(models.Model): # LastEditDate & LastEditTime LastEditDate = models.DateField(blank=True, null=True) LastEditTime = models.TimeField(blank=True, null=True) ... # System Specific date_modified = models.DateTimeField(auto_now=True, null=True) last_synced = models.DateTimeField(blank=True, null=True) sync_required = models.BooleanField(blank=False, null=True, default=False) # Function to calculate if sync required. def is_sync_required(self): ... return True def save(self, *args, **kwargs): self.sync_required = self.is_sync_required() logger.info( f"Site/Save: LED:{self.LastEditDate}, LET:{self.LastEditTime}, DM:{self.date_modified}, LS:{self.last_synced} = SR:{self.sync_required}") super(Site, self).save(*args, **kwargs) In full operation the method is producing the correct output: A001 Site/is_sync_required/Return for A001= True with MinutesSince>=0 84659.0 Site/Save: LED:2024-02-29, LET:09:06:04.174000, DM:2024-09-27 08:19:27.430154+00:00, LS:2024-01-01 14:06:28+00:00 = SR:True A002 Site/is_sync_required/Return for A002= False with MinutesSince<0 -25449.0 Site/Save: LED:2024-08-14, LET:16:51:17.732000, DM:2024-09-27 08:20:33.720193+00:00, LS:2024-09-01 08:00:00+00:00 = SR:False A003 Site/is_sync_required/Return for A003= True with last_synced_none None Site/Save: LED:2024-09-02, LET:08:25:50.232000, DM:2024-09-27 08:18:35.456365+00:00, LS:None = SR:True A004 Site/is_sync_required/Return for A004= True with MinutesSince>=0 51517.0 Site/Save: LED:2024-08-06, LET:09:43:40.257000, DM:2024-09-27 08:21:40.917783+00:00, LS:2024-07-01 14:06:28+00:00 = SR:True However, the result of sync_required is not being saved … -
How to test the websocket connection in Django Channels
I need to test the success of the websocket connection. When connecting, the query_string parameter is checked. If it is transmitted, the connection is allowed. If not, the connection is closed. At the moment, the test fails with an error: Task was destroyed but it is pending! task: <Task pending name='Task-2' coro=<ProtocolTypeRouter.__call__() running at /usr/local/lib/python3.10/site-packages/channels/routing.py:62> wait_for=<Future cancelled>> Exception ignored in: <coroutine object ProtocolTypeRouter.__call__ at 0xffff845ac3c0> Traceback (most recent call last): File "/usr/local/lib/python3.10/site-packages/channels/routing.py", line 62, in __call__ File "/usr/local/lib/python3.10/site-packages/channels/security/websocket.py", line 41, in __call__ File "/usr/local/lib/python3.10/site-packages/channels/consumer.py", line 58, in __call__ File "/usr/local/lib/python3.10/site-packages/channels/utils.py", line 55, in await_many_dispatch File "/usr/local/lib/python3.10/asyncio/base_events.py", line 753, in call_soon File "/usr/local/lib/python3.10/asyncio/base_events.py", line 515, in _check_closed RuntimeError: Event loop is closed Task was destroyed but it is pending! task: <Task pending name='Task-5' coro=<Queue.get() running at /usr/local/lib/python3.10/asyncio/queues.py:159> wait_for=<Future cancelled>> Task was destroyed but it is pending! 
task: <Task pending name='Task-4' coro=<InMemoryChannelLayer.receive() running at /usr/local/lib/python3.10/site-packages/channels/layers.py:249> wait_for=<Future pending cb=[Task.task_wakeup()]>> Exception ignored in: <coroutine object InMemoryChannelLayer.receive at 0xffff845ac7b0> Traceback (most recent call last): File "/usr/local/lib/python3.10/site-packages/channels/layers.py", line 249, in receive File "/usr/local/lib/python3.10/asyncio/queues.py", line 161, in get File "/usr/local/lib/python3.10/asyncio/base_events.py", line 753, in call_soon File "/usr/local/lib/python3.10/asyncio/base_events.py", line 515, in _check_closed RuntimeError: Event loop is closed My application configuration: settings.py ASGI_APPLICATION = "hs_cubic.asgi.application" ASGI_MAX_WORKER_RUNTIME = 60 * 10 … -
Django StreamingHttpResponse with asgi setup warning
Hi I am using following class based view to define a endpoint to return large Django Queryset as either Json or CSV using Streaming Response class DataView(viewsets.ReadOnlyModelViewSet): permission_classes = [IsAuthenticated, StrictDjangoModelPermissions] authentication_classes = [JWTAuthentication] queryset = Data.objects.all() serializer_class = DataSerializer pagination_class = LimitOffsetPagination filter_backends = (DjangoFilterBackend,SearchFilter) filterset_class = DataFilter search_fields = ['^ts'] def generate_data(self, start, end, needCSV, points): cols = ['ts', 'topic_id', 'value_string'] if len(points) > 0: data = Data.objects.filter(ts__range=(start, end), topic_id__in=points) else: data = Data.objects.filter(ts__range=(start, end)) dis_dict = {point.id: point.navPoint for point in Points.objects.filter(id__in=points)} if needCSV: yield ','.join(cols + ['dis']) + '\n' for row in data: dis_value = dis_dict.get(row.topic_id, '') yield ','.join(map(str, [getattr(row, col) for col in cols] + [dis_value])) + '\n' else: yield '[' for i, row in enumerate(data): if i > 0: yield ',' dis_value = dis_dict.get(row.topic_id, '') row_dict = {col: str(getattr(row, col)) for col in cols} row_dict['dis'] = dis_value yield json.dumps(row_dict) yield ']' def list(self, request, *args, **kwargs): try: csv = request.GET.get('csv', False) csv = csv and csv.lower() == 'true' points = request.GET.getlist('point', []) start = request.GET['start'] end = request.GET['end'] contentType = 'text/csv' if csv else 'application/json' fileName = 'data.csv' if csv else 'data.json' response = StreamingHttpResponse(self.generate_data(start, end, csv, points), content_type=contentType) response['Content-Disposition'] = f'attachment; filename="{fileName}"' return … -
Django CustomUser Model Causes Lazy Reference Errors During Migration
I'm working on a Django project where I have defined a CustomUser model in an app called authentication. I've correctly set the AUTH_USER_MODEL to reference this model, but when I try to run migrations, I get the following errors: ValueError: The field admin.LogEntry.user was declared with a lazy reference to 'authentication.customuser', but app 'authentication' doesn't provide model 'customuser'. The field authentication.Device.user was declared with a lazy reference to 'authentication.customuser', but app 'authentication' doesn't provide model 'customuser'. The field authentication.MoodReview.user was declared with a lazy reference to 'authentication.customuser', but app 'authentication' doesn't provide model 'customuser'. The field authentication.PasswordResetCode.user was declared with a lazy reference to 'authentication.customuser', but app 'authentication' doesn't provide model 'customuser'. The field authtoken.Token.user was declared with a lazy reference to 'authentication.customuser', but app 'authentication' doesn't provide model 'customuser'. # authentication/models.py from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager from django.db import models from django.utils import timezone class CustomUserManager(BaseUserManager): def create_user(self, email, password=None, **extra_fields): if not email: raise ValueError("The Email field must be set") email = self.normalize_email(email) user = self.model(email=email, **extra_fields) user.set_password(password) user.save(using=self._db) return user def create_superuser(self, email, password=None, **extra_fields): extra_fields.setdefault('is_staff', True) extra_fields.setdefault('is_superuser', True) if extra_fields.get('is_staff') is not True: raise ValueError('Superuser must have is_staff=True.') if extra_fields.get('is_superuser') is not True: raise ValueError('Superuser … -
won't store form data in django database sqlite
I'm trying to build a django application for my web development class for an assignment but my form info won't save to the database. Alot of this code is based on his lectures so I don't wanna add any libraries or change this code up too much. The post request comes through in the terminal but no data is being stored. This is my code so far: views.py from django.shortcuts import render, redirect from django.http import HttpResponse, HttpRequest from . models import Activity # Create your views here. def index(request: HttpRequest): return render(request, "activity/index.html") def new_activity(request: HttpRequest): return render(request, "activity/new_activity.html") def create_new_activity(request: HttpRequest): params = request.POST activity = Activity( activity_name = params.get("activity_name") ) activity.save() return redirect("/") the page where my form is submitting new_activity.html <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <title>Document</title> </head> <body> <h1>New Activity</h1> <form action="/new_activity/" method="post"> <input type="text" name="activity_name" id="activity_name"> <button type="submit">Add Activity</button> </form> </body> </html> urls.pf from django.urls import path from . import views urlpatterns = [ path("", views.index, name="index"), path("new_activity/", views.new_activity, name="new_activity"), ] and this is the model I tried migrating from django.db import models # Create your models here. class Activity(models.Model): id = models.BigAutoField(primary_key=True) activity_name = models.TextField() class TimeLog(models.Model): id … -
Django Static Files Not Loading from AWS S3 After Running collectstatic
I'm working on a Django project where I want to store and serve static files using AWS S3. I have followed the usual configuration steps and set up my settings.py to use S3 for static files. However, after running collectstatic, my static files are not loading, and I can't access them through the S3 bucket. Here's a summary of what I've done so far: Configuration in settings.py: # AWS Credentials AWS_ACCESS_KEY_ID = 'my-access-key' AWS_SECRET_ACCESS_KEY = 'my-secret-key' AWS_STORAGE_BUCKET_NAME = 'cats-gallery-amanda' # S3 Static File Settings AWS_S3_CUSTOM_DOMAIN = f'{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com' AWS_DEFAULT_ACL = 'public-read' AWS_LOCATION = 'static' AWS_QUERYSTRING_AUTH = False AWS_S3_OBJECT_PARAMETERS = { 'CacheControl': 'max-age=86400', } # Static Files Settings STATIC_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/static/' STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage' # Directories STATICFILES_DIRS = [ os.path.join(BASE_DIR, 'setup/static'), ] What I've Tried: Running collectstatic: Command: python manage.py collectstatic Output says: 0 static files copied to 'static/', 176 unmodified. S3 Bucket Permissions: I’ve set the bucket policy to allow public access: { "Version": "2012-10-17", "Statement": [ { "Sid": "PublicReadGetObject", "Effect": "Allow", "Principal": "*", "Action": "s3:GetObject", "Resource": "arn:aws:s3:::cats-gallery-amanda/static/*" } ] } Checking Files in S3: After running collectstatic, there are no files in the S3 bucket under static/. Attempted Solutions: Verified that django-storages and boto3 are installed. Double-checked AWS credentials. Ensured … -
Django Framework CSRF verification failed with 403 Forbidden error. Request aborted for non-HTML python script
I am using Django Framework with DRF to generate APIs at the backend. I am using python script as an standalone windows based application to retrieve and send data to the backend server which is Django. It is not HTML application and no Cookies are involved. It is simply running python script with "request" library. I am also using JWT authentication token to secure my requests to server. While making request to fetch the token to the server I am getting below error: CSRF verification failed. Request aborted. You are seeing this message because this site requires a CSRF cookie when submitting forms. This cookie is required for security reasons, to ensure that your browser is not being hijacked by third parties. If you have configured your browser to disable cookies, please re-enable them, at least for this site, or for “same-origin” requests. Reason given for failure: CSRF cookie not set. My Settings.py is as below: MIDDLEWARE = [ 'autobiz.middleware.WebSocketMiddleware', 'django_tenants.middleware.main.TenantMainMiddleware', 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', "corsheaders.middleware.CorsMiddleware", 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] CORS_ORIGIN_ALLOW_ALL = True CORS_ALLOW_CREDENTIALS = True CORS_ALLOW_METHODS = [ "DELETE", "GET", "OPTIONS", "PATCH", "POST", "PUT", ] CORS_ALLOW_HEADERS = [ "accept", "accept-encoding", "authorization", "content-type", "dnt", "origin", "user-agent", "x-csrftoken", "x-requested-with", REST_FRAMEWORK={ 'DEFAULT_FILTER_BACKENDS':['django_filters.rest_framework.DjangoFilterBackend', … -
CSS styling for individual paths not working
I have a visual that has 11 hotspots spaced around a png graphic. I have created individual paths for each, and want to create a link as well as a hover that changes color to show which area is selected. The link and the hover work for the first graph in the list but not for any of the following ones. The CSS is: #path_cm:hover, #path_rm:hover, #path_is:hover, #path_bc:hover, #path_hr:hover, #path_ict:hover, #path_sc:hover, #path_fa:hover, #path_env:hover, #path_hs:hover, #path_em:hover { opacity: .5 } the HTML is (many lines of png graphic deleted, the second python url variable is a place holder to see if urls were needed to activate hover and would be replaced with the real one): <div class='containervert' style='top: 5%;'> <?xml version="1.0" encoding="UTF-8"?> <!-- Generated by Pixelmator Pro 3.6.9 --> <svg width="720" height="688" viewBox="0 0 720 688" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"> (png data) <a href='{% url 'introduction' %}'> <path id="path_cm" fill="#027aff" stroke="none" opacity="0" d="M 707.76001 330.686188 C 707.76001 311.409363 692.125793 295.78241 672.840027 295.78241 C 653.554199 295.78241 637.919983 311.409363 637.919983 330.686188 C 637.919983 349.963013 653.554199 365.589966 672.840027 365.589966 C 692.125793 365.589966 707.76001 349.963013 707.76001 330.686188 Z"/> <a> <a href='{% url 'introduction' %}'> <path id="path_rm" fill="#027aff" stroke="none" opacity="0" d="M 676.919983 195.78241 C 676.919983 176.452454 661.249939 160.78241 … -
How can I annotate my Django queryset with a count of related objects?
I have 3 django models; Machine, Component, and Part. I am trying to annotate a machine queryset to include the count of unique parts used in that machine. In the example below, ps1 is used to make f1. ps1 and ps2 are used to make f2. f1 and f2 are used to make r1. Therefore, 2 unique parts are used to create r1 and this is the value I want to use to annotate r1. So far my code is outputting 3 in this case. # |------------+---------------------+----------------+--------------+--------------| # | machine | component | part | expected | result | # | | | | parts | parts | # |------------+---------------------+----------------+--------------+--------------| # | r1 | f1 | ps1 | 2 | 3 | # | | f2 | ps1 | | | # | | | ps2 | | | # |------------+---------------------+----------------+--------------+--------------| Part has a ManyToManyField of Component. Component has a ForeignKey to Machine. This is the function I am working with to try and accomplish this. def annotate_machine_query_with_num_parts(machine_query): count_subquery = ( Parts.objects.filter(pk=OuterRef("pk")) .annotate(count=Count("id", distinct=True)) .values("count") ) sum_subquery = ( Parts.objects.filter( Q(component__machine_id=OuterRef("pk")) ) .annotate(count=Subquery(count_subquery)) .values("count") .annotate(num_parts=Sum("count")) .values("num_parts") ) return machine_query.annotate(num_parts=Coalesce(sum_subquery, 0)) This works for many test cases but fails when a … -
Django in Azure - CSRF Errors for existing URL in CSRF_TRUSTED_ORIGINS list
Deployed as web app in Azure and added my custom purchased domain name, lets call it 'i-heart-java.com'. I added the URL into the ALLOWED_HOSTS and CSRF_TRUSTED_ORIGINS lists, both as https and http, including with extra 'www.' entries. App pulls up successfully on those URL's and my page works for the most part, except when logging into any part of the app ONLY WITH MY CUSTOM DOMAIN, otherwise login works fine with the azure default domain. Error shows 2024-09-24T14:24:35.1649753Z Forbidden (Origin checking failed - https://www.i-heart-java.com does not match any trusted origins.): /admin/login/ My settings are as follows, sanitized the real names for obvious reasons: ALLOWED_HOSTS = [ 'https://127.0.0.1:8000', '127.0.0.1', 'https://i-heart-java-XXX.eastus-0X.azurewebsites.net/', "http://i-heart-java.com", 'https://i-heart-java.com/', "http://www.i-heart-java.com", 'https://www.i-heart-java.com/', ..others.., ] SESSION_COOKIE_SECURE = True SECURE_SSL_REDIRECT = False CORS_ALLOW_ALL_ORIGINS = True CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True CSRF_USE_SESSIONS = False CSRF_COOKIE_SAMESITE = 'Lax' CSRF_TRUSTED_ORIGINS=[ "https://i-heart-java.com", "http://i-heart-java.com", "https://www.i-heart-java.com", "http://www.i-heart-java.com", ..others.., ] What could be causing the CSRF trigger only when logging in via my custom domain even though I have all my hostnames and URLs in the right places? Could it be the custom domain DNS? (please tell me no) I reconfirmed the custom domain DNS settings in the Azure web app and also my domain host. I … -
Neon branching with Django pytest
I want to use Neon’s branching feature to create a new branch (a copy of the main database branch on Neon) every time the test suite runs. In the project I’m working on, we use pytest-django, PostgreSQL as our database, and Django as the backend. Pytest allows specifying a live database for testing (pytest-django docs). I’ve been trying to dynamically create a branch and a compute endpoint using the Neon APIs (Neon API docs). My goal is to use the newly created HOST for the test run, but the tests always run in the test_[DEFAULT_DB_NAME]. Here’s what my conftest.py file looks like: @pytest.fixture(scope='session') def django_db_setup(): logger.info("Creating new test db branch") neon = NeonAPI(api_key=os.environ.get('NEON_API_KEY')) project_id = "project_id" branch = neon.branch_create(project_id=project_id) settings.DATABASES['default'] = { 'ENGINE': 'django.db.backends.postgresql', 'NAME': os.environ.get('PGDATABASE', ''), 'USER': os.environ.get('PGUSER', ''), 'PASSWORD': os.environ.get('PGPASSWORD', ''), 'HOST': 'soemthing', 'PORT': os.environ.get('PGPORT', '5432'), } But this does not seem to have any effect at all. -
Django is displaying the debug 404 page when DEBUG is False
My Django app is not using a custom 404 page or handler. In my local dev environment when DEBUG is False I get a standard 404 page, but on my deployed app on Render.com I get the Django debug 404 page. I have verified that DEBUG is indeed False there. I don't understand at all how this could be happening. My site is otherwise working well so I know ALLOWED_HOSTS is correct, etc. This is really strange. I have no idea how to troubleshoot this further except maybe trying a different hosting provider. I am using CloudFlare but I don't see how that would affect this. -
Why is it getting objects for province but not for city when the city is in the province
I have a Django project and the following models for a location that is attached to a business in this case and related as branches: class Province(models.Model): name = models.CharField(max_length=50) created_at = models.DateTimeField("date post was created", auto_now_add=True) updated_at = models.DateTimeField("date post was updated", auto_now=True) class Meta: verbose_name = "province" verbose_name_plural = "provinces" db_table = "provinces" ordering = ["name"] def __str__(self): return self.name class City(models.Model): name = models.CharField(max_length=50) province = models.ForeignKey(Province, on_delete=models.CASCADE, default=None, related_name="cities") created_at = models.DateTimeField("date post was created", auto_now_add=True) updated_at = models.DateTimeField("date post was updated", auto_now=True) class Meta: verbose_name = "city" verbose_name_plural = "cities" db_table = "cities" ordering = ["name"] unique_together = ('name', 'province',) def __str__(self): return self.name class Area(models.Model): name = models.CharField(max_length=50) city = models.ForeignKey(City, on_delete=models.CASCADE, default=None, related_name="areas") zip_code = models.CharField(max_length=30) created_at = models.DateTimeField("date post was created", auto_now_add=True) updated_at = models.DateTimeField("date post was updated", auto_now=True) class Meta: verbose_name = "area" verbose_name_plural = "areas" db_table = "areas" ordering = ["name"] def __str__(self): return self.name class Location(models.Model): name = models.CharField(max_length=50, null=True, blank=True) complex = models.CharField(max_length=50, null=True, blank=True) street = models.CharField(max_length=100, null=True, blank=True) additional = models.CharField(max_length=100, null=True, blank=True) area = models.ForeignKey(Area, on_delete=models.SET_DEFAULT, default=1, related_name="profile_locations") phone = models.CharField(max_length=15, null=True, blank=True) whatsapp = models.CharField(max_length=15, 
null=True, blank=True) fax = models.CharField(max_length=15, null=True, blank=True) mobile = … -
Why does my Celery task not start on Heroku?
I currently have an app deployed on Heroku. I've recently added celery with redis. The app works fine on my device but when I try to deploy on Heroku everything works fine up until the Celery task should be called. However nothing happens and I don't get any error logs from Heroku. Here is my code: settings.py: CELERY_BROKER_URL = env('REDIS_URL', 'CELERY_BROKER_URL')#REDIS_URL #redis://localhost:6379 CELERY_RESULT_BACKEND = env('REDIS_URL', 'CELERY_RESULT_BACKEND') #'redis://redis:6379' CELERY_CACHE_BACKEND = "default" CELERY_ACCEPT_CONTENT = ['json'] CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT_SERIALIZER = 'json' CELERY_TIMEZONE = 'UTC' views.py: task = transcribe_file_task.delay(file_path, audio_language, output_file_type, 'ai_transcribe_output', session_id) celery.py: from __future__ import absolute_import, unicode_literals import os from celery import Celery from django.conf import settings os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings") app = Celery("mysite") app.config_from_object("django.conf:settings", namespace="CELERY") app.autodiscover_tasks() docker-compose.yml: services: web: environment: ... - CELERY_BROKER_URL=redis://redis:6379/0 - CELERY_RESULT_BACKEND=redis://redis:6379/0 ... services: celery: environment: ... - CELERY_BROKER_URL=redis://redis:6379/0 - CELERY_RESULT_BACKEND=redis://redis:6379/0 heroku.yml: setup: addons: - plan: heroku-postgresql - plan: heroku-redis build: docker: web: Dockerfile celery: Dockerfile release: image: web command: - python manage.py collectstatic --noinput run: web: gunicorn mysite.wsgi celery: celery -A mysite worker --loglevel=info requirements.txt: environs[django]==9.5.0 I don't use a Procfile. To set up redis on Heroku I went read the steps in this tutorial and simply followed the url in the 'Heroku Data for Redis' line and … -
Managing all module imports in a single file in a Python/Django project, while avoiding circular imports
I’m working on a Django project that has numerous imported modules across various files, sometimes totaling up to 50 lines of imports per page. To reduce cluttering, I created a single file, "imports.py", to centralize my imports. Here’s a brief example of what it looks like: from datetime import date, datetime from typing import Any, Callable, Optional, Final, Type, TypeAlias, cast, Iterable from functools import wraps from cryptography.fernet import Fernet from requests.auth import HTTPBasicAuth from requests.models import Response from PIL import Image, UnidentifiedImageError from smtplib import SMTPRecipientsRefused from dotenv import load_dotenv from django.db import models, IntegrityError ... # a lot more __all__ = [ 'datetime', 'Any', 'Callable', 'Optional', 'Final', 'Type', 'TypeAlias', 'cast', 'Iterable', 'wraps', 'Fernet', 'HTTPBasicAuth', 'Response', 'Image', 'UnidentifiedImageError', 'SMTPRecipientsRefused', 'load_dotenv', 'models', 'IntegrityError',......] Then, in other files, I import everything from imports.py like this: from bi_app.py.imports import * While I know this might be unconventional, I find it more organized. This method works well for external modules and also for Linters, but when I try to include imports for my own project files, I often run into circular import issues. My question is: Is there a way to combine all imports from my own files, into a single file without … -
How to view Parallels VM Django webserver from Mac Sequoia host browser?
I've been running a Django webserver in an Ubuntu VM using Parallels (Standard Edition Version 20.0.1) on my M2 Mac. python manage.py runserver 0.0.0.0:8000 I then view the website in Chrome/Safari/Firefox on my Mac OS, pointing the browser to the IP of the VM at port 8000. http://ip.of.ubuntu.vm:8000 It's always worked great. But yesterday I upgraded my MacOS to Sequoia 15.0. Now I can't see the website served from the VM in the host browser anymore. I've tried switching VM > Configure > Hardware > Network > Source from Shared Network to Default Adapter, and also to WiFi, but those did not help. I can ping the VM's IP from the MacOS in Terminal and it finds it. But the browser cannot see the website. Chrome tells me: This site can’t be reached http://ip.of.ubuntu.vm:8000/donate/ is unreachable. ERR_ADDRESS_UNREACHABLE I've restarted everything. That hasn't helped. As a workaround, I can, of course, view the site in Ubuntu. That works properly. But I'd like to be able to see it in the host Mac also. Any suggestions? Thanks in advance! -
Nginx Displays Default Page Instead of Django App When Using uWSGI
I'm trying to deploy a Django application using Nginx and uWSGI on a Linux system (WSL2), but I'm encountering an issue where Nginx serves its default page instead of my Django application. I've confirmed that uWSGI is running and my Django app is functional by itself. Here's my current setup: Nginx Configuration: server { listen 80; server_name localhost; charset utf-8; location / { include /etc/nginx/uwsgi_params; uwsgi_pass unix:/var/run/uwsgi.webapppackage/master.sock; } } uWSGI Configuration (uwsgi.ini): [uwsgi] chdir = /mnt/c/Python/WonderPasNavi/wonderpasnavi home = /mnt/c/Python/WonderPasNavi/wonderpasnavi/.venv wsgi-file = /mnt/c/Python/WonderPasNavi/wonderpasnavi/wsgi.py module = wonderpasnavi.wsgi:application logto = /mnt/c/Python/WonderPasNavi/wonderpasnavi/uwsgi-my_app.log master = true vacuum = true pidfile = /var/run/uwsgi.webapppackage/master.pid socket = /var/run/uwsgi.webapppackage/master.sock processes = 2 die-on-term = true touch-reload = /mnt/c/Python/WonderPasNavi/reload.trigger lazy-apps = true Symptoms: Nginx serves the "Welcome to nginx!" page instead of the Django app. uWSGI logs indicate it's running without errors. No relevant errors in Nginx logs. I've tried restarting Nginx and uWSGI, checking the symlink in /etc/nginx/sites-enabled/, and clearing browser cache but no success. Could there be a configuration issue I'm overlooking, or is there a specific setting required for Nginx to properly forward requests to uWSGI? Any suggestions or guidance would be greatly appreciated! -
size=x' data error - 2 rows in data but 1 row heights
I get the following error when trying to upload an image and load it in a pdf with xhtml2pdf with a template, when viewing it in the admin it appears correct, but in the pdf it gives me the error ValueError at /multas/ <PmlTable@0x1F3B48B4320 2 rows x 1 cols> with cell(0,0) containing <PmlKeepInFrame at 0x1f3b48b4680> size=x data error - 2 rows in data but 1 row heights Request Method: GET Request URL: http://localhost:8080/multas/?id=33 Django Version: 3.2 Exception Type: ValueError Exception Value: <PmlTable@0x1F3B48B4320 2 rows x 1 cols> with cell(0,0) containing '<PmlKeepInFrame at 0x1f3b48b4680> size=x' data error - 2 rows in data but 1 row heights Exception Location: C:\Users\user\PycharmProjects\simertg\venv\Lib\site-packages\reportlab\platypus\tables.py, line 319, in __init__ Python Executable: C:\Users\user\PycharmProjects\simertg\venv\Scripts\python.exe Python Version: 3.12.2 enter image description here -
How do I maintain a connection between Docker containers while having one of them connected to the host machine?
I am currently deploying an application with Docker (version 27.2.1), using docker compose on debian 11. My docker-compose.yml file consists in defining 4 containers (app, api, db and ngninx). app and api are Django applications. db is a postgresql service. The whole is deployed on a machine that hosts an SMTP service on port 25. My Django app app needs to use this service to send emails to the users, but it also has to be able to connect with the other containers. For example, db is the host of the postgresql server that contains all the data managed by the Django models defined in app. I cannot find how my containers can be on the same network while having the app service in relation to the host machine. This prevents me from using the SMTP I need to use. From what I have read on this site and in the docs, it seems that it is not possible to have a service running on both network_mode=host and on a default bridge network. Here is the docker-compose.yml file. services: app: build: ./app depends_on: db: condition: service_healthy ports: - "8080:8080" volumes: ... extra_hosts: - "host.docker.internal:host-gateway" api: build: ./api depends_on: db: condition: … -
Meta Facebook Embedded Signup in Django Python
I work on a company that works with whatsapp chatbot. I created a page to make our clients signin with their facebook's business account to get permissions to send message on behalf of them. I'm implementing the embedded signup in a simple page just to get the access token. For test purposes, I created a simple backend in python to return the access token to the page. These are the codes I have: (I just replaced the ids with xxx,yyy and zzz because I think these are not needed here) Frontend <html> <head> <script src="https://cdn.jsdelivr.net/npm/axios/dist/axios.min.js"></script> <script> window.fbAsyncInit = function () { // JavaScript SDK configuration and setup FB.init({ appId: "xxx", // Facebook App ID cookie: true, // enable cookies xfbml: true, // parse social plugins on this page version: "v20.0", //Graph API version }); }; // Load the JavaScript SDK asynchronously (function (d, s, id) { var js, fjs = d.getElementsByTagName(s)[0]; if (d.getElementById(id)) return; js = d.createElement(s); js.id = id; js.src = "https://connect.facebook.net/en_US/sdk.js"; fjs.parentNode.insertBefore(js, fjs); })(document, "script", "facebook-jssdk"); // Facebook Login with JavaScript SDK function launchWhatsAppSignup() { // Launch Facebook login FB.login( function (response) { if (response.authResponse) { console.log("response", response); const code = response.authResponse.code; // Make a call to the … -
Can I have two libraries within one Azure repo and install them using requirements.txt?
I have a repo with two libraries committed. Basically, two folders containing different libraries within one single repo. And I want these libraries to be installed using requirements.txt. Is this possible? django-auth-adfs @ git+https://{username}:{PAT}@dev.azure.com/{organization}/{project}/_git/{repository}/django-auth-adfs@main#egg=django-auth-adfs django @ git+https://{username}:{PAT}@dev.azure.com/{organization}/{project}/_git/{repository}/django@main#egg=django I found this command to put within requirements.txt, but this throws errors while installing. -
How to filter Longitude/Latitude on Django PointField directly?
I want to filter directly on a lat/lng value on a GeoDjango PointField. e.g. geolocation__lat__lte=40.0 Typically in Django I can access Latitude/Longitude directly like geolocation.x or geolocation.y so I tried to filter like geolocation_y__lte=40.0. FieldError: Unsupported lookup 'y' for PointField or join on the field not permitted. was the result. ChatGPT wasn't able to help out and I didn't find a related question here on Stack Overflow. I will be thankful for any help. -
How do I deploy my containerized Nextjs and Django app to render?
I successfully containerized my Django and Nextjs apps with docker, these containers are running locally, but deploying to Render a hosting platform, I have an issue binding my service to a port, at least that's the log error message I am getting, Port scan timeout reached, no open ports detected. Bind your service to at least one port. If you don't need to receive traffic on any port, create a background worker instead. Error Log docker-compose.yaml version: "3.9" services: postgis: image: geonode/postgis:15.3 container_name: postgis4agro_watcher volumes: - ./init-db:/docker-entrypoint-initdb.d/ - postgres_data:/var/lib/postgresql/data/ environment: - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} - DB_NAME=${DB_NAME} - DB_USER=${DB_USER} - DB_PASSWORD=${DB_PASSWORD} ports: - "5432:5432" restart: always networks: - agrowatcher-api nginx: build: context: ./nginx dockerfile: Dockerfile container_name: nginx4agro_watcher restart: always depends_on: - awatcher-frontend ports: - 8080:8080 networks: - agrowatcher-api django: build: context: . dockerfile: Dockerfile container_name: django4agro_watcher env_file: - .env command: python manage.py runserver 0.0.0.0:8000 volumes: - ./:/home/geouser/agrowatcher ports: - "8000:8000" restart: always depends_on: - postgis networks: - agrowatcher-api awatcher-frontend: build: context: ./awatcher-frontend dockerfile: Dockerfile container_name: awatcherfrontend4agro_watcher volumes: - ./awatcher-frontend:/usr/src/app - /usr/src/app/node_modules ports: - "3000:3000" environment: - NODE_ENV=development # stdin_open: true # tty: true depends_on: - django networks: - agrowatcher-api networks: agrowatcher-api: driver: bridge volumes: postgres_data: I created a shell script to use the … -
Problem installing libpq in Docker for Django project
I'm trying to setup my Django project with docker and use postgres for db, and for that I need to install libpq: RUN apk update --no-cache && apk add --no-cache libpq But I got this error during run docker compose up --build: => ERROR [backend builder 3/7] RUN apk update --no-cache && apk add --no-cache postgresql-dev gcc python3-dev musl-dev nano 10.6s => ERROR [backend stage-1 6/14] RUN apk update --no-cache && apk add --no-cache libpq dcron 10.6s ------ > [backend builder 3/7] RUN apk update --no-cache && apk add --no-cache postgresql-dev gcc python3-dev musl-dev nano: 0.559 fetch https://dl-cdn.alpinelinux.org/alpine/v3.20/main/x86_64/APKINDEX.tar.gz 5.560 WARNING: fetching https://dl-cdn.alpinelinux.org/alpine/v3.20/main: temporary error (try again later) 5.560 fetch https://dl-cdn.alpinelinux.org/alpine/v3.20/community/x86_64/APKINDEX.tar.gz 10.57 WARNING: fetching https://dl-cdn.alpinelinux.org/alpine/v3.20/community: temporary error (try again later) 10.57 4 unavailable, 0 stale; 37 distinct packages available ------ ------ > [backend stage-1 6/14] RUN apk update --no-cache && apk add --no-cache libpq dcron: 0.555 fetch https://dl-cdn.alpinelinux.org/alpine/v3.20/main/x86_64/APKINDEX.tar.gz 5.560 fetch https://dl-cdn.alpinelinux.org/alpine/v3.20/community/x86_64/APKINDEX.tar.gz 5.560 WARNING: fetching https://dl-cdn.alpinelinux.org/alpine/v3.20/main: temporary error (try again later) 10.57 WARNING: fetching https://dl-cdn.alpinelinux.org/alpine/v3.20/community: temporary error (try again later) 10.57 4 unavailable, 0 stale; 37 distinct packages available ------ failed to solve: process "/bin/sh -c apk update --no-cache && apk add --no-cache libpq dcron" did not complete successfully: exit code: 4 … -
Django-redis - Connection Closed errors after timeout set to 0
I am using Nautobot (Django-based application) that utilises Redis and Celery for asynchronous task execution. Randomly, when accessing the jobs page, I get the below error stating the Redis server connection was closed. Originally, I had Redis running in a container and later migrated to Google MemoryStore as part of the troubleshooting. Both platforms have the same issue. I've tried updating the redis configuration on both to disable timeouts: timeout 0 As well as setting the configuration in Django but the issue continues. # CACHE Configuration CACHE_RETRY = Retry(ExponentialBackoff(), 3) CACHES = { "default": { "BACKEND": "django_prometheus.cache.backends.redis.RedisCache", "LOCATION": f"{os.getenv('CACHE_BROKER_URL', 'redis://127.0.0.1:6379')}/0", "TIMEOUT": 300, "OPTIONS": { "CLIENT_CLASS": "django_redis.client.DefaultClient", "PASSWORD": f"{os.getenv('CACHE_AUTH_STRING', '')}", "SOCKET_CONNECT_TIMEOUT": 10, "SOCKET_TIMEOUT": 0, "CONNECTION_POOL_KWARGS": { "ssl_cert_reqs": os.getenv('CACHE_CERT_REQUIRED', 'None'), "retry": CACHE_RETRY, "retry_on_timeout": True } }, } } CONTENT_TYPE_CACHE_TIMEOUT = int(os.getenv("CACHE_CONTENT_TYPE_TIMEOUT", "0")) REDIS_LOCK_TIMEOUT = int(os.getenv("CACHE_LOCK_TIMEOUT", "0")) CELERY_BROKER_URL = os.getenv("CACHE_BROKER_URL", "redis://127.0.0.1:6379") CELERY_TASK_SOFT_TIME_LIMIT = int(os.getenv("CACHE_TASK_SOFT_TIME_LIMIT", str(5 * 60))) CELERY_TASK_TIME_LIMIT = int(os.getenv("CACHE_TASK_TIME_LIMIT", str(10 * 60))) CELERY_BROKER_TRANSPORT_OPTIONS = { "ssl": { "ssl_cert_reqs": os.getenv('CACHE_CERT_REQUIRED', 'None') } } CELERY_RESULT_BACKEND_TRANSPORT_OPTIONS = { "ssl": { "ssl_cert_reqs": os.getenv('CACHE_CERT_REQUIRED', 'None') } } if os.getenv('CACHE_CERT_REQUIRED', 'None') == 'required': CACHES['default']['OPTIONS']['CONNECTION_POOL_KWARGS']['ssl_ca_certs'] = os.getenv('CACHE_CERT_LOCATION', '') CELERY_BROKER_TRANSPORT_OPTIONS['ssl']['ssl_ca_certs'] = os.getenv('CACHE_CERT_LOCATION', '') 
CELERY_RESULT_BACKEND_TRANSPORT_OPTIONS['ssl']['ssl_ca_certs'] = os.getenv('CACHE_CERT_LOCATION', '') # Construct the new URL for Celery schema, host = os.getenv("CACHE_BROKER_URL", "redis://127.0.0.1:6379").split('://') CELERY_BROKER_URL …