# NOTE(review): SOURCE is a flattened `git log -p` dump.  The Python payload of
# the patch is reconstructed below with conventional formatting; file-boundary
# markers show which repository file each section belongs to.

# --- django_proxmox_mikrotik/asgi.py -----------------------------------------
"""
ASGI config for django_proxmox_mikrotik project.

It exposes the ASGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/5.2/howto/deployment/asgi/
"""

import os

from django.core.asgi import get_asgi_application

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_proxmox_mikrotik.settings')

application = get_asgi_application()

# --- django_proxmox_mikrotik/configs.py --------------------------------------
import json
import logging
import os


def env_true(value):
    """Return True when *value* spells an affirmative env flag (case-insensitive)."""
    return value.lower() in ('yes', 'y', '1', 'on', 'true', 't')


def env_false(value):
    """Return True when *value* spells a negative env flag (case-insensitive)."""
    return value.lower() in ('no', 'n', '0', 'off', 'false', 'f')


PROXMOX_READONLY = env_true(os.environ.get('PROXMOX_READONLY', '0'))
MIKROTIK_READONLY = env_true(os.environ.get('MIKROTIK_READONLY', '0'))


class ProxmoxConfig:
    """Proxmox connection data and resource limits, all environment-driven."""
    HOST = os.environ.get('PROXMOX_HOST')
    USER = os.environ.get('PROXMOX_USER')
    PASS = os.environ.get('PROXMOX_PASS')
    NODE = os.environ.get('PROXMOX_NODE')
    READONLY = PROXMOX_READONLY
    MAX_MEM = int(os.environ.get('PROXMOX_MAX_MEM', 8192))    # MB
    MAX_DISK = int(os.environ.get('PROXMOX_MAX_DISK', 100))   # GB
    MAX_CORES = int(os.environ.get('PROXMOX_MAX_CORES', 8))
    CREATE_LXC_TIMEOUT = int(os.environ.get('PROXMOX_CREATE_LXC_TIMEOUT', 600))  # seconds
    DEFAULT_STORAGE = os.environ.get('PROXMOX_DEFAULT_STORAGE', 'local')


class MikrotikConfig:
    """MikroTik router connection data, environment-driven."""
    HOST = os.environ.get('MIKROTIK_HOST')
    USER = os.environ.get('MIKROTIK_USER')
    PASS = os.environ.get('MIKROTIK_PASS')
    # NOTE(review): default '192,172' looks like a comma list of leading
    # octets — confirm the intended format against the consumer of IP_8.
    IP_8 = os.environ.get('MIKROTIK_IP_8', '192,172')
    READONLY = MIKROTIK_READONLY
    ROOT_NETWORKS = os.environ.get('MIKROTIK_ROOT_NETWORKS', '192.168.1.0,192.168.107.0').split(',')


class DatabaseConfig:
    """Django database settings; empty env values fall back via `or`."""
    HOST = os.environ.get('DATABASE_HOST', 'localhost') or 'localhost'
    ENGINE = os.environ.get('DATABASE_ENGINE', 'sqlite3') or 'sqlite3'
    USER = os.environ.get('DATABASE_USER', '') or ''
    PASSWORD = os.environ.get('DATABASE_PASSWORD', '') or ''
    NAME = os.environ.get('DATABASE_NAME', '')
    PORT = os.environ.get('DATABASE_PORT', 5432) or 5432


class AuthLDAPConfig:
    """django-auth-ldap settings sourced from the environment."""
    HOST = os.environ.get('AUTH_LDAP_HOST')
    BIND_DN = os.environ.get('AUTH_LDAP_BIND_DN')
    BIND_PASSWORD = os.environ.get('AUTH_LDAP_BIND_PASSWORD')
    USER_BASE = os.environ.get('AUTH_LDAP_USER_BASE')
    USER_FILTER = os.environ.get('AUTH_LDAP_USER_FILTER')
    GROUP_SEARCH_BASE = os.environ.get('AUTH_LDAP_GROUP_SEARCH_BASE')
    GROUP_SEARCH_FILTER = os.environ.get('AUTH_LDAP_GROUP_SEARCH_FILTER')
    # BUGFIX: json.loads(None) raised TypeError when the variable was unset,
    # crashing before the missing-variable report below could run.  Parsing
    # 'null' makes an unset variable surface as None so it gets reported.
    USER_ATTR_MAP = json.loads(os.environ.get('AUTH_LDAP_USER_ATTR_MAP') or 'null')
    USER_FLAGS_BY_GROUP = json.loads(os.environ.get('AUTH_LDAP_USER_FLAGS_BY_GROUP') or 'null')
    FIND_GROUP_PERMS = os.environ.get('AUTH_LDAP_FIND_GROUP_PERMS')
    CACHE_GROUPS = os.environ.get('AUTH_LDAP_CACHE_GROUPS')
    GROUP_CACHE_TIMEOUT = os.environ.get('AUTH_LDAP_GROUP_CACHE_TIMEOUT')


# Fail fast when any required environment variable is unset.
# BUGFIX: the original compared attribute names against an env-style prefix
# ('PROXMOX_…'), but the class attributes are unprefixed ('HOST'), so the
# check never matched and missing variables went unreported.  Inspect every
# public upper-case attribute instead.
_missing = []
for _cfg_name in ('Proxmox', 'Mikrotik', 'Database', 'AuthLDAP'):
    _cls = globals()[_cfg_name + 'Config']
    for _key, _val in vars(_cls).items():
        if _key.isupper() and not _key.startswith('_') and _val is None:
            _missing.append(f'{_cfg_name}Config.{_key}')
if _missing:
    # BUGFIX: a backslash inside an f-string expression ("\n".join(...)) is a
    # SyntaxError before Python 3.12; build the joined text first.
    _details = '\n'.join(_missing)
    raise Exception(f'Missing environment variables: \n{_details}\n')

logging.debug(f'ProxmoxConfig: {ProxmoxConfig.__dict__}')
logging.debug(f'MikrotikConfig: {MikrotikConfig.__dict__}')
logging.debug(f'DatabaseConfig: {DatabaseConfig.__dict__}')
logging.debug(f'AuthLDAPConfig: {AuthLDAPConfig.__dict__}')

# --- django_proxmox_mikrotik/settings.py (head) ------------------------------
"""
For more information on this file, see
https://docs.djangoproject.com/en/5.2/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/5.2/ref/settings/
"""
import json
import os
from pathlib import Path

import ldap
from django_auth_ldap.config import GroupOfNamesType, LDAPSearch
from dotenv import dotenv_values, load_dotenv

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
load_dotenv(BASE_DIR / '.env', override=False)

import logging

# BUGFIX: an unknown LOG_LEVEL value made getattr raise AttributeError;
# fall back to DEBUG instead.
LOGLEVEL = getattr(logging, os.environ.get('LOG_LEVEL', 'DEBUG').upper(), logging.DEBUG)

logging.basicConfig(level=LOGLEVEL)

from django_proxmox_mikrotik.configs import (
    env_true,
    env_false,
    ProxmoxConfig,
    DatabaseConfig,
    AuthLDAPConfig,
    MikrotikConfig,
)

LOGIN_URL = '/frontend/login/'
# Fallback: default to an on-disk SQLite file when DATABASE_NAME is unset.
DatabaseConfig.NAME = DatabaseConfig.NAME or BASE_DIR / 'db.sqlite3'

# NOTE(review): hard-coded dev secret key — load from the environment before
# any production deployment.
SECRET_KEY = 'django-insecure-o$tw_(450z^cl%mq(h1&=jltu51mfnmiown&l^dinx+z-!nzem'

# SECURITY WARNING: don't run with debug turned on in production!
# --- django_proxmox_mikrotik/settings.py (continued) -------------------------

# NOTE(review): hard-coded debug mode — disable (or env-drive) for production.
DEBUG = True

ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', '127.0.0.1,localhost').strip().replace(' ', '').split(',')

INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django_middleware_global_request',
    'proxmox',
    'tasklogger',
    'mikrotik',
    'manager',
    'frontend',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'frontend.middleware.FrontendSessionMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django_middleware_global_request.middleware.GlobalRequestMiddleware',
]

ROOT_URLCONF = 'django_proxmox_mikrotik.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'django_proxmox_mikrotik.wsgi.application'

# Database
# https://docs.djangoproject.com/en/5.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.' + DatabaseConfig.ENGINE,
        'NAME': DatabaseConfig.NAME,
        'PASSWORD': DatabaseConfig.PASSWORD,
        'USER': DatabaseConfig.USER,
        'HOST': DatabaseConfig.HOST,
        'PORT': DatabaseConfig.PORT,
    }
}

# Password validation
# https://docs.djangoproject.com/en/5.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/5.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/5.2/howto/static-files/

STATIC_URL = 'static/'
STATIC_ROOT = str(BASE_DIR / 'static')

STATICFILES_DIRS = [
    BASE_DIR / "frontend" / "static",
]

# Default primary key field type
# https://docs.djangoproject.com/en/5.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'


AUTH_LDAP_SERVER_URI = AuthLDAPConfig.HOST
AUTH_LDAP_BIND_DN = AuthLDAPConfig.BIND_DN
AUTH_LDAP_BIND_PASSWORD = AuthLDAPConfig.BIND_PASSWORD

AUTH_LDAP_USER_SEARCH = LDAPSearch(
    AuthLDAPConfig.USER_BASE,
    ldap.SCOPE_SUBTREE,
    AuthLDAPConfig.USER_FILTER,
)

AUTH_LDAP_GROUP_SEARCH = LDAPSearch(
    AuthLDAPConfig.GROUP_SEARCH_BASE,
    ldap.SCOPE_SUBTREE,
    AuthLDAPConfig.GROUP_SEARCH_FILTER,
)

AUTH_LDAP_USER_ATTR_MAP = AuthLDAPConfig.USER_ATTR_MAP

AUTH_LDAP_USER_FLAGS_BY_GROUP = AuthLDAPConfig.USER_FLAGS_BY_GROUP

# BUGFIX: bool('false') is True — any non-empty env string enabled these
# flags.  Use the project's own env_true() (imported from configs at the top
# of settings.py); `or ''` guards the None case when the variable is unset.
AUTH_LDAP_FIND_GROUP_PERMS = env_true(AuthLDAPConfig.FIND_GROUP_PERMS or '')

AUTH_LDAP_CACHE_GROUPS = env_true(AuthLDAPConfig.CACHE_GROUPS or '')
# BUGFIX: django-auth-ldap expects an integer number of seconds, not the raw
# environment string; leave it unset (None) when no value is configured.
AUTH_LDAP_GROUP_CACHE_TIMEOUT = (
    int(AuthLDAPConfig.GROUP_CACHE_TIMEOUT)
    if AuthLDAPConfig.GROUP_CACHE_TIMEOUT
    else None
)
# --- django_proxmox_mikrotik/settings.py (tail) ------------------------------
AUTH_LDAP_GROUP_TYPE = GroupOfNamesType(name_attr="cn")
AUTHENTICATION_BACKENDS = [
    'django_auth_ldap.backend.LDAPBackend',
    'django.contrib.auth.backends.ModelBackend',
]

SESSION_COOKIE_NAME_FRONTEND = 'django_pm_mk_frontend_sessionid'

# --- django_proxmox_mikrotik/urls.py -----------------------------------------
"""
URL configuration for django_proxmox_mikrotik project.

The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/5.2/topics/http/urls/
"""
# FIX: the import originally preceded the docstring, which silently demoted it
# from module docstring to plain expression; docstring now comes first.
from django.contrib import admin
from django.shortcuts import redirect
from django.urls import include, path

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', lambda request: redirect('frontend/'), name='home'),
    path('frontend/', include('frontend.urls', namespace='frontend')),
    path('manager/', include('manager.urls', namespace='manager')),
    path('tasklogger/', include('tasklogger.urls', namespace='tasklogger')),
]

# --- django_proxmox_mikrotik/wsgi.py -----------------------------------------
"""
WSGI config for django_proxmox_mikrotik project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/5.2/howto/deployment/wsgi/
"""

import os

from django.core.wsgi import get_wsgi_application

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_proxmox_mikrotik.settings')

application = get_wsgi_application()

# --- frontend/admin.py -------------------------------------------------------
from django.contrib import admin

from .models import FAQ, UserProfile


@admin.register(UserProfile)
class UserProfileAdmin(admin.ModelAdmin):
    """Admin list/search configuration for LDAP-backed user profiles."""
    list_display = ('user', 'ldap_uid', 'user_group')
    search_fields = ('user__username', 'ldap_uid')
    list_filter = ('user__groups',)


@admin.register(FAQ)
class FAQAdmin(admin.ModelAdmin):
    """Admin configuration for FAQ entries, ordered by explicit rank."""
    list_display = ('title', 'order', 'created_at', 'updated_at')
    list_filter = ('created_at',)
    search_fields = ('title', 'content')
    list_editable = ('order',)
    ordering = ('order', 'title')

    fieldsets = (
        (None, {
            'fields': ('title', 'content', 'order')
        }),
    )

# --- frontend/apps.py --------------------------------------------------------
from django.apps import AppConfig


class FrontendConfig(AppConfig):
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'frontend'

    def ready(self):
        # Imported for its side effect of registering signal handlers.
        import frontend.signals  # noqa: F401

# --- frontend/forms.py -------------------------------------------------------
import re
from functools import cached_property

from django import forms
from django.core.exceptions import ValidationError

from django_proxmox_mikrotik.settings import ProxmoxConfig
from manager.models import CloneContainer, DevContainer
from mikrotik.models import DNSStatic, IPAddress, IPDHCPLease
from proxmox.models import Lxc

from .models import FAQ


def fk_to_hidden_input(formobj: forms.Form, instance, fieldname: str):
    """Replace *fieldname* on *formobj* with a read-only text field showing
    the current value of the related object (or empty when no instance)."""
    initial_value = str(getattr(instance, fieldname)) if instance else ''
    formobj.fields[fieldname] = forms.CharField(
        label=fieldname.title(),
        initial=initial_value,
        widget=forms.TextInput(
            attrs={'readonly': 'readonly', 'style': 'background-color: #eee;', 'class': 'form-control'}),
        required=False
    )


class DevContainerForm(forms.ModelForm):
    """Edit form for an existing DevContainer; cross-checks that the linked
    LXC, DHCP lease and static DNS entry all refer to the same address/MAC."""

    disksize = forms.IntegerField(
        required=False,
        min_value=1,
        max_value=100,
        help_text="Festplattengröße (GB) - kann nicht verkleinert werden"
    )

    cores = forms.IntegerField(
        required=False,
        min_value=1,
        help_text="Anzahl der CPU-Kerne"
    )

    memory = forms.CharField(
        required=False,
        help_text="Arbeitsspeicher in MB"
    )

    class Meta:
        model = DevContainer
        fields = ['lxc', 'lease', 'dns']

    @cached_property
    def dns_obj(self):
        return DNSStatic.objects.get(pk=self.cleaned_data['dns'])

    @cached_property
    def lease_obj(self):
        return IPDHCPLease.objects.get(pk=self.cleaned_data['lease'])

    @cached_property
    def lxc_obj(self):
        return Lxc.objects.get(pk=self.cleaned_data['lxc'])

    def _clean_dns(self):
        # DNS entry must point at the same IP as the DHCP lease.
        if self.dns_obj.address != self.lease_obj.address:
            raise ValidationError("DNS has not the same address as Lease. Please check your input.")
        return self.cleaned_data['dns']

    def _clean_lease(self):
        """Brauchts eigentlich nicht"""
        if self.lease_obj.address != self.dns_obj.address:
            raise ValidationError("Lease has not the same address as DNS. Please check your input.")
        return self.cleaned_data['lease']

    def _clean_lxc(self):
        # The container's NIC MAC must match the lease it claims to own.
        if self.lxc_obj.hwaddr != self.lease_obj.mac_address:
            raise ValidationError("LXC has not the same MAC address as Lease. Please check your input.")
        return self.cleaned_data['lxc']

    def clean(self):
        super().clean()
        self.cleaned_data['dns'] = self._clean_dns()
        self.cleaned_data['lease'] = self._clean_lease()
        self.cleaned_data['lxc'] = self._clean_lxc()
        return self.cleaned_data

    def clean_disksize(self):
        if self.cleaned_data['disksize'] and int(self.cleaned_data['disksize']) > int(ProxmoxConfig.MAX_DISK):
            raise ValidationError(
                f"Disksize > {ProxmoxConfig.MAX_DISK}GB not allowed. Please check your input.")
        return self.cleaned_data['disksize']

    def clean_memory(self):
        if self.cleaned_data['memory'] and int(self.cleaned_data['memory']) > ProxmoxConfig.MAX_MEM:
            raise ValidationError(f"Memory > {ProxmoxConfig.MAX_MEM}MB not allowed. Please check your input.")
        return self.cleaned_data['memory']

    def clean_cores(self):
        if self.cleaned_data['cores'] and int(self.cleaned_data['cores']) > ProxmoxConfig.MAX_CORES:
            raise ValidationError(f"Cores > {ProxmoxConfig.MAX_CORES} not allowed. Please check your input.")
        return self.cleaned_data['cores']

    def __init__(self, *args, user_profile=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.user_profile = user_profile
        instance = kwargs.get('instance')

        # External users may only pick leases/DNS inside their own networks.
        if user_profile and user_profile.is_external():
            ldap_uid = user_profile.ldap_uid
            ip_addresses = IPAddress.objects.filter(comment__icontains=f' {ldap_uid} ')
            networks = [ip.network for ip in ip_addresses]

            self.fields['lease'].queryset = self.fields['lease'].queryset.filter(
                address__regex=r'^(' + '|'.join([re.escape(net) for net in networks]) + r')'
            )

            # BUGFIX: `instance` is None when the form is used for creation;
            # the unguarded `instance.lease.address` raised AttributeError.
            if instance is not None:
                self.fields['dns'].queryset = self.fields['dns'].queryset.filter(
                    address=instance.lease.address
                )

        if instance and hasattr(instance, 'lxc') and instance.lxc:
            # Disk can only grow, so the current size becomes the minimum.
            self.fields['disksize'].initial = instance.lxc.disksize
            self.fields['disksize'].min_value = instance.lxc.disksize
            self.fields['cores'].initial = instance.lxc.cores
            self.fields['memory'].initial = instance.lxc.memory
            fk_to_hidden_input(self, instance, 'lxc')
            fk_to_hidden_input(self, instance, 'lease')
            fk_to_hidden_input(self, instance, 'dns')

    def save(self, commit=True):
        """Persist resource changes onto the linked LXC, then the container."""
        instance = super().save(commit=False)

        if hasattr(instance, 'lxc') and instance.lxc:
            lxc = instance.lxc

            if 'disksize' in self.cleaned_data and self.cleaned_data['disksize']:
                lxc.disksize = self.cleaned_data['disksize']

            if 'cores' in self.cleaned_data and self.cleaned_data['cores']:
                lxc.cores = self.cleaned_data['cores']

            if 'memory' in self.cleaned_data and self.cleaned_data['memory']:
                lxc.memory = self.cleaned_data['memory']

            lxc.save()

        if commit:
            instance.save()

        return instance


class CloneContainerForm(forms.ModelForm):
    """Form for cloning a container from either an existing VM or a template."""

    class Meta:
        model = CloneContainer
        fields = ['hostname', 'network', 'cores', 'memory', 'disksize', 'as_regexp', 'vm', 'template']

    def __init__(self, *args, user_profile=None, vm=None, template=None, hostname=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.user_profile = user_profile

        # Exactly one clone source may be chosen; pre-selecting one disables
        # the other field.
        if vm:
            self.fields['vm'].initial = vm[0].pk
            self.fields['template'].disabled = True
        elif template:
            self.fields['template'].initial = template[0].pk
            self.fields['vm'].disabled = True
        if hostname:
            self.fields['hostname'].initial = hostname
        # External users may only pick their own networks.
        if user_profile and user_profile.is_external():
            ldap_uid = user_profile.ldap_uid
            ip_addresses = IPAddress.objects.filter(comment__icontains=f' {ldap_uid} ')
            self.fields['network'].queryset = ip_addresses

    def clean_hostname(self):
        """Reject hostnames already taken by an LXC, a DNS record (literal or
        regexp) or a pending/running clone job."""
        hostname = self.cleaned_data.get('hostname')
        if not hostname:
            raise ValidationError("Der Hostname ist erforderlich.")

        if Lxc.objects.filter(hostname=hostname).exists():
            raise ValidationError(f"Ein LXC mit dem Hostnamen '{hostname}' existiert bereits.")

        if DNSStatic.objects.filter(name=hostname).exists():
            raise ValidationError(f"Ein DNS-Eintrag mit dem Namen '{hostname}' existiert bereits.")

        # Check the hostname against regexp-based DNS entries.
        # BUGFIX: the original called re.search(hostname + '$', dns.regexp),
        # i.e. used the hostname as the pattern and the stored regexp as the
        # subject — arguments reversed.  The intent is to test whether the
        # hostname matches the stored regexp.
        dns_with_regex = DNSStatic.objects.exclude(regexp='')
        for dns in dns_with_regex:
            if dns.regexp and re.search(dns.regexp, hostname):
                raise ValidationError(
                    f"Der Name '{hostname}' entspricht dem regulären Ausdruck '{dns.regexp}' eines existierenden DNS-Eintrags.")

        existing_clone_query = CloneContainer.objects.filter(hostname=hostname, status__in=('pending', 'running'))
        if self.instance.pk:
            existing_clone_query = existing_clone_query.exclude(pk=self.instance.pk)

        if existing_clone_query.exists():
            raise ValidationError(f"Ein CloneContainer mit dem Namen '{hostname}' existiert bereits.")

        return hostname

    def clean_disksize(self):
        disksize = self.cleaned_data.get('disksize')
        # CONSISTENCY FIX: enforce the configured maximum (as DevContainerForm
        # does) instead of a hard-coded 100 GB.
        if disksize and disksize > int(ProxmoxConfig.MAX_DISK):
            raise ValidationError(
                f"Die Festplattengröße darf maximal {ProxmoxConfig.MAX_DISK} GB betragen.")
        return disksize

    def clean(self):
        cleaned_data = super().clean()

        if not cleaned_data.get('template') and not cleaned_data.get('vm'):
            raise ValidationError("Es muss entweder ein Template oder eine VM ausgewählt werden.")

        return cleaned_data


class DNSStaticForm(forms.ModelForm):
    """Form for static DNS entries with optional container-driven IP fill."""

    # Optional: container selection for automatic IP assignment.
    container = forms.ModelChoiceField(
        queryset=DevContainer.objects.all(),
        required=False,
        empty_label="Select container (optional)",
        help_text="Select a container to automatically use its IP address"
    )

    # Manual IP input.
    address = forms.GenericIPAddressField(
        required=False,
        help_text="IP address for the DNS entry"
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Apply Bootstrap classes to the rendered widgets.
        for field in self.fields:
            if isinstance(self.fields[field].widget, forms.widgets.TextInput):
                self.fields[field].widget.attrs.update({'class': 'form-control'})
            elif isinstance(self.fields[field].widget, forms.widgets.Select):
                self.fields[field].widget.attrs.update({'class': 'form-select'})

        # Make the container field use Select2.
        self.fields['container'].widget.attrs.update({
            'class': 'form-select container-select',
            'data-placeholder': 'Search containers...',
        })
        # NOTE(review): the source dump is truncated at this point; any
        # remaining statements of __init__ were not visible and are omitted.
+ }) + + def clean(self): + cleaned_data = super().clean() + container = cleaned_data.get('container') + address = cleaned_data.get('address') + name = cleaned_data.get('name') + regexp = cleaned_data.get('regexp') + + # Either container or manual address must be provided + if not container and not address: + raise ValidationError("Either select a container or enter an IP address manually") + + # Either name or regexp must be provided (but not both) + if not name and not regexp: + raise ValidationError("Either DNS name or regexp must be provided") + + if name and regexp: + raise ValidationError("Provide either DNS name or regexp, not both") + + # If container is selected, use its IP address + if container: + try: + cleaned_data['address'] = container.lease.address + except AttributeError: + raise ValidationError("Selected container has no IP lease") + + # Check for existing DNS entries + if name: + existing = DNSStatic.objects.filter(name=name) + if self.instance.pk: + existing = existing.exclude(pk=self.instance.pk) + if existing.exists(): + raise ValidationError(f"DNS name '{name}' already exists") + + if regexp: + existing = DNSStatic.objects.filter(regexp=regexp) + if self.instance.pk: + existing = existing.exclude(pk=self.instance.pk) + if existing.exists(): + raise ValidationError(f"DNS regexp '{regexp}' already exists") + + return cleaned_data + + class Meta: + model = DNSStatic + fields = ['name', 'regexp', 'address', 'comment', 'container'] + widgets = { + 'name': forms.TextInput(attrs={'placeholder': 'e.g. server.example.com'}), + 'regexp': forms.TextInput(attrs={'placeholder': 'e.g. .*\\.test\\.com'}), + 'comment': forms.TextInput(attrs={'placeholder': 'Optional description'}), + } + help_texts = { + 'name': 'Specific DNS name (e.g. server.example.com)', + 'regexp': 'DNS regexp pattern (e.g. 
.*\\.test\\.com)', + 'comment': 'Optional description for this DNS entry' + } + + +class DNSSearchForm(forms.Form): + search = forms.CharField( + required=False, + widget=forms.TextInput(attrs={ + 'class': 'form-control', + 'placeholder': 'Search DNS entries...', + 'id': 'dns-search' + }) + ) + + entry_type = forms.ChoiceField( + choices=[ + ('', 'All types'), + ('name', 'Names only'), + ('regexp', 'Regexps only'), + ], + required=False, + widget=forms.Select(attrs={'class': 'form-select'}) + ) + + +class FAQForm(forms.ModelForm): + class Meta: + model = FAQ + fields = ['title', 'content', 'order'] + widgets = { + 'title': forms.TextInput(attrs={ + 'class': 'form-control', + 'placeholder': 'Enter FAQ question/title...' + }), + 'content': forms.Textarea(attrs={ + 'class': 'form-control', + 'rows': 10, + 'placeholder': 'Enter FAQ answer/content using Markdown syntax...\n\n# Heading\n## Subheading\n\n**Bold text**\n*Italic text*\n\n- List item 1\n- List item 2\n\n```python\ncode block\n```\n\n[Link text](http://example.com)', + 'style': 'font-family: monospace;' + }), + 'order': forms.NumberInput(attrs={ + 'class': 'form-control', + 'min': 0 + }) + } + help_texts = { + 'title': 'The FAQ question or title', + 'content': 'The detailed answer or content (supports Markdown formatting)', + 'order': 'Display order (lower numbers appear first)' + } diff --git a/frontend/middleware.py b/frontend/middleware.py new file mode 100644 index 0000000..5b9b65a --- /dev/null +++ b/frontend/middleware.py @@ -0,0 +1,15 @@ +from django.conf import settings + +class FrontendSessionMiddleware: + def __init__(self, get_response): + self.get_response = get_response + self.default_session_cookie_name = 'sessionid' + + def __call__(self, request): + if request.path.startswith('/frontend/'): + settings.SESSION_COOKIE_NAME = settings.SESSION_COOKIE_NAME_FRONTEND + else: + settings.SESSION_COOKIE_NAME = self.default_session_cookie_name + + response = self.get_response(request) + return response \ 
No newline at end of file diff --git a/frontend/migrations/0001_initial.py b/frontend/migrations/0001_initial.py new file mode 100644 index 0000000..4fa323e --- /dev/null +++ b/frontend/migrations/0001_initial.py @@ -0,0 +1,25 @@ +# Generated by Django 5.2.4 on 2025-07-10 09:30 + +import django.db.models.deletion +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ] + + operations = [ + migrations.CreateModel( + name='UserProfile', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('ldap_uid', models.CharField(max_length=100, unique=True)), + ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)), + ], + ), + ] diff --git a/frontend/migrations/0002_userprofile_settings.py b/frontend/migrations/0002_userprofile_settings.py new file mode 100644 index 0000000..d25960d --- /dev/null +++ b/frontend/migrations/0002_userprofile_settings.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-07-18 11:10 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('frontend', '0001_initial'), + ] + + operations = [ + migrations.AddField( + model_name='userprofile', + name='settings', + field=models.JSONField(default={'dashboard_view': 'list'}), + ), + ] diff --git a/frontend/migrations/0003_alter_userprofile_settings.py b/frontend/migrations/0003_alter_userprofile_settings.py new file mode 100644 index 0000000..fd49e4a --- /dev/null +++ b/frontend/migrations/0003_alter_userprofile_settings.py @@ -0,0 +1,19 @@ +# Generated by Django 5.2.4 on 2025-07-21 11:03 + +import frontend.models +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('frontend', 
'0002_userprofile_settings'), + ] + + operations = [ + migrations.AlterField( + model_name='userprofile', + name='settings', + field=models.JSONField(default=frontend.models.default_settings), + ), + ] diff --git a/frontend/migrations/__init__.py b/frontend/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/frontend/models.py b/frontend/models.py new file mode 100644 index 0000000..52be5a9 --- /dev/null +++ b/frontend/models.py @@ -0,0 +1,54 @@ +# frontend/models.py +from django.db import models +from django.contrib.auth.models import User +from lib.db import BaseModel, SearchableMixin +from django.db.models import Q + +def default_settings(): + return { + 'dashboard_view': 'list', + } +class UserProfile(models.Model): + user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile') + ldap_uid = models.CharField(max_length=100, unique=True) + settings = models.JSONField(default=default_settings) + + def __str__(self): + return f"{self.user.username} - {self.ldap_uid}" + + @property + def user_group(self): + if self.user.groups.filter(name='root').exists(): + return 'root' + elif self.user.groups.filter(name='intern').exists(): + return 'intern' + elif self.user.groups.filter(name='extern').exists(): + return 'extern' + return None + + def is_external(self): + return self.user_group == 'extern' + + def is_internal(self): + return self.user_group in ['intern', 'root'] + + def is_root(self): + return self.user_group == 'root' + + +class FAQ(BaseModel, SearchableMixin): + title = models.CharField(max_length=200, help_text='FAQ Question/Title') + content = models.TextField(help_text='FAQ Answer/Content') + order = models.IntegerField(default=0, help_text='Order for display (lower numbers first)') + + class Meta: + ordering = ['order', 'title'] + verbose_name = 'FAQ' + verbose_name_plural = 'FAQs' + + def __str__(self): + return self.title + + @classmethod + def term_filter(cls, search_string): + return 
Q(title__icontains=search_string) | Q(content__icontains=search_string) \ No newline at end of file diff --git a/frontend/permissions.py b/frontend/permissions.py new file mode 100644 index 0000000..3da4805 --- /dev/null +++ b/frontend/permissions.py @@ -0,0 +1,22 @@ +# frontend/permissions.py +from mikrotik.models import IPAddress + +def user_can_access_container(user_profile, container): + """Prüft, ob der Benutzer Zugriff auf den Container hat.""" + if user_profile.is_internal(): + return True + if user_profile.user.is_superuser: + return True + + # Für externe Benutzer: Nur Container im eigenen Netzwerkbereich + ldap_uid = user_profile.ldap_uid + ip_addresses = IPAddress.objects.filter(comment__icontains=f' {ldap_uid} ') + + # Prüfen, ob Container-Netzwerk mit einer der IP-Adressen übereinstimmt + networks = [ip.network for ip in ip_addresses] + + if hasattr(container, 'lease') and container.lease: + container_network = '.'.join(container.lease.address.split('.')[:3]) + return any(network.startswith(container_network) for network in networks) + + return False \ No newline at end of file diff --git a/frontend/signals.py b/frontend/signals.py new file mode 100644 index 0000000..1495119 --- /dev/null +++ b/frontend/signals.py @@ -0,0 +1,23 @@ +import logging +from django.db.models.signals import post_save +from django.dispatch import receiver +from django.contrib.auth.models import User + +from .models import UserProfile +from lib.ldap import Ldap +from lib.decorators import skip_signal + +@receiver(post_save, sender=User) +@skip_signal() +def post_save_user_profile(sender, instance: User, created, **kwargs): + with Ldap() as ldap: + try: + ldap.set_user_groups(instance, save_instance=True) + except Exception as e: + logging.exception(e) + return + try: + if created or not UserProfile.objects.filter(ldap_uid=instance.username).exists(): + UserProfile.objects.create(user=instance,ldap_uid=instance.username) + except Exception as e: + logging.exception("WTF???", 
str(e)) diff --git a/frontend/static/frontend/js/live-status.js b/frontend/static/frontend/js/live-status.js new file mode 100644 index 0000000..cd92b7a --- /dev/null +++ b/frontend/static/frontend/js/live-status.js @@ -0,0 +1,117 @@ +/** + * Live Status Monitor for Container Creation + * Provides real-time updates similar to Proxmox task viewer + */ + +class LiveStatusMonitor { + constructor(taskId, logContainer) { + this.taskId = taskId; + this.logContainer = logContainer; + this.pollInterval = 1000; // Poll every second + + this.init(); + } + + init() { + console.log(`Initializing live status for task: ${this.taskId}`); + + // Initial request with error handling + $.get(`/manager/task/${this.taskId}/status/`) + .done((data) => { + console.log('Initial status data received:', data); + this.updateDisplay(data); + + // Start polling only after successful initial request + this.startPolling(); + }) + .fail((xhr, status, error) => { + console.error('Failed to get initial status:', error, xhr.responseText); + this.showError(`Failed to connect to live status: ${error}`); + + // Still try to start polling in case it's a temporary issue + setTimeout(() => { + this.startPolling(); + }, 2000); + }); + } + + startPolling() { + setInterval(() => { + $.get(`/manager/task/${this.taskId}/status/`) + .done((data) => { + this.updateDisplay(data); + }) + .fail((xhr, status, error) => { + console.error('Polling error:', error); + // Don't show error for every polling failure + }); + }, this.pollInterval); + } + + showError(message) { + const logContent = this.logContainer.querySelector('.live-log-content'); + if (logContent) { + logContent.innerHTML = `
${message}
`; + } + } + + updateDisplay(data) { + const logContent = this.logContainer.querySelector('.live-log-content'); + if (!logContent) return; + + const logs = data.logs || {}; + const steps = logs.steps || []; + + if (steps.length === 0) { + logContent.innerHTML = '
Waiting for status updates...
'; + return; + } + + // Track existing entries to highlight new ones + const existingCount = logContent.querySelectorAll('.log-entry').length; + + logContent.innerHTML = ''; + + steps.forEach((step, index) => { + const entry = this.createLogEntry(step); + if (index >= existingCount) { + entry.classList.add('new'); + } + logContent.appendChild(entry); + }); + + // Auto-scroll to bottom + logContent.scrollTop = logContent.scrollHeight; + } + + createLogEntry(step) { + const entry = document.createElement('div'); + entry.className = 'log-entry'; + + const timestamp = new Date(step.timestamp).toLocaleTimeString(); + const message = step.message || ''; + + entry.innerHTML = ` +
${timestamp}
+
${this.escapeHtml(message)}
+ `; + + return entry; + } + + + escapeHtml(text) { + const div = document.createElement('div'); + div.textContent = text; + return div.innerHTML; + } +} + +let liveStatus = null; + +function initLiveStatus(taskId, containerId) { + const container = document.getElementById(containerId); + if (container && taskId) { + liveStatus = new LiveStatusMonitor(taskId, container); + } +} \ No newline at end of file diff --git a/frontend/static/frontend/js/proxmox-tasks.js b/frontend/static/frontend/js/proxmox-tasks.js new file mode 100644 index 0000000..710a619 --- /dev/null +++ b/frontend/static/frontend/js/proxmox-tasks.js @@ -0,0 +1,338 @@ +/** + * Proxmox Task Management - Generelle Funktionen für Task-Überwachung + */ + +try { + console.log('Proxmox Task Manager will start with ', taskMonitorOptions); +}catch(e){ + console.log('Proxmox Task Manager will start without options'); + const taskMonitorOptions = {}; +} + +class ProxmoxTaskManager { + constructor(options = taskMonitorOptions || {}) { + this.checkInterval = options.checkInterval || 2000; // 1 second default + this.maxTimeout = options.maxTimeout || 50000; // 30 seconds default + this.taskStatusUrl = options.taskStatusUrl || '/api/task-status/'; + this.activeMonitors = new Map(); // Track active monitoring processes + } + + /** + * Startet die Überwachung einer Proxmox Task + * @param {string} taskId - Die Proxmox Task ID + * @param {Object} callbacks - Callback-Funktionen für verschiedene Events + * @param {function} callbacks.onProgress - Wird bei jedem Status-Update aufgerufen + * @param {function} callbacks.onSuccess - Wird bei erfolgreichem Abschluss aufgerufen + * @param {function} callbacks.onError - Wird bei Fehlern aufgerufen + * @param {function} callbacks.onTimeout - Wird bei Timeout aufgerufen + * @param {Object} context - Zusätzlicher Kontext, der an Callbacks weitergegeben wird + */ + monitorTask(taskId, callbacks = {}, context = {}) { + // Stoppe eventuell bereits laufende Überwachung für diese 
Task + this.stopMonitoring(taskId); + + const monitor = { + taskId: taskId, + callbacks: callbacks, + context: context, + interval: null, + timeout: null, + startTime: Date.now() + }; + + // Starte Intervall für Status-Checks + monitor.interval = setInterval(() => { + this._checkTaskStatus(monitor); + }, this.checkInterval); + + // Safety timeout + monitor.timeout = setTimeout(() => { + this._handleTimeout(monitor); + }, this.maxTimeout); + + // Speichere Monitor für spätere Referenz + this.activeMonitors.set(taskId, monitor); + + // Ersten Check sofort ausführen + this._checkTaskStatus(monitor); + + return taskId; // Return taskId for reference + } + + /** + * Stoppt die Überwachung einer Task + * @param {string} taskId - Die Task ID + */ + stopMonitoring(taskId) { + const monitor = this.activeMonitors.get(taskId); + if (monitor) { + if (monitor.interval) clearInterval(monitor.interval); + if (monitor.timeout) clearTimeout(monitor.timeout); + this.activeMonitors.delete(taskId); + } + } + + /** + * Stoppt alle aktiven Überwachungen + */ + stopAllMonitoring() { + for (const [taskId] of this.activeMonitors) { + this.stopMonitoring(taskId); + } + } + + /** + * Prüft den Status einer Task + * @private + */ + _checkTaskStatus(monitor) { + $.get(this.taskStatusUrl, { 'task_id': monitor.taskId }) + .done((data) => { + if (data.status === 'success') { + const taskStatus = data.task_status; + const progress = data.progress || 0; + + // Progress callback + if (monitor.callbacks.onProgress) { + monitor.callbacks.onProgress(taskStatus, progress, data, monitor.context); + } + + // Check if task is completed + if (taskStatus === 'OK' || taskStatus === 'completed') { + this._handleSuccess(monitor, data); + } else if (taskStatus === 'stopped' || taskStatus === 'error') { + this._handleError(monitor, data); + } + // If still running, continue monitoring + } else { + this._handleError(monitor, data); + } + }) + .fail((xhr) => { + let errorMsg = 'Network error'; + try { + const 
response = JSON.parse(xhr.responseText); + errorMsg = response.message || errorMsg; + } catch (e) {} + + this._handleError(monitor, { message: errorMsg }); + }); + } + + /** + * Behandelt erfolgreichen Task-Abschluss + * @private + */ + _handleSuccess(monitor, data) { + this.stopMonitoring(monitor.taskId); + if (monitor.callbacks.onSuccess) { + monitor.callbacks.onSuccess(data, monitor.context); + } + } + + /** + * Behandelt Task-Fehler + * @private + */ + _handleError(monitor, data) { + this.stopMonitoring(monitor.taskId); + if (monitor.callbacks.onError) { + monitor.callbacks.onError(data, monitor.context); + } + } + + /** + * Behandelt Timeout + * @private + */ + _handleTimeout(monitor) { + this.stopMonitoring(monitor.taskId); + if (monitor.callbacks.onTimeout) { + monitor.callbacks.onTimeout(monitor.context); + } + } + + /** + * Gibt die Anzahl aktiver Überwachungen zurück + */ + getActiveMonitorCount() { + return this.activeMonitors.size; + } + + /** + * Gibt alle aktiven Task IDs zurück + */ + getActiveTaskIds() { + return Array.from(this.activeMonitors.keys()); + } +} + +/** + * Standard Proxmox Task Manager Instanz + * Kann global verwendet werden + */ +const proxmoxTaskManager = new ProxmoxTaskManager(); + +/** + * Hilfsfunktion für einfache Task-Überwachung mit Standard-Callbacks + * @param {string} taskId - Die Task ID + * @param {Object} element - DOM Element das visuell aktualisiert werden soll + * @param {string} action - Aktion die ausgeführt wird (für Logging) + * @param {function} onComplete - Optional: Custom completion callback + */ +function monitorProxmoxTask(taskId, element, action, onComplete = null) { + const $element = $(element); + + return proxmoxTaskManager.monitorTask(taskId, { + onProgress: (status, progress, data, context) => { + console.log(`Task ${taskId} progress: ${status} (${progress}%)`); + + // Update tooltip with progress + if (progress > 0) { + $element.attr('title', `${action}: ${progress}%`); + } + }, + + onSuccess: (data, 
context) => { + console.log(`Task ${taskId} completed successfully`); + $element.removeClass('pending error'); + + if (onComplete) { + onComplete(true, data, context); + } else { + // Standard success handling + $element.attr('title', `${action} completed successfully`); + } + }, + + onError: (data, context) => { + console.error(`Task ${taskId} failed:`, data.message); + $element.removeClass('pending').addClass('error'); + $element.attr('title', `${action} failed: ${data.message}`); + + if (onComplete) { + onComplete(false, data, context); + } + }, + + onTimeout: (context) => { + console.warn(`Task ${taskId} monitoring timed out`); + $element.removeClass('pending').addClass('error'); + $element.attr('title', `${action} timed out`); + + if (onComplete) { + onComplete(false, { message: 'Timeout' }, context); + } + } + }, { + element: element, + action: action, + taskId: taskId + }); +} + +/** + * Erweiterte Task-Überwachung mit Live-Log-Container (für Container-Erstellung etc.) + * @param {string} taskId - Die Task ID + * @param {string} logContainerId - ID des Log-Containers + * @param {function} onComplete - Callback bei Completion + */ +function monitorProxmoxTaskWithLiveLog(taskId, logContainerId, onComplete = null) { + const logContainer = document.getElementById(logContainerId); + const logContent = logContainer?.querySelector('.live-log-content'); + + if (!logContainer || !logContent) { + console.error('Log container not found:', logContainerId); + return; + } + + // Clear existing content + logContent.innerHTML = '
Monitoring task...
'; + + return proxmoxTaskManager.monitorTask(taskId, { + onProgress: (status, progress, data, context) => { + const timestamp = new Date().toLocaleTimeString(); + const progressText = progress > 0 ? ` (${progress}%)` : ''; + + const logEntry = document.createElement('div'); + logEntry.className = 'log-entry new'; + logEntry.innerHTML = ` + ${timestamp} + INFO + Task ${status}${progressText} + `; + + // Remove loading message if present + const loading = logContent.querySelector('.loading'); + if (loading) loading.remove(); + + logContent.appendChild(logEntry); + logContent.scrollTop = logContent.scrollHeight; + + // Remove 'new' class after animation + setTimeout(() => logEntry.classList.remove('new'), 500); + }, + + onSuccess: (data, context) => { + const timestamp = new Date().toLocaleTimeString(); + const logEntry = document.createElement('div'); + logEntry.className = 'log-entry new'; + logEntry.innerHTML = ` + ${timestamp} + + Task completed successfully + `; + + logContent.appendChild(logEntry); + logContent.scrollTop = logContent.scrollHeight; + + if (onComplete) { + setTimeout(() => onComplete(true, data, context), 1000); + } + }, + + onError: (data, context) => { + const timestamp = new Date().toLocaleTimeString(); + const logEntry = document.createElement('div'); + logEntry.className = 'log-entry new'; + logEntry.innerHTML = ` + ${timestamp} + + Task failed: ${data.message} + `; + + logContent.appendChild(logEntry); + logContent.scrollTop = logContent.scrollHeight; + + if (onComplete) { + setTimeout(() => onComplete(false, data, context), 1000); + } + }, + + onTimeout: (context) => { + const timestamp = new Date().toLocaleTimeString(); + const logEntry = document.createElement('div'); + logEntry.className = 'log-entry new'; + logEntry.innerHTML = ` + ${timestamp} + + Task monitoring timed out + `; + + logContent.appendChild(logEntry); + logContent.scrollTop = logContent.scrollHeight; + + if (onComplete) { + setTimeout(() => onComplete(false, { message: 
'Timeout' }, context), 1000); + } + } + }, { + taskId: taskId, + logContainerId: logContainerId + }); +} + +// Cleanup bei Seitenwechsel +$(window).on('beforeunload', function() { + proxmoxTaskManager.stopAllMonitoring(); +}); \ No newline at end of file diff --git a/frontend/templates/frontend/base.html b/frontend/templates/frontend/base.html new file mode 100644 index 0000000..6ee63ea --- /dev/null +++ b/frontend/templates/frontend/base.html @@ -0,0 +1,99 @@ + +{% load static %} + + + + + + Container Management{% block title %}{% endblock %} + + + + + + + + {% block extra_head %}{% endblock %} + + + + +
+ {% if messages %} + {% for message in messages %} + + {% endfor %} + {% endif %} + + {% block content %}{% endblock %} +
+ +{% block extra_js %}{% endblock %} + + \ No newline at end of file diff --git a/frontend/templates/frontend/container_details.html b/frontend/templates/frontend/container_details.html new file mode 100644 index 0000000..a7b2007 --- /dev/null +++ b/frontend/templates/frontend/container_details.html @@ -0,0 +1,48 @@ + +{% extends 'frontend/base.html' %} + +{% block title %} - Container Details{% endblock %} + +{% block content %} + {% url 'frontend:edit_container' as editurl %} + {% url 'frontend:delete_container' as deleteurl %} +
+

Container: {{ container.name }}

+
+ Edit + Delete +
+
+ +
+
+
Container details
+
+
+
+
+

Name: {{ container.name }}

+

IP-Adresse: {{ container.address }}

+

Hostname: {{ container.hostname }}

+

MAC-Adresse: {{ container.hwaddr }}

+
+
+

Status: {{ container.status }}

+

Speicher: {{ container.lxc.memory }} MB

+

CPU-Kerne: {{ container.lxc.cores }}

+

Festplatte: {{ container.lxc.disksize }} GB

+
+
+
+
+ +
+
+
DNS-Settings
+
+
+

DNS-Name: {{ container.dns.name|default:"--" }}

+

DNS-Regex: {{ container.dns.regexp|default:"--" }}

+
+
+{% endblock %} \ No newline at end of file diff --git a/frontend/templates/frontend/create_container.html b/frontend/templates/frontend/create_container.html new file mode 100644 index 0000000..6b1ac39 --- /dev/null +++ b/frontend/templates/frontend/create_container.html @@ -0,0 +1,342 @@ +{% extends 'frontend/base.html' %} +{% load static %} + +{% block title %} - Create Dev Container{% endblock %} + +{% block extra_head %} + {{ block.super }} + +{% endblock %} + +{% block content %} + +
+ +
+
+
+

Create Dev Container

+
+
+
+ {% csrf_token %} + + +
+
+ + {{ form.hostname }} + {% if form.hostname.help_text %} +
{{ form.hostname.help_text }}
+ {% endif %} + {% if form.hostname.errors %} +
+ {% for error in form.hostname.errors %} +
{{ error }}
+ {% endfor %} +
+ {% endif %} +
+
+ + {{ form.as_regexp }} +
+
+ +
+
+ + + {{ form.vm }} + {% if form.vm.help_text %} +
{{ form.vm.help_text }}
+ {% endif %} + {% if form.vm.errors %} +
+ {% for error in form.vm.errors %} +
{{ error }}
+ {% endfor %} +
+ {% endif %} + +
+ {% if not request.GET.clone_lxc %} +
+ +
+
+ + {{ form.template }} + {% if form.template.help_text %} +
{{ form.template.help_text }}
+ {% endif %} + {% if form.template.errors %} +
+ {% for error in form.template.errors %} +
{{ error }}
+ {% endfor %} +
+ {% endif %} +
+ {% endif %} +
+ + +
+ + {{ form.network }} + {% if form.network.help_text %} +
{{ form.network.help_text }}
+ {% endif %} + {% if form.network.errors %} +
+ {% for error in form.network.errors %} +
{{ error }}
+ {% endfor %} +
+ {% endif %} +
+ + +
+
+ + {{ form.cores }} + {% if form.cores.help_text %} +
{{ form.cores.help_text }}
+ {% endif %} + {% if form.cores.errors %} +
+ {% for error in form.cores.errors %} +
{{ error }}
+ {% endfor %} +
+ {% endif %} +
+
+ + {{ form.memory }} + {% if form.memory.help_text %} +
{{ form.memory.help_text }}
+ {% endif %} + {% if form.memory.errors %} +
+ {% for error in form.memory.errors %} +
{{ error }}
+ {% endfor %} +
+ {% endif %} +
+
+ + {{ form.disksize }} + {% if form.disksize.help_text %} +
{{ form.disksize.help_text }}
+ {% endif %} + {% if form.disksize.errors %} +
+ {% for error in form.disksize.errors %} +
{{ error }}
+ {% endfor %} +
+ {% endif %} +
+
+ + + {% if form.description %} +
+ + {{ form.description }} + {% if form.description.help_text %} +
{{ form.description.help_text }}
+ {% endif %} + {% if form.description.errors %} +
+ {% for error in form.description.errors %} +
{{ error }}
+ {% endfor %} +
+ {% endif %} +
+ {% endif %} + + {% for field in form %} + {% if field.name != 'name' and field.name != 'hostname' and field.name != 'as_regexp' and field.name != 'template' and field.name != 'vm' and field.name != 'network' and field.name != 'cores' and field.name != 'memory' and field.name != 'disksize' and field.name != 'description' %} +
+ + {{ field }} + {% if field.help_text %} +
{{ field.help_text }}
+ {% endif %} + {% if field.errors %} +
+ {% for error in field.errors %} +
{{ error }}
+ {% endfor %} +
+ {% endif %} +
+ {% endif %} + {% endfor %} + +
+ Cancel + +
+
+ +
+
+
+
+ + + +{% endblock %} + +{% block extra_js %} + + + + +{% endblock %} \ No newline at end of file diff --git a/frontend/templates/frontend/dashboard.html b/frontend/templates/frontend/dashboard.html new file mode 100644 index 0000000..49f522e --- /dev/null +++ b/frontend/templates/frontend/dashboard.html @@ -0,0 +1,387 @@ +{% extends 'frontend/base.html' %} + +{% block title %} - Dashboard{% endblock %} + +{% block content %} +
+
+

+ Dev Container +

+ {% if request.user.is_staff %} +
+ Create Dev Container + {% if default_template %} + Create from default Template + {% endif %} +
+ {% endif %} +
+ {% url 'frontend:dashboard' as search_action_url %} + {% include 'frontend/includes/pagination_snippet.html' %} + +
+ {% for container in page_obj %} +
+
+
+
+ +
+ +
+
+ +
+ +
{{ container.vmid }} {{ container.name }}
+
+
+
+
Hostname: {{ container.hostname }} +
+
IP-Address: {{ container.address }} +
+
Root + Disk: {{ container.lxc.disksize }} GB +
+
Memory: {{ container.lxc.memory }} + MB +
+
Cores: {{ container.lxc.cores }} +
+
+ +
+
+ {% empty %} +
+
+ {% if request.GET.search %} + No Container for "{{ request.GET.search }}". + + {% else %} + No Containers found + {% endif %} +
+
+ {% endfor %} +
+ +
+
+
+ + + + + + + + + + + + + + + {% for container in page_obj %} + + + + + + + + + + + {% empty %} + + + + {% endfor %} + +
StatiNameHostnameIP-AddressRoot DiskMemoryCoresAktionen
+
+ +
+ +
+ +
+ +
+
+
{{ container.vmid }} {{ container.name }}{{ container.hostname }}{{ container.address }}{{ container.lxc.disksize }} GB{{ container.lxc.memory }} MB{{ container.lxc.cores }} + +
+ {% if request.GET.search %} + No Container for "{{ request.GET.search }}". + + {% else %} + No Containers found + {% endif %} +
+
+
+
+ + {% include 'frontend/includes/pagination_snippet.html' %} + + + +{% endblock %} \ No newline at end of file diff --git a/frontend/templates/frontend/delete_container.html b/frontend/templates/frontend/delete_container.html new file mode 100644 index 0000000..41da4b4 --- /dev/null +++ b/frontend/templates/frontend/delete_container.html @@ -0,0 +1,138 @@ + +{% extends 'frontend/base.html' %} + +{% block title %} - Container löschen{% endblock %} + +{% block content %} +
+
+

Container löschen

+
+
+

Sind Sie sicher, dass Sie den Container {{ container.name }} löschen möchten?

+

Diese Aktion kann nicht rückgängig gemacht werden!

+ +
+ {% csrf_token %} + + + Abbrechen +
+
+
+ + + +{% endblock %} + +{% block extra_js %} + +{% endblock %} \ No newline at end of file diff --git a/frontend/templates/frontend/dns_delete.html b/frontend/templates/frontend/dns_delete.html new file mode 100644 index 0000000..259f247 --- /dev/null +++ b/frontend/templates/frontend/dns_delete.html @@ -0,0 +1,75 @@ +{% extends 'frontend/base.html' %} + +{% block title %} - Delete DNS Entry{% endblock %} + +{% block content %} +
+
+
+
+
+

+ + Delete DNS Entry +

+
+
+
+ Warning! This action cannot be undone. +
+ +

Are you sure you want to delete the following DNS entry?

+ +
+
+
+
Type:
+
+ {% if dns_entry.name %} + DNS Name + {% else %} + RegExp Pattern + {% endif %} +
+ +
+ {% if dns_entry.name %}Name:{% else %}Pattern:{% endif %} +
+
+ {% if dns_entry.name %} + {{ dns_entry.name }} + {% else %} + {{ dns_entry.regexp }} + {% endif %} +
+ +
IP Address:
+
+ {{ dns_entry.address }} +
+ + {% if dns_entry.comment %} +
Comment:
+
{{ dns_entry.comment }}
+ {% endif %} +
+
+
+ +
+ {% csrf_token %} +
+ + Cancel + + +
+
+
+
+
+
+
+{% endblock %} \ No newline at end of file diff --git a/frontend/templates/frontend/dns_form.html b/frontend/templates/frontend/dns_form.html new file mode 100644 index 0000000..35ddb35 --- /dev/null +++ b/frontend/templates/frontend/dns_form.html @@ -0,0 +1,266 @@ +{% extends 'frontend/base.html' %} +{% load static %} + +{% block title %} - {{ title }}{% endblock %} + +{% block extra_head %} + {{ block.super }} + + + +{% endblock %} + +{% block content %} +
+
+
+
+
+

+ + {{ title }} +

+
+
+
+ {% csrf_token %} + + +
+
IP Address
+ +
+ + {{ form.container }} + {% if form.container.help_text %} +
{{ form.container.help_text }}
+ {% endif %} + {% if form.container.errors %} +
+ {% for error in form.container.errors %} +
{{ error }}
+ {% endfor %} +
+ {% endif %} +
+ +
+ OR +
+ +
+ + {{ form.address }} + {% if form.address.help_text %} +
{{ form.address.help_text }}
+ {% endif %} + {% if form.address.errors %} +
+ {% for error in form.address.errors %} +
{{ error }}
+ {% endfor %} +
+ {% endif %} +
+
+ + +
+
DNS Configuration
+ +
+ Choose ONE of the following: +
+ +
+ + {{ form.name }} + {% if form.name.help_text %} +
{{ form.name.help_text }}
+ {% endif %} + {% if form.name.errors %} +
+ {% for error in form.name.errors %} +
{{ error }}
+ {% endfor %} +
+ {% endif %} +
+ +
+ OR +
+ +
+ + {{ form.regexp }} + {% if form.regexp.help_text %} +
{{ form.regexp.help_text }}
+ {% endif %} + {% if form.regexp.errors %} +
+ {% for error in form.regexp.errors %} +
{{ error }}
+ {% endfor %} +
+ {% endif %} +
+
+ + +
+
Additional Information
+ +
+ + {{ form.comment }} + {% if form.comment.help_text %} +
{{ form.comment.help_text }}
+ {% endif %} + {% if form.comment.errors %} +
+ {% for error in form.comment.errors %} +
{{ error }}
+ {% endfor %} +
+ {% endif %} +
+
+ + + {% if form.non_field_errors %} +
+ Please correct the following errors: +
    + {% for error in form.non_field_errors %} +
  • {{ error }}
  • + {% endfor %} +
+
+ {% endif %} + + +
+ + Cancel + + +
+
+
+
+
+
+
+{% endblock %} + +{% block extra_js %} + + + + + +{% endblock %} \ No newline at end of file diff --git a/frontend/templates/frontend/dns_list.html b/frontend/templates/frontend/dns_list.html new file mode 100644 index 0000000..dfac6c0 --- /dev/null +++ b/frontend/templates/frontend/dns_list.html @@ -0,0 +1,144 @@ +{% extends 'frontend/base.html' %} +{% load static %} + +{% block title %} - DNS Management{% endblock %} + +{% block extra_head %} + {{ block.super }} + +{% endblock %} + +{% block content %} +
+
+

+ DNS Management +

+ + Add DNS Entry + +
+ + {% include 'frontend/includes/dns_pagination_snippet.html' %} + + +
+
+ {% if page_obj.object_list %} +
+ + + + + + + + + + + + + {% for dns in page_obj %} + + + + + + + + + {% endfor %} + +
TypeDNS Name/PatternIP AddressCommentCreatedActions
+ {% if dns.name %} + Name + {% else %} + RegExp + {% endif %} + + {% if dns.name %} + {{ dns.name }} + {% else %} + {{ dns.regexp }} + {% endif %} + + {{ dns.address }} + + {{ dns.comment|default:"-" }} + + + {% if dns.created_at %} + {{ dns.created_at|date:"M d, Y" }} + {% else %} + - + {% endif %} + + + {% if dns.pk not in dev_container_dns %} + + {% endif %} +
+
+ + + {% else %} +
+ +

No DNS Entries Found

+

+ {% if request.GET.search %} + No DNS entries match your search criteria. + {% else %} + Start by creating your first DNS entry. + {% endif %} +

+ {% if not request.GET.search %} + + Create First DNS Entry + + {% endif %} +
+ {% endif %} +
+
+ + {% include 'frontend/includes/dns_pagination_snippet.html' %} + +
+{% endblock %} + +{% block extra_js %} + +{% endblock %} \ No newline at end of file diff --git a/frontend/templates/frontend/edit_container.html b/frontend/templates/frontend/edit_container.html new file mode 100644 index 0000000..355a3d3 --- /dev/null +++ b/frontend/templates/frontend/edit_container.html @@ -0,0 +1,69 @@ + +{% extends 'frontend/base.html' %} +{% load static %} + +{% block title %} - Edit Container{% endblock %} + +{% block extra_head %} + {{ block.super }} +{% endblock %} + +{% block content %} +
+
+
+
+

Edit {{ container.name }}

+
+ +
+
+
+
+ {% csrf_token %} + + +
+ {% for field in form %} +
+ + + {{ field }} + {% if field.help_text %} +
{{ field.help_text }}
+ {% endif %} + {% if field.errors %} +
+ {% for error in field.errors %} + {{ error }} + {% endfor %} +
+ {% endif %} +
+ {% endfor %} +
+ +
+ + Abbrechen +
+
+
+
+{% endblock %} + +{% block extra_js %} + +{% endblock %} \ No newline at end of file diff --git a/frontend/templates/frontend/faq_delete.html b/frontend/templates/frontend/faq_delete.html new file mode 100644 index 0000000..7b73e0a --- /dev/null +++ b/frontend/templates/frontend/faq_delete.html @@ -0,0 +1,52 @@ +{% extends 'frontend/base.html' %} +{% load markdown_filters %} + +{% block title %} - Delete FAQ Entry{% endblock %} + +{% block content %} +
+
+

+ Delete FAQ Entry +

+ + Back to FAQ List + +
+ +
+
+
+
Confirm Deletion
+

Are you sure you want to delete this FAQ entry? This action cannot be undone.

+
+ +
+
FAQ Entry Details:
+
+
Title:
+

{{ faq.title }}

+ +
Content:
+
{{ faq.content|markdown }}
+ +
Order:
+

{{ faq.order }}

+
+
+ +
+ {% csrf_token %} +
+ + Cancel + + +
+
+
+
+
+{% endblock %} \ No newline at end of file diff --git a/frontend/templates/frontend/faq_form.html b/frontend/templates/frontend/faq_form.html new file mode 100644 index 0000000..5ec25c9 --- /dev/null +++ b/frontend/templates/frontend/faq_form.html @@ -0,0 +1,92 @@ +{% extends 'frontend/base.html' %} + +{% block title %} - {{ title }}{% endblock %} + +{% block content %} +
+
+

+ {{ title }} +

+ + Back to FAQ List + +
+ +
+
+
+ {% csrf_token %} + +
+ + {{ form.title }} + {% if form.title.help_text %} +
{{ form.title.help_text }}
+ {% endif %} + {% if form.title.errors %} +
+ {% for error in form.title.errors %} +
{{ error }}
+ {% endfor %} +
+ {% endif %} +
+ +
+ + {{ form.content }} + {% if form.content.help_text %} +
{{ form.content.help_text }}
+ {% endif %} + {% if form.content.errors %} +
+ {% for error in form.content.errors %} +
{{ error }}
+ {% endfor %} +
+ {% endif %} +
+ +
+ + {{ form.order }} + {% if form.order.help_text %} +
{{ form.order.help_text }}
+ {% endif %} + {% if form.order.errors %} +
+ {% for error in form.order.errors %} +
{{ error }}
+ {% endfor %} +
+ {% endif %} +
+ + {% if form.non_field_errors %} +
+ {% for error in form.non_field_errors %} +
{{ error }}
+ {% endfor %} +
+ {% endif %} + +
+ + Cancel + + +
+
+
+
+
+{% endblock %} \ No newline at end of file diff --git a/frontend/templates/frontend/faq_list.html b/frontend/templates/frontend/faq_list.html new file mode 100644 index 0000000..2b5c5ef --- /dev/null +++ b/frontend/templates/frontend/faq_list.html @@ -0,0 +1,153 @@ +{% extends 'frontend/base.html' %} +{% load markdown_filters %} + +{% block title %} - FAQ{% endblock %} + +{% block extra_head %} + {{ block.super }} + +{% endblock %} + +{% block content %} +
+
+

+ FAQ - Frequently Asked Questions +

+ + Add FAQ Entry + +
+ + +
+
+
+ + +
+
+
+ {% if faqs %} +
+ {% for faq in faqs %} +
+
+
+
+
{{ faq.title }}
+
+
+ + +
+
+
+
+ {{ faq.content|markdown }} +
+
+ {% endfor %} +
+ {% else %} +
+ +

No FAQ Entries Available

+

+ FAQ entries will appear here once they are added by an administrator. +

+
+ {% endif %} +
+{% endblock %} + +{% block extra_js %} + +{% endblock %} \ No newline at end of file diff --git a/frontend/templates/frontend/includes/dns_pagination_snippet.html b/frontend/templates/frontend/includes/dns_pagination_snippet.html new file mode 100644 index 0000000..ef16276 --- /dev/null +++ b/frontend/templates/frontend/includes/dns_pagination_snippet.html @@ -0,0 +1,81 @@ + + \ No newline at end of file diff --git a/frontend/templates/frontend/includes/pagination_snippet.html b/frontend/templates/frontend/includes/pagination_snippet.html new file mode 100644 index 0000000..383de96 --- /dev/null +++ b/frontend/templates/frontend/includes/pagination_snippet.html @@ -0,0 +1,95 @@ + + \ No newline at end of file diff --git a/frontend/templates/frontend/login.html b/frontend/templates/frontend/login.html new file mode 100644 index 0000000..618bff1 --- /dev/null +++ b/frontend/templates/frontend/login.html @@ -0,0 +1,30 @@ + +{% extends 'frontend/base.html' %} + +{% block title %} - Login{% endblock %} + +{% block content %} +
+
+
+
+

Anmelden

+
+
+
+ {% csrf_token %} +
+ + +
+
+ + +
+ +
+
+
+
+
+{% endblock %} \ No newline at end of file diff --git a/frontend/templates/frontend/lxc_console.html b/frontend/templates/frontend/lxc_console.html new file mode 100644 index 0000000..ce46eab --- /dev/null +++ b/frontend/templates/frontend/lxc_console.html @@ -0,0 +1,142 @@ +{% extends 'frontend/base.html' %} +{% load static %} + +{% block title %} - Console: {{ container.name }}{% endblock %} + +{% block extra_head %} + {{ block.super }} + +{% endblock %} + +{% block content %} +
+
+
+
+

+ + Console: {{ container.name }} + ({{ container.address }}) +

+ + Back to Dashboard + +
+ +
+
Console Access
+

+ This opens a direct console connection to the LXC container {{ container.name }} (VM-ID: {{ container.lxc.vmid }}). + The console connection is handled through the Proxmox web interface. +

+
+ +
+
+
+ + {{ container.hostname }} | VM-ID: {{ container.lxc.vmid }} | IP: {{ container.address }} +
+
+ + +
+
+ +
+ +
+
+
Console Help
+
    +
  • This console provides direct terminal access to your LXC container
  • +
  • You may need to press Enter to activate the console
  • +
  • Use Ctrl+Alt+Del to send reset signal (if supported)
  • +
  • If the console doesn't load, try refreshing or opening in a new tab
  • +
+
+
+
+
+
+{% endblock %} + +{% block extra_js %} + +{% endblock %} \ No newline at end of file diff --git a/frontend/templates/frontend/lxc_console_popup.html b/frontend/templates/frontend/lxc_console_popup.html new file mode 100644 index 0000000..f7b0fc7 --- /dev/null +++ b/frontend/templates/frontend/lxc_console_popup.html @@ -0,0 +1,163 @@ +{% extends 'frontend/base.html' %} +{% load static %} + +{% block title %} - Console: {{ container.name }}{% endblock %} + +{% block extra_head %} + {{ block.super }} + +{% endblock %} + +{% block content %} +
+
+
+
+
+

+ + LXC Console Access +

+
+
+
+
+ +
+ +
{{ container.name }}
+

VM-ID: {{ container.lxc.vmid }} | IP: {{ container.address }}

+ +
+
Console Information
+
+
Hostname:
+
{{ container.hostname }}
+
Node:
+
{{ container.lxc.node }}
+
Status:
+
+ + {{ container.lxc.status|title }} + +
+
+
+ + + +
+
Important Notes
+
    +
  • The console will open in the Proxmox web interface
  • +
  • You will need to login to Proxmox if not already authenticated
  • +
  • Please allow popups for this site if blocked
  • +
  • Navigate to the container console once in Proxmox
  • +
  • Alternatively, you can manually access: https://{{ proxmox_host }}:8006
  • +
+
+
+
+ +
+
+
+
+{% endblock %} + +{% block extra_js %} + +{% endblock %} \ No newline at end of file diff --git a/frontend/templatetags/__init__.py b/frontend/templatetags/__init__.py new file mode 100644 index 0000000..bba1bf7 --- /dev/null +++ b/frontend/templatetags/__init__.py @@ -0,0 +1 @@ +# frontend/templatetags/__init__.py \ No newline at end of file diff --git a/frontend/templatetags/markdown_filters.py b/frontend/templatetags/markdown_filters.py new file mode 100644 index 0000000..93b7093 --- /dev/null +++ b/frontend/templatetags/markdown_filters.py @@ -0,0 +1,29 @@ +import markdown +from django import template +from django.utils.safestring import mark_safe + +register = template.Library() + +@register.filter(name='markdown') +def markdown_format(text): + """ + Convert markdown text to HTML + Usage: {{ content|markdown }} + """ + if not text: + return "" + + # Configure markdown with useful extensions + md = markdown.Markdown(extensions=[ + 'extra', # Tables, fenced code blocks, etc. + 'codehilite', # Syntax highlighting + 'toc', # Table of contents + 'nl2br' # Convert newlines to
+ ], extension_configs={ + 'codehilite': { + 'css_class': 'highlight', + 'use_pygments': True + } + }) + + return mark_safe(md.convert(text)) \ No newline at end of file diff --git a/frontend/urls.py b/frontend/urls.py new file mode 100644 index 0000000..1128394 --- /dev/null +++ b/frontend/urls.py @@ -0,0 +1,32 @@ +# frontend/urls.py +from django.urls import path + +from . import views + +app_name = 'frontend' + +urlpatterns = [ + path('login/', views.login_view, name='login'), + path('logout/', views.logout_view, name='logout'), + path('', views.dashboard, name='dashboard'), + path('current-containers/', views.container_details, name='current_container_details'), + path('container//', views.container_detail, name='container_detail'), + path('container//edit/', views.edit_container, name='edit_container'), + path('container//delete/', views.delete_container, name='delete_container'), + path('container//stop/', views.stop_lxc, name='stop_lxc'), + path('container//start/', views.start_lxc, name='start_lxc'), + path('container/create/', views.create_container, name='create_container'), + # DNS Management + path('dns/', views.dns_list, name='dns_list'), + path('dns/create/', views.dns_create, name='dns_create'), + path('dns//edit/', views.dns_edit, name='dns_edit'), + path('dns//delete/', views.dns_delete, name='dns_delete'), + path('api/containers/', views.dns_container_api, name='dns_container_api'), + # FAQ + path('faq/', views.faq_list, name='faq_list'), + path('faq/raw/', views.faq_raw, name='faq_raw'), + path('faq/raw//', views.faq_raw, name='faq_raw_single'), + path('faq/create/', views.faq_create, name='faq_create'), + path('faq//edit/', views.faq_edit, name='faq_edit'), + path('faq//delete/', views.faq_delete, name='faq_delete'), +] \ No newline at end of file diff --git a/frontend/views.py b/frontend/views.py new file mode 100644 index 0000000..7360187 --- /dev/null +++ b/frontend/views.py @@ -0,0 +1,637 @@ +import json +import threading + +from 
django.contrib import messages +from django.contrib.auth import authenticate, login, logout +from django.contrib.auth.decorators import login_required +from django.db.models import Q +from django.forms import model_to_dict +from django.http import HttpResponse, HttpResponseForbidden +from django.shortcuts import get_object_or_404, redirect, render + +from django_proxmox_mikrotik.settings import ProxmoxConfig +from lib.decorators import readonly +from lib.proxmox import Proxmox +from lib.task_decorator import ( + create_container_with_task, + delete_container_with_task, + resize_container_disk_with_task, + start_container_with_task, + stop_container_with_task, + update_container_config_sync, +) +from lib.utils import paginator +from manager.models import DevContainer +from mikrotik.models import DNSStatic +from proxmox.models import Lxc, LxcTemplate +from tasklogger.models import TaskFactory +from .forms import CloneContainerForm, DNSSearchForm, DNSStaticForm, DevContainerForm, FAQForm +from .models import FAQ, UserProfile +from .permissions import user_can_access_container + + +# Oben in den Imports hinzufügen: + + +def login_view(request): + TaskFactory.reset_current_task(request=request) + if request.method == 'POST': + username = request.POST.get('username') + password = request.POST.get('password') + user = authenticate(request, username=username, password=password) + + if user is not None: + login(request, user) + if next_url := request.GET.get('next'): + return redirect(next_url) + else: + return redirect('frontend:dashboard') + else: + messages.error(request, 'Ungültige Anmeldedaten') + + return render(request, 'frontend/login.html') + + +def logout_view(request): + TaskFactory.reset_current_task(request=request) + logout(request) + if next_url := request.GET.get('next'): + return redirect('/frontend/login?next=' + next_url) + return redirect('frontend:login') + + +@login_required +def dashboard(request): + TaskFactory.reset_current_task(request=request) + 
try: + user_profile = request.user.profile + except UserProfile.DoesNotExist: + user_profile = UserProfile.objects.create( + user=request.user, + ldap_uid=request.user.username # Annahme: LDAP-UID = Username + ) + + searchdomain = Q() + if s := request.GET.get('search', ''): + searchdomain &= DevContainer.term_filter(s) + if user_profile.is_internal(): + from mikrotik.models import IPAddress + ip_addresses = IPAddress.objects.filter(comment__icontains=f' {user_profile.ldap_uid} ') + networks = [ip.network for ip in ip_addresses] + leasefilter = Q() + for nw in networks: + leasefilter |= Q(lease__address__startswith=nw) + searchdomain &= leasefilter + if user_profile.is_external(): + searchdomain &= Q(lease__address__startswith='172.2') + if lxc_status := request.GET.get('lxc_status', ''): + searchdomain &= Q(lxc__status=lxc_status) + if network_status := request.GET.get('network_status', ''): + searchdomain &= Q(lease__status=network_status) + + # Paginierung hinzufügen + containers = DevContainer.objects.filter(searchdomain) + page_obj = paginator(containers, request=request) + default_template = LxcTemplate.objects.filter(is_default_template=True).first() + return render(request, 'frontend/dashboard.html', { + 'containers': containers, + 'page_obj': page_obj, + 'user_profile': user_profile, + 'page_vmids': ','.join(map(str, page_obj.object_list.values_list('lxc__vmid', flat=True))), + 'page_ids': ','.join(map(str, page_obj.object_list.values_list('internal_id', flat=True))), + 'proxmox_host': ProxmoxConfig.HOST, + 'default_template': default_template, + }) + + +class ContainerStatus: + low_limit = 80 + + def __init__(self, **kwargs): + self.cpus = kwargs.get('cpus', 0) + self.cpu = kwargs.get('cpu', 0) + self.maxmem = kwargs.get('maxmem', 256) + self.mem = kwargs.get('mem', 256) + self.maxdisk = kwargs.get('maxdisk', 6) + self.disk = kwargs.get('disk', 6) + self.maxswap = kwargs.get('maxswap', 0) + self.swap = kwargs.get('swap', 0) + self.status = 
kwargs.get('status') + self.vmid = kwargs.get('vmid') + self.lease_status = kwargs.get('lease_status', 'waiting') + self.lxc_status = kwargs.get('lxc_status', 'stopped') + + def __hash__(self): + return int(self.vmid) + + def __getattr__(self, item): + if item.endswith('percent'): + act = int(getattr(self, item[:-8]) or 0) + soll = int(getattr(self, 'max' + item[:-8]) or 1) + return round(act / soll * 100, 2) + raise AttributeError(f'{item} not found') + + @property + def cpu_percent(self): + return round((self.cpu or 0) * 100, 2) + + @property + def is_low(self): + if not self.cpu_percent and not self.mem_percent and not self.disk_percent: + return False + return self.cpu_percent > self.low_limit or self.mem_percent > self.low_limit or self.disk_percent > self.low_limit + + @property + def to_json(self): + return { + 'cpus': self.cpus, + 'cpu': self.cpu, + 'maxmem': self.maxmem, + 'mem': self.mem, + 'maxdisk': self.maxdisk, + 'disk': self.disk, + 'maxswap': self.maxswap, + 'swap': self.swap, + 'is_low': self.is_low, + 'cpu_percent': self.cpu_percent, + 'mem_percent': self.mem_percent, + 'disk_percent': self.disk_percent, + 'swap_percent': self.swap_percent, + 'vmid': self.vmid, + 'lease_status': self.lease_status, + 'lxc_status': self.lxc_status, + } + + +@readonly +def container_details(request): + ids = filter(None, request.GET.get('ids', '').split(',')) + ret = [] + if ids: + with Proxmox() as pm: + for id in ids: + container = DevContainer.objects.get(internal_id=id) + ret.append(ContainerStatus(**(container.statuscache or {})).to_json) + return HttpResponse(json.dumps(ret), content_type='application/json') + + +@login_required +def start_lxc(request, id=None): + + if request.GET.get('id'): + id = request.GET.get('id') + container = get_object_or_404(DevContainer, internal_id=id) + + try: + # Create task and execute synchronously + task = TaskFactory(request=request) + success = start_container_with_task(str(task.uuid), container.lxc.vmid, request=request) + + 
if success: + messages.success(request, f'Container {container.name} wurde gestartet!') + else: + messages.error(request, f'Fehler beim Starten von Container {container.name}') + + return redirect('frontend:dashboard') + + except Exception as e: + messages.error(request, f'Fehler beim Starten des Containers: {str(e)}') + return redirect('frontend:dashboard') + + +@login_required +def stop_lxc(request, id=None): + + if request.GET.get('id'): + id = request.GET.get('id') + container = get_object_or_404(DevContainer, internal_id=id) + + try: + # Create task and execute synchronously + task = TaskFactory(request=request) + success = stop_container_with_task(str(task.uuid), container.lxc.vmid, request=request) + + if success: + messages.success(request, f'Container {container.name} wurde gestoppt!') + else: + messages.error(request, f'Fehler beim Stoppen von Container {container.name}') + + return redirect('frontend:dashboard') + + except Exception as e: + messages.error(request, f'Fehler beim Stoppen des Containers: {str(e)}') + return redirect('frontend:dashboard') + + + + + + +@login_required +def container_detail(request, container_id): + container = get_object_or_404(DevContainer, internal_id=container_id) + user_profile = request.user.profile + + if False and not user_can_access_container(user_profile, container): + return HttpResponseForbidden("Sie haben keine Berechtigung, diesen Container zu sehen.") + + return render(request, 'frontend/container_details.html', { + 'container': container + }) + + +@login_required +def edit_container(request, container_id): + container = get_object_or_404(DevContainer, internal_id=container_id) + user_profile = request.user.profile + task = TaskFactory(request=request) + + # Berechtigungsprüfung + if not user_can_access_container(user_profile, container): + return HttpResponseForbidden("Sie haben keine Berechtigung, diesen Container zu bearbeiten.") + + if request.method == 'POST': + form = DevContainerForm(request.POST, 
instance=container, user_profile=user_profile) + if form.is_valid(): + # Check if resource changes require updates + old_disksize = container.lxc.disksize + old_cores = container.lxc.cores + old_memory = container.lxc.memory + + new_disksize = form.cleaned_data.get('disksize') + new_cores = form.cleaned_data.get('cores') + new_memory = form.cleaned_data.get('memory') + + try: + # Save form first (updates local database) + form.save() + + # Handle disk resize (synchronously with TaskLogger) + if new_disksize and new_disksize != old_disksize: + success = resize_container_disk_with_task(str(task.uuid), container.lxc.vmid, new_disksize, request=request) + if not success: + messages.error(request, 'Fehler beim Vergrößern der Festplatte. Siehe Task-Log für Details.') + return redirect('frontend:dashboard') + + # Handle memory/cores changes (synchronously) + config_updates = {} + if new_cores and new_cores != old_cores: + config_updates['cores'] = new_cores + if new_memory and new_memory != old_memory: + config_updates['memory'] = new_memory + + if config_updates: + success = update_container_config_sync(container.lxc.vmid, task=task, **config_updates) + + if not success: + messages.error(request, 'Fehler beim Aktualisieren der Container-Konfiguration') + return redirect('frontend:dashboard') + + messages.success(request, 'Container wurde erfolgreich aktualisiert') + return redirect('frontend:dashboard') + + except Exception as e: + messages.error(request, f'Fehler beim Aktualisieren des Containers: {str(e)}') + return redirect('frontend:dashboard') + finally: + TaskFactory.reset_current_task(request=request) + else: + TaskFactory.reset_current_task(request=request) + task = TaskFactory(request=request) + form = DevContainerForm(instance=container, user_profile=user_profile) + + return render(request, 'frontend/edit_container.html', { + 'form': form, + 'container': container, + 'task_uuid': str(task.uuid), + }) + + +@login_required +def delete_container(request, 
container_id): + container = get_object_or_404(DevContainer, internal_id=container_id) + user_profile = request.user.profile + + # Berechtigungsprüfung + if not user_can_access_container(user_profile, container): + return HttpResponseForbidden("Sie haben keine Berechtigung, diesen Container zu löschen.") + + if request.method == 'GET': + TaskFactory.reset_current_task(request=request) + + task = TaskFactory(request=request) + + if request.method == 'POST': + try: + + # Execute container deletion in background thread + def _delete_container_async(): + delete_container_with_task(str(task.uuid), container, request=request) + + thread = threading.Thread(target=_delete_container_async) + thread.daemon = True + thread.start() + + # Return JSON response for async handling with task_uuid + return HttpResponse(json.dumps({ + 'status': 'task_started', + 'task_uuid': str(task.uuid), + 'message': 'Container deletion initiated' + }), content_type='application/json') + + except Exception as e: + return HttpResponse(json.dumps({ + 'status': 'error', + 'message': f'Fehler beim Löschen des Containers: {str(e)}' + }), content_type='application/json', status=500) + finally: + TaskFactory.reset_current_task(request=request) + + return render(request, 'frontend/delete_container.html', { + 'container': container, + 'task_id': str(task.uuid), + }) + + +def create_container(request): + user_profile = request.user.profile + + if request.method == 'POST': + # Get task_uuid from form + task_uuid = request.POST.get('task_uuid') + + form = CloneContainerForm(request.POST, user_profile=user_profile) + if form.is_valid(): + container = form.save() + container.is_active = True + container.save() + + # Start container creation asynchronously with TaskLogger + try: + + # Get the task created when form was loaded + # Create new task if not found + task = TaskFactory(request=request) + task_uuid = str(task.uuid) + + # Execute container creation in background thread + def _create_container_async(): + 
create_container_with_task(task_uuid, container, request=request) + + thread = threading.Thread(target=_create_container_async) + thread.daemon = True + thread.start() + + # Return JSON response for async handling with task_uuid + return HttpResponse(json.dumps({ + 'status': 'task_started', + 'task_uuid': task_uuid, + 'message': 'Container creation initiated' + }), content_type='application/json') + + except Exception as e: + return HttpResponse(json.dumps({ + 'status': 'error', + 'message': f'Fehler beim Erstellen des Containers: {str(e)}' + }), content_type='application/json', status=500) + else: + if cloneid := request.GET.get('clone_lxc'): + lxc = Lxc.objects.filter(pk=cloneid) + else: + lxc = None + if template_id := request.GET.get('clone_template'): + template = LxcTemplate.objects.filter(pk=template_id) + else: + template = None + form = CloneContainerForm( + user_profile=user_profile, + vm=lxc, + template=template, + hostname=request.GET.get('clone_hostname'), + ) + + # Create new task when form is loaded and put UUID in form + task = TaskFactory(request=request) + task_uuid = str(task.uuid) + + return render(request, 'frontend/create_container.html', { + 'form': form, + 'task_uuid': task_uuid + }) + + +# DNS Management Views + +@login_required +def dns_list(request): + """List all DNS entries with search functionality""" + search_form = DNSSearchForm(request.GET or None) + dev_container_dns = list(DevContainer.objects.all().values_list('dns_id', flat=True)) + dns_entries = DNSStatic.objects.all().order_by('name', 'regexp', 'address') + + # Apply search filters + if search_form.is_valid(): + search_query = search_form.cleaned_data.get('search') + entry_type = search_form.cleaned_data.get('entry_type') + + if search_query: + dns_entries = dns_entries.filter( + Q(name__icontains=search_query) | + Q(regexp__icontains=search_query) | + Q(address__icontains=search_query) | + Q(comment__icontains=search_query) + ) + + if entry_type == 'name': + dns_entries = 
dns_entries.exclude(name__isnull=True).exclude(name='') + elif entry_type == 'regexp': + dns_entries = dns_entries.exclude(regexp__isnull=True).exclude(regexp='') + + # Pagination + page_obj = paginator(dns_entries, request=request) + + return render(request, 'frontend/dns_list.html', { + 'dns_entries': dns_entries, + 'page_obj': page_obj, + 'search_form': search_form, + 'dev_container_dns': dev_container_dns, + }) + + +@login_required +def dns_create(request): + """Create a new DNS entry""" + if request.method == 'POST': + form = DNSStaticForm(request.POST) + if form.is_valid(): + # Remove container field before saving (it's just for UI) + dns_entry = form.save(commit=False) + dns_entry.save() + messages.success(request, f'DNS entry "{dns_entry}" created successfully') + return redirect('frontend:dns_list') + else: + form = DNSStaticForm() + + return render(request, 'frontend/dns_form.html', { + 'form': form, + 'title': 'Create DNS Entry', + 'submit_text': 'Create DNS Entry' + }) + + +@login_required +def dns_edit(request, dns_id): + """Edit an existing DNS entry""" + dns_entry = get_object_or_404(DNSStatic, id=dns_id) + + if request.method == 'POST': + form = DNSStaticForm(request.POST, instance=dns_entry) + if form.is_valid(): + dns_entry = form.save() + messages.success(request, f'DNS entry "{dns_entry}" updated successfully') + return redirect('frontend:dns_list') + else: + form = DNSStaticForm(instance=dns_entry) + + return render(request, 'frontend/dns_form.html', { + 'form': form, + 'dns_entry': dns_entry, + 'title': 'Edit DNS Entry', + 'submit_text': 'Update DNS Entry' + }) + + +@login_required +def dns_delete(request, dns_id): + """Delete a DNS entry""" + dns_entry = get_object_or_404(DNSStatic, id=dns_id) + + if request.method == 'POST': + dns_name = str(dns_entry) + dns_entry.delete() + messages.success(request, f'DNS entry "{dns_name}" deleted successfully') + return redirect('frontend:dns_list') + + return render(request, 'frontend/dns_delete.html', { 
+ 'dns_entry': dns_entry, + }) + + +@login_required +def dns_container_api(request): + """API endpoint for container selection in DNS forms""" + search = request.GET.get('search', '') + containers = DevContainer.objects.all() + + if search: + containers = containers.filter( + Q(lxc__hostname__icontains=search) | + Q(lxc__name__icontains=search) | + Q(lease__address__icontains=search) + ) + + results = [] + for container in containers: + try: + results.append({ + 'id': container.pk, + 'text': f"{container.name} ({container.address})", + 'address': container.address, + }) + except: + continue + + return HttpResponse(json.dumps({ + 'results': results + }), content_type='application/json') + + +@login_required +def faq_list(request): + """Simple FAQ list view with accordion display""" + term = request.GET.get('search', '') + faqs = FAQ.term_search(term).order_by('order', 'title') + + return render(request, 'frontend/faq_list.html', { + 'faqs': faqs, + }) + + +def faq_raw(request, id=None): + response_type = 'text/markdown' if not request.GET.get('type', '') == 'json' else 'applicaton/json' + indent = int(request.GET.get('indent', 0)) or None + if id: + faq = get_object_or_404(FAQ, pk=id) + if response_type == 'text/markdown': + content = f"# {faq.title}\n\n{faq.content}" + else: + content = json.dumps(model_to_dict(faq), indent=indent) + else: + term = request.GET.get('search', '') + faqs = FAQ.term_search(term).order_by('order', 'title') + result = [] + if response_type == 'text/markdown': + for faq in faqs: + result.append(f"# {faq.title}\n{faq.content}\n\n") + content = '\n-----------------------------------------------\n\n'.join(result) + else: + for faq in faqs: + result.append(model_to_dict(faq)) + content = json.dumps(result, default=str, indent=indent) + return HttpResponse(content, content_type=response_type) + + +@login_required +def faq_create(request): + """Create new FAQ entry""" + if request.method == 'POST': + form = FAQForm(request.POST) + if 
def ip_in_net(ip, network):
    """Return True if address *ip* lies inside CIDR *network* (e.g. '10.0.0.0/8').

    Rewritten on top of the stdlib ``ipaddress`` module instead of manual
    hex-string/bitmask arithmetic; ``strict=False`` accepts networks written
    with host bits set (the original masked both sides, so this matches its
    tolerance). As a side benefit this now also handles IPv6.
    """
    import ipaddress  # local import: keeps the module's import surface unchanged
    return ipaddress.ip_address(ip) in ipaddress.ip_network(network, strict=False)
class TaskAwareQuerySet(QuerySet):
    """QuerySet that records bulk deletions on an optional task object.

    The task notification is best-effort: any failure is logged and never
    blocks the actual delete.
    """

    def delete(self, task=None):
        """Delete the queryset, optionally logging an entry on *task* first.

        BUG FIX: the original did ``return super().delete()`` inside a
        ``finally`` block, which would also silently swallow any exception
        raised while handling the logging failure. The delete now runs as a
        normal statement after the best-effort notification.
        """
        try:
            if task:
                task.add_entry(f'Deleting {self.model.__name__}s via Queryset')
        except Exception as e:
            logging.error(f'Failed to add task entry for {self.model.__name__}s: {e}')
        return super().delete()
class SearchableMixin:
    """Mixin adding simple term-based searching to a model.

    Subclasses implement :meth:`term_filter` to return a filter expression
    (e.g. a Django ``Q`` object) for a search string; :meth:`term_search`
    applies it, falling back to all objects for an empty term.
    """

    @classmethod
    def term_filter(cls, search_string):
        """Return the filter expression for *search_string*; must be overridden.

        BUG FIX: the original raised ``NotImplemented(...)`` — the
        NotImplemented singleton is not callable, so callers got a
        ``TypeError`` instead of the intended exception. The message was
        also a non-f-string (literal braces) and named a method
        ("search_by_term") that does not exist.
        """
        raise NotImplementedError(f'"{cls.__name__}.term_filter" not implemented')

    @classmethod
    def term_search(cls, search_string):
        """Return a queryset filtered by *search_string*, or all objects if empty."""
        if search_string:
            return cls.objects.filter(cls.term_filter(search_string))
        return cls.objects.all()
def skip_signal(signaltype='post_save', **kwargs):
    """Decorator factory for Django signal handlers that prevents recursion.

    If the handler itself triggers the same signal on *instance* (e.g. it
    calls ``instance.save()`` inside a post_save handler), the re-entrant
    call sees the ``_skip_signal`` marker on the instance and returns
    immediately. The marker is always cleared afterwards.

    *signaltype* is used only for log messages; extra **kwargs are accepted
    for backward compatibility and ignored.

    BUG FIX: the original cleanup path called
    ``logging.exception('WTF????', str(e))`` — ``str(e)`` was passed as a
    %-format argument for a message with no placeholder, which makes the
    logging machinery itself raise and log an internal error.
    """

    def _decorator(signal_handler):
        @wraps(signal_handler)
        def _wrapper(sender, instance, **kwargs):
            if getattr(instance, '_skip_signal', False):
                logging.debug(
                    f'Skip signal handler for {signaltype} : {signal_handler.__name__} - {sender.__name__} - {instance.__class__.__name__}')
                return

            instance._skip_signal = True
            try:
                return signal_handler(sender, instance, **kwargs)
            finally:
                # Always clear the marker, even if the handler raised.
                try:
                    del instance._skip_signal
                except AttributeError:
                    logging.debug(
                        f'{instance.__class__.__name__} instance has no attribute "_skip_signal" - could not delete it.')
                except Exception:
                    logging.exception(
                        'Failed to clear _skip_signal on %s', instance.__class__.__name__)

        return _wrapper

    return _decorator
self.search(AuthLDAPConfig.GROUP_SEARCH_BASE, filterstr, attrlist) + + def get_users(self, filterstr='(objectClass=inetOrgPerson', attrlist=None): + return self.search(AuthLDAPConfig.USER_BASE, filterstr, attrlist) + + def get_user(self, username): + if userdata := self.get_users(f'(uid={username})'): + return {k: (v[0].decode('utf-8') if v else None) for k, v in userdata[0][1].items()} + return {} + + def get_user_groups(self, username, attrlist=None): + filterstr = (f'(&' + f'(objectClass=groupOfNames)' + f'(member=uid={username},{AuthLDAPConfig.USER_BASE})' + f')') + grps = self.get_groups(filterstr, attrlist) + return [data.get('ou')[0].decode('utf-8') for dn, data in grps] + + def get_group_members(self, groupname, attrlist=None): + if found := self.search(f'ou={groupname},{AuthLDAPConfig.GROUP_SEARCH_BASE}', + '(objectClass=groupOfNames)', attrlist=['member']): + return [groupmember_re.sub(r'\1', m.decode('utf-8')) for m in found[0][1].get('member')] + return [] + + def create_initial_groups(self): + return [ + Group.objects.get_or_create(name=name)[0] for name in self.possible_groups + ] + + def set_user_groups(self, userinstance: User, save_instance=True): + """This does NOT save the user instance!""" + root_group, intern_group, extern_group = self.create_initial_groups() + try: + ldap_user = self.get_user(userinstance.username) + if ldap_user: + logging.debug(f'LDAP-User found: {ldap_user}') + groups = self.get_user_groups(userinstance.username) + if 'root' in groups and (userinstance.is_superuser is False or userinstance.is_staff is False): + logging.debug(f'LDAP-User is root: {ldap_user}') + userinstance.groups.add(root_group) + userinstance.is_superuser = True + userinstance.is_staff = True + elif 'intern' in groups and userinstance.is_staff is False: + logging.debug(f'LDAP-User is intern: {ldap_user}') + userinstance.groups.add(intern_group) + userinstance.is_staff = True + elif 'extern' in groups: + logging.debug(f'LDAP-User is extern: {ldap_user}') + 
def is_local_ip(ip, prefixes=None):
    """Return True if *ip*'s first octet is one of the configured local prefixes.

    *prefixes* defaults to MikrotikConfig.IP_8 (a comma-separated string of
    first octets, e.g. '192,172'); it may also be passed explicitly as a
    comma-separated string or an iterable of octet strings.

    BUG FIX: the original did ``ip[:3] in ip_8`` — a raw substring check on
    the comma-separated config. That silently never matches prefixes shorter
    than three characters (e.g. '10', '92'), because the sliced prefix then
    includes the dot ('10.'). Comparing the actual first octet against the
    split list is both correct and intention-revealing.
    """
    if prefixes is None:
        prefixes = ip_8
    if isinstance(prefixes, str):
        prefixes = prefixes.split(',')
    return ip.split('.', 1)[0] in prefixes
        def __getattr__(self, item):
            """Dynamic method creation for RouterOS API calls.

            Any attribute access on a ``_resource`` (e.g. ``.call``, ``.set``,
            ``.add``) returns a wrapper that forwards the call to the
            routeros_api resource for this route. Each invocation builds a
            brand-new MikrotikApi connection and tears it down in ``finally``,
            so no connection state is shared between calls.
            """
            def method_wrapper(*args, **kwargs):
                api = None
                # Up to 3 attempts; each attempt gets a fresh connection.
                max_retries = 3

                for attempt in range(max_retries):
                    try:
                        api = MikrotikApi()
                        resource = api.resource(self._route)
                        method = getattr(resource, item)
                        result = method(*args, **kwargs)
                        return result
                    except (OSError, BrokenPipeError, ConnectionError, AttributeError) as e:
                        # NOTE(review): AttributeError here presumably guards
                        # against a half-initialized api (api.api is None) —
                        # confirm; it will also retry genuine attribute typos
                        # on `item` three times before raising.
                        _logger.warning(f"Connection error in _resource.{item}() attempt {attempt + 1}/{max_retries}: {e}")
                        if attempt == max_retries - 1:
                            # Last attempt failed: propagate to the caller.
                            raise
                    finally:
                        # Always release the per-attempt connection.
                        if api:
                            api.disconnect()

            return method_wrapper
pool(route): + if isinstance(route, MikrotikModelMixin): + route = route.router_base + return Mikrotik._instances.setdefault(route, Mikrotik(route)) + + def initialize(self, route): + if isinstance(route, MikrotikModelMixin): + self._route = route.router_base + else: + self._route = route + + Mikrotik._instances.setdefault(route, self) + + def get(self, **kwargs): + mikrotik_kwargs = {} + additional_queries = [] + for k in list(kwargs.keys()): + if '__' in k: + v = kwargs.pop(k) + field, lookup = k.split('__', 1) + + if lookup == 'startswith': + # Mikrotik verwendet ~"^pattern" für startswith + mikrotik_kwargs[field] = f'^{v}' + elif lookup == 'contains': + # Mikrotik verwendet ~"pattern" für contains + mikrotik_kwargs[field] = f'{v}' + elif lookup == 'endswith': + # Mikrotik verwendet ~"pattern$" für endswith + mikrotik_kwargs[field] = f'{v}$' + else: + # Unbekannter Lookup-Typ, behalte den ursprünglichen Wert bei + kwargs[k] = v + _logger.debug(f'Getting {self._route} with transformed kwargs: {mikrotik_kwargs}') + + for field, pattern in mikrotik_kwargs.items(): + additional_queries.append(f'{field}~"{pattern}"') + + logging.info(f'Getting {self._route}/print with kwargs: {kwargs} and additional queries: {additional_queries}') + return self._resource(self._route).call('print', queries=kwargs, additional_queries=additional_queries) + + def set(self, **kwargs): + assert 'id' in kwargs, "id must be set" + if MikrotikConfig.READONLY: + _logger.warning(f'Trying to set {self._route} to {kwargs} on read-only router') + return True # Simulate success in readonly mode + else: + return self._resource(self._route).set(**kwargs) + + def add(self, **kwargs): + kwargs.pop('id', None) + if MikrotikConfig.READONLY: + _logger.warning(f'Trying to add {self._route} to {kwargs} on read-only router') + return '*READONLY' # Simulate success with fake ID in readonly mode + else: + return self._resource(self._route).add(**kwargs) + + def remove(self, **kwargs): + assert 'id' in kwargs, 
"id must be set" + if MikrotikConfig.READONLY: + _logger.warning(f'Trying to remove {self._route} with {kwargs} on read-only router') + return True # Simulate success in readonly mode + else: + return self._resource(self._route).remove(id=kwargs['id']) + + +class MikrotikModelMixin(RoutedModelAbstract): + class Meta: + abstract = True + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._old_values = self.to_json + + internal_id = models.BigAutoField(primary_key=True) + + disabled = models.CharField(max_length=10, null=True, blank=True, default='false', choices=BOOLEAN_CHOICES_CHAR) + comment = models.TextField(null=True, blank=True, default='') + + """ + Those we need for configuration + """ + + @property + def router_base(self): + raise NotImplemented('Not implemented') + + @property + def router_object_without_id(self): + _logger.warning(f'Deprecated - use router_object instead of router_object_without_id for {self}') + return self.router_object() + + """ + Common stuff - may be overwritten + """ + + @property + def no_mikrotik_props(self): + """Props, that are not send to router + """ + return ['internal_id', 'dynamic'] + + @property + def router(self): + return Mikrotik.pool(self) + + @classmethod + def class_props(cls): + return [p for p in model_to_dict(cls()).keys() if p not in ('internal_id', 'dynamic')] + + @classmethod + def get_all_as_object(cls): + all = cls().router_get_all + print(all, type(all)) + return [cls.from_dict(**r) for r in all] + + @classmethod + def translate_keys(cls, **kwargs): + return {k.replace('-', '_'): v for k, v in kwargs.items()} + + def router_object(self, translate_keys=True): + if self.id: + args = {'id': self.id} + else: + args = {} + for k in self.unique_on_router: + # here, we take the first one set + if isinstance(k, (tuple, list)): + for k2 in k: + if v2 := getattr(self, k2, None): + args[k2] = v2 + break + else: + if v := getattr(self, k, None): + args[k] = v + + if not args: + raise 
ValueError(f"Empty args to get info from router for {self}") + + data = self.router.get(**args) + if data: + return self.translate_keys(**data[0]) if translate_keys else data[0] + return None + + def sync_from_router(self, data=None): + if data := data or self.router_object(): + _logger.debug(f'Syncing {self} from router with {data}') + self.assign(**data) + else: + _logger.debug(f'Could not sync {self} from router') + return self + + def assign(self, **kwargs): + updatefields = [] + for k, v in kwargs.items(): + if hasattr(self, k): + if v != getattr(self, k): + updatefields.append(k) + setattr(self, k, v) + return self + + def sync_all_from_router(self): + for obj in self.router_get_all: + self.from_dict(**obj) + + def delete_from_router(self): + if self.id: + return self.router.remove(id=self.id) + return True + + @classmethod + def from_dict(cls, **kwargs): + self_props = cls.class_props() + args = {} + for k, v in cls.translate_keys(**kwargs).items(): + if k not in self_props: + _logger.warning(f'Unknown property {k} for {cls.__class__.__name__}') + else: + args[k] = v + try: + obj = cls.objects.get(id=args['id']) + _logger.debug(f'Found {obj} from {kwargs}') + except cls.DoesNotExist: + obj = cls.objects.create(**args) + _logger.debug(f'Created {obj} from {kwargs}') + except Exception as e: + _logger.error(f'Could not create {cls.__class__.__name__} from {kwargs} - {e}') + raise e + return obj + + @property + def mikrotik_send_params(self): + return {k: v for k, v in self.to_json.items() if k not in self.no_mikrotik_props} + + def sync_to_router(self, created=False): + data = self.mikrotik_send_params + _logger.debug(f'Syncing {self.__dict__}') + if self.id: + _logger.debug(f'Syncing {self} to router with {data}') + return self.router_set(**data) + _logger.debug(f'Adding {self} to router with {data}') + return self.router_add(**data) + + @cached_property + def router_get_all(self): + return self.router.get() + + def router_get(self, **kwargs): + response = 
self.router.get(**kwargs) + _logger.debug(f'Got {self} from router with {response}') + return response + + def router_set(self, **kwargs): + kwargs['id'] = self.id + response = self.router.set(**kwargs) + _logger.debug(f'Set {self} to router with {response}') + return response + + def router_add(self, **kwargs): + if self.id: + _logger.warning(f'Trying to add {self} to router - already has id {self.id}') + return True + kwargs.pop('id', None) + try: + response = self.router.add(**kwargs) + except RouterOsApiCommunicationError as e: + _logger.error(f'Could not add {self} to router - {e}') + routerdata = self.router_object() + if routerdata: + return self.sync_from_router(data=routerdata) + raise ValidationError(f'Could not add {self} to router - {e}') + try: + new_on_router = self.router_object() + _logger.debug(f'Got {new_on_router} from router') + self.id = new_on_router['id'] + _logger.debug(f'Added {self} to router with {response}') + self.save() + except (IndexError, KeyError, NotImplementedError) as e: + _logger.info(f'Could not set id for {self} - git no id {e}') + return response + + +def sync_from_mikrotik(classname): + _st = classname() + for i in _st.router_get_all: + i = {k.replace('-', '_'): v for k, v in i.items()} + try: + existing = classname.objects.get(id=i['id']) + for k, v in i.items(): + if hasattr(existing, k): + _logger.debug(f'Updating {k} to {v} - {existing}') + setattr(existing, k, v) + existing.save() + except classname.DoesNotExist: + _logger.info(f'Creating {i["id"]}') + try: + classname.objects.create(**i) + except IntegrityError as e: + _logger.error(f'Could not create {i["id"]}, already exists') + _logger.exception(e) diff --git a/lib/proxmox.py b/lib/proxmox.py new file mode 100644 index 0000000..a5c6299 --- /dev/null +++ b/lib/proxmox.py @@ -0,0 +1,214 @@ +import json +import logging +import time + +import proxmoxer + +from django_proxmox_mikrotik.settings import ProxmoxConfig + + +def get_comma_separated_values(value): + _vlist = 
class PMDict(dict):
    """dict with attribute-style read access: d.key == d['key'].

    Missing keys raise AttributeError (so hasattr() works as expected).
    """

    def __getattr__(self, name):
        if name in self:
            return self[name]
        raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")


class PMCollection(list):
    """list of PMDict entries; attribute access fans out over all members.

    ``collection.attr`` yields ``item.attr`` for every item.
    """

    def append(self, item):
        # Only dict-likes are accepted; plain dicts are wrapped into PMDict
        # so attribute fan-out works on them too.
        assert isinstance(item, dict)
        if not isinstance(item, PMDict):
            item = PMDict(**item)
        super().append(item)

    def __getattr__(self, item):
        """Return a generator of ``member.item`` for all members.

        BUG FIX: the original body contained ``yield``, making the whole
        method a generator function — so *every* attribute access returned a
        generator immediately and the AttributeError was only raised lazily
        on first iteration, breaking hasattr() and eager error reporting.
        Returning a generator expression keeps the lazy fan-out but raises
        eagerly for missing attributes.
        """
        if self and hasattr(self[0], item):
            return (getattr(member, item) for member in self)
        raise AttributeError(f"'{self.__class__.__name__}' object (or its content[0]) has no attribute '{item}'")
+ The first in *args will always be taken as route! + """ + if "_" in name: + nameparts = name.split("_") + action = nameparts.pop() + if action not in ["get", "post", "put", "delete"]: + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}' - 1") + + if hasattr(self, nameparts[0]): + base_method = getattr(self, nameparts.pop(0)) + else: + base_method = self.nodes + + route_part = "/".join(nameparts) + + def wrapper(*args, **kwargs): + if args: + route = str(args[0]).rstrip('/') + args = args[1:] + else: + route = '' + if route_part: + route = f'{route_part}/{route}' + if ProxmoxConfig.READONLY and action != 'get': + logging.warning(f'PROXMOX_READONLY is set - not calling {route} ' + f'with method {base_method.__name__}.{action}' + f'({args}, {kwargs})') + # Return appropriate mock response based on action + if action == 'post': + return 'UPID:READONLY:00000000:00000000:00000000:vzcreate:readonly:root@pam:' + elif action == 'put': + return None + elif action == 'delete': + return None + return {} + logging.debug(f'Calling {base_method.__name__}.{action}({route}, {args}, {kwargs})') + return getattr(base_method(route), action)(*args, **kwargs) + + return wrapper + + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + @classmethod + def get_task_status(cls, taskhash, sleeptime=10, pm=None): + cls = pm or cls() + logging.debug(f"Getting task status for {taskhash}") + maxtime = ProxmoxConfig.CREATE_LXC_TIMEOUT + response = cls.nodes_get(f'tasks/{taskhash}/status') + logging.debug(f"Status response for {taskhash}: {response}") + while True: + response = cls.nodes_get(f'tasks/{taskhash}/status') + logging.debug(f"Status response for {taskhash}: {response}") + yield response + logging.debug(f"Status response for {taskhash}: {response}") + status = response['status'] + if status == 'stopped': + if not response.get('exitstatus') == 'OK': + raise ValueError(f"Exitstatus is {response.get('exitstatus')}") + 
break + time.sleep(sleeptime) + maxtime -= sleeptime + if maxtime <= 0: + raise TimeoutError("Took to long") + + def get_all_lxc(self, *filterargs, as_dict=True, **filterkwargs): + logging.debug(f"Getting all LXC with filter {filterargs} and {filterkwargs}") + from proxmox.models import Lxc + lxc_filter = {} + _raw = self.lxc_get() + logging.debug(f"All LXC: {_raw}") + if not _raw: + return [] + all = _raw + comps = {} + for key in list(_raw[0].keys()): + if key in filterkwargs: + comps[key] = filterkwargs.pop(key) + for key, comp in comps.items(): + all = filter(lambda x: ( + logging.debug(f'{key} of lxc is {x[key]}, must be {comp}' if key in x else f"{key} not in {x}") + or key not in x + or x[key] == comp + ), all) + if not all: + logging.debug(f"No LXC found with filter {filterargs} and {filterkwargs}") + logging.debug(f"All LXC: {json.dumps(all, indent=2, default=str)}") + return [] + if filterargs: + for prop, c, v in filterargs: + invert = False + if c.startswith('!'): + invert = True + if c.endswith('='): + comparer = lambda x: x != v if invert else x == v + elif c.endswith('in'): + comparer = lambda x: x not in v if invert else x in v + elif c.endswith('startswith'): + comparer = lambda x: v.startswith(v) if invert else v.startswith(v) + elif c.endswith('endswith'): + comparer = lambda x: v.endswith(v) if invert else v.endswith(v) + lxc_filter[prop] = comparer + if filterkwargs: + for k, v in filterkwargs.items(): + lxc_filter[k] = lambda x: x == v + + def filter_out(lxc_): + if not lxc_filter: + return True + for prop, comparer in lxc_filter.items(): + if not prop in lxc_: + continue + if not comparer(lxc_.get(prop)): + logging.debug(f"Filter out {lxc_} because {prop} is {lxc_.get(prop)}") + return False + return True + + ret = [] + for lxc in filter(filter_out, all): + lxc_config = self.lxc_get(f'{lxc["vmid"]}/config') + _lx_data = lxc | lxc_config + if as_dict: + ret.append(PMDict(**_lx_data)) + # yield PMDict(**_lx_data) + else: + 
class RouterObjectCollection(set, FactoryMixin):
    """A set of RouterObjectAbstract instances of a single concrete type.

    The first member added pins the member type; later adds must match it.
    Provides a small queryset-like API (filter / all / first / values_list).
    """

    _objectclass = None

    def _check_member_class(self, member):
        """Validate *member* against the pinned type; pin it on first add."""
        if self._objectclass:
            if not isinstance(member, self._objectclass):
                # Bugfix: original formatted `cls.__class__.__name__`, which for a
                # class is its metaclass name ("type"), not the member type name.
                raise ValueError(
                    f'Only {self._objectclass.__name__} can be added to {self.__class__.__name__}')
        else:
            if not isinstance(member, RouterObjectAbstract):
                raise ValueError(
                    f'Only {RouterObjectAbstract.__name__} can be added to {self.__class__.__name__}')
            self._objectclass = member.__class__

    def add(self, *args, **kwargs):
        """Type-checked add(); members must be hashable RouterObjectAbstract objects."""
        self._check_member_class(args[0])
        return super().add(*args, **kwargs)

    def filter(self, *args, **kwargs):
        """Return a NEW collection containing the members matching obj.filtered(...)."""
        matched = self.__class__()
        for obj in self:
            if obj.filtered(*args, **kwargs):
                matched.add(obj)
        return matched

    def all(self):
        """Return a shallow copy of the collection."""
        return self.copy()

    def first(self, *args, **kwargs):
        """Return an arbitrary ("first") member, optionally filtered, or None.

        Bugfix: the original indexed the result (``objects[0]``) — sets do not
        support indexing, so this always raised TypeError on a non-empty set.
        """
        objects = self.filter(*args, **kwargs) if (args or kwargs) else self.all()
        return next(iter(objects), None)

    def remove(self, *args, **kwargs):
        """remove() with discard() semantics (no KeyError); returns self for chaining."""
        try:
            super().remove(*args, **kwargs)
        except KeyError:
            pass
        return self

    def values_list(self, keyset: list, flat=False):
        """Project members onto *keyset*.

        With ``flat=False`` return a new collection of reduced member objects;
        with ``flat=True`` return a flat list of the selected values (Django
        values_list semantics).
        Bugfix: the original flat branch returned inside the loop, i.e. after
        processing only the first member.
        """
        if flat:
            values = []
            for obj in self:
                values.extend(v for k, v in obj.items() if k in keyset)
            return values
        reduced = self.__class__()
        for obj in self:
            reduced.add(obj.__class__(**{k: v for k, v in copy(obj).items() if k in keyset}))
        return reduced


class RoutedModelAbstract(models.Model):
    """Abstract Django model for entities that are mirrored on a router."""

    class Meta:
        abstract = True

    internal_id = models.BigAutoField(primary_key=True)

    @property
    def router_object(self):
        # The live RouterObjectAbstract this row mirrors (not persisted).
        return self._router_object

    @router_object.setter
    def router_object(self, value):
        self._router_object = value

    def __init__(self, *args, **kwargs):
        self._router_object = kwargs.pop('router_object', None)
        super().__init__(*args, **kwargs)
        # Snapshot of the initial field values for later change detection.
        self._old_values = self.to_json

    @property
    def to_json(self):
        """Plain-dict representation of the model's concrete fields."""
        return model_to_dict(self)

    @property
    def unique_on_router(self):
        # Bugfix: NotImplemented is a constant, not an exception type.
        raise NotImplementedError(f'{self.__class__.__name__} must implement unique_on_router')


class RouterObjectAbstract(dict, FactoryMixin):
    """dict-backed representation of an object living on the router,
    optionally linked to a RoutedModelAbstract instance/class and a router."""

    _router_instance = None
    _model_class = None
    _model_instance = None

    def __init__(self, router=None, model=None, **kwargs):
        if router:
            self.set_router(router)
        if model:
            self.set_model(model)
        super().__init__(**kwargs)

    class DoesNotExistsOnRouter(ObjectDoesNotExist):
        pass

    @property
    def model_class(self):
        return self._model_class

    @property
    def get_from_db(self):
        raise NotImplementedError(f"{self.__class__.__name__} must implement get_from_db")

    @model_class.setter
    def model_class(self, model):
        # Accept either an instance or a subclass of RoutedModelAbstract.
        if isinstance(model, RoutedModelAbstract):
            self._model_class = model.__class__
        elif issubclass(model, RoutedModelAbstract):
            self._model_class = model
        else:
            raise ValueError(
                f"model must be of type {RoutedModelAbstract.__name__}, not {model.__class__.__name__}")

    @property
    def model_instance(self):
        return self._model_instance

    @model_instance.setter
    def model_instance(self, model):
        if self._model_class and not issubclass(model.__class__, self._model_class):
            raise ValueError(
                f'Model {model.__class__.__name__} must be of type {self._model_class.__name__}')
        if not isinstance(model, RoutedModelAbstract):
            raise ValueError(
                f"model must be of type {RoutedModelAbstract.__name__}, not {model.__class__.__name__}")
        self._model_instance = model
        # Bugfix: original read ``model.__class_`` (typo) -> AttributeError.
        self._model_class = model.__class__
        model.router_object = self

    def set_model(self, model):
        """Attach a model instance or class; build an unsaved instance if needed."""
        if isinstance(model, RoutedModelAbstract):
            self.model_instance = model
        else:
            # Bugfix: original assigned ``model.__class__`` here — for a class
            # argument that is its *metaclass*; pass the class itself and let
            # the model_class setter validate it.
            self.model_class = model

        if not self._model_instance:
            logging.debug(f'Creating new unsaved {self._model_class.__name__} from {self}')
            self._model_instance = self.model_class(**({'router_object': self} | self))

        return self

    def set_router(self, router):
        assert isinstance(router, RouterAbstract), (
            f"router must be of type {RouterAbstract.__name__}, not {router.__class__.__name__}")
        self._router_instance = router
        return self

    def to_db_object(self, raise_on_keyerror=False) -> models.Model:
        """Build an unsaved model instance from this dict.

        Bugfix: the original had the two branches swapped — the silent
        empty-string-default path ran when ``raise_on_keyerror`` was True.
        NOTE(review): ``model_to_dict`` normally takes an instance, not a
        class, as the original also did here — confirm this works as intended.
        """
        if raise_on_keyerror:
            missing = []
            data = {}
            for field in model_to_dict(self._model_class):
                try:
                    data[field] = self[field]
                except KeyError as exc:
                    missing.append(str(exc))
            if missing:
                raise KeyError(
                    f'Could not convert {self.__class__.__name__} to DB object - missing keys: {missing}')
        else:
            data = {k: self.get(k, '') for k in model_to_dict(self._model_class).keys()}
        return self._model_class(**data)

    @classmethod
    def from_db_object(cls, db_object):
        """Alternate constructor from a saved Django model instance."""
        return cls(**model_to_dict(db_object))

    @property
    def router(self):
        return self._router_instance

    def __hash__(self):
        # Bugfix: original returned ``len(self.to_json)`` — to_json is a
        # *method* on this class, so len() raised TypeError; hash a stable
        # serialized form of the dict content instead.
        return hash(json.dumps(self, sort_keys=True, default=str))

    def _filter_or(self, **kwargs):
        """True if ANY key/value criterion matches."""
        return any(self.get(k) == v for k, v in kwargs.items())

    def _filter_and(self, **kwargs):
        """True if ALL key/value criteria match."""
        return all(self.get(k) == v for k, v in kwargs.items())

    def filtered(self, mode='or', raise_on_failure=False, **kwargs):
        """Return self if the criteria match (any/all depending on *mode*), else None."""
        assert mode in ('or', 'and'), f"mode must be 'or' or 'and', not {mode}"
        if getattr(self, f'_filter_{mode}')(**kwargs):
            return self
        if raise_on_failure:
            # Bugfix: original raised ``self.DoesNotExists``, which is not
            # defined anywhere — the declared exception is DoesNotExistsOnRouter.
            raise self.DoesNotExistsOnRouter(f'Object {self} does not match criteria {kwargs}')
        return None

    def __getattr__(self, name):
        """Expose dict keys as attributes (dot access)."""
        if name in self:
            return self[name]
        raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")

    def to_json(self, **dump_params):
        """JSON-roundtripped dict representation; non-serializable values fall back to str()."""
        default_fun = dump_params.pop('default', None) or str
        return json.loads(json.dumps(self, default=default_fun, **dump_params))

    @classmethod
    def router_get(cls, *args, **kwargs):
        """Return a RouterObjectCollection of objects from the router."""
        raise NotImplementedError(f"{cls.__name__} must implement router_get()")

    def router_post(self):
        """Add this object on the router."""
        raise NotImplementedError(f"{self.__class__.__name__} must implement router_post()")

    def router_put(self, **kwargs):
        """Update this object on the router."""
        raise NotImplementedError(f"{self.__class__.__name__} must implement router_put()")

    def router_delete(self):
        """Remove this object from the router."""
        raise NotImplementedError(f"{self.__class__.__name__} must implement router_delete()")


class DNSStaticAbstract(RoutedModelAbstract):
    """The DNSStatic Object."""

    class Meta:
        abstract = True

    @property
    def get_name(self):
        raise NotImplementedError(f'{self.__class__.__name__} must implement get_name')

    @property
    def get_regex(self):
        # Bugfix: original message named get_name (copy-paste error).
        raise NotImplementedError(f'{self.__class__.__name__} must implement get_regex')

    @property
    def get_ip4(self):
        raise NotImplementedError(f'{self.__class__.__name__} must implement get_ip4')

    @property
    def get_ip6(self):
        raise NotImplementedError(f'{self.__class__.__name__} must implement get_ip6')


class LeaseAbstract(RoutedModelAbstract):
    """The IP Lease Object."""

    class Meta:
        abstract = True

    @property
    def get_mac(self):
        raise NotImplementedError(f'{self.__class__.__name__} must implement get_mac')

    @property
    def get_ip4(self):
        raise NotImplementedError(f'{self.__class__.__name__} must implement get_ip4')

    @property
    def get_ip6(self):
        raise NotImplementedError(f'{self.__class__.__name__} must implement get_ip6')

    @property
    def get_status(self):
        raise NotImplementedError(f'{self.__class__.__name__} must implement get_status')


class IPAddressAbstract(RoutedModelAbstract):
    """The Address Pool."""

    class Meta:
        abstract = True

    @property
    def get_address(self):
        raise NotImplementedError(f'{self.__class__.__name__} must implement get_address')

    @property
    def get_network(self):
        raise NotImplementedError(f'{self.__class__.__name__} must implement get_network')


class RouterAbstract:
    """Abstract router connection: concrete routers implement the CRUD verbs."""

    def __init__(self, *args, **kwargs):
        self.initialize(*args, **kwargs)

    @property
    def api(self):
        raise NotImplementedError(
            f"{self.__class__.__name__} must implement api or init with api parameter")

    def initialize(self, *args, **kwargs):
        """Open the connection to the router."""
        raise NotImplementedError(f"{self.__class__.__name__} must implement initialize()")

    def get(self, **kwargs):
        raise NotImplementedError(f"{self.__class__.__name__} must implement get()")

    def add(self, **kwargs):
        raise NotImplementedError(f"{self.__class__.__name__} must implement add()")

    def set(self, **kwargs):
        raise NotImplementedError(f"{self.__class__.__name__} must implement set()")

    def remove(self, **kwargs):
        raise NotImplementedError(f"{self.__class__.__name__} must implement remove()")
def start_container_with_task(task_uuid: str, vmid: int, request=None) -> bool:
    """Start LXC *vmid* under TaskLogger monitoring; True once dispatched."""
    task = TaskFactory(task_uuid=task_uuid, request=request)
    task.add_entry(f"Starting container {vmid}...")

    def _start_container():
        with Proxmox() as pm:
            return pm.lxc_post(f'{vmid}/status/start')

    # wrap_proxmox_function() monitors the returned UPID synchronously.
    task.wrap_proxmox_function(_start_container)
    task.add_entry(f"Container {vmid} started successfully")
    task.unset_as_current()
    return True


def stop_container_with_task(task_uuid: str, vmid: int, request=None) -> bool:
    """Stop LXC *vmid* under TaskLogger monitoring; True once dispatched."""
    task = TaskFactory(task_uuid=task_uuid, request=request)
    task.add_entry(f"Stopping container {vmid}...")

    def _stop_container():
        with Proxmox() as pm:
            return pm.lxc_post(f'{vmid}/status/stop')

    # wrap_proxmox_function() monitors the returned UPID synchronously.
    task.wrap_proxmox_function(_stop_container)
    task.add_entry(f"Container {vmid} stopped successfully")
    task.unset_as_current()
    return True


def resize_container_disk_with_task(task_uuid: str, vmid: int, disk_size: int, request=None) -> bool:
    """Grow the rootfs of LXC *vmid* to *disk_size* GB under TaskLogger monitoring."""
    task = TaskFactory(task_uuid=task_uuid, request=request)
    task.add_entry(f"Resizing disk for container {vmid} to {disk_size}GB...")

    def _resize_container_disk():
        with Proxmox() as pm:
            return pm.lxc_put(f'{vmid}/resize', disk='rootfs', size=f'{disk_size}G')

    # wrap_proxmox_function() monitors the returned UPID synchronously.
    task.wrap_proxmox_function(_resize_container_disk)
    task.add_entry(f"Container {vmid} disk resized to {disk_size}GB successfully")
    task.unset_as_current()
    return True


def create_container_with_task(task_uuid: str, clone_container, request=None) -> bool:
    """Run clone_container.execute() routed to the given task; False on any failure."""
    try:
        # CloneContainer.execute() uses the tasklogger directly now.
        clone_container.execute(task_uuid_override=task_uuid, request=request)
        return True
    except Exception as e:
        logging.exception(f"Container creation failed: {e}")
        return False


def delete_container_with_task(task_uuid: str, container, request=None) -> bool:
    """Delete *container* (and its Proxmox LXC) under TaskLogger monitoring."""
    task = TaskFactory(task_uuid=task_uuid, request=request)
    task.add_entry(f"Deleting container {container.name}...")

    try:
        task.add_entry(f"Deleting Proxmox container {container.lxc.vmid}...")
        # wrap_proxmox_function() monitors the returned UPID synchronously.
        task.wrap_proxmox_function(container.delete, task=task)
        task.add_entry("Container deleted successfully!")
        task.status = 'completed'
        task.save()
        return True
    except Exception as e:
        task.add_entry(f"Error deleting container: {str(e)}")
        task.status = 'error'
        task.save()
        logging.exception(f"Container deletion failed: {e}")
        return False
    finally:
        # Always detach the task, success or failure.
        task.unset_as_current()


def update_container_config_sync(vmid: int, task=None, **config_params) -> bool:
    """Apply synchronous LXC config changes (memory, cores) — no UPID monitoring needed."""
    try:
        with Proxmox() as pm:
            result = pm.lxc_put(f'{vmid}/config', **config_params)
        # Bugfix: the API result was previously assigned but never used;
        # include it in the log for diagnosability.
        logging.info(f"Updated container {vmid} config: {config_params} -> {result}")
        if task:
            task.add_entry(f"Updated container {vmid} config: {config_params}")
        return True
    except Exception as e:
        logging.error(f"Failed to update container {vmid} config: {e}")
        if task:
            task.add_entry(f"Failed to update container {vmid} config: {e}")
        return False


def measure_time(func):
    """Decorator that measures a function's wall-clock runtime and logs it at DEBUG."""

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        execution_time = time.time() - start_time
        logging.debug(f"Funktion '{func.__name__}' wurde in {execution_time:.4f} Sekunden ausgeführt")
        return result

    return wrapper


def paginator(queryset, page: int = 1, per_page: int = 18, request=None):
    """Paginate *queryset*, honoring ?page= / ?per_page= request parameters.

    Falls back to page 1 for a non-integer page and to the last page when the
    requested page is out of range.
    """
    request = request or get_request()
    default_per_page = per_page
    per_page = request.GET.get(
        'per_page',
        request.POST.get('per_page', getattr(settings, 'PAGINATOR_PER_PAGE', per_page)))
    try:
        # Bugfix: request parameters arrive as strings; Django's Paginator
        # needs an integer per_page for its slicing arithmetic.
        per_page = int(per_page)
    except (TypeError, ValueError):
        per_page = default_per_page
    pages = Paginator(queryset, per_page)
    page = request.GET.get('page', request.POST.get('page', page))
    try:
        return pages.page(page)
    except PageNotAnInteger:
        return pages.page(1)
    except EmptyPage:
        return pages.page(pages.num_pages)


class PaginatedModel:
    """Thin wrapper binding a model class to the shared paginator helper."""

    def __init__(self, model):
        self.model = model

    def paginate(self, domainfilter, page: int = 1, per_page: int = 18, request=None):
        """Filter the model's queryset with *domainfilter* and paginate the result."""
        queryset = self.model.objects.filter(domainfilter)
        return paginator(queryset, page, per_page, request)


def main():
    """Run administrative tasks (Django manage.py entry point)."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_proxmox_mikrotik.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
@readonly
def resync_all(*args, **kwargs):
    """Admin action: pull LXCs, leases and DNS entries from Proxmox/Mikrotik
    and rebuild the DevContainer links between them.

    Cleanup: dropped the unused ``fulls`` list and the unused ``Proxmox()``
    instance (and its import) the original created but never touched.
    """
    from proxmox.models import Lxc
    from mikrotik.models import DNSStatic, IPDHCPLease
    from manager.models import DevContainer
    from mikrotik.admin import (
        sync_ipaddress_from_mikrotik,
        sync_ipdhcplease_from_mikrotik,
        sync_dns_static_from_mikrotik,
    )
    from proxmox.admin import sync_all_lxc_templates_from_proxmox, sync_all_lxc_from_proxmox

    # Refresh all local mirrors first.
    sync_all_lxc_templates_from_proxmox()
    sync_all_lxc_from_proxmox()
    sync_ipaddress_from_mikrotik()
    sync_ipdhcplease_from_mikrotik()
    sync_dns_static_from_mikrotik()

    # Index everything by upper-cased MAC (LXC/lease) and by IP (DNS).
    lxcs = {lxc.hwaddr.upper(): lxc for lxc in Lxc.objects.all()}
    leases = {lease.mac_address.upper(): lease for lease in IPDHCPLease.objects.all()}
    dnse = {dns.address.upper(): dns for dns in DNSStatic.objects.all()}
    container = {c.hwaddr.upper(): c for c in DevContainer.objects.all()}

    for hwaddr, lxc in lxcs.items():
        if hwaddr not in leases:
            logging.warning(f'LXC {lxc} has no DHCP lease')
            continue
        lease = leases[hwaddr]
        # NOTE(review): dnse keys are upper-cased; lease.address is looked up
        # as-is (harmless for numeric IPv4, relevant for IPv6 hex) — confirm.
        if lease.address not in dnse:
            logging.warning(f'DHCP lease {lease} for {lxc} has no DNS entry')
            continue
        dns = dnse[lease.address]

        if hwaddr in container:
            # Relink an existing DevContainer.
            container[hwaddr].dns = dns
            container[hwaddr].lease = lease
            container[hwaddr].save()
        elif lxc:
            DevContainer.objects.create(dns=dns, lease=lease, lxc=lxc)

    # Now remove DevContainers that lost their LXC.
    DevContainer.objects.filter(lxc__isnull=True).delete()


def shell_baseimport():
    """One-shot initial import into an EMPTY database (run from a Django shell).

    Refuses to run if any of the target tables already contain rows.
    """
    from proxmox.models import Lxc
    from lib.proxmox import Proxmox
    from mikrotik.models import DNSStatic, IPDHCPLease, IPAddress
    from manager.models import DevContainer
    from lib.mikrotik import MikrotikModelMixin

    fulls = [m for m in (DevContainer, Lxc, IPAddress, IPDHCPLease, DNSStatic)
             if m.objects.count() != 0]
    if fulls:
        msg = []
        queries = []
        for f in fulls:
            logging.error(f'{f.__name__} is not empty.')
            queries.append(f'DELETE FROM {f._meta.db_table};')
            msg.append(
                f"\n\nPlease delete all objects and try again.\n"
                f"\nThis can only be done with a raw query\n"
            )
        msg.append('\n'.join(queries))
        logging.error('\n'.join(msg) + '\n\n')
        raise ValidationError("Some tables not empty - see above output")

    pm = Proxmox()
    lxcs = {}
    for lxc in pm.get_all_lxc(as_dict=False):
        lxc.save()
        lxcs[lxc.hwaddr.upper()] = lxc

    for a in IPAddress.get_all_as_object():
        a.save()

    leases = {}
    for lease in IPDHCPLease.get_all_as_object():
        # Bugfix: the original's "not a MikrotikModelMixin" warning was
        # attached to the inner MAC-lookup branch, so it fired for the wrong
        # condition; and the MAC comparison ignored that lxcs keys are
        # upper-cased.
        if not isinstance(lease, MikrotikModelMixin):
            logging.warning(f'IPDHCPLease {lease} is not a MikrotikModelMixin')
            continue
        lease.save()
        if lease.mac_address.upper() in lxcs:
            leases[lease.address.upper()] = lease

    dnse = {}
    for dns in DNSStatic.get_all_as_object():
        if not isinstance(dns, MikrotikModelMixin):
            logging.warning(f'DNSStatic {dns} is not a MikrotikModelMixin')
            continue
        dns.save()
        # NOTE(review): leases keys are upper-cased, dns.address is looked up
        # as-is — harmless for numeric IPv4; confirm for IPv6.
        if dns.address in leases:
            dnse[dns.address.upper()] = dns

    for _a, dns in dnse.items():
        lease = leases[_a]
        lxc = lxcs[lease.mac_address.upper()]
        DevContainer.objects.get_or_create(
            lxc=lxc,
            dns=dns,
            lease=lease,
        )


class DevContainerAdminForm(forms.ModelForm):
    """Admin form exposing the linked LXC's sizing (disk, cores, memory)."""

    disksize = forms.IntegerField(
        required=False,
        min_value=1,
        max_value=100,
        help_text="Disk Size of rootfs - can not be shrinked"
    )

    cores = forms.IntegerField(
        required=False,
        min_value=1,
        help_text="Number of Cores"
    )

    memory = forms.CharField(
        required=False,
        help_text="Memory in MB"
    )

    class Meta:
        model = DevContainer
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        instance = kwargs.get('instance')

        # Seed the sizing fields from the linked LXC; disksize may only grow.
        if instance and hasattr(instance, 'lxc') and instance.lxc:
            self.fields['disksize'].initial = instance.lxc.disksize
            self.fields['disksize'].min_value = instance.lxc.disksize
            self.fields['cores'].initial = instance.lxc.cores
            self.fields['memory'].initial = instance.lxc.memory

    def save(self, commit=True):
        """Propagate the sizing fields to the linked LXC before saving."""
        instance = super().save(commit=False)

        if hasattr(instance, 'lxc') and instance.lxc:
            lxc = instance.lxc
            for field in ('disksize', 'cores', 'memory'):
                if self.cleaned_data.get(field):
                    setattr(lxc, field, self.cleaned_data[field])
            lxc.save()

        if commit:
            instance.save()

        return instance


def clone_selected_containers(modeladmin, request, queryset):
    """Admin action: run the clone job for every selected CloneContainer."""
    for container in queryset:
        container.execute()


class CloneContainerAdminForm(forms.ModelForm):
    """Validates clone names against existing DNS entries, LXCs and clones."""

    class Meta:
        model = CloneContainer
        fields = '__all__'

    def clean_name(self):
        name = self.cleaned_data.get('name')
        if not name:
            return name

        # Reject names colliding with an existing DNS entry (plain or regex form).
        dns_domain = Q(name=name) | Q(name__endswith=name) | Q(name__endswith=name.replace('.', r'\.'))
        if DNSStatic.objects.filter(dns_domain).exists():
            raise ValidationError(f"Ein DNS-Eintrag mit dem Namen '{name}' existiert bereits.")

        if Lxc.objects.filter(hostname=name).exists():
            raise ValidationError(f"A LXC with name or regex '{name}' exists.")

        # Reject duplicate CloneContainer names, excluding self on update.
        existing_clone_query = CloneContainer.objects.filter(name=name)
        if self.instance.pk:
            existing_clone_query = existing_clone_query.exclude(pk=self.instance.pk)
        if existing_clone_query.exists():
            raise ValidationError(f"A CloneContainer with name '{name}' exists.")

        return name

    def clean(self):
        cleaned_data = super().clean()
        # A clone needs a source: either a template or an existing VM.
        if not cleaned_data.get('template') and not cleaned_data.get('vm'):
            raise ValidationError("Please select a template or a VM.")
        return cleaned_data


@admin.register(CloneContainer)
class CloneContainerAdmin(admin.ModelAdmin):
    autocomplete_fields = ['vm', 'network']
    list_display = ('hostname', 'cloned_from', 'network', 'memory', 'cores', 'disksize', 'status',)
    search_fields = ('hostname', 'vm__name', 'vm__vmid',)
    actions = [clone_selected_containers]
    form = CloneContainerAdminForm

    def cloned_from(self, obj):
        """List column: the source VM this clone was created from."""
        return obj.vm

    cloned_from.short_description = 'Cloned from'
    cloned_from.admin_order_field = 'vm__name'


@admin.register(DevContainer)
class DevContainerAdmin(admin.ModelAdmin):
    form = DevContainerAdminForm
    autocomplete_fields = ['dns', 'lxc', 'lease']
    list_filter = (
        'lxc__status',
        'lease__status',
        ('lxc', admin.EmptyFieldListFilter),
        ('lease', admin.EmptyFieldListFilter),
        ('dns', admin.EmptyFieldListFilter),
    )
    search_fields = ('dns__name', 'dns__address', 'lease__address', 'dns__regexp')
    actions = [resync_all]

    def hostname_or_regexp(self, obj):
        """List column: the DNS name (plain hostname or regexp form)."""
        return obj.hostname

    hostname_or_regexp.short_description = 'Hostname or Regexp'
    hostname_or_regexp.admin_order_field = 'dns__name'


class ManagerConfig(AppConfig):
    """Django app config for the manager app."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'manager'
related_name='clone_template', to='proxmox.lxctemplate')), + ('vm', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='clone_lxc', to='proxmox.lxc')), + ], + options={ + 'abstract': False, + }, + ), + migrations.CreateModel( + name='DevContainer', + fields=[ + ('internal_id', models.BigAutoField(primary_key=True, serialize=False)), + ('dns', models.OneToOneField(on_delete=django.db.models.deletion.RESTRICT, related_name='devcontainer_dns', to='mikrotik.dnsstatic')), + ('lease', models.OneToOneField(on_delete=django.db.models.deletion.RESTRICT, related_name='devcontainer_lease', to='mikrotik.ipdhcplease')), + ('lxc', models.OneToOneField(on_delete=django.db.models.deletion.RESTRICT, related_name='devcontainer_lxc', to='proxmox.lxc')), + ], + options={ + 'abstract': False, + }, + ), + ] diff --git a/manager/migrations/0002_clonecontainer_hostname_and_more.py b/manager/migrations/0002_clonecontainer_hostname_and_more.py new file mode 100644 index 0000000..601bb23 --- /dev/null +++ b/manager/migrations/0002_clonecontainer_hostname_and_more.py @@ -0,0 +1,40 @@ +# Generated by Django 5.2.4 on 2025-07-09 06:31 + +import manager.models +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('manager', '0001_initial'), + ] + + operations = [ + migrations.AddField( + model_name='clonecontainer', + name='hostname', + field=models.CharField(default=0, help_text='Will be used as hostname', max_length=150), + preserve_default=False, + ), + migrations.AlterField( + model_name='clonecontainer', + name='as_regexp', + field=models.BooleanField(choices=[(False, 'No'), (True, 'Yes')], default=True, help_text='Add a ".*.replace(".",r"\\.")$" instead of a hostname'), + ), + migrations.AlterField( + model_name='clonecontainer', + name='cpus', + field=models.IntegerField(default=1, validators=[manager.models.MinValueValidatorExtended(1), manager.models.MaxValueValidatorExtended(8)]), + ), + 
migrations.AlterField( + model_name='clonecontainer', + name='disksize', + field=models.IntegerField(default=10, help_text='in GB', validators=[manager.models.MinValueValidatorExtended(10), manager.models.MaxValueValidatorExtended(100)]), + ), + migrations.AlterField( + model_name='clonecontainer', + name='memory', + field=models.IntegerField(default=1024, help_text='in MB', validators=[manager.models.MinValueValidatorExtended(256), manager.models.MaxValueValidatorExtended(8192)]), + ), + ] diff --git a/manager/migrations/0003_rename_cpus_clonecontainer_cores_and_more.py b/manager/migrations/0003_rename_cpus_clonecontainer_cores_and_more.py new file mode 100644 index 0000000..1881bbf --- /dev/null +++ b/manager/migrations/0003_rename_cpus_clonecontainer_cores_and_more.py @@ -0,0 +1,27 @@ +# Generated by Django 5.2.4 on 2025-07-09 08:08 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('manager', '0002_clonecontainer_hostname_and_more'), + ] + + operations = [ + migrations.RenameField( + model_name='clonecontainer', + old_name='cpus', + new_name='cores', + ), + migrations.RemoveField( + model_name='clonecontainer', + name='name', + ), + migrations.AddField( + model_name='clonecontainer', + name='node', + field=models.CharField(default='proxmox', editable=False, max_length=150), + ), + ] diff --git a/manager/migrations/0004_alter_clonecontainer_memory_and_more.py b/manager/migrations/0004_alter_clonecontainer_memory_and_more.py new file mode 100644 index 0000000..1e7b801 --- /dev/null +++ b/manager/migrations/0004_alter_clonecontainer_memory_and_more.py @@ -0,0 +1,31 @@ +# Generated by Django 5.2.4 on 2025-07-10 09:30 + +import django.db.models.deletion +import manager.models +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('manager', '0003_rename_cpus_clonecontainer_cores_and_more'), + ('proxmox', '0005_alter_lxc_lxc'), + ] + + operations = [ + 
migrations.AlterField( + model_name='clonecontainer', + name='memory', + field=models.IntegerField(default=1024, help_text='in MB', validators=[manager.models.MinValueValidatorExtended(256), manager.models.MaxValueValidatorExtended(8092)]), + ), + migrations.AlterField( + model_name='clonecontainer', + name='template', + field=models.ForeignKey(blank=True, help_text='If set, will use this template instead of a VM', null=True, on_delete=django.db.models.deletion.RESTRICT, related_name='clone_template', to='proxmox.lxctemplate'), + ), + migrations.AlterField( + model_name='clonecontainer', + name='vm', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='clone_lxc', to='proxmox.lxc'), + ), + ] diff --git a/manager/migrations/0005_alter_clonecontainer_vm.py b/manager/migrations/0005_alter_clonecontainer_vm.py new file mode 100644 index 0000000..599ac6d --- /dev/null +++ b/manager/migrations/0005_alter_clonecontainer_vm.py @@ -0,0 +1,20 @@ +# Generated by Django 5.2.4 on 2025-07-14 12:46 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('manager', '0004_alter_clonecontainer_memory_and_more'), + ('proxmox', '0005_alter_lxc_lxc'), + ] + + operations = [ + migrations.AlterField( + model_name='clonecontainer', + name='vm', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='clone_lxc', to='proxmox.lxc', verbose_name='LXC Container'), + ), + ] diff --git a/manager/migrations/0006_alter_devcontainer_dns_alter_devcontainer_lease_and_more.py b/manager/migrations/0006_alter_devcontainer_dns_alter_devcontainer_lease_and_more.py new file mode 100644 index 0000000..355faa4 --- /dev/null +++ b/manager/migrations/0006_alter_devcontainer_dns_alter_devcontainer_lease_and_more.py @@ -0,0 +1,31 @@ +# Generated by Django 5.2.4 on 2025-07-14 13:22 + +import django.db.models.deletion 
+from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('manager', '0005_alter_clonecontainer_vm'), + ('mikrotik', '0002_alter_dnsstatic_comment_alter_ipaddress_comment_and_more'), + ('proxmox', '0005_alter_lxc_lxc'), + ] + + operations = [ + migrations.AlterField( + model_name='devcontainer', + name='dns', + field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='devcontainer_dns', to='mikrotik.dnsstatic'), + ), + migrations.AlterField( + model_name='devcontainer', + name='lease', + field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='devcontainer_lease', to='mikrotik.ipdhcplease'), + ), + migrations.AlterField( + model_name='devcontainer', + name='lxc', + field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='devcontainer_lxc', to='proxmox.lxc'), + ), + ] diff --git a/manager/migrations/0007_taskstatuslog_devcontainer_created_at_and_more.py b/manager/migrations/0007_taskstatuslog_devcontainer_created_at_and_more.py new file mode 100644 index 0000000..8627a41 --- /dev/null +++ b/manager/migrations/0007_taskstatuslog_devcontainer_created_at_and_more.py @@ -0,0 +1,38 @@ +# Generated by Django 5.2.4 on 2025-07-21 11:03 + +import django.utils.timezone +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('manager', '0006_alter_devcontainer_dns_alter_devcontainer_lease_and_more'), + ] + + operations = [ + migrations.CreateModel( + name='TaskStatusLog', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('task_id', models.CharField(help_text='The task ID', max_length=150)), + ('task_result', models.JSONField(help_text='The task result')), + ], + options={ + 'abstract': False, + }, + ), + 
migrations.AddField( + model_name='devcontainer', + name='created_at', + field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), + preserve_default=False, + ), + migrations.AddField( + model_name='devcontainer', + name='updated_at', + field=models.DateTimeField(auto_now=True), + ), + ] diff --git a/manager/migrations/0008_alter_devcontainer_dns_alter_devcontainer_lease_and_more.py b/manager/migrations/0008_alter_devcontainer_dns_alter_devcontainer_lease_and_more.py new file mode 100644 index 0000000..72ceb78 --- /dev/null +++ b/manager/migrations/0008_alter_devcontainer_dns_alter_devcontainer_lease_and_more.py @@ -0,0 +1,31 @@ +# Generated by Django 5.2.4 on 2025-07-23 11:16 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('manager', '0007_taskstatuslog_devcontainer_created_at_and_more'), + ('mikrotik', '0002_alter_dnsstatic_comment_alter_ipaddress_comment_and_more'), + ('proxmox', '0007_lxctemplate_net0_alter_lxc_net0'), + ] + + operations = [ + migrations.AlterField( + model_name='devcontainer', + name='dns', + field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='devcontainer_dns', to='mikrotik.dnsstatic'), + ), + migrations.AlterField( + model_name='devcontainer', + name='lease', + field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='devcontainer_lease', to='mikrotik.ipdhcplease'), + ), + migrations.AlterField( + model_name='devcontainer', + name='lxc', + field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='devcontainer_lxc', to='proxmox.lxc'), + ), + ] diff --git a/manager/migrations/0009_add_task_id.py b/manager/migrations/0009_add_task_id.py new file mode 100644 index 0000000..2361c7a --- /dev/null +++ b/manager/migrations/0009_add_task_id.py @@ -0,0 +1,18 @@ +# Generated manually for task_id field + +from 
django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('manager', '0008_alter_devcontainer_dns_alter_devcontainer_lease_and_more'), + ] + + operations = [ + migrations.AddField( + model_name='clonecontainer', + name='task_id', + field=models.CharField(blank=True, help_text='UUID for tracking live status', max_length=36, null=True), + ), + ] \ No newline at end of file diff --git a/manager/migrations/0010_alter_clonecontainer_task_id.py b/manager/migrations/0010_alter_clonecontainer_task_id.py new file mode 100644 index 0000000..a2a44f8 --- /dev/null +++ b/manager/migrations/0010_alter_clonecontainer_task_id.py @@ -0,0 +1,19 @@ +# Generated manually for task_id default + +import uuid +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('manager', '0009_add_task_id'), + ] + + operations = [ + migrations.AlterField( + model_name='clonecontainer', + name='task_id', + field=models.CharField(default=uuid.uuid4, help_text='UUID for tracking live status', max_length=36), + ), + ] \ No newline at end of file diff --git a/manager/migrations/__init__.py b/manager/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/manager/models.py b/manager/models.py new file mode 100644 index 0000000..f656128 --- /dev/null +++ b/manager/models.py @@ -0,0 +1,454 @@ +import json +import logging +import time +import uuid + +from django.core.validators import MaxValueValidator, MinValueValidator +from django.db import models +from django.db.models import Q +from django.dispatch import receiver + +from django_proxmox_mikrotik.settings import ProxmoxConfig +from lib.db import ( + BOOLEAN_CHOICES, + BaseModel, + DateAwareMixin, + JOB_STATUS_CHOICES, + SearchableMixin, + TaskAwareModelMixin +) +from lib.proxmox import Proxmox +from mikrotik.models import DNSStatic, IPAddress, IPDHCPLease + + +class MinValueValidatorExtended(MinValueValidator): + message = 'Ensure 
this value is greater than or equal to %(limit_value)s. You gave %(value)s.' + + +class MaxValueValidatorExtended(MaxValueValidator): + message = 'Ensure this value is less than or equal to %(limit_value)s. You gave %(value)s.' + + +def minmaxvalidators(min_value, max_value): + return ( + MinValueValidatorExtended(min_value), + MaxValueValidatorExtended(max_value), + ) + + +class CloneAbstract(models.Model): + class Meta: + abstract = True + + hostname = models.CharField(max_length=150, help_text='Will be used as hostname') + network = models.ForeignKey(IPAddress, on_delete=models.RESTRICT, related_name='clone_network') + cores = models.IntegerField(default=1, validators=minmaxvalidators(1, ProxmoxConfig.MAX_CORES)) + memory = models.IntegerField(default=1024, validators=minmaxvalidators(256, ProxmoxConfig.MAX_MEM), + help_text='in MB') + disksize = models.IntegerField(default=10, help_text='in GB', + validators=minmaxvalidators(10, ProxmoxConfig.MAX_DISK)) + as_regexp = models.BooleanField(default=True, choices=BOOLEAN_CHOICES, + help_text=r'Add a ".*.replace(".",r"\.")$" instead of a hostname') + is_active = models.BooleanField(default=False, choices=BOOLEAN_CHOICES) + status = models.CharField(max_length=150, null=True, blank=True, default='pending', choices=JOB_STATUS_CHOICES) + task_id = models.CharField(max_length=36, default=uuid.uuid4, help_text='UUID for tracking live status') + # Lxcs have no Node - FIXME + node = models.CharField(max_length=150, default=ProxmoxConfig.NODE, editable=False) + + def __str__(self): + return f"{self.hostname} from {self.vm} " + + @property + def regexp(self): + return f".*{self.hostname.replace('.', r'\.')}" if self.as_regexp else None + + @property + def next_vmid(self): + # NotImplementedError (not the NotImplemented constant) is the raisable exception + raise NotImplementedError('Not implemented') + + @property + def proxmox_data(self): + raise NotImplementedError('Not implemented') + + def execute(self): + raise NotImplementedError('Not implemented') + + +class CloneContainer(CloneAbstract): + _old_active = None + vm = 
models.ForeignKey('proxmox.Lxc', on_delete=models.CASCADE, + related_name='clone_lxc', null=True, blank=True, + verbose_name='LXC Container') + template = models.ForeignKey('proxmox.LxcTemplate', on_delete=models.RESTRICT, related_name='clone_template', + null=True, blank=True, + help_text='If set, will use this template instead of a VM') + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._old_active = self.is_active + + def execute(self, task_uuid_override=None, *args, **kwargs): + from proxmox.models import Lxc + from tasklogger.models import TaskFactory + request = kwargs.pop('request', None) + + # Use override task_uuid if provided (from form), otherwise create new task + task = TaskFactory(task_uuid=task_uuid_override, request=request) + + task.add_entry(f"Creating container '{self.hostname}'") + + try: + data = self.proxmox_data + pm = Proxmox() + route = data.pop('route') + vmdata = data.pop('vm') + dnsdata = data.pop('dns') + leasedata = data.pop('lease') + + task.add_entry(f"Prepared container data: VM-ID {vmdata['newid']}") + + vmparams = { + 'hostname': vmdata['hostname'], + 'description': vmdata['description'], + 'cores': vmdata['cores'], + 'memory': vmdata['memory'], + } + if vmdata['template']: + vmparams |= { + 'vmid': vmdata['newid'], + 'ostemplate': self.template.volid, + 'net0': vmdata['net0'], + 'features': 'nesting=1', + 'rootfs': f'local-lvm:{vmdata["disksize"]}', + } + task.add_entry(f"Creating new container from template {self.template.volid}") + else: + vmparams |= { + 'newid': vmdata['newid'], + } + task.add_entry(f"Cloning container {self.vm.vmid}") + + success = False + lxc = None + lease = None + dns = None + + self.status = 'running' + super().save() + + # Step 1: Create/Clone Container (wrapper nur um lxc_post) + task.add_entry("Starting Proxmox container creation...") + + def _lxc_post(): + return pm.lxc_post(route, **vmparams) + + # Wrap the proxmox function - this handles UPID monitoring 
synchronously + task.wrap_proxmox_function(_lxc_post) + task.add_entry("Container creation completed") + success = True + + if success: + task.add_entry("Waiting for container to be ready...") + time.sleep(3) + + # Step 2: Get container info and update settings + task.add_entry("Retrieving container information...") + new_vm_data = pm.get_all_lxc(as_dict=True, vmid=vmdata['newid']) + if new_vm_data: + new_vm_data = new_vm_data[0] + lxc = Lxc().from_proxmox(**new_vm_data) + lxc.save() + task.add_entry(f"Container retrieved: {lxc.name} ({lxc.vmid})") + + # Update container resources if needed + changed = False + changes = [] + if self.disksize > lxc.disksize: + lxc.disksize = self.disksize + changes.append(f"disk: {lxc.disksize}GB") + changed = True + if self.memory != lxc.memory: + lxc.memory = self.memory + changes.append(f"memory: {lxc.memory}MB") + changed = True + if self.cores != lxc.cores: + lxc.cores = self.cores + changes.append(f"cores: {lxc.cores}") + changed = True + + if changed: + lxc.save() + task.add_entry(f"Updated container resources: {', '.join(changes)}") + + # Step 3: Create DHCP Lease (Mikrotik query) + task.add_entry("Creating Mikrotik DHCP lease...") + leaseargs = { + 'mac_address': lxc.hwaddr, + 'address': self.network.get_next_ip, + 'comment': f'Container - {self.hostname}', + 'dynamic': False, + } + lease = IPDHCPLease.objects.create(**leaseargs) + task.add_entry(f"DHCP lease created: {lease.address} → {lease.mac_address}") + + # Step 4: Create DNS Entry (Mikrotik query) + task.add_entry("Creating Mikrotik DNS entry...") + dnsargs = {} + if self.as_regexp: + dnsargs['regexp'] = self.regexp + task.add_entry(f"Using DNS regexp: {self.regexp}") + else: + dnsargs['name'] = self.hostname + task.add_entry(f"Using DNS hostname: {self.hostname}") + + dns = DNSStatic.objects.create(address=lease.address, **dnsargs) + task.add_entry(f"DNS entry created: {dns.name or dns.regexp} → {dns.address}") + + # Step 5: Create DevContainer link + 
task.add_entry("Creating container management entry...") + devcontainer = DevContainer.objects.create( + lxc=lxc, + lease=lease, + dns=dns, + ) + task.add_entry("Container management entry created") + + # Step 6: Start Container + task.add_entry("Starting container...") + lxc.start() + task.add_entry("Container started successfully!") + + self.status = 'success' + task.status = 'completed' + task.save() + + else: + error_msg = f"Error retrieving container data for VM {vmdata['newid']}" + task.add_entry(error_msg) + self.status = 'error' + else: + self.status = 'error' + task.add_entry("Container creation failed") + + except Exception as e: + task.add_entry(f"Exception during container creation: {str(e)}") + self.status = 'error' + logging.exception(e) + + # Cleanup on error + task.add_entry("Cleaning up failed resources...") + cleanup_errors = [] + + try: + if isinstance(lease, IPDHCPLease): + lease.delete(task=task) + task.add_entry("Cleaned up DHCP lease") + except Exception as a: + cleanup_errors.append(f"DHCP lease: {a}") + + try: + if isinstance(dns, DNSStatic): + dns.delete(task=task) + task.add_entry("Cleaned up DNS entry") + except Exception as s: + cleanup_errors.append(f"DNS entry: {s}") + + try: + if isinstance(lxc, Lxc): + lxc.delete(task=task) + task.add_entry("Cleaned up container") + except Exception as l: + cleanup_errors.append(f"Container: {l}") + + if cleanup_errors: + task.add_entry(f"Cleanup errors: {', '.join(cleanup_errors)}") + else: + task.add_entry("Cleanup completed") + + task.status = 'error' + task.save() + raise e + + finally: + # Keep task logs for debugging, cleanup CloneContainer + task.unset_as_current() + self.delete() + + @property + def proxmox_data(self): + data = { + # https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/lxc/{vmid}/clone + # if template, the just a post to lxc - empty route + 'route': f'{self.vm.vmid}/clone' if self.vm else '', + 'vm': { + 'hostname': self.hostname, + 'cores': self.cores, + 'cpus': 
self.cores, + 'memory': self.memory, + 'net0': self.vm.net0 if self.vm else self.template.net0, + 'disksize': self.disksize, + 'description': f'Container - {self.hostname} ', + 'newid': Proxmox().next_vmid, + 'template': self.template.volid if self.template else None, + }, + 'lease': { + 'comment': f'Container - {self.hostname}' + }, + 'dns': { + # this is changed if regexp + 'name': self.hostname, + 'comment': f'Container - {self.hostname}' + }, + } + if regexp := self.regexp: + data['dns'].pop('name', None) + data['dns']['regexp'] = regexp + + return data + + def clean(self): + super().clean() + assert self.vm or self.template, "Either vm or template must be set" + +""" +class CloneVM(CloneAbstract): + vm = models.ForeignKey(VM, on_delete=models.CASCADE, related_name='clone_lxc') +""" + + +class DevContainer(BaseModel, SearchableMixin, TaskAwareModelMixin): + lxc = models.OneToOneField('proxmox.Lxc', on_delete=models.SET_NULL, related_name='devcontainer_lxc', null=True) + lease = models.OneToOneField(IPDHCPLease, on_delete=models.SET_NULL, related_name='devcontainer_lease', null=True) + + dns = models.OneToOneField(DNSStatic, on_delete=models.SET_NULL, related_name='devcontainer_dns', null=True) + + statuscache_data = models.JSONField(default=dict, editable=False) + + @property + def statuscache(self): + if self.statuscache_data and self.statuscache_data['last_sync'] > time.time() - 300: + logging.debug(f"Return cached status for {self.name} - {self.statuscache_data['last_sync']}") + return self.statuscache_data + else: + return self.sync_statuscache() + + + + def sync_statuscache(self): + logging.debug(f"No cached status for {self.name} - sync from proxmox") + try: + self.lxc.sync_from_proxmox() + except Exception as e: + logging.error(e) + try: + with Proxmox() as pm: + status_data = pm.lxc_get(f"{self.lxc.vmid}/status/current") + except Exception as e: + logging.error(e) + return None + try: + self.lease.sync_from_router() + except Exception as e: + 
logging.error(e) + try: + status_data['lease_status'] = self.lease.status + status_data['last_sync'] = time.time() + self.statuscache_data = status_data + super().save(update_fields=['statuscache_data']) + return status_data + except Exception as e: + logging.error(e) + return None + + def set_statuscache_value(self, key, value): + # Persist into the statuscache_data JSONField (same pattern as + # sync_statuscache above); the model has no status_cachefile attribute. + cachedata = self.statuscache + cachedata[key] = value + self.statuscache_data = cachedata + super().save(update_fields=['statuscache_data']) + return self + + + + + @classmethod + def term_filter(cls, search_string): + q = ( + # Q(lxc__comment__icontains=search_string) | + Q(lxc__hostname__icontains=search_string) | + Q(lease__comment__icontains=search_string) | + Q(dns__name__icontains=search_string) | + Q(dns__regexp__icontains=search_string) | + Q(lease__address__icontains=search_string) + ) + if str(search_string).isdigit(): + q |= Q(lxc__vmid=search_string) + return q + + @property + def name(self): + try: + return self.lxc.name + except Exception as e: + logging.error(e) + return f"{self.internal_id} - {self.lease} - {self.dns} - {self.lxc}" + + + @property + def hostname(self): + try: + return self.dns.name or self.dns.regexp + except Exception as e: + logging.error(e) + return f"{self.pk} hostname" + + @property + def hwaddr(self): + try: + return self.lxc.hwaddr + except Exception as e: + logging.error(e) + return f"{self.pk} hwaddr" + + @property + def address(self): + try: + return self.lease.address + except Exception as e: + logging.error(e) + return f"{self.pk} address" + + @property + def status(self): + try: + return self.lease.status + except Exception as e: + logging.error(e) + return f"{self.pk} status" + + def save(self, *args, **kwargs): + if self.dns.address != self.lease.address: + raise ValueError(f"{self.dns.address} != {self.lease.address}") + if self.lease.mac_address.upper() != self.lxc.hwaddr.upper(): + logging.error(f"{self.lease.mac_address} != {self.lxc.hwaddr} - try to change it") + self.lease.mac_address = 
self.lxc.hwaddr + self.lease.save() + + super().save(*args, **kwargs) + + def __str__(self): + return f"{self.name} ({self.address})" + + +@receiver(models.signals.pre_delete, sender=DevContainer) +def before_delete_devcontainer(sender, instance, **kwargs): + from tasklogger.models import TaskFactory + task = TaskFactory() + + if instance.dns: + instance.dns.delete(task=task) + if instance.lease: + instance.lease.delete(task=task) + if instance.lxc: + instance.lxc.delete(task=task) + + diff --git a/manager/tests.py b/manager/tests.py new file mode 100644 index 0000000..7ce503c --- /dev/null +++ b/manager/tests.py @@ -0,0 +1,3 @@ +from django.test import TestCase + +# Create your tests here. diff --git a/manager/urls.py b/manager/urls.py new file mode 100644 index 0000000..91311e5 --- /dev/null +++ b/manager/urls.py @@ -0,0 +1,9 @@ +from django.urls import path +from manager import views + +app_name = 'manager' + +urlpatterns = [ + path('resync-all/', views.sync_all, name='sync_all'), + path('test/mikrotik/', views.test_mikrotik, name='test_mikrotik'), +] \ No newline at end of file diff --git a/manager/views.py b/manager/views.py new file mode 100644 index 0000000..6e1f44b --- /dev/null +++ b/manager/views.py @@ -0,0 +1,46 @@ +import json +import logging + +from django.contrib import messages +from django.db.transaction import atomic +from django.forms import model_to_dict +from django.shortcuts import HttpResponse, redirect +from django_auth_ldap.backend import LDAPBackend +from django.contrib.auth.decorators import login_required +from django.http import JsonResponse +from django.views.decorators.http import require_http_methods + +from django_proxmox_mikrotik.configs import MikrotikConfig +from lib.decorators import force_write +from manager.models import CloneContainer + + +@atomic +@force_write +def test_mikrotik(request): + from mikrotik.models import DNSStatic + + dns = DNSStatic.objects.create(name='test.test1', address='192.168.1.254') + 
logging.debug(model_to_dict(dns)) + response = dns.sync_to_router() + + return HttpResponse(json.dumps([model_to_dict(dns), response], indent=4, default=lambda x: str(x)), + content_type="application/json") + + + + +def sync_all(request): + """TODO - just get the user and check on superuser + settings via groups does not work at the moment""" + from manager.admin import resync_all + + backend = LDAPBackend() + user = backend.authenticate(request, request.user.username, request.POST.get('password')) + if user and user.is_superuser: + resync_all() + messages.success(request, 'Sync all initiated') + else: + messages.error(request, 'Not authorized') + + return redirect('frontend:dashboard') \ No newline at end of file diff --git a/mikrotik/__init__.py b/mikrotik/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mikrotik/admin.py b/mikrotik/admin.py new file mode 100644 index 0000000..0544406 --- /dev/null +++ b/mikrotik/admin.py @@ -0,0 +1,56 @@ +from django.contrib import admin + +from lib.decorators import readonly +from lib.mikrotik import sync_from_mikrotik as _sync_from_mikrotik +from mikrotik.models import DNSStatic, IPAddress, IPDHCPLease + + +@readonly +def sync_dns_static_from_mikrotik(*args, **kwargs): + _sync_from_mikrotik(DNSStatic) + + +@readonly +def sync_ipaddress_from_mikrotik(*args, **kwargs): + _sync_from_mikrotik(IPAddress) + + +@readonly +def sync_ipdhcplease_from_mikrotik(*args, **kwargs): + _sync_from_mikrotik(IPDHCPLease) + +def sync_to_mikrotik(_, request, queryset): + for i in queryset: + i.sync_to_router() + +class MikrotikModelMixinAdmin(admin.ModelAdmin): + def change_view(self, request, object_id, form_url='', extra_context=None): + self.get_object(request, object_id).sync_from_router() + return super().change_view( + request, object_id, form_url, extra_context=extra_context or {'show_save_and_continue': False} + ) + + +@admin.register(DNSStatic) +class DNSStaticAdmin(admin.ModelAdmin): + actions = 
[sync_dns_static_from_mikrotik, sync_to_mikrotik] + list_display = ('name', 'regexp', 'address','disabled', 'comment', 'id') + list_filter = ('disabled',('id', admin.EmptyFieldListFilter), ('name', admin.EmptyFieldListFilter), ('address', admin.EmptyFieldListFilter), ('regexp', admin.EmptyFieldListFilter),) + search_fields = ('name', 'address','regexp') + + + +@admin.register(IPAddress) +class IPAddressAdmin(admin.ModelAdmin): + actions = [sync_ipaddress_from_mikrotik, sync_to_mikrotik] + list_display = ('address','network', 'disabled', 'comment', 'id') + list_filter = ('disabled',('id', admin.EmptyFieldListFilter), ('address', admin.EmptyFieldListFilter), ('network', admin.EmptyFieldListFilter),) + search_fields = ('address', 'network', 'comment') + + +@admin.register(IPDHCPLease) +class IPDHCPLeaseAdmin(admin.ModelAdmin): + actions = [sync_ipdhcplease_from_mikrotik, sync_to_mikrotik] + list_display = ('address', 'mac_address', 'hostname', 'disabled', 'dynamic', 'comment', 'status', 'id') + list_filter = ('disabled', 'dynamic', 'status', ('id', admin.EmptyFieldListFilter), ('address', admin.EmptyFieldListFilter), ('mac_address', admin.EmptyFieldListFilter), ('hostname', admin.EmptyFieldListFilter), ('comment', admin.EmptyFieldListFilter),) + search_fields = ('address', 'mac_address', 'hostname') diff --git a/mikrotik/apps.py b/mikrotik/apps.py new file mode 100644 index 0000000..6816a90 --- /dev/null +++ b/mikrotik/apps.py @@ -0,0 +1,6 @@ +from django.apps import AppConfig + + +class MikrotikConfig(AppConfig): + default_auto_field = 'django.db.models.BigAutoField' + name = 'mikrotik' diff --git a/mikrotik/migrations/0001_initial.py b/mikrotik/migrations/0001_initial.py new file mode 100644 index 0000000..a900765 --- /dev/null +++ b/mikrotik/migrations/0001_initial.py @@ -0,0 +1,83 @@ +# Generated by Django 5.2.4 on 2025-07-07 11:19 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ] + 
+ operations = [ + migrations.CreateModel( + name='DNSStatic', + fields=[ + ('internal_id', models.BigAutoField(primary_key=True, serialize=False)), + ('disabled', models.CharField(blank=True, choices=[('false', 'No'), ('true', 'Yes')], default='false', max_length=10, null=True)), + ('comment', models.CharField(blank=True, default='', max_length=150, null=True)), + ('id', models.CharField(blank=True, default='', max_length=150, null=True)), + ('name', models.CharField(blank=True, max_length=150, null=True, unique=True)), + ('address', models.CharField(blank=True, default='', max_length=150, null=True)), + ('ttl', models.CharField(blank=True, default='', max_length=150, null=True)), + ('dynamic', models.CharField(blank=True, default='', editable=False, max_length=150, null=True)), + ('regexp', models.CharField(blank=True, max_length=150, null=True, unique=True)), + ], + options={ + 'abstract': False, + }, + ), + migrations.CreateModel( + name='IPAddress', + fields=[ + ('internal_id', models.BigAutoField(primary_key=True, serialize=False)), + ('disabled', models.CharField(blank=True, choices=[('false', 'No'), ('true', 'Yes')], default='false', max_length=10, null=True)), + ('comment', models.CharField(blank=True, default='', max_length=150, null=True)), + ('id', models.CharField(blank=True, default='', max_length=150, null=True)), + ('address', models.CharField(blank=True, default='', max_length=150, null=True)), + ('network', models.CharField(blank=True, default='', max_length=150, null=True)), + ('interface', models.CharField(blank=True, default='', max_length=150, null=True)), + ('actual_interface', models.CharField(blank=True, default='', max_length=150, null=True)), + ('invalid', models.CharField(blank=True, default='', max_length=150, null=True)), + ('dynamic', models.CharField(blank=True, default='', editable=False, max_length=150, null=True)), + ], + options={ + 'abstract': False, + }, + ), + migrations.CreateModel( + name='IPDHCPLease', + fields=[ + 
('internal_id', models.BigAutoField(primary_key=True, serialize=False)), + ('disabled', models.CharField(blank=True, choices=[('false', 'No'), ('true', 'Yes')], default='false', max_length=10, null=True)), + ('comment', models.CharField(blank=True, default='', max_length=150, null=True)), + ('id', models.CharField(blank=True, default='', max_length=150, null=True)), + ('address', models.CharField(blank=True, default='', max_length=150, null=True)), + ('mac_address', models.CharField(blank=True, default='', max_length=150, null=True)), + ('client_id', models.CharField(blank=True, default='', max_length=150, null=True)), + ('hostname', models.CharField(blank=True, default='', max_length=150, null=True)), + ('valid_until', models.CharField(blank=True, default='', max_length=150, null=True)), + ('dynamic', models.CharField(blank=True, default='', editable=False, max_length=150, null=True)), + ('blocked', models.CharField(blank=True, default='', max_length=150, null=True)), + ('active_client_id', models.CharField(blank=True, default='', max_length=150, null=True)), + ('active_mac_address', models.CharField(blank=True, default='', max_length=150, null=True)), + ('expires_after', models.CharField(blank=True, default='', max_length=150, null=True)), + ('age', models.CharField(blank=True, default='', max_length=150, null=True)), + ('active_server', models.CharField(blank=True, default='', max_length=150, null=True)), + ('active_address', models.CharField(blank=True, default='', max_length=150, null=True)), + ('host_name', models.CharField(blank=True, default='', max_length=150, null=True)), + ('radius', models.CharField(blank=True, default='', max_length=150, null=True)), + ('last_seen', models.CharField(blank=True, default='', max_length=150, null=True)), + ('dhcp_option', models.CharField(blank=True, default='', max_length=150, null=True)), + ('status', models.CharField(blank=True, default='', max_length=150, null=True)), + ('server', models.CharField(blank=True, 
default='', max_length=150, null=True)), + ('address_lists', models.CharField(blank=True, default='', max_length=150, null=True)), + ('always_broadcast', models.CharField(blank=True, default='', max_length=150, null=True)), + ('lease_time', models.CharField(blank=True, default='', max_length=150, null=True)), + ], + options={ + 'abstract': False, + }, + ), + ] diff --git a/mikrotik/migrations/0002_alter_dnsstatic_comment_alter_ipaddress_comment_and_more.py b/mikrotik/migrations/0002_alter_dnsstatic_comment_alter_ipaddress_comment_and_more.py new file mode 100644 index 0000000..435304d --- /dev/null +++ b/mikrotik/migrations/0002_alter_dnsstatic_comment_alter_ipaddress_comment_and_more.py @@ -0,0 +1,28 @@ +# Generated by Django 5.2.4 on 2025-07-08 12:00 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('mikrotik', '0001_initial'), + ] + + operations = [ + migrations.AlterField( + model_name='dnsstatic', + name='comment', + field=models.TextField(blank=True, default='', null=True), + ), + migrations.AlterField( + model_name='ipaddress', + name='comment', + field=models.TextField(blank=True, default='', null=True), + ), + migrations.AlterField( + model_name='ipdhcplease', + name='comment', + field=models.TextField(blank=True, default='', null=True), + ), + ] diff --git a/mikrotik/migrations/__init__.py b/mikrotik/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mikrotik/models.py b/mikrotik/models.py new file mode 100644 index 0000000..53307e6 --- /dev/null +++ b/mikrotik/models.py @@ -0,0 +1,215 @@ +import logging +from functools import cached_property + +from django.db import models +from django.db.models.signals import post_save, pre_delete, pre_save +from django.dispatch import receiver + +from lib.db import TaskAwareModelMixin +from lib.decorators import skip_signal +from lib.mikrotik import MikrotikModelMixin, is_local_ip + + +class DNSStatic(MikrotikModelMixin, 
TaskAwareModelMixin): + id = models.CharField(max_length=150, null=True, blank=True, default='') + name = models.CharField(max_length=150, null=True, blank=True, unique=True, ) + address = models.CharField(max_length=150, null=True, blank=True, default='') + ttl = models.CharField(max_length=150, null=True, blank=True, default='') + dynamic = models.CharField(max_length=150, null=True, blank=True, default='', editable=False) + regexp = models.CharField(max_length=150, null=True, blank=True, unique=True, ) + + # lease = models.ForeignKey('IPDHCPLease', on_delete=models.SET_NULL, null=True, blank=True, default=None, related_name='dns_statics', name) + + @property + def unique_on_router(self): + return ['address', ('name', 'regexp')] + + @property + def get_self_params(self): + return {f:getattr(self, f) for f in ['address', 'name', 'regexp', 'comment'] if getattr(self, f, None)} + + def __str__(self): + return f'{self.address} - {self.name or self.regexp}' + + @property + def no_mikrotik_props(self): + return super().no_mikrotik_props + ['ttl'] + + @property + def router_base(self): + return '/ip/dns/static' + + @cached_property + def router_list(self): + ret = [] + for r in self.router_get_all: + if is_local_ip(r['address']): + ret.append(r) + self.response = ret + return self.response + + +class IPAddress(MikrotikModelMixin, TaskAwareModelMixin): + id = models.CharField(max_length=150, null=True, blank=True, default='') + address = models.CharField(max_length=150, null=True, blank=True, default='') + network = models.CharField(max_length=150, null=True, blank=True, default='') + interface = models.CharField(max_length=150, null=True, blank=True, default='') + actual_interface = models.CharField(max_length=150, null=True, blank=True, default='') + invalid = models.CharField(max_length=150, null=True, blank=True, default='') + dynamic = models.CharField(max_length=150, null=True, blank=True, default='', editable=False) + + @property + def unique_on_router(self): + 
return ['address', 'network', 'interface'] + + def __str__(self): + return f'{self.address} - {self.comment}' + + @property + def router_base(self): + return '/ip/address' + + @property + def no_mikrotik_props(self): + return super().no_mikrotik_props + [ + 'actual_interface', + 'invalid', + ] + + @property + def get_next_ip(self): + # sync_from_mikrotik(IPDHCPLease) + net = '.'.join(self.network.split('.')[:-1]) + '.' + used = sorted(IPDHCPLease.objects.filter(address__startswith=net).values_list('address', flat=True), + reverse=True) + next32 = 1 + if used: + next32 += int(used[0].split('.')[-1]) + return f'{net}{next32}' + + @cached_property + def router_list(self): + """ { "id": "*XXX", "address": "X.X.X.X/XX", + "network": "X.X.X.X", "interface": "<>", + "actual-interface": "", "invalid": "false", + "dynamic": "true", "disabled": "false" } """ + ret = [] + for r in self.router_get_all: + if all([ + r['invalid'] == 'false', + r['disabled'] == 'false', + 'container' in r.get('comment', '').lower(), + is_local_ip(r['address']), + r['address'].endswith('.1/24'), + ]): + ret.append(r) + return ret + + +class IPDHCPLease(MikrotikModelMixin, TaskAwareModelMixin): + id = models.CharField(max_length=150, null=True, blank=True, default='') + address = models.CharField(max_length=150, null=True, blank=True, default='') + mac_address = models.CharField(max_length=150, null=True, blank=True, default='') + client_id = models.CharField(max_length=150, null=True, blank=True, default='') + hostname = models.CharField(max_length=150, null=True, blank=True, default='') + valid_until = models.CharField(max_length=150, null=True, blank=True, default='') + dynamic = models.CharField(max_length=150, null=True, blank=True, default='', editable=False) + blocked = models.CharField(max_length=150, null=True, blank=True, default='') + active_client_id = models.CharField(max_length=150, null=True, blank=True, default='') + active_mac_address = models.CharField(max_length=150, null=True, 
blank=True, default='') + expires_after = models.CharField(max_length=150, null=True, blank=True, default='') + age = models.CharField(max_length=150, null=True, blank=True, default='') + active_server = models.CharField(max_length=150, null=True, blank=True, default='') + active_address = models.CharField(max_length=150, null=True, blank=True, default='') + host_name = models.CharField(max_length=150, null=True, blank=True, default='') + radius = models.CharField(max_length=150, null=True, blank=True, default='') + last_seen = models.CharField(max_length=150, null=True, blank=True, default='') + dhcp_option = models.CharField(max_length=150, null=True, blank=True, default='') + status = models.CharField(max_length=150, null=True, blank=True, default='') + server = models.CharField(max_length=150, null=True, blank=True, default='') + address_lists = models.CharField(max_length=150, null=True, blank=True, default='') + always_broadcast = models.CharField(max_length=150, null=True, blank=True, default='') + lease_time = models.CharField(max_length=150, null=True, blank=True, default='') + + @property + def unique_on_router(self): + # fixed typo: was 'nac_address', which can never match the mac_address field + return ['address', 'mac_address'] + + + @property + def mikrotik_send_params(self): + return { + 'address': self.address, + 'mac-address': self.mac_address, + 'comment': self.comment, + } + + def save(self, *args, **kwargs): + self.mac_address = self.mac_address.upper() if self.mac_address else '' + super().save(*args, **kwargs) + + @property + def network_24(self): + return '.'.join(self.address.split('.')[:-1]) + '.' 
+ + def __str__(self): + return f'{self.address} - {self.mac_address} - {self.status}' + + @property + def router_base(self): + return '/ip/dhcp-server/lease' + + @cached_property + def router_list(self): + return self.router_get_all + + @property + def no_mikrotik_props(self): + return super().no_mikrotik_props + [ + 'active_client_id', + 'active_mac_address', + 'expires_after', + 'age', + 'active_server', + 'active_address', + 'hostname', + 'host_name', + 'radius', + 'last_seen', + 'status', + 'lease_time', + 'server', + ] + + +for cl in [IPAddress, IPDHCPLease, DNSStatic]: + @receiver(pre_save, sender=cl) + def send_before_save(sender, instance: MikrotikModelMixin, **kwargs): + if instance._state.adding: + logging.info(f'Created {instance} via pre_save event - do nothing') + return instance + try: + response = instance.sync_to_router() + logging.debug(f'Update {instance} to router with {response}') + except Exception as e: + logging.error(f'Error while updating {instance} to router: {e}') + return instance + + @receiver(post_save, sender=cl) + @skip_signal(signaltype='post_save') + def send_after_save(sender, instance: MikrotikModelMixin, created, **kwargs): + if created: + logging.info(f'Created {instance} via post_save event - sync once') + instance.sync_to_router() + return instance + + + # @skip_signal(signaltype='pre_delete') + @receiver(pre_delete, sender=cl) + def send_before_delete(sender, instance, **kwargs): + try: + response = instance.delete_from_router() + logging.debug(f'Deleted {instance} from router with {response}') + except Exception as e: + logging.error(f'Error while deleting {instance} from router: {e}') + return instance diff --git a/mikrotik/tests.py b/mikrotik/tests.py new file mode 100644 index 0000000..7ce503c --- /dev/null +++ b/mikrotik/tests.py @@ -0,0 +1,3 @@ +from django.test import TestCase + +# Create your tests here. 
diff --git a/mikrotik/views.py b/mikrotik/views.py new file mode 100644 index 0000000..91ea44a --- /dev/null +++ b/mikrotik/views.py @@ -0,0 +1,3 @@ +from django.shortcuts import render + +# Create your views here. diff --git a/proxmox/__init__.py b/proxmox/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/proxmox/admin.py b/proxmox/admin.py new file mode 100644 index 0000000..a701f1a --- /dev/null +++ b/proxmox/admin.py @@ -0,0 +1,79 @@ +import logging + +from django.contrib import admin +from django.db.models import Q + +from lib.decorators import readonly +from lib.proxmox import Proxmox +from proxmox.models import Lxc, LxcTemplate + + +@readonly +def sync_all_lxc_templates_from_proxmox(*args, **kwargs): + pm = Proxmox() + for storage in pm.storage_get(enabled=1): + logging.debug(f'Syncing Templates from storage {storage["storage"]}') + storage_name = storage['storage'] + for tmpl in pm.storage(f'{storage_name}/content').get(content='vztmpl'): + try: + logging.debug(f'Updating {tmpl["volid"]}') + template = LxcTemplate.objects.get(volid=tmpl['volid']) + except LxcTemplate.DoesNotExist: + logging.debug(f'Fail - Creating {tmpl["volid"]}') + template = LxcTemplate.objects.create(volid=tmpl['volid']) + + template.write(**tmpl) + + +@readonly +def sync_all_lxc_from_proxmox(*args, **kwargs): + from lib.proxmox import Proxmox + pm = Proxmox() + existing_vms = [] + for lxc_data in pm.get_all_lxc(as_dict=True, **kwargs): + vmid = lxc_data.pop('vmid') + try: + lx: Lxc = Lxc.objects.get(vmid=vmid) + logging.info(f'Updating {vmid}') + except Lxc.DoesNotExist: + logging.info(f'Creating {vmid}') + lx = Lxc.objects.create(**{'vmid': vmid}) + + lx.from_proxmox(**lxc_data) + + existing_vms.append(vmid) + + to_delete = Lxc.objects.filter(~Q(vmid__in=existing_vms)) + if to_delete: + logging.info(f'Deleting {[d.vmid for d in to_delete]}') + to_delete.delete() + + +sync_all_lxc_from_proxmox.short_description = 'Sync all LXC from Proxmox' + + +@admin.register(Lxc) 
class LxcAdmin(admin.ModelAdmin):
    """Admin UI for LXC containers mirrored from Proxmox."""

    # One bulk action (module-level function) plus a per-selection sync action.
    actions = [sync_all_lxc_from_proxmox, 'sync_selected_from_proxmox']
    search_fields = ('name', 'vmid', 'hostname', 'hwaddr')
    list_display = ('name', 'vmid', 'hwaddr', 'disksize', 'memory', 'cpus', 'status')
    list_filter = ('status', 'disksize',)

    def get_readonly_fields(self, request, obj=None):
        # On change forms, everything except the handful of user-editable
        # fields is read-only: all other values originate from Proxmox and
        # would be clobbered by the next sync anyway.
        if obj:
            return [k.name for k in obj._meta.fields if
                    k.name not in ['name', 'cores', 'hwaddr', 'size', 'cpus', 'memory', 'description', 'hostname']]
        return self.readonly_fields

    @admin.action(description='Sync selected from proxmox')
    @readonly
    def sync_selected_from_proxmox(self, request, queryset):
        # Pull the current Proxmox state for each selected container.
        for lx in queryset:
            lx.sync_from_proxmox()
default='', max_length=150, null=True)), + ('hwaddr', models.CharField(blank=True, default=uuid.uuid4, max_length=150, null=True, unique=True)), + ('size', models.CharField(blank=True, max_length=4, null=True)), + ('cores', models.IntegerField(blank=True, default=1, null=True)), + ('memory', models.IntegerField(default=512, help_text='in MB', validators=[django.core.validators.MinValueValidator(128)])), + ('disksize', models.IntegerField(default=12, help_text='in GB', validators=[django.core.validators.MinValueValidator(8)])), + ('swap', models.IntegerField(blank=True, null=True)), + ('description', models.TextField(blank=True, default='', null=True)), + ('cpus', models.IntegerField(blank=True, default=1, null=True, validators=[django.core.validators.MinValueValidator(1)])), + ('uptime', models.IntegerField(blank=True, null=True)), + ('maxswap', models.IntegerField(blank=True, null=True)), + ('cpu', models.IntegerField(blank=True, null=True)), + ('disk', models.IntegerField(blank=True, null=True)), + ('netout', models.IntegerField(blank=True, null=True)), + ('diskwrite', models.IntegerField(blank=True, null=True)), + ('diskread', models.IntegerField(blank=True, null=True)), + ('pid', models.IntegerField(blank=True, null=True)), + ('maxdisk', models.IntegerField(blank=True, null=True)), + ('mem', models.IntegerField(blank=True, null=True)), + ('maxmem', models.IntegerField(blank=True, null=True)), + ('netin', models.IntegerField(blank=True, null=True)), + ('status', models.CharField(blank=True, default='', max_length=150, null=True)), + ('type', models.CharField(blank=True, default='', max_length=150, null=True)), + ('onboot', models.CharField(blank=True, default='', max_length=150, null=True)), + ('nameserver', models.CharField(blank=True, default='', max_length=150, null=True)), + ('digest', models.CharField(blank=True, default='', max_length=150, null=True)), + ('rootfs', models.CharField(blank=True, default='', max_length=150, null=True)), + ('arch', 
models.CharField(blank=True, default='', max_length=150, null=True)), + ('ostype', models.CharField(blank=True, default='', max_length=150, null=True)), + ('net0', models.CharField(blank=True, default='', max_length=150, null=True)), + ('features', models.CharField(blank=True, default='', max_length=250, null=True)), + ('snaptime', models.CharField(blank=True, default='', max_length=150, null=True)), + ('parent', models.CharField(blank=True, default='', max_length=150, null=True)), + ('tags', models.CharField(blank=True, default='', max_length=250, null=True)), + ('console', models.CharField(blank=True, default='', max_length=150, null=True)), + ('tty', models.CharField(blank=True, default='', max_length=150, null=True)), + ('searchdomain', models.CharField(blank=True, default='', max_length=150, null=True)), + ('unprivileged', models.CharField(blank=True, default='', max_length=10, null=True)), + ('lxc', models.CharField(blank=True, default='', max_length=10, null=True)), + ], + options={ + 'abstract': False, + }, + ), + migrations.CreateModel( + name='LxcTemplate', + fields=[ + ('internal_id', models.BigAutoField(primary_key=True, serialize=False)), + ('volid', models.CharField(max_length=150, unique=True)), + ('ctime', models.IntegerField(default=0)), + ('size', models.IntegerField(default=0)), + ('format', models.CharField(max_length=10)), + ('content', models.CharField(default='tgz', max_length=10)), + ], + options={ + 'abstract': False, + }, + ), + ] diff --git a/proxmox/migrations/0002_alter_lxc_disk_alter_lxc_diskread_and_more.py b/proxmox/migrations/0002_alter_lxc_disk_alter_lxc_diskread_and_more.py new file mode 100644 index 0000000..0f49e1c --- /dev/null +++ b/proxmox/migrations/0002_alter_lxc_disk_alter_lxc_diskread_and_more.py @@ -0,0 +1,38 @@ +# Generated by Django 5.2.4 on 2025-07-08 11:21 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('proxmox', '0001_initial'), + ] + + operations = [ 
+ migrations.AlterField( + model_name='lxc', + name='disk', + field=models.CharField(blank=True, null=True), + ), + migrations.AlterField( + model_name='lxc', + name='diskread', + field=models.CharField(blank=True, null=True), + ), + migrations.AlterField( + model_name='lxc', + name='diskwrite', + field=models.CharField(blank=True, null=True), + ), + migrations.AlterField( + model_name='lxc', + name='netout', + field=models.CharField(blank=True, null=True), + ), + migrations.AlterField( + model_name='lxc', + name='uptime', + field=models.CharField(blank=True, null=True), + ), + ] diff --git a/proxmox/migrations/0003_alter_lxc_disksize_alter_lxc_memory.py b/proxmox/migrations/0003_alter_lxc_disksize_alter_lxc_memory.py new file mode 100644 index 0000000..c5d8aa7 --- /dev/null +++ b/proxmox/migrations/0003_alter_lxc_disksize_alter_lxc_memory.py @@ -0,0 +1,23 @@ +# Generated by Django 5.2.4 on 2025-07-08 11:23 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('proxmox', '0002_alter_lxc_disk_alter_lxc_diskread_and_more'), + ] + + operations = [ + migrations.AlterField( + model_name='lxc', + name='disksize', + field=models.IntegerField(default=12, help_text='in GB'), + ), + migrations.AlterField( + model_name='lxc', + name='memory', + field=models.IntegerField(default=512, help_text='in MB'), + ), + ] diff --git a/proxmox/migrations/0004_alter_lxc_cores_alter_lxc_cpu_alter_lxc_cpus_and_more.py b/proxmox/migrations/0004_alter_lxc_cores_alter_lxc_cpu_alter_lxc_cpus_and_more.py new file mode 100644 index 0000000..1a7acf7 --- /dev/null +++ b/proxmox/migrations/0004_alter_lxc_cores_alter_lxc_cpu_alter_lxc_cpus_and_more.py @@ -0,0 +1,74 @@ +# Generated by Django 5.2.4 on 2025-07-08 11:29 + +import django.core.validators +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('proxmox', '0003_alter_lxc_disksize_alter_lxc_memory'), + ] + + operations = [ + 
migrations.AlterField( + model_name='lxc', + name='cores', + field=models.BigIntegerField(blank=True, default=1, null=True), + ), + migrations.AlterField( + model_name='lxc', + name='cpu', + field=models.BigIntegerField(blank=True, null=True), + ), + migrations.AlterField( + model_name='lxc', + name='cpus', + field=models.BigIntegerField(blank=True, default=1, null=True, validators=[django.core.validators.MinValueValidator(1)]), + ), + migrations.AlterField( + model_name='lxc', + name='disksize', + field=models.BigIntegerField(default=12, help_text='in GB'), + ), + migrations.AlterField( + model_name='lxc', + name='maxdisk', + field=models.BigIntegerField(blank=True, null=True), + ), + migrations.AlterField( + model_name='lxc', + name='maxmem', + field=models.BigIntegerField(blank=True, null=True), + ), + migrations.AlterField( + model_name='lxc', + name='maxswap', + field=models.BigIntegerField(blank=True, null=True), + ), + migrations.AlterField( + model_name='lxc', + name='mem', + field=models.BigIntegerField(blank=True, null=True), + ), + migrations.AlterField( + model_name='lxc', + name='memory', + field=models.BigIntegerField(default=512, help_text='in MB'), + ), + migrations.AlterField( + model_name='lxc', + name='netin', + field=models.BigIntegerField(blank=True, null=True), + ), + migrations.AlterField( + model_name='lxc', + name='pid', + field=models.BigIntegerField(blank=True, null=True), + ), + migrations.AlterField( + model_name='lxc', + name='swap', + field=models.BigIntegerField(blank=True, null=True), + ), + ] diff --git a/proxmox/migrations/0005_alter_lxc_lxc.py b/proxmox/migrations/0005_alter_lxc_lxc.py new file mode 100644 index 0000000..5498500 --- /dev/null +++ b/proxmox/migrations/0005_alter_lxc_lxc.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-07-08 14:10 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('proxmox', 
'0004_alter_lxc_cores_alter_lxc_cpu_alter_lxc_cpus_and_more'), + ] + + operations = [ + migrations.AlterField( + model_name='lxc', + name='lxc', + field=models.CharField(blank=True, default='', max_length=150, null=True), + ), + ] diff --git a/proxmox/migrations/0006_lxc_created_at_lxc_updated_at_lxctemplate_created_at_and_more.py b/proxmox/migrations/0006_lxc_created_at_lxc_updated_at_lxctemplate_created_at_and_more.py new file mode 100644 index 0000000..662bac8 --- /dev/null +++ b/proxmox/migrations/0006_lxc_created_at_lxc_updated_at_lxctemplate_created_at_and_more.py @@ -0,0 +1,36 @@ +# Generated by Django 5.2.4 on 2025-07-21 11:03 + +import django.utils.timezone +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('proxmox', '0005_alter_lxc_lxc'), + ] + + operations = [ + migrations.AddField( + model_name='lxc', + name='created_at', + field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), + preserve_default=False, + ), + migrations.AddField( + model_name='lxc', + name='updated_at', + field=models.DateTimeField(auto_now=True), + ), + migrations.AddField( + model_name='lxctemplate', + name='created_at', + field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), + preserve_default=False, + ), + migrations.AddField( + model_name='lxctemplate', + name='updated_at', + field=models.DateTimeField(auto_now=True), + ), + ] diff --git a/proxmox/migrations/0007_lxctemplate_net0_alter_lxc_net0.py b/proxmox/migrations/0007_lxctemplate_net0_alter_lxc_net0.py new file mode 100644 index 0000000..4439aba --- /dev/null +++ b/proxmox/migrations/0007_lxctemplate_net0_alter_lxc_net0.py @@ -0,0 +1,23 @@ +# Generated by Django 5.2.4 on 2025-07-22 13:21 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('proxmox', '0006_lxc_created_at_lxc_updated_at_lxctemplate_created_at_and_more'), + ] + + operations = [ + 
class ProxmoxAbstractModel(BaseModel, TaskAwareModelMixin):
    """Base model for DB records that mirror a Proxmox API object.

    Subclasses describe how raw Proxmox key/value pairs map onto model
    fields - including "comma separated value" option strings such as
    ``net0=name=eth0,bridge=vmbr0`` - and implement the actual push/pull.
    """

    class Meta:
        abstract = True

    @property
    def change_map(self) -> dict:
        # BUG FIX: the original raised NotImplemented, which is a sentinel
        # value, not an exception, and raising it is a TypeError.
        raise NotImplementedError('@property "change_map" not implemented')

    @property
    def comma_separated_values(self) -> list:
        """A list of fields whose value is a CSV option string (X=1,Y=2,...)."""
        return []

    @property
    def no_csv_overwrite(self) -> list:
        """Sub-keys inside a CSV option that share a name with a non-CSV
        option. Omit those when expanding CSV values."""
        return []

    @property
    def non_proxmox_values(self) -> list:
        """Fields that exist only in the DB, not in Proxmox, and so are not
        synced directly (though they may arrive as part of a CSV option)."""
        return []

    def to_proxmox(self):
        raise NotImplementedError('Not implemented')

    def sync_from_proxmox(self):
        raise NotImplementedError('Not implemented')

    @property
    def csv_field_map(self):
        # Maps an outer CSV option name to (csv sub-key, model field, converter).
        return {}

    @property
    def int_fields(self):
        # Model fields whose CSV sub-values must be coerced to int.
        return []

    def from_proxmox(self, **kwargs):
        """Copy values received from Proxmox onto this instance.

        CSV option strings are expanded into their individual fields;
        everything else is written verbatim (strings coerced to int for
        integer model fields). Returns the result of ``self.write(...)``.
        """
        logging.debug(f"Got {kwargs} from proxmox")
        params = {}
        field_names = {f.name for f in self._meta.fields}
        for k, v in kwargs.items():
            logging.info(f"Process {k} with value {v}")
            if not hasattr(self, k):
                continue
            if k in self.comma_separated_values:
                _vals = get_comma_separated_values(v)
                logging.debug(f"Got {_vals}")
                for _k, _v in _vals.items():
                    if _k in self.no_csv_overwrite:
                        logging.debug(f"{_k} is in no_csv_overwrite - omitting")
                        continue
                    # NOTE(review): csvkey is never compared against _k, so every
                    # sub-key of a mapped option is written to selfkey - confirm
                    # this is intended before tightening it.
                    csvkey, selfkey, csvfun = self.csv_field_map.get(k, (_k, _k, lambda x: x))
                    if hasattr(self, selfkey):
                        if csvfun:
                            _v = csvfun(_v)
                        elif selfkey in self.int_fields:
                            _v = int(no_int_re.sub('', _v) or 0)
                        logging.debug(f"Update {selfkey} to {_v}")
                        params[selfkey] = _v
                    else:
                        logging.info(f"{selfkey} not found in {type(self)}")
            else:
                # BUG FIX: the original tested isinstance(getattr(self, k),
                # models.IntegerField) - always False, since getattr returns the
                # field's *value*, not the field - and coerced the stale (possibly
                # unbound) loop variable _v. Look the field up on _meta instead
                # and coerce the current value v.
                if (k in field_names
                        and isinstance(self._meta.get_field(k), models.IntegerField)
                        and isinstance(v, str)):
                    v = int(no_int_re.sub('', v) or 0)
                params[k] = v
        logging.debug(f"No CSValues for {self}")
        return self.write(**params)
int(no_int_re.sub('', x) or 0)), + } + + @property + def proxmox_console_url(self): + return ( + f"https://{ProxmoxConfig.HOST}:8006/" + f"?console=lxc&xtermjs=1&vmid={self.vmid}" + f"&vmname={self.hostname}&node={ProxmoxConfig.NODE}&cmd=" + ) + + def delete(self, task=None): + with Proxmox() as pm: + try: + if task: + task.wrap_proxmox_function(self.stop) + else: + self.stop() + except Exception as e: + if 'running' in str(e): + logging.info(f"Could not stop {self.vmid} - {e}") + else: + raise + try: + if task: + task.wrap_proxmox_function(pm.lxc_delete, self.vmid, force=1, purge=1) + else: + result = pm.lxc_delete(self.vmid, force=1, purge=1) + logging.info(f"Deleted {self.vmid} from proxmox - {result}") + except Exception as e: + logging.error(f"Could not delete {self.vmid} from proxmox", e) + finally: + super().delete() + + vmid = models.IntegerField(null=True, blank=True, unique=True) + name = models.CharField(max_length=150, null=True, blank=True, default='', verbose_name='Container Name') + hostname = models.CharField(max_length=150, null=True, blank=True, default='', ) + # This one is from net0 + hwaddr = models.CharField(max_length=150, null=True, blank=True, default=uuid4, unique=True) + # this comes from rootfs + size = models.CharField(max_length=4, null=True, blank=True) + cores = models.BigIntegerField(null=True, blank=True, default=1) + memory = models.BigIntegerField(default=512, help_text='in MB', ) # validators=[MinValueValidator(128)]) + disksize = models.BigIntegerField(default=12, help_text='in GB', ) # validators=[MinValueValidator(8)]) + swap = models.BigIntegerField(null=True, blank=True) + description = models.TextField(null=True, blank=True, default='') + cpus = models.BigIntegerField(null=True, blank=True, validators=[MinValueValidator(1)], default=1) + uptime = models.CharField(null=True, blank=True) + maxswap = models.BigIntegerField(null=True, blank=True) + cpu = models.BigIntegerField(null=True, blank=True) + disk = 
models.CharField(null=True, blank=True) + netout = models.CharField(null=True, blank=True) + diskwrite = models.CharField(null=True, blank=True) + diskread = models.CharField(null=True, blank=True) + pid = models.BigIntegerField(null=True, blank=True) + maxdisk = models.BigIntegerField(null=True, blank=True) + mem = models.BigIntegerField(null=True, blank=True) + maxmem = models.BigIntegerField(null=True, blank=True) + netin = models.BigIntegerField(null=True, blank=True) + status = models.CharField(max_length=150, null=True, blank=True, default='') + type = models.CharField(max_length=150, null=True, blank=True, default='') + onboot = models.CharField(max_length=150, null=True, blank=True, default='') + nameserver = models.CharField(max_length=150, null=True, blank=True, default='') + digest = models.CharField(max_length=150, null=True, blank=True, default='') + rootfs = models.CharField(max_length=150, null=True, blank=True, default='') + arch = models.CharField(max_length=150, null=True, blank=True, default='') + ostype = models.CharField(max_length=150, null=True, blank=True, default='') + net0 = models.CharField(max_length=150, null=True, blank=True, default='name=eth0,bridge=vmbr0,firewall=0,ip=dhcp') + features = models.CharField(max_length=250, null=True, blank=True, default='') + snaptime = models.CharField(max_length=150, null=True, blank=True, default='') + parent = models.CharField(max_length=150, null=True, blank=True, default='') + tags = models.CharField(max_length=250, null=True, blank=True, default='') + console = models.CharField(max_length=150, null=True, blank=True, default='') + tty = models.CharField(max_length=150, null=True, blank=True, default='') + searchdomain = models.CharField(max_length=150, null=True, blank=True, default='') + unprivileged = models.CharField(max_length=10, null=True, blank=True, default='') + lxc = models.CharField(max_length=150, null=True, blank=True, default='') + + def __str__(self): + return f'{self.name} 
({self.vmid})' + + def sync_from_proxmox(self): + pm = Proxmox() + try: + data = pm.lxc_get(f'{self.vmid}/config') + logging.debug(f"Got raw data '{data}' from proxmox") + if not data: + logging.warning(f'Could not find {self.vmid} in proxmox - deleting from local database!') + return self.delete() + self.from_proxmox(**data) + super().save() + except Exception as e: + logging.error(f"Could not get config for {self.vmid} - {e}") + return self + + @property + def _ch_disksize(self): + if self._old_values['disksize'] == self.disksize: + return False + if self.disksize > 100: + logging.warning(f'disksize is > 100') + return False + if self.disksize < self._old_values['disksize']: + logging.warning(f"Can not shrink disksize") + return False + return True + + def change_disksize(self): + """Just to disable some magick at the moment""" + if self._ch_disksize: + route = f'{self.vmid}/resize' + args = { + 'disk': 'rootfs', + 'size': f'{self.disksize}G', + } + with Proxmox() as pm: + try: + result = pm.lxc_put(route, **args) + logging.info(f"Changed disksize for container {self.vmid} to {self.disksize}G - {result}") + logs = pm.get_task_status(taskhash=result) + logging.debug(f"Tasklog for {self.vmid} is {logs}") + except Exception as e: + logging.error(f"Could not change disksize for container {self.vmid} to {self.disksize}G - {e}") + return self + + def change_memory(self): + """Just to disable some magick at the moment""" + if self._old_values['memory'] != self.memory: + route = f'{self.vmid}/config' + args = { + 'memory': self.memory, + } + with Proxmox() as pm: + try: + result = pm.lxc_put(route, **args) + logging.info(f"Changed memory for container {self.vmid} to {self.memory}MB - {result}") + except Exception as e: + self.memory = self._old_values['memory'] + super().save(update_fields=['memory']) + logging.error(f"Could not change memory for container {self.vmid} to {self.memory}MB - {e}") + return self + + def change_cores(self): + if self._old_values['cores'] != 
self.cores: + logging.debug(f"Changing cores for {self.vmid} to {self.cores}") + route = f'{self.vmid}/config' + args = { + 'cores': self.cores, + } + with Proxmox() as pm: + try: + result = pm.lxc_put(route, **args) + logging.info(f"Changed cores for container {self.vmid} to {self.cores} - {result}") + return result + except Exception as e: + self.cores = self._old_values['cores'] + super().save(update_fields=['cores']) + logging.error(f"Could not change cores for container {self.vmid} to {self.cores} - {e}") + return None + + def start(self): + startresult = self._start_stop_actions('start') + if startresult: + self.status = 'running' + return startresult + + def stop(self): + stopresult = self._start_stop_actions('stop') + if stopresult: + self.status = 'stopped' + return stopresult + + def reboot(self): + rebootresult = self._start_stop_actions('reboot') + if rebootresult: + self.status = 'running' + return rebootresult + + def shutdown(self): + shresult = self._start_stop_actions('shutdown') + if shresult: + self.status = 'stopped' + return shresult + + def _start_stop_actions(self, action): + assert action in ('start', 'stop', 'shutdown', 'reboot') + with Proxmox() as pm: + try: + result = pm.lxc_post(f'{self.vmid}/status/{action}') + logging.info(f"{action}ed {self.vmid} - {result}") + return result + except Exception as e: + logging.error(f"Could not {action} {self.vmid} - {e}") + return False + + def save(self, *args, **kwargs): + logging.debug(f"Saving {self}") + super().save(*args, **kwargs) + + def to_proxmox(self): + self.change_disksize() + self.change_memory() + self.change_cores() + + +@receiver(pre_save, sender=Lxc) +@skip_signal() +def pre_save_lxc(sender, instance: Lxc, **kwargs): + instance.hwaddr = str(instance.hwaddr or uuid4()).upper() + if instance._state.adding: + logging.info(f'Created {instance} via post_save event - do nothing, must be done via CloneContainer') + return + instance.change_disksize() + instance.change_memory() + 
class LxcTemplate(ProxmoxAbstractModel, TaskAwareModelMixin):
    """A container template (vztmpl volume) known to a Proxmox storage."""
    # NOTE(review): ProxmoxAbstractModel already inherits TaskAwareModelMixin,
    # so listing it again here looks redundant - confirm and drop later.

    # Storage volume id, e.g. 'local:vztmpl/<name>.tar.zst' (unique key for sync).
    volid = models.CharField(max_length=150, unique=True)
    # Creation time as reported by Proxmox (presumably a unix epoch - confirm).
    ctime = models.IntegerField(default=0)
    # Size as reported by Proxmox; rendered via human_size, so presumably bytes.
    size = models.IntegerField(default=0)
    format = models.CharField(max_length=10)
    content = models.CharField(max_length=10, default='tgz')
    net0 = models.CharField(max_length=150, null=True, blank=True, default='name=eth0,bridge=vmbr0,firewall=0,ip=dhcp')
    is_default_template = models.BooleanField(default=False,
                                              help_text='If true, this template will be used when creating new containers as default, or preselected')

    def __str__(self):
        # Show only the file-name part of the volume id.
        return self.volid.split('/')[-1]

    def __repr__(self):
        return self.volid

    @property
    def human_size(self):
        # Human-readable rendering of `size` (e.g. '1.2 GB').
        return human_size(self.size)
+ +# Activate virtual environment +source $VENV_PATH/bin/activate + +# Change to project directory +cd $PROJECT_PATH + +# Install/update dependencies +echo "Installing dependencies..." +pip install -r requirements.txt + +# Run database migrations +echo "Running database migrations..." +python manage.py migrate + +# Collect static files +echo "Collecting static files..." +python manage.py collectstatic --noinput + +# Create static directory if it doesn't exist +mkdir -p /usr/share/django-proxmox-mikrotik/static + +# Set proper permissions +echo "Setting permissions..." +chown -R www-data:www-data $PROJECT_PATH +chmod -R 755 $PROJECT_PATH + +# Restart services +echo "Restarting services..." +systemctl restart django-proxmox-mikrotik +systemctl restart nginx + + +echo "Deployment completed successfully!" \ No newline at end of file diff --git a/src/etc/nginx/sites-available/django-proxmox-mikrotik b/src/etc/nginx/sites-available/django-proxmox-mikrotik new file mode 100644 index 0000000..5dbc7fc --- /dev/null +++ b/src/etc/nginx/sites-available/django-proxmox-mikrotik @@ -0,0 +1,77 @@ +server { + listen 80; + server_name localhost; + + # Security headers + add_header X-Frame-Options "SAMEORIGIN" always; + add_header X-Content-Type-Options "nosniff" always; + add_header X-XSS-Protection "1; mode=block" always; + + # Static files + location /static/ { + alias /usr/share/django-proxmox-mikrotik/static/; + expires 30d; + add_header Cache-Control "public, immutable"; + } + + # Media files + location /media/ { + alias /usr/share/django-proxmox-mikrotik/media/; + expires 30d; + add_header Cache-Control "public"; + } + + # API endpoints for live status updates + location /manager/task/ { + proxy_pass http://127.0.0.1:8000; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # Extended timeouts for long-running tasks + proxy_connect_timeout 
600s; + proxy_send_timeout 600s; + proxy_read_timeout 600s; + + # Disable caching for API responses + add_header Cache-Control "no-cache, no-store, must-revalidate"; + add_header Pragma "no-cache"; + add_header Expires "0"; + } + + # AJAX endpoints + location /frontend/ { + proxy_pass http://127.0.0.1:8000; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # Standard timeouts for API calls + proxy_connect_timeout 60s; + proxy_send_timeout 60s; + proxy_read_timeout 60s; + + # Disable caching for API responses + add_header Cache-Control "no-cache, no-store, must-revalidate"; + } + + # Main application + location / { + proxy_pass http://127.0.0.1:8000; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # Timeout settings + proxy_connect_timeout 300s; + proxy_send_timeout 300s; + proxy_read_timeout 300s; + } + + # Logs + access_log /var/log/nginx/django-proxmox-mikrotik_access.log; + error_log /var/log/nginx/django-proxmox-mikrotik_error.log; +} \ No newline at end of file diff --git a/src/etc/systemd/system/django-proxmox-mikrotik.service b/src/etc/systemd/system/django-proxmox-mikrotik.service new file mode 100644 index 0000000..6a00af9 --- /dev/null +++ b/src/etc/systemd/system/django-proxmox-mikrotik.service @@ -0,0 +1,17 @@ +[Unit] +Description=Django Proxmox Mikrotik Application +After=network.target + +[Service] +Type=simple +User=www-data +Group=www-data +WorkingDirectory=/usr/share/django-proxmox-mikrotik +ExecStart=/usr/share/django-proxmox-mikrotik/venv/bin/python manage.py runserver 0.0.0.0:8000 +Restart=always +RestartSec=3 +StandardOutput=journal +StandardError=journal + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/tasklogger/__init__.py 
class TaskFactory:
    """Factory resolving 'the current task' for a request.

    Resolution order for the task uuid: explicit ``task_uuid`` argument,
    then the ``task_uuid`` form/query parameter, then the uuid stored in
    the session, then ``uuid`` from kwargs; with no uuid at all a fresh
    Task is created. The resolved uuid is written back to the session so
    follow-up requests attach to the same task.
    """

    def __new__(cls, task_uuid=None, request=None, *args, **kwargs):
        # Intentionally returns a Task instance, not a TaskFactory:
        # TaskFactory() acts as a "get or create the current task" call.
        # (First parameter renamed self -> cls: __new__ receives the class.)
        request = request or get_request()
        if not request:
            logging.error('No request found while creating task')
        else:
            session_uuid = request.session.get('current_task_uuid')
            form_uuid = request.POST.get('task_uuid', request.GET.get('task_uuid'))
            if form_uuid:
                task_uuid = task_uuid or form_uuid
                request.session['current_task_uuid'] = task_uuid
            elif session_uuid:
                task_uuid = task_uuid or session_uuid
        task_uuid = task_uuid or kwargs.pop('uuid', None)
        if not task_uuid:
            task = Task.objects.create()
        else:
            task, _created = Task.objects.get_or_create(uuid=task_uuid)
        if request:
            # Normalize to str: session values must be JSON-serializable.
            request.session['current_task_uuid'] = str(task.uuid)
        return task

    @staticmethod
    def reset_current_task(request=None):
        """Forget the current task uuid in the session; always returns True."""
        request = request or get_request()
        if request:
            request.session.pop('current_task_uuid', None)
        return True
class Task(DateAwareMixin):
    """A logged background task, optionally tracking a Proxmox task (UPID)."""

    uuid = models.UUIDField(default=uuid4, editable=False, unique=True)
    proxmox_upid = models.CharField(max_length=150, null=True, blank=True)
    status = models.CharField(max_length=150, null=True, blank=True, default='running')

    # Class-level slot; not used by the methods below.
    current_task = None

    def __del__(self):
        # NOTE(review): relying on __del__ (and the global request) for
        # session cleanup is fragile - finalization order is not guaranteed.
        # Confirm explicit unset_as_current() calls cover all paths.
        self.unset_as_current()

    def unset_as_current(self, request=None):
        """Remove this task's uuid from the session if it is the current one.

        Returns True when the session entry was removed, a falsy value
        otherwise.
        """
        request = request or get_request()
        if request:
            uuid = request.session.get('current_task_uuid')
            # BUG FIX: the session stores the uuid as a *string* (see
            # TaskFactory), so compare against str(self.uuid); comparing the
            # string to the UUID object was always False, and the session
            # entry was never actually removed.
            if uuid == str(self.uuid):
                request.session.pop('current_task_uuid', None)
                return True
            else:
                logging.warning(f'Tried to unset {self.uuid} as current, but it is not the current task ({uuid})')
        return False

    def __str__(self):
        return str(self.uuid)

    def add_entry(self, message):
        """Append a log entry to this task; returns self for chaining."""
        TaskEntry.objects.create(task=self, message=message)
        return self

    def task_is_stopped(self, result):
        """True when a Proxmox status dict reports a terminal state."""
        return result.get('status', 'running').lower() in ('ok', 'error', 'stopped')

    def wrap_proxmox_function(self, proxmox_function, *args, **kwargs):
        """Run *proxmox_function* and stream its Proxmox task log into entries.

        Stores the returned UPID, then polls the Proxmox task status,
        appending each message as a TaskEntry, until a terminal state is
        reported. Returns self.
        """
        from lib.proxmox import Proxmox
        self.proxmox_upid = proxmox_function(*args, **kwargs)
        self.save()
        if not self.proxmox_upid:
            logging.warning(f'Could not get upid for {proxmox_function} - {args} {kwargs}')
            return self
        while True:
            with Proxmox() as pm:
                for message in pm.get_task_status(taskhash=self.proxmox_upid):
                    self.add_entry(message)
                    if self.task_is_stopped(message):
                        return self
+
+
+
+

Task Details

+ Task UUID: {{ task.uuid }} +
+ +
+
+
+
+
Status:
+
+ + {{ task.status|default:"running" }} + +
+
+
+
Created:
+
{{ task.created_at }}
+
+ {% if task.proxmox_upid %} +
+
Proxmox UPID:
+
{{ task.proxmox_upid }}
+
+ {% endif %} + +
+ +
Task Log Entries
+ {% if entries %} +
+ {% for entry in entries %} +
+
+
+ {% if entry.message %} + {% if entry.message|length > 200 %} +
{{ entry.message }}
+ {% else %} + {{ entry.message }} + {% endif %} + {% else %} + No message + {% endif %} +
+
+ {{ entry.created_at|date:"H:i:s" }} +
+
+
+ {% endfor %} +
+ {% else %} +
+ No log entries found for this task. +
+ {% endif %} +
+
+ + +{% endblock %} \ No newline at end of file diff --git a/tasklogger/templates/tasklogger/task_list.html b/tasklogger/templates/tasklogger/task_list.html new file mode 100644 index 0000000..387587c --- /dev/null +++ b/tasklogger/templates/tasklogger/task_list.html @@ -0,0 +1,76 @@ +{% extends 'frontend/base.html' %} + +{% block title %} - Task List{% endblock %} + +{% block content %} +
+
+

Recent Tasks

+ Last 50 tasks +
+
+ {% if tasks %} +
+ + + + + + + + + + + + + + {% for task in tasks %} + + + + + + + + + + {% endfor %} + +
#UUIDStatusProxmox UPIDCreatedLatest EntryActions
{{ forloop.counter }} + {{ task.uuid|truncatechars:13 }} + + + {{ task.status|default:"running" }} + + + {% if task.proxmox_upid %} + {{ task.proxmox_upid|truncatechars:20 }} + {% else %} + - + {% endif %} + + {{ task.created_at|date:"d.m.Y H:i" }} + + {% with latest_entry=task.entries.last %} + {% if latest_entry %} + + {{ latest_entry.message|truncatechars:50 }} + + {% else %} + No entries + {% endif %} + {% endwith %} + + + View + +
+
+ {% else %} +
+ No tasks found. +
+ {% endif %} +
+
+{% endblock %} \ No newline at end of file diff --git a/tasklogger/tests.py b/tasklogger/tests.py new file mode 100644 index 0000000..7ce503c --- /dev/null +++ b/tasklogger/tests.py @@ -0,0 +1,3 @@ +from django.test import TestCase + +# Create your tests here. diff --git a/tasklogger/urls.py b/tasklogger/urls.py new file mode 100644 index 0000000..18227e0 --- /dev/null +++ b/tasklogger/urls.py @@ -0,0 +1,10 @@ +from django.urls import path +from . import views + +app_name = 'tasklogger' + +urlpatterns = [ + path('api/task-status/', views.task_status, name='task_status'), + path('task//', views.task_detail, name='task_detail'), + path('tasks/', views.task_list, name='task_list'), +] \ No newline at end of file diff --git a/tasklogger/views.py b/tasklogger/views.py new file mode 100644 index 0000000..8ef8641 --- /dev/null +++ b/tasklogger/views.py @@ -0,0 +1,71 @@ +import json + +from django.contrib.auth.decorators import login_required +from django.http import HttpResponse +from django.shortcuts import get_object_or_404, render + +from .models import Task + + +def task_response(data, status=200): + """Helper function to return a task response""" + return HttpResponse(json.dumps(data, default=str), status=status, content_type='application/json') + + +@login_required +def task_status(request): + """API endpoint to check task status""" + task_uuid = request.GET.get('task_uuid') + + if not task_uuid: + return task_response({ + 'status': 'error', + 'message': 'task_uuid parameter required' + }, status=400) + + try: + task = Task.objects.get(uuid=task_uuid) + + # Get latest entries + latest_entries = list(task.entries.order_by('-created_at')[:10].values( + 'message', 'created_at' + )) + + return task_response({ + 'status': 'success', + 'task': { + 'uuid': str(task.uuid), + 'proxmox_upid': task.proxmox_upid, + 'status': task.status, + 'created_at': task.created_at.isoformat(), + 'entries': latest_entries + } + }, status=200) + + except Task.DoesNotExist: + return 
task_response({ + 'status': 'error', + 'message': f'Task {task_uuid} not found' + }, status=404) + + +@login_required +def task_detail(request, task_uuid): + """View to show task details and log entries""" + task = get_object_or_404(Task, uuid=task_uuid) + entries = task.entries.order_by('created_at') + + return render(request, 'tasklogger/task_detail.html', { + 'task': task, + 'entries': entries + }) + + +@login_required +def task_list(request): + """View to list recent tasks""" + tasks = Task.objects.order_by('-created_at')[:50] + + return render(request, 'tasklogger/task_list.html', { + 'tasks': tasks + })