# Django-Proxmox-Mikrotik/manager/models.py
import json
import logging
import time
import uuid
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.db.models import Q
from django.dispatch import receiver
from django_proxmox_mikrotik.settings import ProxmoxConfig
from lib.db import (
BOOLEAN_CHOICES,
BaseModel,
DateAwareMixin,
JOB_STATUS_CHOICES,
SearchableMixin,
TaskAwareModelMixin
)
from lib.proxmox import Proxmox
from mikrotik.models import DNSStatic, IPAddress, IPDHCPLease
from proxmox.models import Lxc
class MinValueValidatorExtended(MinValueValidator):
    """Django ``MinValueValidator`` whose error message also echoes the rejected value."""
    message = 'Ensure this value is greater than or equal to %(limit_value)s. You gave %(value)s.'
class MaxValueValidatorExtended(MaxValueValidator):
    """Django ``MaxValueValidator`` whose error message also echoes the rejected value."""
    message = 'Ensure this value is less than or equal to %(limit_value)s. You gave %(value)s.'
def minmaxvalidators(min_value, max_value):
    """Return a (min, max) pair of the extended validators for a numeric field."""
    lower_bound = MinValueValidatorExtended(min_value)
    upper_bound = MaxValueValidatorExtended(max_value)
    return (lower_bound, upper_bound)
class CloneAbstract(models.Model):
class Meta:
abstract = True
hostname = models.CharField(max_length=150, help_text='Will be used as hostname')
network = models.ForeignKey(IPAddress, on_delete=models.RESTRICT, related_name='clone_network')
cores = models.IntegerField(default=1, validators=minmaxvalidators(1, ProxmoxConfig.MAX_CORES))
memory = models.IntegerField(default=1024, validators=minmaxvalidators(256, ProxmoxConfig.MAX_MEM),
help_text='in MB')
disksize = models.IntegerField(default=10, help_text='in GB',
validators=minmaxvalidators(10, ProxmoxConfig.MAX_DISK))
as_regexp = models.BooleanField(default=True, choices=BOOLEAN_CHOICES,
help_text=r'Add a ".*<hostname>.replace(".",r"\.")$" instead of a hostname')
is_active = models.BooleanField(default=False, choices=BOOLEAN_CHOICES)
status = models.CharField(max_length=150, null=True, blank=True, default='pending', choices=JOB_STATUS_CHOICES)
task_id = models.CharField(max_length=36, default=uuid.uuid4, help_text='UUID for tracking live status')
# Lxcs have no Node - FIXME
node = models.CharField(max_length=150, default=ProxmoxConfig.NODE, editable=False)
def __str__(self):
return f"{self.hostname} from {self.vm} "
@property
def regexp(self):
return f".*{self.hostname.replace('.', r'\.')}" if self.as_regexp else None
@property
def next_vmid(self):
raise NotImplemented('Not implemented')
@property
def proxmox_data(self):
raise NotImplemented('Not implemented')
def execute(self):
raise NotImplemented('Not implemented')
class CloneContainer(CloneAbstract):
    """Clone job for LXC containers.

    Either clones an existing container (``vm``) or creates a fresh one
    from an OS template (``template``) — exactly one of the two must be
    set (see :meth:`clean`).  :meth:`execute` runs the whole provisioning
    pipeline: Proxmox container, Mikrotik DHCP lease, Mikrotik DNS entry
    and the linking :class:`DevContainer` row.  The job row itself is
    deleted after execution, success or failure.
    """

    # Activation state as loaded from the DB (captured in __init__) so a
    # later save can detect a transition.
    _old_active = None

    vm = models.ForeignKey('proxmox.Lxc', on_delete=models.CASCADE,
                           related_name='clone_lxc', null=True, blank=True,
                           verbose_name='LXC Container')
    template = models.ForeignKey('proxmox.LxcTemplate', on_delete=models.RESTRICT, related_name='clone_template',
                                 null=True, blank=True,
                                 help_text='If set, will use this template instead of a VM')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._old_active = self.is_active

    def get_clone_machine_status(self):
        """Refresh the clone source from Proxmox and return its current status."""
        self.vm.sync_from_proxmox()
        return self.vm.status

    def execute(self, task_uuid_override=None, *args, **kwargs):
        """Provision the container and its network resources.

        Steps: stop a running clone source, create/clone the container on
        Proxmox, adjust its resources, create the Mikrotik DHCP lease and
        DNS entry, link everything in a DevContainer and start the
        container.  On failure, already-created resources are cleaned up
        best-effort and the exception is re-raised.  The CloneContainer
        row is always deleted afterwards — it only represents a pending job.

        :param task_uuid_override: reuse an existing task UUID (e.g. from a form)
        :raises Exception: whatever the pipeline raised, after cleanup
        """
        from proxmox.models import Lxc
        from tasklogger.models import TaskFactory
        request = kwargs.pop('request', None)
        # Use override task_uuid if provided (from form), otherwise create new task
        task = TaskFactory(task_uuid=task_uuid_override, request=request)
        task.add_entry(f"Creating container '{self.hostname}'")
        # BUG FIX: these were initialised *inside* the try block; any
        # exception raised before that point made the cleanup code below
        # fail with NameError instead of cleaning up.
        success = False
        lxc = None
        lease = None
        dns = None
        try:
            data = self.proxmox_data
            old_status = None
            if self.vm:
                old_status = self.get_clone_machine_status()
                if old_status == 'running':
                    task.add_entry(f"Stopping container '{self.vm.name}'")
                    self.vm.stop()
            pm = Proxmox()
            route = data.pop('route')
            vmdata = data.pop('vm')
            dnsdata = data.pop('dns')      # popped but unused; DNS args are rebuilt below
            leasedata = data.pop('lease')  # popped but unused; lease args are rebuilt below
            task.add_entry(f"Prepared container data: VM-ID {vmdata['newid']}")
            vmparams = {
                'hostname': vmdata['hostname'],
                'description': vmdata['description'],
            }
            if vmdata['template']:
                # Fresh container from an OS template.
                vmparams |= {
                    'vmid': vmdata['newid'],
                    'cores': vmdata['cores'],
                    'memory': vmdata['memory'],
                    'ostemplate': self.template.volid,
                    'net0': vmdata['net0'],
                    'features': 'nesting=1',
                    'rootfs': f'local-lvm:{vmdata["disksize"]}',
                }
                task.add_entry(f"Creating new container from template {self.template.volid}")
            else:
                # Clone of an existing container.
                vmparams |= {
                    'newid': vmdata['newid'],
                }
                task.add_entry(f"Cloning container {self.vm.vmid}")
            self.status = 'running'
            super().save()

            # Step 1: Create/Clone container (wrapper only around lxc_post)
            task.add_entry("Starting Proxmox container creation...")

            def _lxc_post():
                return pm.lxc_post(route, **vmparams)

            def _restart_cloned():
                return self.vm.start()

            # Wrap the proxmox function - this handles UPID monitoring synchronously
            task.wrap_proxmox_function(_lxc_post)
            if old_status == 'running':
                task.wrap_proxmox_function(_restart_cloned)

            task.add_entry("Container creation completed")
            success = True
            if success:
                task.add_entry("Waiting for container to be ready...")
                time.sleep(3)
                # Step 2: Get container info and update settings
                task.add_entry("Retrieving container information...")
                new_vm_data = pm.get_all_lxc(as_dict=True, vmid=vmdata['newid'])
                if new_vm_data:
                    new_vm_data = new_vm_data[0]
                    lxc = Lxc().from_proxmox(**new_vm_data)
                    lxc.save()
                    task.add_entry(f"Container retrieved: {lxc.name} ({lxc.vmid})")
                    # Update container resources if the clone source differed
                    # from what was requested.
                    changed = False
                    changes = []
                    if self.disksize > lxc.disksize:
                        lxc.disksize = self.disksize
                        changes.append(f"disk: {lxc.disksize}GB")
                        changed = True
                    if self.memory != lxc.memory:
                        lxc.memory = self.memory
                        changes.append(f"memory: {lxc.memory}MB")
                        changed = True
                    if self.cores != lxc.cores:
                        lxc.cores = self.cores
                        changes.append(f"cores: {lxc.cores}")
                        changed = True
                    if changed:
                        lxc.save()
                        task.add_entry(f"Updated container resources: {', '.join(changes)}")
                    # Step 3: Create DHCP Lease (Mikrotik query)
                    task.add_entry("Creating Mikrotik DHCP lease...")
                    leaseargs = {
                        'mac_address': lxc.hwaddr,
                        'address': self.network.get_next_ip,
                        'comment': f'Container - {self.hostname}',
                        'dynamic': False,
                    }
                    lease = IPDHCPLease.objects.create(**leaseargs)
                    # BUG FIX: separator added — address and MAC were concatenated.
                    task.add_entry(f"DHCP lease created: {lease.address} -> {lease.mac_address}")
                    # Step 4: Create DNS Entry (Mikrotik query)
                    task.add_entry("Creating Mikrotik DNS entry...")
                    dnsargs = {}
                    if self.as_regexp:
                        dnsargs['regexp'] = self.regexp
                        task.add_entry(f"Using DNS regexp: {self.regexp}")
                    else:
                        dnsargs['name'] = self.hostname
                        task.add_entry(f"Using DNS hostname: {self.hostname}")
                    dns = DNSStatic.objects.create(address=lease.address, **dnsargs)
                    # BUG FIX: separator added — name and address were concatenated.
                    task.add_entry(f"DNS entry created: {dns.name or dns.regexp} -> {dns.address}")
                    # Step 5: Create DevContainer link
                    task.add_entry("Creating container management entry...")
                    DevContainer.objects.create(
                        lxc=lxc,
                        lease=lease,
                        dns=dns,
                    )
                    task.add_entry("Container management entry created")
                    # Step 6: Start Container
                    task.add_entry("Starting container...")
                    lxc.start()
                    task.add_entry("Container started successfully!")
                    self.status = 'success'
                    task.status = 'completed'
                    task.save()
                else:
                    error_msg = f"Error retrieving container data for VM {vmdata['newid']}"
                    task.add_entry(error_msg)
                    self.status = 'error'
            else:
                # Defensive: wrap_proxmox_function raises on failure, so in
                # practice this branch is unreachable.
                self.status = 'error'
                task.add_entry("Container creation failed")
        except Exception as e:
            task.add_entry(f"Exception during container creation: {str(e)}")
            self.status = 'error'
            logging.exception(e)
            # Cleanup on error — best-effort, individual failures are collected.
            task.add_entry("Cleaning up failed resources...")
            cleanup_errors = []
            try:
                if isinstance(lease, IPDHCPLease):
                    lease.delete(task=task)
                    task.add_entry("Cleaned up DHCP lease")
            except Exception as a:
                cleanup_errors.append(f"DHCP lease: {a}")
            try:
                if isinstance(dns, DNSStatic):
                    dns.delete(task=task)
                    task.add_entry("Cleaned up DNS entry")
            except Exception as s:
                cleanup_errors.append(f"DNS entry: {s}")
            try:
                if isinstance(lxc, Lxc):
                    lxc.delete(task=task)
                    task.add_entry("Cleaned up container")
            except Exception as l:
                cleanup_errors.append(f"Container: {l}")
            if cleanup_errors:
                task.add_entry(f"Cleanup errors: {', '.join(cleanup_errors)}")
            else:
                task.add_entry("Cleanup completed")
            task.status = 'error'
            task.save()
            # Bare `raise` re-raises with the original traceback intact
            # (was `raise e`, which adds this frame to the chain).
            raise
        finally:
            # Keep task logs for debugging, cleanup CloneContainer
            task.unset_as_current()
            self.delete()

    @property
    def proxmox_data(self):
        """Build the payloads for the Proxmox call plus the DHCP lease and DNS entry."""
        data = {
            # https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/lxc/{vmid}/clone
            # if template, then it is just a POST to lxc - empty route
            'route': f'{self.vm.vmid}/clone' if self.vm else '',
            'vm': {
                'hostname': self.hostname,
                'cores': self.cores,
                'cpus': self.cores,
                'memory': self.memory,
                'net0': self.vm.net0 if self.vm else self.template.net0,
                'disksize': self.disksize,
                'description': f'Container - {self.hostname} ',
                'newid': Proxmox().next_vmid,
                'template': self.template.volid if self.template else None,
            },
            'lease': {
                'comment': f'Container - {self.hostname}'
            },
            'dns': {
                # 'name' is swapped for 'regexp' below when as_regexp is set
                'name': self.hostname,
                'comment': f'Container - {self.hostname}'
            },
        }
        if regexp := self.regexp:
            data['dns'].pop('name', None)
            data['dns']['regexp'] = regexp
        return data

    def clean(self):
        super().clean()
        # BUG FIX: was an `assert`, which is stripped under `python -O` and
        # raises AssertionError instead of a validation error Django forms
        # can surface to the user.
        if not (self.vm or self.template):
            from django.core.exceptions import ValidationError
            raise ValidationError("Either vm or template must be set")
# NOTE(review): stub for a future VM clone job.  Previously parked in a
# stray module-level triple-quoted string (a no-op expression); kept as
# real comments so it is clearly dead code.
# class CloneVM(CloneAbstract):
#     vm = models.ForeignKey(VM, on_delete=models.CASCADE, related_name='clone_lxc')
class DevContainer(BaseModel, SearchableMixin, TaskAwareModelMixin):
    """Links the three resources that make up a managed dev container:
    the Proxmox LXC, its Mikrotik DHCP lease and its Mikrotik DNS entry.

    A status snapshot is cached in ``statuscache_data`` (JSON) together
    with a ``last_sync`` epoch timestamp and refreshed lazily.
    """

    lxc = models.OneToOneField('proxmox.Lxc', on_delete=models.SET_NULL, related_name='devcontainer_lxc', null=True)
    lease = models.OneToOneField(IPDHCPLease, on_delete=models.SET_NULL, related_name='devcontainer_lease', null=True)
    dns = models.OneToOneField(DNSStatic, on_delete=models.SET_NULL, related_name='devcontainer_dns', null=True)
    # Cached result of the last status sync; contains a 'last_sync' epoch key.
    statuscache_data = models.JSONField(default=dict, editable=False)

    # Seconds the cached status stays fresh (was a magic 300 inline).
    STATUS_CACHE_TTL = 300

    @property
    def statuscache(self):
        """Return the cached status dict if younger than the TTL, else resync."""
        # BUG FIX: use .get() — a non-empty cache that lacked 'last_sync'
        # previously raised KeyError here.
        if self.statuscache_data and self.statuscache_data.get('last_sync', 0) > time.time() - self.STATUS_CACHE_TTL:
            logging.debug(f"Return cached status for {self.name} - {self.statuscache_data['last_sync']}")
            return self.statuscache_data
        else:
            return self.sync_statuscache()

    def sync_statuscache(self):
        """Fetch current status from Proxmox and the router, persist it in
        ``statuscache_data`` and return it; returns None when the Proxmox
        query (or persisting) fails.  LXC/lease sync errors are logged but
        non-fatal."""
        logging.debug(f"No cached status for {self.name} - sync from proxmox")
        try:
            self.lxc.sync_from_proxmox()
        except Exception as e:
            logging.error(e)
        try:
            with Proxmox() as pm:
                status_data = pm.lxc_get(f"{self.lxc.vmid}/status/current")
        except Exception as e:
            logging.error(e)
            return None
        try:
            self.lease.sync_from_router()
        except Exception as e:
            logging.error(e)
        try:
            status_data['lease_status'] = self.lease.status
            status_data['last_sync'] = time.time()
            self.statuscache_data = status_data
            # Bypass our own save() validation; only persist the cache field.
            super().save(update_fields=['statuscache_data'])
            return status_data
        except Exception as e:
            logging.error(e)
            return None

    def set_statuscache_value(self, key, value):
        """Set one key in the status cache and persist it; returns self.

        BUG FIX: previously wrote to a non-existent ``self.status_cachefile``
        (leftover from a file-based cache), raising AttributeError; the
        cache now lives in the ``statuscache_data`` JSON field.
        """
        cachedata = self.statuscache or {}
        cachedata[key] = value
        self.statuscache_data = cachedata
        super().save(update_fields=['statuscache_data'])
        return self

    @classmethod
    def term_filter(cls, search_string):
        """Build the search Q used by SearchableMixin: matches hostname,
        comments, DNS name/regexp and lease address; all-digit terms also
        match the vmid."""
        q = (
            # Q(lxc__comment__icontains=search_string) |
            Q(lxc__hostname__icontains=search_string) |
            Q(lease__comment__icontains=search_string) |
            Q(dns__name__icontains=search_string) |
            Q(dns__regexp__icontains=search_string) |
            Q(lease__address__icontains=search_string)
        )
        if str(search_string).isdigit():
            q |= Q(lxc__vmid=search_string)
        return q

    @property
    def name(self):
        # Falls back to a composite description when the LXC link is gone.
        try:
            return self.lxc.name
        except Exception as e:
            logging.error(e)
            return f"{self.internal_id} - {self.lease} - {self.dns} - {self.lxc}"

    @property
    def hostname(self):
        # DNS name, or the regexp form when the entry is regexp-based.
        try:
            return self.dns.name or self.dns.regexp
        except Exception as e:
            logging.error(e)
            return f"{self.pk} hostname"

    @property
    def hwaddr(self):
        try:
            return self.lxc.hwaddr
        except Exception as e:
            logging.error(e)
            return f"{self.pk} hwaddr"

    @property
    def address(self):
        try:
            return self.lease.address
        except Exception as e:
            logging.error(e)
            return f"{self.pk} address"

    @property
    def status(self):
        try:
            return self.lease.status
        except Exception as e:
            logging.error(e)
            return f"{self.pk} status"

    def save(self, *args, **kwargs):
        """Validate the lease/DNS/LXC triple before saving.

        Raises ValueError when the DNS address and lease address disagree;
        a MAC mismatch between lease and container is repaired in place by
        updating the lease.
        """
        if self.dns.address != self.lease.address:
            raise ValueError(f"{self.dns.address} != {self.lease.address}")
        if self.lease.mac_address.upper() != self.lxc.hwaddr.upper():
            logging.error(f"{self.lease.mac_address} != {self.lxc.hwaddr} - try to change it")
            self.lease.mac_address = self.lxc.hwaddr
            self.lease.save()
        super().save(*args, **kwargs)

    def __str__(self):
        return f"{self.name} ({self.address})"
@receiver(models.signals.pre_delete, sender=DevContainer)
def before_delete_devcontainer(sender, instance, **kwargs):
    """Tear down the linked DNS entry, DHCP lease and LXC before the
    DevContainer row is deleted, logging into a fresh task."""
    from tasklogger.models import TaskFactory
    task = TaskFactory()
    # Same order as the original: DNS, lease, then the container itself.
    for linked in (instance.dns, instance.lease, instance.lxc):
        if linked:
            linked.delete(task=task)