Commit a4e70a81 by Karsa Zoltán István

Merge branch 'hotplug' into 'master'

Hotplug

See merge request !22
parents 4c1a75d2 5069b23e
@@ -90,11 +90,14 @@ class OperationForm(NoFormTagMixin, forms.Form):
 class VmSaveForm(OperationForm):
     name = forms.CharField(max_length=100, label=_('Name'),
                            help_text=_('Human readable name of template.'))
+    datastore = forms.ModelChoiceField(queryset=None, initial=0, empty_label=None,
+                                       help_text=_('Backing file location'))
     def __init__(self, *args, **kwargs):
         default = kwargs.pop('default', None)
         clone = kwargs.pop('clone', False)
         super(VmSaveForm, self).__init__(*args, **kwargs)
+        self.fields['datastore'].queryset = DataStore.objects.all()
         if default:
             self.fields['name'].initial = default
         if clone:
@@ -643,6 +646,7 @@ class TemplateForm(forms.ModelForm):
         widgets = {
             'system': forms.TextInput,
             'max_ram_size': forms.HiddenInput,
+            'num_cores_max': forms.HiddenInput,
             'parent': forms.Select(attrs={'disabled': ""}),
         }
@@ -850,10 +854,12 @@ class VmCreateDiskForm(OperationForm):
         widget=FileSizeWidget, initial=(10 << 30), label=_('Size'),
         help_text=_('Size of disk to create in bytes or with units '
                     'like MB or GB.'))
+    datastore = forms.ModelChoiceField(queryset=None, initial=0, empty_label=None)
     def __init__(self, *args, **kwargs):
         default = kwargs.pop('default', None)
         super(VmCreateDiskForm, self).__init__(*args, **kwargs)
+        self.fields['datastore'].queryset = DataStore.objects.all()
         if default:
             self.fields['name'].initial = default
@@ -988,6 +994,11 @@ class VmImportDiskForm(OperationForm):
 class VmDownloadDiskForm(OperationForm):
     name = forms.CharField(max_length=100, label=_("Name"), required=False)
     url = forms.CharField(label=_('URL'), validators=[URLValidator(), ])
+    datastore = forms.ModelChoiceField(queryset=None, initial=0, empty_label=None)
+    def __init__(self, *args, **kwargs):
+        super(VmDownloadDiskForm, self).__init__(*args, **kwargs)
+        self.fields['datastore'].queryset = DataStore.objects.all()
     def clean(self):
         cleaned_data = super(VmDownloadDiskForm, self).clean()
...
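A quick usage sketch (not part of the diff; import paths assumed from the project layout): the new datastore field is a ModelChoiceField whose queryset is only filled in __init__, so a bound form validates the selection against the existing DataStore rows.

from dashboard.forms import VmSaveForm   # assumed module path
from storage.models import DataStore

ds = DataStore.objects.first()
form = VmSaveForm(data={'name': 'saved-template', 'datastore': ds.pk})
if form.is_valid():
    chosen_store = form.cleaned_data['datastore']  # a DataStore instance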
@@ -53,7 +53,7 @@ class InstanceTemplateSerializer(serializers.ModelSerializer):
         model = InstanceTemplate
         fields = [ 'id', 'name', 'description', 'parent', 'owner', 'access_method', 'boot_menu',
                    'lease', 'raw_data', 'cloud_init', 'ci_network_config', 'ci_meta_data', 'ci_user_data', 'system',
-                   'has_agent', 'num_cores', 'ram_size', 'max_ram_size', 'arch', 'priority', 'disks']
+                   'has_agent', 'num_cores', 'ram_size', 'max_ram_size', 'arch', 'priority', 'disks', 'num_cores_max']
 class LeaseSerializer(serializers.ModelSerializer):
@@ -82,8 +82,12 @@ class InstanceSerializer(serializers.ModelSerializer):
     macaddr = serializers.SerializerMethodField('get_mac')
     sshportipv4 = serializers.SerializerMethodField('get_sshportipv4')
     hostipv4 = serializers.SerializerMethodField('get_hostipv4')
+    disks = serializers.SerializerMethodField('get_disks')
     #interfaces = serializers.SerializerMethodField('get_interfaces')
+    def get_disks(self, i):
+        return list(disk.id for disk in i.disks.filter(ci_disk=False).all())
     def get_ipv4(self, i):
         return str(i.ipv4)
@@ -109,7 +113,7 @@ class InstanceSerializer(serializers.ModelSerializer):
         model = Instance
         fields = ['id', 'name', 'description', 'status', 'owner', 'access_method', 'boot_menu', 'pw', 'is_base', 'macaddr',
                   'lease', 'raw_data', 'cloud_init', 'ci_meta_data', 'ci_user_data', 'ci_network_config', 'system', 'req_traits', 'interface_set',
-                  'has_agent', 'num_cores', 'ram_size', 'max_ram_size', 'arch', 'priority', 'disks', 'node', 'ipv4addr', 'ipv6addr', 'vlans',
+                  'has_agent', 'num_cores', 'num_cores_max', 'ram_size', 'max_ram_size', 'arch', 'priority', 'disks', 'node', 'ipv4addr', 'ipv6addr', 'vlans',
                   'hookurl', 'sshportipv4', 'hostipv4']
         extra_kwargs = {
             'disks': {'required': False, 'allow_empty': True,},
@@ -122,6 +126,7 @@ class InstanceSerializer(serializers.ModelSerializer):
             'raw_data': {'required': False, },
             'sshportipv4': {'required': False, },
             'hostipv4': {'required': False, },
+            'num_cores_max': {'required': False, },
         }
@@ -153,6 +158,7 @@ class VlanSerializer(serializers.ModelSerializer):
 class CreateDiskSerializer(serializers.Serializer):
     size = serializers.CharField(max_length=50)
     name = serializers.CharField(max_length=100)
+    datastore = serializers.CharField(max_length=40, required=False, allow_blank=True, default=None)
 class ResizeDiskSerializer(serializers.Serializer):
@@ -163,6 +169,7 @@ class DownloadDiskSerializer(serializers.Serializer):
     url = serializers.CharField(max_length=500)
     name = serializers.CharField(max_length=100)
     resize = serializers.CharField(max_length=30, required=False, allow_blank=True, default=None)
+    datastore = serializers.CharField(max_length=40, required=False, allow_blank=True, default=None)
     class Meta:
         extra_kwargs = {'resize': {'required': False, 'allow_blank': True, 'allow_empty': True }}
...
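A hedged sketch of the payloads the updated serializers accept (the serializer module path is an assumption): the new datastore key is optional and defaults to None when omitted.

from dashboard.serializers import CreateDiskSerializer, DownloadDiskSerializer  # assumed path

create = CreateDiskSerializer(data={'size': '10 GB', 'name': 'data', 'datastore': 'default'})
download = DownloadDiskSerializer(data={'url': 'https://example.com/image.iso',
                                        'name': 'installer', 'resize': '', 'datastore': 'default'})
assert create.is_valid() and download.is_valid()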
@@ -5,8 +5,7 @@
 {% if d.ci_disk %} <i class="fa fa-cloud-upload"></i>
 {% endif %}
-{{ d.name }} (#{{ d.id }}) - {{ d.size|filesize }}
+{{ d.name }} (<i class="fa fa-database"></i> {{ d.datastore.name }} #{{ d.id }}) - {{ d.size|filesize }}
 <span class="operation-wrapper pull-right">
 {% if d.is_exportable %}
...
{% load crispy_forms_tags %}
{% load i18n %}
<p class="text-muted">
{% trans "Create datastore" %}
</p>
<form method="POST" action="{% url "dashboard.views.group-create" %}">
{% csrf_token %}
{% crispy form %}
</form>
\ No newline at end of file
@@ -9,11 +9,34 @@
 {% block content %}
+<div class="row">
+  <div class="col-md-12">
+    <div class="panel panel-default">
+      <div class="panel-body">
+        {% for ds in stores %}
+          {% if ds.name == name %}
+            <a href="{% url "dashboard.views.storage.name" name=ds.name %}" class="badge badge-dark">
+              <i class="fa fa-database"></i> {{ ds.hostname }}/{{ ds.name }}
+            </a>
+          {% else %}
+            <a href="{% url "dashboard.views.storage.name" name=ds.name %}" class="badge badge-primary">
+              {{ ds.hostname }}/{{ ds.name }}
+            </a>
+          {% endif %}
+        {% empty %}
+          {% trans "None" %}
+        {% endfor %}
+        <a href="" class="badge badge-primary"><i class="fa fa-plus"></i></a>
+      </div>
+    </div>
+  </div>
+</div>
 <div class="row">
   <div class="col-md-5">
     <div class="panel panel-default">
       <div class="panel-heading">
-        <h3 class="no-margin"><i class="fa fa-database"></i> {% trans "Datastore" %}</h3>
+        <h3 class="no-margin"><i class="fa fa-database"></i> {{ name }} {% trans "Datastore" %}</h3>
       </div>
       <div class="panel-body">
         {% crispy form %}
...
@@ -40,6 +40,7 @@
 <legend>{% trans "Resource configuration" %}</legend>
 {% include "dashboard/_resources-sliders.html" with field_priority=form.priority field_num_cores=form.num_cores field_ram_size=form.ram_size %}
 {{ form.max_ram_size|as_crispy_field }}
+{{ form.num_cores_max|as_crispy_field }}
 </fieldset>
 <fieldset>
...
@@ -68,7 +68,7 @@ from .views import (
     EnableTwoFactorView, DisableTwoFactorView,
     AclUserGroupAutocomplete, AclUserAutocomplete,
     RescheduleView, GroupImportView, GroupExportView,
-    VariableREST, GetVariableREST,
+    VariableREST, GetVariableREST, HotplugMemSetREST, HotplugVCPUSetREST
 )
 from .views.node import node_ops, NodeREST, GetNodeREST
 from .views.vm import vm_ops, vm_mass_ops
@@ -102,6 +102,8 @@ urlpatterns = [
     path('acpi/interface/<int:pk>/', GetInterfaceREST.as_view()),
     path('acpi/ftusers/', InstanceFTforUsersREST.as_view()),
     path('acpi/ftusersid/', InstanceFTforUsersIdREST.as_view()),
+    path('acpi/vm/<int:pk>/hotplugmem/', HotplugMemSetREST.as_view()),
+    path('acpi/vm/<int:pk>/hotplugvcpu/', HotplugVCPUSetREST.as_view()),
     path('acpi/vm/<int:pk>/downloaddisk/', DownloadDiskREST.as_view()),
     path('acpi/vm/<int:vm_id>/port/<int:vlan_id>/', SetupPortREST.as_view()),
     path('acpi/vm/<int:vm_id>/rules/<int:vlan_id>/', RulesREST.as_view()),
@@ -297,8 +299,9 @@ urlpatterns = [
         name="dashboard.views.token-login"),
     url(r'^vm/opensearch.xml$', OpenSearchDescriptionView.as_view(),
         name="dashboard.views.vm-opensearch"),
-    url(r'^storage/$', StorageDetail.as_view(),
+    url(r'^storage/(?P<name>[^/]+)/$', StorageDetail.as_view(),
+        name="dashboard.views.storage.name"),
+    url(r'^storage/', StorageDetail.as_view(),
         name="dashboard.views.storage"),
     url(r'^disk/(?P<pk>\d+)/$', DiskDetail.as_view(),
         name="dashboard.views.disk-detail"),
...
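A minimal sketch (not part of the diff) of resolving the new per-datastore route added above; "default" stands in for any existing DataStore name, and the final path depends on where this URLconf is mounted.

from django.urls import reverse

url = reverse("dashboard.views.storage.name", kwargs={"name": "default"})
# e.g. "/dashboard/storage/default/" under an assumed "dashboard/" prefix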
@@ -43,15 +43,19 @@ class StorageDetail(SuperuserRequiredMixin, UpdateView):
     template_name = "dashboard/storage/detail.html"
     def get_object(self):
-        return DataStore.objects.get()
+        datastore = 'default'
+        if 'name' in self.kwargs:
+            datastore = self.kwargs['name']
+        return DataStore.objects.filter(name=datastore).get()
     def get_context_data(self, **kwargs):
         context = super(StorageDetail, self).get_context_data(**kwargs)
+        context["stores"] = DataStore.objects.all()
         ds = self.get_object()
         try:
+            context['name'] = ds.name
             context['stats'] = self._get_stats()
-            context['missing_disks'] = ds.get_missing_disks()
+            context['g'] = ds.get_missing_disks()
             context['orphan_disks'] = ds.get_orphan_disks()
         except WorkerNotFound:
             messages.error(self.request, _("The DataStore is offline."))
...
@@ -133,7 +133,10 @@ class CreatePersistentDiskREST(APIView):
         if serializer.is_valid():
             disk_size = str(size_util(str(data['size'])))
             disk_name = str(data['name'])
-            disk = Disk.create(size=disk_size, name=disk_name, type="qcow2-norm")
+            datastore = 'default'
+            if 'datastore' in data:
+                datastore = data['datastore']
+            disk = Disk.create(size=disk_size, name=disk_name, type="qcow2-norm", datastore=datastore)
             disk.full_clean()
             disk.dev_num = 'f'
             disk.save()
@@ -155,8 +158,12 @@ class DownloadPersistentDiskREST(APIView):
             resize = None
             if 'resize' in data:
                 resize = str(data['resize'])
+            datastore = 'default'
+            if 'datastore' in data:
+                datastore = data['datastore']
             store_act = StorageActivity.create(code_suffix="download_disk", user=request.user)
-            abortable_async_downloaddisk_operation.apply_async(args=(store_act.id, disk_url, disk_name, resize), queue='localhost.man.slow')
+            abortable_async_downloaddisk_operation.apply_async(
+                args=(store_act.id, disk_url, disk_name, resize, datastore), queue='localhost.man.slow')
             serializer = StorageActivitySerializer(store_act, many=False)
             return JsonResponse(serializer.data, status=201)
         return JsonResponse(serializer.errors, status=400)
@@ -188,6 +195,34 @@ class GetVlanREST(APIView):
         return JsonResponse(serializer.data, safe=False)
+class HotplugMemSetREST(APIView):
+    authentication_classes = [TokenAuthentication,BasicAuthentication]
+    permission_classes = [IsAdminUser]
+    def put(self, request, pk, format=None):
+        instance = Instance.objects.get(pk=pk)
+        data = JSONParser().parse(request)
+        ram_size = int(data["ram_size"])
+        instance.hotplug_mem(user=request.user, memory=ram_size*1024 )
+        instance.ram_size = ram_size
+        instance.save()
+        serializer = InstanceSerializer(instance)
+        return JsonResponse(serializer.data, status=201)
+class HotplugVCPUSetREST(APIView):
+    authentication_classes = [TokenAuthentication,BasicAuthentication]
+    permission_classes = [IsAdminUser]
+    def put(self, request, pk, format=None):
+        instance = Instance.objects.get(pk=pk)
+        data = JSONParser().parse(request)
+        num_cores = int(data["num_cores"])
+        instance.hotplug_vcpu(user=request.user, vcpu=num_cores)
+        instance.num_cores = num_cores
+        instance.save()
+        serializer = InstanceSerializer(instance)
+        return JsonResponse(serializer.data, status=201)
 class InterfaceREST(APIView):
     authentication_classes = [TokenAuthentication,BasicAuthentication]
     permission_classes = [IsAdminUser]
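A hypothetical client sketch for the two endpoints wired up in urls.py above; the host, URL prefix and token are placeholders. Both views expect a PUT with a JSON body from an admin user (TokenAuthentication or BasicAuthentication).

import requests

BASE = "https://cloud.example.org"                # assumed host and mount point
HEADERS = {"Authorization": "Token <api-token>"}  # placeholder credential

# grow running VM #42 to 4096 MiB; the view converts MiB to KiB itself
requests.put(f"{BASE}/acpi/vm/42/hotplugmem/", json={"ram_size": 4096}, headers=HEADERS)

# raise the online vCPU count of VM #42 to 4
requests.put(f"{BASE}/acpi/vm/42/hotplugvcpu/", json={"num_cores": 4}, headers=HEADERS)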
@@ -227,13 +262,21 @@ class InstanceREST(APIView):
             data['status'] = 'STOPPED'
         if 'owner' not in data:
             data['owner'] = request.user.id
+        if 'num_cores_max' not in data:
+            data['num_cores_max'] = data['num_cores']
         serializer = InstanceSerializer(data=data)
         if serializer.is_valid():
             inst = serializer.save()
+            if 'disks' in data:
+                for d in data['disks']:
+                    d = Disk.objects.get(id=d)
+                    inst.disks.add(d)
+                inst.save()
             networks = []
             if 'vlans' in data:
                 for v in data['vlans']:
-                    v = Vlan.objects.filter(vid=v).get()
+                    v = Vlan.objects.get(id=v)
                     if not v.has_level(request.user, "user"):
                         raise PermissionDenied()
                     networks.append(InterfaceTemplate(vlan=v, managed=v.managed))
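A hypothetical fragment of the instance-creation payload handled by InstanceREST; only the keys touched by this diff are shown, and the values are made up.

payload = {
    "num_cores": 2,        # 'num_cores_max' is copied from this value when omitted
    "num_cores_max": 4,    # optional upper bound for vCPU hotplug
    "disks": [11, 12],     # attached after save via inst.disks.add(...)
    "vlans": [3],          # now looked up with Vlan.objects.get(id=...)
}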
@@ -1021,7 +1064,7 @@ class VmResourcesChangeView(VmOperationView):
                                 "#resources")
         else:
             extra = form.cleaned_data
-            extra['max_ram_size'] = extra['ram_size']
+            #extra['max_ram_size'] = extra['ram_size']
             return super(VmResourcesChangeView, self).post(request, extra,
                                                            *args, **kwargs)
...
# Generated by Django 3.2.3 on 2022-12-31 10:11
from django.db import migrations, models

class Migration(migrations.Migration):
    dependencies = [
        ('storage', '0005_storageactivity'),
    ]
    operations = [
        migrations.AddField(
            model_name='datastore',
            name='driver_cache',
            field=models.CharField(default='none', max_length=20, verbose_name='cache-mode - qemu'),
        ),
        migrations.AlterField(
            model_name='datastore',
            name='hostname',
            field=models.CharField(max_length=40, verbose_name='hostname'),
        ),
    ]
...
# Generated by Django 3.2.3 on 2023-01-13 16:40
from django.db import migrations, models

class Migration(migrations.Migration):
    dependencies = [
        ('storage', '0006_auto_20221231_1011'),
    ]
    operations = [
        migrations.AddField(
            model_name='disk',
            name='cache_size',
            field=models.IntegerField(default=1024, help_text='Disk metadata cache max size (Kbyte)', verbose_name='cache size'),
        ),
    ]
@@ -29,7 +29,7 @@ from celery.result import allow_join_result
 from celery.exceptions import TimeoutError
 from django.core.exceptions import ObjectDoesNotExist
 from django.urls import reverse
-from django.db.models import (Model, BooleanField, CharField, DateTimeField,
+from django.db.models import (Model, BooleanField, CharField, DateTimeField, IntegerField,
                               ForeignKey)
 from django.db import models
 from django.utils import timezone
@@ -51,8 +51,9 @@ class DataStore(Model):
     """
     name = CharField(max_length=100, unique=True, verbose_name=_('name'))
     path = CharField(max_length=200, unique=True, verbose_name=_('path'))
-    hostname = CharField(max_length=40, unique=True,
+    hostname = CharField(max_length=40, unique=False,
                          verbose_name=_('hostname'))
+    driver_cache = CharField(max_length=20, unique=False, verbose_name=_('cache-mode - qemu'), default='none')
     class Meta:
         ordering = ['name']
@@ -109,7 +110,7 @@ class DataStore(Model):
         queue_name = self.get_remote_queue_name('storage', "slow")
         files = set(storage_tasks.list_files.apply_async(
             args=[self.path], queue=queue_name).get(timeout=timeout))
-        disks = Disk.objects.filter(destroyed__isnull=True, is_ready=True)
+        disks = Disk.objects.filter(destroyed__isnull=True, is_ready=True, datastore=self)
         return disks.exclude(filename__in=files)
     @method_cache(120)
@@ -146,6 +147,9 @@ class Disk(TimeStampedModel):
     destroyed = DateTimeField(blank=True, default=None, null=True)
     ci_disk = BooleanField(default=False)
     is_ready = BooleanField(default=False)
+    cache_size = IntegerField(default=1024,
+                              help_text=_("Disk metadata cache max size (Kbyte)"),
+                              verbose_name=_('cache size'))
     class Meta:
         ordering = ['name']
@@ -342,10 +346,11 @@ class Disk(TimeStampedModel):
             'name': self.name,
             'source': self.path,
             'driver_type': self.vm_format,
-            'driver_cache': 'none',
+            'driver_cache': self.datastore.driver_cache,
             'target_device': self.device_type + self.dev_num,
             'target_bus': self.device_bus,
-            'disk_device': 'cdrom' if self.type == 'iso' else 'disk'
+            'disk_device': 'cdrom' if self.type == 'iso' else 'disk',
+            'cache_size': self.cache_size
         }
     def get_disk_desc(self):
@@ -357,7 +362,8 @@ class Disk(TimeStampedModel):
             'format': self.format,
             'size': self.size,
             'base_name': self.base.filename if self.base else None,
-            'type': 'snapshot' if self.base else 'normal'
+            'type': 'snapshot' if self.base else 'normal',
+            'cache_size': self.cache_size
         }
     def get_remote_queue_name(self, queue_id='storage', priority=None,
@@ -419,8 +425,8 @@ class Disk(TimeStampedModel):
         return True
     @classmethod
-    def create(cls, user=None, **params):
-        disk = cls.__create(user, params)
+    def create(cls, user=None, datastore='default', **params):
+        disk = cls.__create(user, datastore, params)
         disk.clean()
         disk.save()
         logger.debug("Disk created from: %s",
@@ -428,8 +434,10 @@ class Disk(TimeStampedModel):
         return disk
     @classmethod
-    def __create(cls, user, params):
-        datastore = params.pop('datastore', DataStore.objects.get())
+    def __create(cls, user, datastore, params):
+        if isinstance(datastore, str):
+            datastore = DataStore.objects.filter(name=datastore).get()
+        datastore = params.pop('datastore', datastore)
         filename = params.pop('filename', str(uuid.uuid4()))
         disk = cls(filename=filename, datastore=datastore, **params)
         return disk
@@ -452,7 +460,7 @@ class Disk(TimeStampedModel):
         params.setdefault('name', 'ci-disk')
         params.setdefault('type', 'raw-ro')
         params.setdefault('size', None)
-        disk = cls.__create(params=params, user=user)
+        disk = cls.__create(params=params, user=user, datastore='default')
         queue_name = disk.get_remote_queue_name('storage', priority="fast")
         disk_desc = disk.get_disk_desc()
         result = storage_tasks.create_ci_disk.apply_async(args=[disk_desc, meta_data, user_data, network_data],
@@ -469,7 +477,7 @@ class Disk(TimeStampedModel):
         return disk
     @classmethod
-    def download(cls, url, task, user=None, resize = None, **params):
+    def download(cls, url, task, user=None, resize = None, datastore = None, **params):
         """Create disk object and download data from url synchronusly.
         :param url: image url to download.
@@ -487,7 +495,9 @@ class Disk(TimeStampedModel):
         params.setdefault('name', url.split('/')[-1])
         params.setdefault('type', 'iso')
         params.setdefault('size', None)
-        disk = cls.__create(params=params, user=user)
+        if not datastore:
+            datastore = DataStore.objects.filter(name='default').get().name
+        disk = cls.__create(params=params, user=user, datastore=datastore)
         queue_name = disk.get_remote_queue_name('storage', priority='slow')
         remote = storage_tasks.download.apply_async(
             kwargs={'url': url, 'parent_id': task.request.id,
@@ -554,7 +564,7 @@ class Disk(TimeStampedModel):
             args=[self.datastore.path, self.filename],
             queue=queue_name).get(timeout=timeout)
-    def save_as(self, task=None, user=None, task_uuid=None, timeout=300):
+    def save_as(self, task=None, user=None, task_uuid=None, datastore=None, timeout=300):
         """Save VM as template.
         Based on disk type:
@@ -583,7 +593,9 @@ class Disk(TimeStampedModel):
         new_type, new_base = mapping[self.type]
-        disk = Disk.create(datastore=self.datastore,
+        if not datastore:
+            datastore = self.datastore
+        disk = Disk.create(datastore=datastore,
                            base=new_base,
                            name=self.name, size=self.size,
                            type=new_type, dev_num=self.dev_num)
...
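A minimal sketch of the reworked factory methods above (values are illustrative): datastore may now be passed as a DataStore name, and __create resolves the string to the model row; Disk.download falls back to the "default" store when datastore is None.

from storage.models import Disk

disk = Disk.create(size=10 << 30, name='scratch', type='qcow2-norm',
                   datastore='default')  # same store the parameter defaults to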
# Generated by Django 3.2.3 on 2022-12-27 19:39
import django.core.validators
from django.db import migrations, models

class Migration(migrations.Migration):
    dependencies = [
        ('vm', '0014_auto_20221010_1442'),
    ]
    operations = [
        migrations.AddField(
            model_name='instance',
            name='num_cores_max',
            field=models.IntegerField(default=2, help_text='Number of max virtual CPU cores available to the virtual machine.', validators=[django.core.validators.MinValueValidator(0)], verbose_name='number of cores'),
        ),
        migrations.AddField(
            model_name='instancetemplate',
            name='num_cores_max',
            field=models.IntegerField(default=2, help_text='Number of max virtual CPU cores available to the virtual machine.', validators=[django.core.validators.MinValueValidator(0)], verbose_name='number of cores'),
        ),
        migrations.AddField(
            model_name='namedbaseresourceconfig',
            name='num_cores_max',
            field=models.IntegerField(default=2, help_text='Number of max virtual CPU cores available to the virtual machine.', validators=[django.core.validators.MinValueValidator(0)], verbose_name='number of cores'),
        ),
        migrations.AlterField(
            model_name='instance',
            name='ci_network_config',
            field=models.TextField(blank=True, default="version: 2\nethernets:\n ens3:\n match:\n macaddress: '{{ net.mac }}'\n addresses: \n - {{ net.ipv4 }}/{{ net.mask4 }}\n gateway4: {{ net.gateway4 }}\n nameservers:\n addresses:\n - 8.8.8.8", help_text='When cloud-init is active, set network-config(YAML format)', verbose_name='CI Network Data'),
        ),
        migrations.AlterField(
            model_name='instancetemplate',
            name='ci_network_config',
            field=models.TextField(blank=True, default="version: 2\nethernets:\n ens3:\n match:\n macaddress: '{{ net.mac }}'\n addresses: \n - {{ net.ipv4 }}/{{ net.mask4 }}\n gateway4: {{ net.gateway4 }}\n nameservers:\n addresses:\n - 8.8.8.8", help_text='When cloud-init is active, set network-config(YAML format)', verbose_name='CI Network Data'),
        ),
    ]
...
# Generated by Django 3.2.3 on 2022-12-28 14:16
import django.core.validators
from django.db import migrations, models

class Migration(migrations.Migration):
    dependencies = [
        ('vm', '0015_auto_20221227_1939'),
    ]
    operations = [
        migrations.AlterField(
            model_name='instance',
            name='num_cores_max',
            field=models.IntegerField(default=0, help_text='Number of max virtual CPU cores available to the virtual machine.', validators=[django.core.validators.MinValueValidator(0)], verbose_name='number of cores'),
        ),
        migrations.AlterField(
            model_name='instancetemplate',
            name='num_cores_max',
            field=models.IntegerField(default=0, help_text='Number of max virtual CPU cores available to the virtual machine.', validators=[django.core.validators.MinValueValidator(0)], verbose_name='number of cores'),
        ),
        migrations.AlterField(
            model_name='namedbaseresourceconfig',
            name='num_cores_max',
            field=models.IntegerField(default=0, help_text='Number of max virtual CPU cores available to the virtual machine.', validators=[django.core.validators.MinValueValidator(0)], verbose_name='number of cores'),
        ),
    ]
...
# Generated by Django 3.2.3 on 2022-12-31 10:11
import django.core.validators
from django.db import migrations, models

class Migration(migrations.Migration):
    dependencies = [
        ('vm', '0016_auto_20221228_1416'),
    ]
    operations = [
        migrations.AlterField(
            model_name='instance',
            name='max_ram_size',
            field=models.IntegerField(help_text='Upper memory size limit for balloning (or hotplug).', validators=[django.core.validators.MinValueValidator(0)], verbose_name='maximal RAM size'),
        ),
        migrations.AlterField(
            model_name='instance',
            name='num_cores_max',
            field=models.IntegerField(default=0, help_text='Number of max virtual CPU cores available to the virtual machine (for hotplug).', validators=[django.core.validators.MinValueValidator(0)], verbose_name='number of max cores'),
        ),
        migrations.AlterField(
            model_name='instancetemplate',
            name='max_ram_size',
            field=models.IntegerField(help_text='Upper memory size limit for balloning (or hotplug).', validators=[django.core.validators.MinValueValidator(0)], verbose_name='maximal RAM size'),
        ),
        migrations.AlterField(
            model_name='instancetemplate',
            name='num_cores_max',
            field=models.IntegerField(default=0, help_text='Number of max virtual CPU cores available to the virtual machine (for hotplug).', validators=[django.core.validators.MinValueValidator(0)], verbose_name='number of max cores'),
        ),
        migrations.AlterField(
            model_name='namedbaseresourceconfig',
            name='max_ram_size',
            field=models.IntegerField(help_text='Upper memory size limit for balloning (or hotplug).', validators=[django.core.validators.MinValueValidator(0)], verbose_name='maximal RAM size'),
        ),
        migrations.AlterField(
            model_name='namedbaseresourceconfig',
            name='num_cores_max',
            field=models.IntegerField(default=0, help_text='Number of max virtual CPU cores available to the virtual machine (for hotplug).', validators=[django.core.validators.MinValueValidator(0)], verbose_name='number of max cores'),
        ),
    ]
@@ -49,12 +49,16 @@ class BaseResourceConfigModel(Model):
                             help_text=_('Number of virtual CPU cores '
                                         'available to the virtual machine.'),
                             validators=[MinValueValidator(0)])
+    num_cores_max = IntegerField(verbose_name=_('number of max cores'),
+                                 help_text=_('Number of max virtual CPU cores '
+                                             'available to the virtual machine (for hotplug).'),
+                                 validators=[MinValueValidator(0)], default=0)
     ram_size = IntegerField(verbose_name=_('RAM size'),
                             help_text=_('Mebibytes of memory.'),
                             validators=[MinValueValidator(0)])
     max_ram_size = IntegerField(verbose_name=_('maximal RAM size'),
                                 help_text=_('Upper memory size limit '
-                                            'for balloning.'),
+                                            'for balloning (or hotplug).'),
                                 validators=[MinValueValidator(0)])
     arch = CharField(max_length=10, verbose_name=_('architecture'),
                      choices=ARCHITECTURES)
...
@@ -545,10 +545,11 @@ class Instance(AclBase, VirtualMachineDescModel, StatusModel, OperatedMixin,
         for network in networks:
             if not network.vlan.has_level(params['owner'], 'user'):
                 raise PermissionDenied()
         # create instance and do additional setup
         inst = cls(**params)
+        #if not params["num_cores_max"]:
+        #    inst.num_cores_max = inst.num_cores
         # save instance
         inst.full_clean()
         inst.save()
@@ -606,7 +607,7 @@ class Instance(AclBase, VirtualMachineDescModel, StatusModel, OperatedMixin,
         tags = template.tags.all() if tags is None else tags
         # prepare parameters
-        common_fields = ['name', 'description', 'num_cores', 'ram_size',
+        common_fields = ['name', 'description', 'num_cores', 'num_cores_max', 'ram_size',
                          'max_ram_size', 'arch', 'priority', 'boot_menu',
                          'raw_data', 'lease', 'access_method', 'system',
                          'cloud_init', 'ci_meta_data', 'ci_user_data', 'ci_network_config', 'has_agent']
@@ -731,7 +732,7 @@ class Instance(AclBase, VirtualMachineDescModel, StatusModel, OperatedMixin,
             datastore = self.disks.all()[0].datastore
         except IndexError:
             from storage.models import DataStore
-            datastore = DataStore.objects.get()
+            datastore = DataStore.objects.filter(name='default').get()
         path = datastore.path + '/' + self.vm_name + '.dump'
         return {'datastore': datastore, 'path': path}
@@ -851,6 +852,7 @@ class Instance(AclBase, VirtualMachineDescModel, StatusModel, OperatedMixin,
         return {
             'name': self.vm_name,
             'vcpu': self.num_cores,
+            'vcpu_max': self.num_cores_max,
             'memory': int(self.ram_size) * 1024,  # convert from MiB to KiB
             'memory_max': int(self.max_ram_size) * 1024,  # convert MiB to KiB
             'cpu_share': self.priority,
...
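An illustrative view (values made up) of the VM description the driver receives after this change; only the keys visible in the hunk above are listed.

vm_desc = {
    'name': 'cloud-1234',
    'vcpu': 2,
    'vcpu_max': 8,            # new upper bound for vCPU hotplug
    'memory': 2048 * 1024,    # MiB converted to KiB
    'memory_max': 8192 * 1024,
    'cpu_share': 100,
}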
@@ -292,12 +292,12 @@ class CreateDiskOperation(InstanceOperation):
     accept_states = ('STOPPED', 'PENDING', 'RUNNING')
     concurrency_check = False
-    def _operation(self, user, size, activity, name=None):
+    def _operation(self, user, size, activity, datastore, name=None):
         from storage.models import Disk
         if not name:
             name = "new disk"
-        disk = Disk.create(size=size, name=name, type="qcow2-norm")
+        disk = Disk.create(size=size, name=name, datastore=datastore.name, type="qcow2-norm")
         disk.full_clean()
         devnums = list(ascii_lowercase)
         for d in self.instance.disks.all():
@@ -365,8 +365,8 @@ class DownloadDiskOperation(InstanceOperation):
     async_queue = "localhost.man.slow"
     concurrency_check = False  # warning!!!
-    def _operation(self, user, url, task, activity, name=None):
-        disk = Disk.download(url=url, name=name, task=task)
+    def _operation(self, user, url, task, activity, datastore, name=None):
+        disk = Disk.download(url=url, name=name, task=task, datastore=datastore.name)
         devnums = list(ascii_lowercase)
         for d in self.instance.disks.all():
             devnums.remove(d.dev_num)
@@ -857,7 +857,7 @@ class SaveAsTemplateOperation(InstanceOperation):
             disk.destroy()
     def _operation(self, activity, user, system, name=None,
-                   with_shutdown=True, clone=False, task=None, **kwargs):
+                   with_shutdown=True, clone=False, task=None, datastore='default', **kwargs):
         try:
             self.instance._cleanup(parent_activity=activity, user=user)
         except:
@@ -880,6 +880,7 @@ class SaveAsTemplateOperation(InstanceOperation):
             'max_ram_size': self.instance.max_ram_size,
             'name': name or self._rename(self.instance.name),
             'num_cores': self.instance.num_cores,
+            'num_cores_max': self.instance.num_cores_max,
             'owner': user,
             'parent': self.instance.template or None,  # Can be problem
             'priority': self.instance.priority,
@@ -897,9 +898,9 @@ class SaveAsTemplateOperation(InstanceOperation):
         from storage.models import Disk
-        def __try_save_disk(disk):
+        def __try_save_disk(disk, datastore):
             try:
-                return disk.save_as(task)
+                return disk.save_as(task, datastore=datastore)
             except Disk.WrongDiskTypeError:
                 return disk
@@ -912,7 +913,7 @@ class SaveAsTemplateOperation(InstanceOperation):
                 ugettext_noop("saving disk %(name)s"),
                 name=disk.name)
         ):
-            self.disks.append(__try_save_disk(disk))
+            self.disks.append(__try_save_disk(disk, datastore=datastore))
         # create template and do additional setup
         tmpl = InstanceTemplate(**params)
@@ -1457,6 +1458,41 @@ class ScreenshotOperation(RemoteInstanceOperation):
 @register_operation
+class HotPlugMem(RemoteInstanceOperation):
+    id = 'hotplug_mem'
+    name = _("hotplug_mem")
+    description = _("")
+    acl_level = "owner"
+    required_perms = ('vm.change_resources',)
+    accept_states = ('RUNNING',)
+    task = vm_tasks.hotplug_memset
+    def _get_remote_args(self, **kwargs):
+        return (super(HotPlugMem, self)._get_remote_args(**kwargs) + [kwargs["memory"]] )
+    def _operation(self, **kwargs):
+        super()._operation(**kwargs)
+        return create_readable(ugettext_noop("Hotplug memory: set to %(mem)sKb"), mem=kwargs["memory"])
+@register_operation
+class HotPlugVCPU(RemoteInstanceOperation):
+    id = 'hotplug_vcpu'
+    name = _("hotplug_vcpu")
+    description = _("")
+    acl_level = "owner"
+    required_perms = ('vm.change_resources',)
+    accept_states = ('RUNNING',)
+    task = vm_tasks.hotplug_vcpuset
+    def _get_remote_args(self, **kwargs):
+        return (super(HotPlugVCPU, self)._get_remote_args(**kwargs) + [kwargs["vcpu"]] )
+    def _operation(self, **kwargs):
+        super()._operation(**kwargs)
+        return create_readable(ugettext_noop("Hotplug vcpu: set to %(vcpu)s"), vcpu=kwargs["vcpu"])
+@register_operation
 class RecoverOperation(InstanceOperation):
     id = 'recover'
     name = _("recover")
@@ -1548,7 +1584,7 @@ class ResourcesOperation(InstanceOperation):
     accept_states = ('STOPPED', 'PENDING', 'RUNNING')
     def _operation(self, user, activity,
-                   num_cores, ram_size, max_ram_size, priority,
+                   num_cores, ram_size, priority,
                    with_shutdown=False, task=None):
         if self.instance.status == 'RUNNING' and not with_shutdown:
             raise Instance.WrongStateError(self.instance)
@@ -1562,7 +1598,6 @@ class ResourcesOperation(InstanceOperation):
         self.instance.num_cores = num_cores
         self.instance.ram_size = ram_size
-        self.instance.max_ram_size = max_ram_size
         self.instance.priority = priority
         self.instance.full_clean()
...
@@ -60,12 +60,12 @@ def abortable_async_node_operation(task, operation_id, node_pk, activity_pk,
 @celery.task(base=AbortableTask, bind=True)
-def abortable_async_downloaddisk_operation(task, activity_pk, url, name, resize):
+def abortable_async_downloaddisk_operation(task, activity_pk, url, name, resize, datastore):
     activity = StorageActivity.objects.get(pk=activity_pk)
     activity.task_uuid = task.request.id
     activity.save()
-    disk = Disk.download(url=url, name=name, task=task, resize=resize)
+    disk = Disk.download(url=url, name=name, task=task, resize=resize, datastore=datastore)
     disk.dev_num = 'g'
     disk.full_clean()
     disk.save()
...
@@ -186,3 +186,11 @@ def get_node_metrics(params):
 @celery.task(name='vmdriver.screenshot')
 def screenshot(params):
     pass
+@celery.task(name='vmdriver.hotplug_memset')
+def hotplug_memset(params):
+    pass
+@celery.task(name='vmdriver.hotplug_vcpuset')
+def hotplug_vcpuset(params):
+    pass
\ No newline at end of file