Commit a228d7ae by Czémán Arnold

storage, vm: remove redundant code and small fix

parent feae17a6
@@ -139,6 +139,11 @@ class DataStore(Model):

     @method_cache(30)
     def get_orphan_disks(self, timeout=15):
         """Disk image files without Disk object in the database.
+
+        Exclude cloud-xxxxxxxx.dump format images.
+
+        :param timeout: Seconds before TimeOut exception
+        :type timeout: int
         """
         queue_name = self.get_remote_queue_name('storage', "slow")
         files = set(storage_tasks.list_files.apply_async(
@@ -155,6 +160,9 @@ class DataStore(Model):

     @method_cache(30)
     def get_missing_disks(self, timeout=15):
         """Disk objects without disk image files.
+
+        :param timeout: Seconds before TimeOut exception
+        :type timeout: int
         """
         queue_name = self.get_remote_queue_name('storage', "slow")
         files = set(storage_tasks.list_files.apply_async(
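These two cached helpers take over the set arithmetic that list_orphan_disks and list_missing_disks (further down in this commit) used to do inline: compare the filenames reported by the storage driver's list_files task with the Disk rows in the database. A minimal sketch of that comparison with plain sets, detached from Celery and the ORM (the function and argument names below are illustrative stand-ins, not project code):

```python
def split_orphan_and_missing(remote_files, db_filenames, destroyed=()):
    """Illustrative stand-in for what get_orphan_disks/get_missing_disks
    compute: set differences between the driver's file listing and the
    database's Disk filenames."""
    files = set(remote_files)           # reported by the list_files task
    disks = set(db_filenames)           # every Disk row of the datastore
    alive = disks - set(destroyed)      # rows not yet marked as destroyed
    orphans = files - disks             # on disk but unknown to the database
    missing = alive - files             # in the database but gone from disk
    return orphans, missing


orphans, missing = split_orphan_and_missing(
    remote_files=["cloud-1.img", "stray.img"],
    db_filenames=["cloud-1.img", "cloud-2.img"],
)
assert orphans == {"stray.img"}
assert missing == {"cloud-2.img"}
```

Note that, as in the code removed from list_missing_disks below, disks already marked as destroyed are only excluded on the "missing" side of the comparison.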
@@ -599,7 +607,7 @@ class Disk(TimeStampedModel):
         """
         queue_name = self.datastore.get_remote_queue_name(
             'storage', priority='slow')
-        res = storage_tasks.is_exists.apply_async(
+        res = storage_tasks.exists.apply_async(
            args=[self.datastore.type,
                  self.datastore.path,
                  self.filename],
...
@@ -24,7 +24,7 @@ logger = logging.getLogger(__name__)

 @celery.task
-def garbage_collector(timeout=15):
+def garbage_collector(timeout=15, percent=10):
     """ Garbage collector for disk images.

     If there is not enough free space on datastore (default 10%)
@@ -45,9 +45,11 @@ def garbage_collector(timeout=15):
             logger.info("Image: %s at Datastore: %s fetch for destroy." %
                         (i, ds.path))
         try:
-            storage_tasks.make_free_space.apply_async(
-                args=[ds.type, ds.path, deletable_disks],
+            success = storage_tasks.make_free_space.apply_async(
+                args=[ds.type, ds.path, deletable_disks, percent],
                 queue=queue_name).get(timeout=timeout)
+            if not success:
+                logger.warning("Has no deletable disk.")
         except Exception as e:
             logger.warning(str(e))
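garbage_collector now forwards a percent argument to make_free_space and warns when the task reports failure instead of silently discarding the result. The driver implementation is not part of this diff; the contract the caller relies on could look roughly like the sketch below (an assumption for illustration, not the real storagedriver code):

```python
import os


def make_free_space(data_store_type, path, deletable_disks, percent=10):
    """Sketch of the contract implied by the caller: delete candidates from
    `deletable_disks` until at least `percent` percent of the filesystem at
    `path` is free, and return a boolean so the manager can log
    "Has no deletable disk." when the candidates run out.
    `data_store_type` is only kept for signature compatibility here."""
    def free_percent():
        st = os.statvfs(path)
        return 100.0 * st.f_bavail / st.f_blocks

    candidates = list(deletable_disks)
    while free_percent() < percent:
        if not candidates:
            return False                 # nothing left to delete
        os.unlink(os.path.join(path, candidates.pop(0)))
    return True
```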
@@ -59,15 +61,11 @@ def list_orphan_disks(timeout=15):

     Exclude cloud-xxxxxxxx.dump format images.

     :param timeout: Seconds before TimeOut exception
-    :type timeoit: int
+    :type timeout: int
     """
     import re
     for ds in DataStore.objects.all():
-        queue_name = ds.get_remote_queue_name('storage', "slow")
-        files = set(storage_tasks.list_files.apply_async(
-            args=[ds.type, ds.path], queue=queue_name).get(timeout=timeout))
-        disks = set([disk.filename for disk in ds.disk_set.all()])
-        for i in files - disks:
+        for i in ds.get_orphan_disks(timeout=timeout):
             if not re.match('cloud-[0-9]*\.dump', i):
                 logging.warning("Orphan disk: %s" % i)
@@ -77,14 +75,9 @@ def list_missing_disks(timeout=15):
     """List Disk objects without disk image files.

     :param timeout: Seconds before TimeOut exception
-    :type timeoit: int
+    :type timeout: int
     """
     for ds in DataStore.objects.all():
-        queue_name = ds.get_remote_queue_name('storage', "slow")
-        files = set(storage_tasks.list_files.apply_async(
-            args=[ds.type, ds.path], queue=queue_name).get(timeout=timeout))
-        disks = set([disk.filename for disk in
-                     ds.disk_set.filter(destroyed__isnull=True)])
-        for i in disks - files:
+        for i in ds.get_missing_disks(timeout=timeout):
             logging.critical("Image: %s is missing from %s datastore."
                              % (i, ds.path))
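With the listing moved into DataStore, both periodic tasks above shrink to a loop over a cached helper, and the @method_cache(30) decorator lets callers within the cache window share a single list_files round trip. The project's own decorator lives outside this diff; as a rough illustration of the idea only (this simplified version caches per instance and ignores call arguments):

```python
import time
from functools import wraps


def method_cache(seconds):
    """Simplified illustration of a method_cache-style decorator: keep a
    method's return value on the instance and reuse it for `seconds`
    seconds before calling the expensive method again."""
    def decorator(fn):
        @wraps(fn)
        def wrapper(self, *args, **kwargs):
            store = self.__dict__.setdefault('_method_cache', {})
            value, stamp = store.get(fn.__name__, (None, 0.0))
            if time.time() - stamp >= seconds:
                value = fn(self, *args, **kwargs)
                store[fn.__name__] = (value, time.time())
            return value
        return wrapper
    return decorator


class Example(object):
    @method_cache(30)
    def get_orphan_disks(self, timeout=15):
        print("expensive remote listing")   # runs at most once per 30 s
        return {"stray.img"}


e = Example()
e.get_orphan_disks()   # triggers the listing
e.get_orphan_disks()   # served from the 30-second cache
```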
@@ -63,8 +63,8 @@ def merge(src_disk_desc, dst_disk_desc):
     pass


-@celery.task(name='storagedriver.is_exists')
-def is_exists(data_store_type, path, disk_name):
+@celery.task(name='storagedriver.exists')
+def exists(data_store_type, path, disk_name):
     pass
...
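The driver-side stub and its explicit Celery task name are renamed together with the model-side call (storage_tasks.is_exists to storage_tasks.exists) earlier in this commit, since the worker registers and resolves the task by that name, so both sides have to change in the same step. The stub body stays empty here; a plausible file-backed implementation, assumed purely for illustration, would be:

```python
import os


def exists(data_store_type, path, disk_name):
    """Possible worker-side body for storagedriver.exists on a file-based
    datastore: report whether the disk image is present. A real driver
    would dispatch on `data_store_type` (e.g. file vs. ceph backends);
    the parameter is unused in this sketch."""
    return os.path.isfile(os.path.join(path, disk_name))
```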
@@ -429,7 +429,7 @@ class Instance(AclBase, VirtualMachineDescModel, StatusModel, OperatedMixin,
         common_fields = ['name', 'description', 'num_cores', 'ram_size',
                          'max_ram_size', 'arch', 'priority', 'boot_menu',
                          'raw_data', 'lease', 'access_method', 'system',
-                         'has_agent']
+                         'has_agent', 'datastore']
         params = dict(template=template, owner=owner, pw=pwgen())
         params.update([(f, getattr(template, f)) for f in common_fields])
         params.update(kwargs)  # override defaults w/ user supplied values
...
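Adding 'datastore' to common_fields means an instance created from a template now inherits the template's datastore along with the other copied attributes; the params.update call above simply reads each listed field off the template. A toy illustration of that copying pattern (the class and values below are made up, not the project's models):

```python
class FakeTemplate(object):
    name = 'web server'
    has_agent = True
    datastore = 'default'


common_fields = ['name', 'has_agent', 'datastore']
template = FakeTemplate()

params = {'owner': 'alice'}
params.update((f, getattr(template, f)) for f in common_fields)
# params now carries the template's datastore as well:
# {'owner': 'alice', 'name': 'web server', 'has_agent': True,
#  'datastore': 'default'}
```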