Commit 17a4af7f by Czémán Arnold

storage: rework garbage collector logic, synchronize tasks with storagedriver

parent b02d4a32
@@ -75,9 +75,11 @@ class DataStore(Model):
             raise WorkerNotFound()
 
     def get_deletable_disks(self):
-        return [disk.filename for disk in
-                self.disk_set.filter(
-                    destroyed__isnull=False) if disk.is_deletable]
+        deletables = [disk for disk in self.disk_set.filter(
+            destroyed__isnull=False) if disk.is_deletable]
+        deletables = sorted(deletables, key=lambda disk: disk.destroyed)
+        return [disk.filename for disk in deletables]
 
     @method_cache(30)
     def get_statistics(self, timeout=15):
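Note on the hunk above: get_deletable_disks() now returns the filenames ordered by the destroyed timestamp, oldest first, so the storagedriver can reclaim the longest-destroyed images before newer ones. A minimal illustration of that ordering (the namedtuple below is a hypothetical stand-in for the Disk model, not project code):

    from collections import namedtuple
    from datetime import datetime

    Disk = namedtuple('Disk', ['filename', 'destroyed'])  # stand-in for the real Disk model
    disks = [Disk('b.img', datetime(2014, 5, 2)), Disk('a.img', datetime(2014, 5, 1))]
    deletables = sorted(disks, key=lambda disk: disk.destroyed)
    print([d.filename for d in deletables])  # ['a.img', 'b.img'] -- oldest destroyed first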
@@ -475,9 +477,16 @@ class Disk(TimeStampedModel):
             'storage', priority='slow')
-        logger.info("Image: %s at Datastore: %s recovered from trash." %
-                    (self.filename, self.datastore.path))
-        storage_tasks.recover_from_trash.apply_async(
-            args=[self.datastore.path, self.filename],
+        res = storage_tasks.exists.apply_async(
+            args=[self.datastore.path,
+                  self.filename],
             queue=queue_name).get(timeout=timeout)
+        if res:
+            logger.info("Image: %s at Datastore: %s recovered." %
+                        (self.filename, self.datastore.path))
+        else:
+            logger.info("Image: %s at Datastore: %s not recovered." %
+                        (self.filename, self.datastore.path))
 
     def save_as(self, task=None, user=None, task_uuid=None, timeout=300):
         """Save VM as template.
@@ -24,7 +24,7 @@ logger = logging.getLogger(__name__)
 @celery.task
-def garbage_collector(timeout=15):
+def garbage_collector(timeout=15, percent=10):
     """ Garbage collector for disk images.
 
     If there is not enough free space on datastore (default 10%)
@@ -37,16 +37,19 @@ def garbage_collector(timeout=15):
         queue_name = ds.get_remote_queue_name('storage', priority='fast')
         files = set(storage_tasks.list_files.apply_async(
             args=[ds.path], queue=queue_name).get(timeout=timeout))
-        disks = set(ds.get_deletable_disks())
+        disks = ds.get_deletable_disks()
         queue_name = ds.get_remote_queue_name('storage', priority='slow')
-        for i in disks & files:
-            logger.info("Image: %s at Datastore: %s moved to trash folder." %
+        deletable_disks = [disk for disk in disks if disk in files]
+        for i in deletable_disks:
+            logger.info("Image: %s at Datastore: %s fetch for destroy." %
                         (i, ds.path))
-            storage_tasks.move_to_trash.apply_async(
-                args=[ds.path, i], queue=queue_name).get(timeout=timeout)
         try:
-            storage_tasks.make_free_space.apply_async(
-                args=[ds.path], queue=queue_name).get(timeout=timeout)
+            success = storage_tasks.make_free_space.apply_async(
+                args=[ds.path, deletable_disks, percent],
+                queue=queue_name).get(timeout=timeout)
+            if not success:
+                logger.warning("Has no deletable disk.")
         except Exception as e:
             logger.warning(str(e))
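With the trash-folder workflow removed, garbage_collector now only logs the candidate images (oldest destroyed first) and passes the list plus the free-space threshold to storagedriver.make_free_space, which returns a success flag; the caller logs a warning when nothing was deletable. A driver-side sketch of that contract (the free-space check and return semantics here are assumptions, not taken from this diff):

    import os

    def free_space_percent(path):
        # Percentage of the filesystem holding `path` that is still free.
        st = os.statvfs(path)
        return st.f_bavail * 100.0 / st.f_blocks

    def make_free_space(path, deletable_disks, percent=10):
        # Delete the oldest destroyed images until more than `percent` of the
        # datastore is free; report False if space is still short and nothing
        # deletable is left.
        for name in deletable_disks:
            if free_space_percent(path) > percent:
                return True
            try:
                os.unlink(os.path.join(path, name))
            except OSError:
                continue
        return free_space_percent(path) > percent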
@@ -63,18 +63,13 @@ def merge(src_disk_desc, dst_disk_desc):
     pass
 
-@celery.task(name='storagedriver.make_free_space')
-def make_free_space(datastore, percent):
+@celery.task(name='storagedriver.exists')
+def exists(path, disk_name):
     pass
 
-@celery.task(name='storagedriver.move_to_trash')
-def move_to_trash(datastore, disk_path):
-    pass
-
-@celery.task(name='storagedriver.recover_from_trash')
-def recover_from_trash(datastore, disk_path):
+@celery.task(name='storagedriver.make_free_space')
+def make_free_space(path, deletable_disks, percent):
     pass