Commit 729c191a by Czémán Arnold

Merge branch 'ceph' into new_ceph

Conflicts:
	storagedriver.py
parents 2b6c6fe1 21fe5b9b
ceph.py (new file; a thin configuration wrapper and a context manager around librados):

import rados
import os


class CephConfig:

    def __init__(self, user=None, config_path=None, keyring_path=None):
        # Default to the admin user and the standard cluster config/keyring
        # locations; both can be overridden via environment variables.
        self.user = user or "admin"
        self.config_path = (
            config_path or os.getenv("CEPH_CONFIG", "/etc/ceph/ceph.conf"))
        default_keyring = "/etc/ceph/ceph.client.%s.keyring" % self.user
        self.keyring_path = (
            keyring_path or os.getenv("CEPH_KEYRING", default_keyring))

    def cmd_args(self):
        # Arguments for invoking the ceph/rbd command line tools.
        return ["--keyring", self.keyring_path,
                "--id", self.user,
                "--conf", self.config_path]


class CephConnection:

    def __init__(self, pool_name, conf=None, **kwargs):
        self.pool_name = pool_name
        self.conf = conf or CephConfig(**kwargs)
        self.cluster = None
        self.ioctx = None

    def __enter__(self):
        try:
            self.cluster = rados.Rados(
                conffile=self.conf.config_path,
                conf=dict(keyring=self.conf.keyring_path))
            # getenv() returns a string, so coerce the timeout to int
            # before handing it to librados.
            timeout = int(os.getenv("CEPH_TIMEOUT", 2))
            self.cluster.connect(timeout=timeout)
            self.ioctx = self.cluster.open_ioctx(self.pool_name)
        except rados.InterruptedOrTimeoutError as e:
            raise Exception(e)
        return self

    def __exit__(self, type, value, traceback):
        self.ioctx.close()
        self.cluster.shutdown()
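A minimal usage sketch of the context manager above (not part of the commit): the pool and object names are placeholders, and it assumes the rados bindings are installed and the cluster is reachable with the default config.

from ceph import CephConnection

# Hypothetical pool/object names, for illustration only.
with CephConnection("test-pool") as conn:
    # conn.ioctx is an I/O context bound to the pool.
    conn.ioctx.write_full("probe-object", b"hello")
    print(conn.ioctx.read("probe-object"))
    # Cluster-wide statistics are reachable through conn.cluster.
    print(conn.cluster.get_cluster_stats())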
Requirements (python-cephlibs is the only addition):

 celery==3.1.17
 requests==2.5.3
 filemagic==1.6
+python-cephlibs==0.94.5-1
storagedriver.py (unified view of the changes; lines prefixed with "-" are removed, "+" are added):

-from disk import Disk
+from disk import Disk, CephDisk
+from ceph import CephConnection
 from storagecelery import celery
 import os
-from os import path, unlink, statvfs, listdir
+from os import unlink, statvfs, listdir
 from celery.contrib.abortable import AbortableTask
 import logging
+import rbd

 logger = logging.getLogger(__name__)

 trash_directory = "trash"


 @celery.task()
-def list(dir):
-    return [d.get_desc() for d in Disk.list(dir)]
+def list(data_store_type, dir):
+    cls = CephDisk if data_store_type == "ceph_block" else Disk
+    return [d.get_desc() for d in cls.list(dir)]


 @celery.task()
-def list_files(datastore):
-    return [l for l in listdir(datastore) if
-            path.isfile(path.join(datastore, l))]
+def list_files(data_store_type, dir):
+    if data_store_type == "ceph_block":
+        with CephConnection(str(dir)) as conn:
+            rbd_inst = rbd.RBD()
+            return rbd_inst.list(conn.ioctx)
+    else:
+        return [l for l in listdir(dir) if
+                os.path.isfile(os.path.join(dir, l))]


 @celery.task()
 def create(disk_desc):
-    disk = Disk.deserialize(disk_desc)
+    cls = CephDisk if disk_desc["data_store_type"] == "ceph_block" else Disk
+    disk = cls.deserialize(disk_desc)
     disk.create()
@@ -34,7 +45,8 @@ class download(AbortableTask):
         disk_desc = kwargs['disk']
         url = kwargs['url']
         parent_id = kwargs.get("parent_id", None)
-        disk = Disk.deserialize(disk_desc)
+        c = CephDisk if disk_desc["data_store_type"] == "ceph_block" else Disk
+        disk = c.deserialize(disk_desc)
         disk.download(self, url, parent_id)
         return {'size': disk.size,
                 'type': disk.format,
@@ -42,20 +54,28 @@ class download(AbortableTask):
 @celery.task()
-def delete(json_data):
-    disk = Disk.deserialize(json_data)
+def delete(disk_desc):
+    cls = CephDisk if disk_desc["data_store_type"] == "ceph_block" else Disk
+    disk = cls.deserialize(disk_desc)
     disk.delete()


 @celery.task()
-def delete_dump(disk_path):
-    if disk_path.endswith(".dump") and path.isfile(disk_path):
-        unlink(disk_path)
+def delete_dump(data_store_type, dir, filename):
+    if data_store_type == "ceph_block":
+        with CephConnection(str(dir)) as conn:
+            rbd_inst = rbd.RBD()
+            rbd_inst.remove(conn.ioctx, str(filename))
+    else:
+        disk_path = dir + "/" + filename
+        if disk_path.endswith(".dump") and os.path.isfile(disk_path):
+            unlink(disk_path)


 @celery.task()
-def snapshot(json_data):
-    disk = Disk.deserialize(json_data)
+def snapshot(disk_desc):
+    cls = CephDisk if disk_desc["data_store_type"] == "ceph_block" else Disk
+    disk = cls.deserialize(disk_desc)
     disk.snapshot()
@@ -66,47 +86,92 @@ class merge(AbortableTask):
         old_json = kwargs['old_json']
         new_json = kwargs['new_json']
         parent_id = kwargs.get("parent_id", None)
-        disk = Disk.deserialize(old_json)
-        new_disk = Disk.deserialize(new_json)
+        cls = CephDisk if old_json["data_store_type"] == "ceph_block" else Disk
+        disk = cls.deserialize(old_json)
+        new_disk = cls.deserialize(new_json)
         disk.merge(self, new_disk, parent_id=parent_id)
 @celery.task()
-def get(json_data):
-    disk = Disk.get(dir=json_data['dir'], name=json_data['name'])
+def get(disk_desc):
+    disk = None
+    dir = disk_desc['dir']
+    if disk_desc["data_store_type"] == "ceph_block":
+        with CephConnection(dir) as conn:
+            disk = CephDisk.get(conn.ioctx, pool_name=dir,
+                                name=disk_desc['name'])
+    else:
+        disk = Disk.get(dir=dir, name=disk_desc['name'])
     return disk.get_desc()
 @celery.task()
-def get_storage_stat(path):
+def get_storage_stat(data_store_type, path):
     ''' Return free disk space available at path in bytes and percent.'''
-    s = statvfs(path)
-    all_space = s.f_bsize * s.f_blocks
-    free_space = s.f_bavail * s.f_frsize
+    all_space = 1
+    free_space = 0
+    if data_store_type == "ceph_block":
+        with CephConnection(str(path)) as conn:
+            stat = conn.cluster.get_cluster_stats()
+            all_space = stat["kb"]
+            free_space = stat["kb_avail"]
+    else:
+        s = statvfs(path)
+        all_space = s.f_bsize * s.f_blocks
+        free_space = s.f_bavail * s.f_frsize
     free_space_percent = 100.0 * free_space / all_space
     return {'free_space': free_space,
             'free_percent': free_space_percent}
 @celery.task
-def exists(path, disk_name):
-    return os.path.exists(os.path.join(path, disk_name))
+def exists(data_store_type, path, disk_name):
+    ''' Check whether the named disk exists on the given datastore.
+    '''
+    if data_store_type == "ceph_block":
+        try:
+            with CephConnection(str(path)) as conn:
+                with rbd.Image(conn.ioctx, str(disk_name)):
+                    pass
+        except rbd.ImageNotFound:
+            return False
+        else:
+            return True
+    elif os.path.exists(os.path.join(path, disk_name)):
+        return True
+    return False
 @celery.task
-def make_free_space(path, deletable_disks, percent=10):
+def make_free_space(data_store_type, path, deletable_disks, percent=10):
     ''' Check for free space on datastore.
-        If free space is less than the given percent
-        removes oldest files to satisfy the given requirement.
+        If free space is less than the given percent,
+        remove the oldest files until the requirement is satisfied.
     '''
+    ds_type = data_store_type
     logger.info("Free space on datastore: %s" %
-                get_storage_stat(path).get('free_percent'))
-    while get_storage_stat(path).get('free_percent') < percent:
-        logger.debug(get_storage_stat(path))
+                get_storage_stat(ds_type, path).get('free_percent'))
+    while get_storage_stat(ds_type, path).get('free_percent') < percent:
+        logger.debug(get_storage_stat(ds_type, path))
         try:
             f = deletable_disks.pop(0)
-            unlink(os.path.join(path, f))
+            if ds_type == "ceph_block":
+                with CephConnection(str(path)) as conn:
+                    rbd_inst = rbd.RBD()
+                    with rbd.Image(conn.ioctx, str(f)) as image:
+                        # Snapshots must be unprotected and removed before
+                        # the image itself can be deleted.
+                        for snapshot in image.list_snaps():
+                            name = snapshot["name"]
+                            image.unprotect_snap(name)
+                            image.remove_snap(name)
+                    rbd_inst.remove(conn.ioctx, str(f))
+            else:
+                unlink(os.path.join(path, f))
             logger.info('Image: %s removed.' % f)
         except IndexError:
-            raise Exception("There is not deletable disk.")
+            logger.warning("No deletable disk left.")
+            return False
     return True
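A caller-side sketch (not part of the commit) of how the reworked tasks might be dispatched now that each takes the datastore type as its first argument. The queue names, the pool/directory names, and the non-Ceph type string "file" are assumptions for illustration only.

from storagedriver import list_files, get_storage_stat

# Hypothetical routing: queue and datastore names are placeholders.
rbd_images = list_files.apply_async(
    args=["ceph_block", "datastore-pool"], queue="storage.ceph").get()
local_files = list_files.apply_async(
    args=["file", "/datastore"], queue="storage.local").get()

# Free-space reporting follows the same pattern for both backends.
stat = get_storage_stat.apply_async(
    args=["ceph_block", "datastore-pool"], queue="storage.ceph").get()
print(stat['free_percent'])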