diff --git a/proxstar/__init__.py b/proxstar/__init__.py
index 1ff7818..a6c3ae6 100644
--- a/proxstar/__init__.py
+++ b/proxstar/__init__.py
@@ -17,38 +17,54 @@ import sentry_sdk
 from sentry_sdk.integrations.flask import FlaskIntegration
 from sentry_sdk.integrations.rq import RqIntegration
 from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration
-from proxstar.db import (Base, datetime, get_pool_cache, renew_vm_expire, set_user_usage_limits, get_template,
-                         get_templates, get_allowed_users, add_ignored_pool, delete_ignored_pool, add_allowed_user,
-                         delete_allowed_user,
-                         get_template_disk, set_template_info)
-from proxstar.vnc import (send_stop_ssh_tunnel, stop_ssh_tunnel, add_vnc_target, start_ssh_tunnel, get_vnc_targets,
-                          delete_vnc_target, stop_websockify)
+from proxstar.db import (
+    Base,
+    datetime,
+    get_pool_cache,
+    renew_vm_expire,
+    set_user_usage_limits,
+    get_template,
+    get_templates,
+    get_allowed_users,
+    add_ignored_pool,
+    delete_ignored_pool,
+    add_allowed_user,
+    delete_allowed_user,
+    get_template_disk,
+    set_template_info,
+)
+from proxstar.vnc import (
+    send_stop_ssh_tunnel,
+    stop_ssh_tunnel,
+    add_vnc_target,
+    start_ssh_tunnel,
+    get_vnc_targets,
+    delete_vnc_target,
+    stop_websockify,
+)
 from proxstar.auth import get_auth
 from proxstar.util import gen_password
 from proxstar.starrs import check_hostname, renew_ip
 from proxstar.proxmox import connect_proxmox, get_isos, get_pools, get_ignored_pools
 
-logging.basicConfig(
-    format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
+logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
 
 app = Flask(__name__)
 app.config.from_object(rq_dashboard.default_settings)
-if os.path.exists(
-        os.path.join(
-            app.config.get('ROOT_DIR', os.getcwd()), 'config_local.py')):
-    config = os.path.join(
-        app.config.get('ROOT_DIR', os.getcwd()), 'config_local.py')
+if os.path.exists(os.path.join(app.config.get('ROOT_DIR', os.getcwd()), 'config_local.py')):
+    config = os.path.join(app.config.get('ROOT_DIR', os.getcwd()), 'config_local.py')
 else:
     config = os.path.join(app.config.get('ROOT_DIR', os.getcwd()), 'config.py')
 app.config.from_pyfile(config)
-app.config['GIT_REVISION'] = subprocess.check_output(
-    ['git', 'rev-parse', '--short', 'HEAD']).decode('utf-8').rstrip()
+app.config['GIT_REVISION'] = (
+    subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode('utf-8').rstrip()
+)
 
 # Sentry setup
 sentry_sdk.init(
     dsn=app.config['SENTRY_DSN'],
     integrations=[FlaskIntegration(), RqIntegration(), SqlalchemyIntegration()],
-    environment=app.config['SENTRY_ENV']
+    environment=app.config['SENTRY_ENV'],
 )
 
 with open('proxmox_ssh_key', 'w') as ssh_key_file:
@@ -69,13 +85,23 @@ db = DBSession()
 
 starrs = psycopg2.connect(
     "dbname='{}' user='{}' host='{}' password='{}'".format(
-        app.config['STARRS_DB_NAME'], app.config['STARRS_DB_USER'],
-        app.config['STARRS_DB_HOST'], app.config['STARRS_DB_PASS']))
+        app.config['STARRS_DB_NAME'],
+        app.config['STARRS_DB_USER'],
+        app.config['STARRS_DB_HOST'],
+        app.config['STARRS_DB_PASS'],
+    )
+)
 
 from proxstar.vm import VM
 from proxstar.user import User
-from proxstar.tasks import (generate_pool_cache_task, process_expiring_vms_task, cleanup_vnc_task,
-                            delete_vm_task, create_vm_task, setup_template_task)
+from proxstar.tasks import (
+    generate_pool_cache_task,
+    process_expiring_vms_task,
+    cleanup_vnc_task,
+    delete_vm_task,
+    create_vm_task,
+    setup_template_task,
+)
 
 if 'generate_pool_cache' not in scheduler:
     logging.info('adding generate pool cache task to scheduler')
@@ -83,12 +109,12 @@ if 'generate_pool_cache' not in scheduler:
         id='generate_pool_cache',
         scheduled_time=datetime.datetime.utcnow(),
         func=generate_pool_cache_task,
-        interval=90)
+        interval=90,
+    )
 
 if 'process_expiring_vms' not in scheduler:
     logging.info('adding process expiring VMs task to scheduler')
-    scheduler.cron(
-        '0 5 * * *', id='process_expiring_vms', func=process_expiring_vms_task)
+    scheduler.cron('0 5 * * *', id='process_expiring_vms', func=process_expiring_vms_task)
 
 if 'cleanup_vnc' not in scheduler:
     logging.info('adding cleanup VNC task to scheduler')
@@ -96,7 +122,8 @@ if 'cleanup_vnc' not in scheduler:
         id='cleanup_vnc',
         scheduled_time=datetime.datetime.utcnow(),
         func=cleanup_vnc_task,
-        interval=3600)
+        interval=3600,
+    )
 
 
 def add_rq_dashboard_auth(blueprint):
@@ -137,8 +164,7 @@ def list_vms(user_view=None):
         user_view = User(user_view)
         vms = user_view.vms
         for pending_vm in user_view.pending_vms:
-            vm = next((vm for vm in vms if vm['name'] == pending_vm['name']),
-                      None)
+            vm = next((vm for vm in vms if vm['name'] == pending_vm['name']), None)
             if vm:
                 vms[vms.index(vm)]['status'] = pending_vm['status']
                 vms[vms.index(vm)]['pending'] = True
@@ -152,9 +178,7 @@ def list_vms(user_view=None):
         if user.active:
             vms = user.vms
             for pending_vm in user.pending_vms:
-                vm = next(
-                    (vm for vm in vms if vm['name'] == pending_vm['name']),
-                    None)
+                vm = next((vm for vm in vms if vm['name'] == pending_vm['name']), None)
                 if vm:
                     vms[vms.index(vm)]['status'] = pending_vm['status']
                     vms[vms.index(vm)]['pending'] = True
@@ -162,8 +186,7 @@ def list_vms(user_view=None):
                     vms.append(pending_vm)
         else:
             vms = 'INACTIVE'
-    return render_template(
-        'list_vms.html', user=user, rtp_view=rtp_view, vms=vms)
+    return render_template('list_vms.html', user=user, rtp_view=rtp_view, vms=vms)
 
 
 @app.route('/isos')
@@ -200,7 +223,8 @@ def vm_details(vmid):
             vm=vm,
             usage=user.usage,
             limits=user.limits,
-            usage_check=usage_check)
+            usage_check=usage_check,
+        )
     else:
         return abort(403)
 
@@ -214,8 +238,7 @@ def vm_power(vmid, action):
         vm = VM(vmid)
         if action == 'start':
             vmconfig = vm.config
-            usage_check = user.check_usage(vmconfig['cores'], vmconfig['memory'],
-                                           0)
+            usage_check = user.check_usage(vmconfig['cores'], vmconfig['memory'], 0)
             if usage_check:
                 return usage_check
             vm.start()
@@ -415,7 +438,8 @@ def create():
             percents=user.usage_percent,
             isos=stored_isos,
             pools=pools,
-            templates=templates)
+            templates=templates,
+        )
     elif request.method == 'POST':
         name = request.form['name'].lower()
         cores = request.form['cores']
@@ -425,8 +449,7 @@ def create():
         iso = request.form['iso']
         ssh_key = request.form['ssh_key']
         if iso != 'none':
-            iso = '{}:iso/{}'.format(app.config['PROXMOX_ISO_STORAGE'],
-                                     iso)
+            iso = '{}:iso/{}'.format(app.config['PROXMOX_ISO_STORAGE'], iso)
         if not user.rtp:
             if template == 'none':
                 usage_check = user.check_usage(0, 0, disk)
@@ -450,7 +473,8 @@ def create():
                     memory,
                     disk,
                     iso,
-                    job_timeout=300)
+                    job_timeout=300,
+                )
             else:
                 q.enqueue(
                     setup_template_task,
@@ -460,7 +484,8 @@ def create():
                     ssh_key,
                     cores,
                     memory,
-                    job_timeout=600)
+                    job_timeout=600,
+                )
             return '', 200
         return '', 200
     return None
@@ -505,7 +530,8 @@ def settings():
             user=user,
             templates=templates,
             ignored_pools=db_ignored_pools,
-            allowed_users=db_allowed_users)
+            allowed_users=db_allowed_users,
+        )
     else:
         return abort(403)
 
@@ -540,13 +566,19 @@ def allowed_users(user):
 def cleanup_vnc():
     if request.form['token'] == app.config['VNC_CLEANUP_TOKEN']:
         for target in get_vnc_targets():
-            tunnel = next((tunnel for tunnel in ssh_tunnels
-                           if tunnel.local_bind_port == int(target['port'])),
-                          None)
+            tunnel = next(
+                (tunnel for tunnel in ssh_tunnels if tunnel.local_bind_port == int(target['port'])),
+                None,
+            )
             if tunnel:
-                if not next((conn for conn in psutil.net_connections()
-                             if conn.laddr[1] == int(target['port'])
-                             and conn.status == 'ESTABLISHED'), None):
+                if not next(
+                    (
+                        conn
+                        for conn in psutil.net_connections()
+                        if conn.laddr[1] == int(target['port']) and conn.status == 'ESTABLISHED'
+                    ),
+                    None,
+                ):
                     try:
                         tunnel.stop()
                     except:
diff --git a/proxstar/auth.py b/proxstar/auth.py
index 242f07e..3405903 100644
--- a/proxstar/auth.py
+++ b/proxstar/auth.py
@@ -7,5 +7,6 @@ def get_auth(app):
     auth = OIDCAuthentication(
         app,
         issuer=app.config['OIDC_ISSUER'],
-        client_registration_info=app.config['OIDC_CLIENT_CONFIG'])
+        client_registration_info=app.config['OIDC_CLIENT_CONFIG'],
+    )
     return auth
diff --git a/proxstar/db.py b/proxstar/db.py
index 2505299..c8da57e 100644
--- a/proxstar/db.py
+++ b/proxstar/db.py
@@ -4,14 +4,20 @@ from dateutil.relativedelta import relativedelta
 from sqlalchemy import exists
 
 from proxstar.ldapdb import is_rtp
-from proxstar.models import (Base, Allowed_Users, Ignored_Pools, Pool_Cache, #pylint: disable=unused-import
-                             Template, Usage_Limit, VM_Expiration)
+from proxstar.models import (
+    Base,
+    Allowed_Users,
+    Ignored_Pools,
+    Pool_Cache,  # pylint: disable=unused-import
+    Template,
+    Usage_Limit,
+    VM_Expiration,
+)
 
 
 def get_vm_expire(db, vmid, months):
     if db.query(exists().where(VM_Expiration.id == vmid)).scalar():
-        expire = db.query(VM_Expiration).filter(
-            VM_Expiration.id == vmid).one().expire_date
+        expire = db.query(VM_Expiration).filter(VM_Expiration.id == vmid).one().expire_date
     else:
         expire = datetime.date.today() + relativedelta(months=months)
         new_expire = VM_Expiration(id=vmid, expire_date=expire)
@@ -43,8 +49,7 @@ def delete_vm_expire(db, vmid):
 def get_expiring_vms(db):
     expiring = []
     today = datetime.date.today()
-    expire = db.query(VM_Expiration).filter(
-        (VM_Expiration.expire_date - today) <= 10).all()
+    expire = db.query(VM_Expiration).filter((VM_Expiration.expire_date - today) <= 10).all()
     for vm in expire:
         expiring.append(vm.id)
     return expiring
@@ -57,12 +62,9 @@
         limits['mem'] = 1000
         limits['disk'] = 100000
     elif db.query(exists().where(Usage_Limit.id == user)).scalar():
-        limits['cpu'] = db.query(Usage_Limit).filter(
-            Usage_Limit.id == user).one().cpu
-        limits['mem'] = db.query(Usage_Limit).filter(
-            Usage_Limit.id == user).one().mem
-        limits['disk'] = db.query(Usage_Limit).filter(
-            Usage_Limit.id == user).one().disk
+        limits['cpu'] = db.query(Usage_Limit).filter(Usage_Limit.id == user).one().cpu
+        limits['mem'] = db.query(Usage_Limit).filter(Usage_Limit.id == user).one().mem
+        limits['disk'] = db.query(Usage_Limit).filter(Usage_Limit.id == user).one().disk
     else:
         limits['cpu'] = 4
        limits['mem'] = 4
@@ -99,7 +101,8 @@ def store_pool_cache(db, pools):
             num_vms=pool['num_vms'],
             usage=pool['usage'],
             limits=pool['limits'],
-            percents=pool['percents'])
+            percents=pool['percents'],
+        )
         db.add(pool_entry)
         db.commit()
 
@@ -129,8 +132,7 @@ def get_ignored_pools(db):
 
 def delete_ignored_pool(db, pool):
     if db.query(exists().where(Ignored_Pools.id == pool)).scalar():
-        ignored_pool = db.query(Ignored_Pools).filter(
-            Ignored_Pools.id == pool).one()
+        ignored_pool = db.query(Ignored_Pools).filter(Ignored_Pools.id == pool).one()
         db.delete(ignored_pool)
         db.commit()
 
@@ -187,16 +189,24 @@ def add_allowed_user(db, user):
 
 def delete_allowed_user(db, user):
     if db.query(exists().where(Allowed_Users.id == user)).scalar():
-        allowed_user = db.query(Allowed_Users).filter(
-            Allowed_Users.id == user).one()
+        allowed_user = db.query(Allowed_Users).filter(Allowed_Users.id == user).one()
         db.delete(allowed_user)
         db.commit()
 
 
 def set_template_info(db, template_id, name, disk):
-    if db.query(exists().where(Template.id == template_id, )).scalar():
-        template = db.query(Template).filter(
-            Template.id == template_id, ).one()
+    if db.query(
+        exists().where(
+            Template.id == template_id,
+        )
+    ).scalar():
+        template = (
+            db.query(Template)
+            .filter(
+                Template.id == template_id,
+            )
+            .one()
+        )
         template.name = name
         template.disk = disk
         db.commit()
diff --git a/proxstar/proxmox.py b/proxstar/proxmox.py
index a9b76be..fabc15c 100644
--- a/proxstar/proxmox.py
+++ b/proxstar/proxmox.py
@@ -13,14 +13,13 @@ def connect_proxmox():
                 host,
                 user=app.config['PROXMOX_USER'],
                 password=app.config['PROXMOX_PASS'],
-                verify_ssl=False)
+                verify_ssl=False,
+            )
             proxmox.version.get()
             return proxmox
         except:
-            if app.config['PROXMOX_HOSTS'].index(host) == (
-                    len(app.config['PROXMOX_HOSTS']) - 1):
-                logging.error(
-                    'unable to connect to any of the given Proxmox servers')
+            if app.config['PROXMOX_HOSTS'].index(host) == (len(app.config['PROXMOX_HOSTS']) - 1):
+                logging.error('unable to connect to any of the given Proxmox servers')
                 raise
 
 
@@ -32,21 +31,19 @@ def connect_proxmox_ssh():
                 user=app.config['PROXMOX_SSH_USER'],
                 private_key_file='proxmox_ssh_key',
                 password=app.config['PROXMOX_SSH_KEY_PASS'],
-                backend='ssh_paramiko')
+                backend='ssh_paramiko',
+            )
             proxmox.version.get()
             return proxmox
         except:
-            if app.config['PROXMOX_HOSTS'].index(host) == (
-                    len(app.config['PROXMOX_HOSTS']) - 1):
-                logging.error(
-                    'unable to connect to any of the given Proxmox servers')
+            if app.config['PROXMOX_HOSTS'].index(host) == (len(app.config['PROXMOX_HOSTS']) - 1):
+                logging.error('unable to connect to any of the given Proxmox servers')
                 raise
 
 
 def get_node_least_mem(proxmox):
     nodes = proxmox.nodes.get()
-    sorted_nodes = sorted(
-        nodes, key=lambda x: ('mem' not in x, x.get('mem', None)))
+    sorted_nodes = sorted(nodes, key=lambda x: ('mem' not in x, x.get('mem', None)))
     return sorted_nodes[0]['node']
 
 
diff --git a/proxstar/starrs.py b/proxstar/starrs.py
index a25fdde..14f4dec 100644
--- a/proxstar/starrs.py
+++ b/proxstar/starrs.py
@@ -5,8 +5,8 @@ def get_next_ip(starrs, range_name):
     c = starrs.cursor()
     try:
         c.execute('BEGIN')
-        c.callproc('api.initialize', ('root', ))
-        c.callproc('api.get_address_from_range', (range_name, ))
+        c.callproc('api.initialize', ('root',))
+        c.callproc('api.get_address_from_range', (range_name,))
         results = c.fetchall()
         c.execute('COMMIT')
     finally:
@@ -18,8 +18,8 @@ def get_ip_for_mac(starrs, mac):
     c = starrs.cursor()
     try:
         c.execute('BEGIN')
-        c.callproc('api.initialize', ('root', ))
-        c.callproc('api.get_system_interface_addresses', (mac.lower(), ))
+        c.callproc('api.initialize', ('root',))
+        c.callproc('api.get_system_interface_addresses', (mac.lower(),))
         results = c.fetchall()
         c.execute('COMMIT')
     finally:
@@ -33,8 +33,8 @@ def renew_ip(starrs, addr):
     c = starrs.cursor()
     try:
         c.execute('BEGIN')
-        c.callproc('api.initialize', ('root', ))
-        c.callproc('api.renew_interface_address', (addr, ))
+        c.callproc('api.initialize', ('root',))
+        c.callproc('api.renew_interface_address', (addr,))
         results = c.fetchall()
         c.execute('COMMIT')
     finally:
@@ -48,18 +48,18 @@ def check_hostname(starrs, hostname):
     try:
         # Check for invalid characters in hostname
         c.execute('BEGIN')
-        c.callproc('api.initialize', ('root', ))
-        c.callproc('api.validate_name', (hostname, ))
+        c.callproc('api.initialize', ('root',))
+        c.callproc('api.validate_name', (hostname,))
         c.execute('COMMIT')
         # Validate the entire domain name using Data::Validate::Domain
         c.execute('BEGIN')
-        c.callproc('api.initialize', ('root', ))
+        c.callproc('api.initialize', ('root',))
         c.callproc('api.validate_domain', (hostname, 'csh.rit.edu'))
         valid = c.fetchall()[0][0]
         c.execute('COMMIT')
         # Check if the hostname is available (checks A/SRV/CNAME records)
         c.execute('BEGIN')
-        c.callproc('api.initialize', ('root', ))
+        c.callproc('api.initialize', ('root',))
         c.callproc('api.check_dns_hostname', (hostname, 'csh.rit.edu'))
         available = False
         if not c.fetchall()[0][0]:
@@ -67,8 +67,8 @@ def check_hostname(starrs, hostname):
         c.execute('COMMIT')
         # Check if the system name is taken
         c.execute('BEGIN')
-        c.callproc('api.initialize', ('root', ))
-        c.callproc('api.get_system', (hostname, ))
+        c.callproc('api.initialize', ('root',))
+        c.callproc('api.get_system', (hostname,))
         if c.fetchall():
             available = False
         c.execute('COMMIT')
@@ -84,14 +84,15 @@ def register_starrs(starrs, name, owner, mac, addr):
     c = starrs.cursor()
     try:
         c.execute('BEGIN')
-        c.callproc('api.initialize', ('root', ))
+        c.callproc('api.initialize', ('root',))
         c.callproc(
             'api.create_system_quick',
-            (name, owner, 'members', mac, addr, 'csh.rit.edu', 'dhcp', True))
+            (name, owner, 'members', mac, addr, 'csh.rit.edu', 'dhcp', True),
+        )
         results = c.fetchall()
         c.execute('COMMIT')
         c.execute('BEGIN')
-        c.callproc('api.initialize', ('root', ))
+        c.callproc('api.initialize', ('root',))
         c.callproc('api.modify_system', (name, 'comment', f'Owned by {owner}'))
         c.execute('COMMIT')
     finally:
@@ -103,8 +104,8 @@ def delete_starrs(starrs, name):
     c = starrs.cursor()
     try:
         c.execute('BEGIN')
-        c.callproc('api.initialize', ('root', ))
-        c.callproc('api.remove_system', (name, ))
+        c.callproc('api.initialize', ('root',))
+        c.callproc('api.remove_system', (name,))
         results = c.fetchall()
         c.execute('COMMIT')
     finally:
diff --git a/proxstar/tasks.py b/proxstar/tasks.py
index 84af3cb..b194ff7 100644
--- a/proxstar/tasks.py
+++ b/proxstar/tasks.py
@@ -9,7 +9,14 @@ from rq import get_current_job
 from sqlalchemy import create_engine
 from sqlalchemy.orm import sessionmaker
 
-from proxstar.db import Base, get_vm_expire, delete_vm_expire, datetime, store_pool_cache, get_template
+from proxstar.db import (
+    Base,
+    get_vm_expire,
+    delete_vm_expire,
+    datetime,
+    store_pool_cache,
+    get_template,
+)
 from proxstar.mail import send_vm_expire_email, send_rtp_vm_delete_email
 from proxstar.proxmox import connect_proxmox, get_pools
 from proxstar.starrs import get_next_ip, register_starrs, delete_starrs
@@ -17,15 +24,11 @@ from proxstar.user import User, get_vms_for_rtp
 from proxstar.vm import VM, clone_vm, create_vm
 from proxstar.vnc import send_stop_ssh_tunnel
 
-logging.basicConfig(
-    format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
+logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
 
 app = Flask(__name__)
-if os.path.exists(
-        os.path.join(
-            app.config.get('ROOT_DIR', os.getcwd()), 'config.local.py')):
-    config = os.path.join(
-        app.config.get('ROOT_DIR', os.getcwd()), 'config.local.py')
+if os.path.exists(os.path.join(app.config.get('ROOT_DIR', os.getcwd()), 'config.local.py')):
+    config = os.path.join(app.config.get('ROOT_DIR', os.getcwd()), 'config.local.py')
 else:
     config = os.path.join(app.config.get('ROOT_DIR', os.getcwd()), 'config.py')
 app.config.from_pyfile(config)
@@ -42,8 +45,12 @@ def connect_db():
 def connect_starrs():
     starrs = psycopg2.connect(
         "dbname='{}' user='{}' host='{}' password='{}'".format(
-            app.config['STARRS_DB_NAME'], app.config['STARRS_DB_USER'],
-            app.config['STARRS_DB_HOST'], app.config['STARRS_DB_PASS']))
+            app.config['STARRS_DB_NAME'],
+            app.config['STARRS_DB_USER'],
+            app.config['STARRS_DB_HOST'],
+            app.config['STARRS_DB_PASS'],
+        )
+    )
     return starrs
 
 
@@ -61,8 +68,7 @@ def create_vm_task(user, name, cores, memory, disk, iso):
     logging.info('[{}] Creating VM.'.format(name))
     set_job_status(job, 'creating VM')
     vmid = create_vm(proxmox, user, name, cores, memory, disk, iso)
-    logging.info(
-        '[{}] Waiting until Proxmox is done provisioning.'.format(name))
+    logging.info('[{}] Waiting until Proxmox is done provisioning.'.format(name))
     set_job_status(job, 'waiting for Proxmox')
     timeout = 20
     retry = 0
@@ -81,8 +87,7 @@ def create_vm_task(user, name, cores, memory, disk, iso):
     set_job_status(job, 'registering in STARRS')
     vm = VM(vmid)
     ip = get_next_ip(starrs, app.config['STARRS_IP_RANGE'])
-    register_starrs(starrs, name, app.config['STARRS_USER'], vm.get_mac(),
-                    ip)
+    register_starrs(starrs, name, app.config['STARRS_USER'], vm.get_mac(), ip)
     set_job_status(job, 'setting VM expiration')
     get_vm_expire(db, vmid, app.config['VM_EXPIRE_MONTHS'])
     logging.info('[{}] VM successfully provisioned.'.format(name))
@@ -137,8 +142,10 @@ def process_expiring_vms_task():
                 vm.stop()
             elif days <= -7:
                 logging.info(
-                    'Deleting {} ({}) as it has been at least a week since expiration.'
-                    .format(vm.name, vm.id))
+                    'Deleting {} ({}) as it has been at least a week since expiration.'.format(
+                        vm.name, vm.id
+                    )
+                )
                 send_stop_ssh_tunnel(vm.id)
                 delete_vm_task(vm.id)
     if expiring_vms:
@@ -161,14 +168,12 @@ def setup_template_task(template_id, name, user, ssh_key, cores, memory):
     proxmox = connect_proxmox()
     starrs = connect_starrs()
     db = connect_db()
-    logging.info('[{}] Retrieving template info for template {}.'.format(
-        name, template_id))
+    logging.info('[{}] Retrieving template info for template {}.'.format(name, template_id))
     get_template(db, template_id)
     logging.info('[{}] Cloning template {}.'.format(name, template_id))
     set_job_status(job, 'cloning template')
     vmid = clone_vm(proxmox, template_id, name, user)
-    logging.info(
-        '[{}] Waiting until Proxmox is done provisioning.'.format(name))
+    logging.info('[{}] Waiting until Proxmox is done provisioning.'.format(name))
     set_job_status(job, 'waiting for Proxmox')
     timeout = 25
     retry = 0
@@ -187,8 +192,7 @@ def setup_template_task(template_id, name, user, ssh_key, cores, memory):
     set_job_status(job, 'registering in STARRS')
     vm = VM(vmid)
     ip = get_next_ip(starrs, app.config['STARRS_IP_RANGE'])
-    register_starrs(starrs, name, app.config['STARRS_USER'], vm.get_mac(),
-                    ip)
+    register_starrs(starrs, name, app.config['STARRS_USER'], vm.get_mac(), ip)
     get_vm_expire(db, vmid, app.config['VM_EXPIRE_MONTHS'])
     logging.info('[{}] Setting CPU and memory.'.format(name))
     set_job_status(job, 'setting CPU and memory')
@@ -199,9 +203,7 @@ def setup_template_task(template_id, name, user, ssh_key, cores, memory):
     vm.set_ci_user(user)
     vm.set_ci_ssh_key(ssh_key)
     vm.set_ci_network()
-    logging.info(
-        '[{}] Waiting for STARRS to propogate before starting VM.'.format(
-            name))
+    logging.info('[{}] Waiting for STARRS to propogate before starting VM.'.format(name))
     set_job_status(job, 'waiting for STARRS')
     job.save_meta()
     time.sleep(90)
@@ -218,4 +220,5 @@ def cleanup_vnc_task():
     requests.post(
         'https://{}/console/cleanup'.format(app.config['SERVER_NAME']),
         data={'token': app.config['VNC_CLEANUP_TOKEN']},
-        verify=False)
+        verify=False,
+    )
diff --git a/proxstar/user.py b/proxstar/user.py
index 17b70ff..16f5c4d 100644
--- a/proxstar/user.py
+++ b/proxstar/user.py
@@ -9,11 +9,14 @@ from proxstar.util import lazy_property
 from proxstar.vm import VM
 
 
-class User():
+class User:
     def __init__(self, username):
         self.name = username
-        self.active = is_active(self.name) or is_current_student(
-            self.name) or self.name in get_allowed_users(db)
+        self.active = (
+            is_active(self.name)
+            or is_current_student(self.name)
+            or self.name in get_allowed_users(db)
+        )
         self.rtp = is_rtp(self.name)
         self.limits = get_user_usage_limits(db, self.name)
 
@@ -26,8 +29,7 @@ class User():
         except ResourceException:
             # they likely don't have a pool yet, try to create it
             if is_user(self.name):
-                proxmox.pools.post(
-                    poolid=self.name, comment='Managed by Proxstar')
+                proxmox.pools.post(poolid=self.name, comment='Managed by Proxstar')
                 # if created, their pool is empty so return empty array
                 return []
             else:
@@ -40,8 +42,7 @@ class User():
 
     @lazy_property
     def pending_vms(self):
-        jobs = StartedJobRegistry(
-            'default', connection=redis_conn).get_job_ids()
+        jobs = StartedJobRegistry('default', connection=redis_conn).get_job_ids()
         for job_id in q.job_ids:
             jobs.append(job_id)
         pending_vms = []
@@ -77,7 +78,7 @@ class User():
             vm = VM(vm['vmid'])
             if vm.status == 'running' or vm.status == 'paused':
                 usage['cpu'] += int(vm.cpu)
-                usage['mem'] += (int(vm.mem) / 1024)
+                usage['mem'] += int(vm.mem) / 1024
                 for disk in vm.disks:
                     usage['disk'] += int(disk[1])
         return usage
@@ -87,8 +88,7 @@ class User():
         percents = dict()
         percents['cpu'] = round(self.usage['cpu'] / self.limits['cpu'] * 100)
         percents['mem'] = round(self.usage['mem'] / self.limits['mem'] * 100)
-        percents['disk'] = round(
-            self.usage['disk'] / self.limits['disk'] * 100)
+        percents['disk'] = round(self.usage['disk'] / self.limits['disk'] * 100)
         for resource in percents:
             if percents[resource] > 100:
                 percents[resource] = 100
@@ -108,12 +108,12 @@ class User():
         proxmox = connect_proxmox()
         proxmox.pools(self.name).delete()
         users = proxmox.access.users.get()
-        if any(user['userid'] == '{}@csh.rit.edu'.format(self.name)
-               for user in users):
-            if 'rtp' not in proxmox.access.users('{}@csh.rit.edu'.format(
-                    self.name)).get()['groups']:
-                proxmox.access.users('{}@csh.rit.edu'.format(
-                    self.name)).delete()
+        if any(user['userid'] == '{}@csh.rit.edu'.format(self.name) for user in users):
+            if (
+                'rtp'
+                not in proxmox.access.users('{}@csh.rit.edu'.format(self.name)).get()['groups']
+            ):
+                proxmox.access.users('{}@csh.rit.edu'.format(self.name)).delete()
 
 
 def get_vms_for_rtp(proxmox, database):
diff --git a/proxstar/util.py b/proxstar/util.py
index d218160..9588f2b 100644
--- a/proxstar/util.py
+++ b/proxstar/util.py
@@ -2,8 +2,7 @@ import random
 
 
 def gen_password(
-        length,
-        charset='ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*'
+    length, charset='ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*'
 ):
     # use secrets module once this works in python 3.6
     return ''.join(random.choice(charset) for x in range(length))
diff --git a/proxstar/vm.py b/proxstar/vm.py
index 26eca11..7555658 100644
--- a/proxstar/vm.py
+++ b/proxstar/vm.py
@@ -11,7 +11,7 @@ from proxstar.starrs import get_ip_for_mac
 from proxstar.util import lazy_property
 
 
-class VM():
+class VM:
     def __init__(self, vmid):
         self.id = vmid
 
@@ -61,8 +61,7 @@ class VM():
     @retry(wait=wait_fixed(2), stop=stop_after_attempt(5))
     def set_cpu(self, cores):
         proxmox = connect_proxmox()
-        proxmox.nodes(self.node).qemu(self.id).config.put(
-            cores=cores, sockets=1)
+        proxmox.nodes(self.node).qemu(self.id).config.put(cores=cores, sockets=1)
 
     @retry(wait=wait_fixed(2), stop=stop_after_attempt(5))
     def set_mem(self, mem):
@@ -119,12 +118,7 @@ class VM():
 
     @lazy_property
     def boot_order(self):
-        boot_order_lookup = {
-            'a': 'Floppy',
-            'c': 'Hard Disk',
-            'd': 'CD-ROM',
-            'n': 'Network'
-        }
+        boot_order_lookup = {'a': 'Floppy', 'c': 'Hard Disk', 'd': 'CD-ROM', 'n': 'Network'}
         raw_boot_order = self.config.get('boot', 'cdn')
         boot_order = []
         for order in raw_boot_order:
@@ -138,12 +132,7 @@ class VM():
     @retry(wait=wait_fixed(2), stop=stop_after_attempt(5))
     def set_boot_order(self, boot_order):
         proxmox = connect_proxmox()
-        boot_order_lookup = {
-            'Floppy': 'a',
-            'Hard Disk': 'c',
-            'CD-ROM': 'd',
-            'Network': 'n'
-        }
+        boot_order_lookup = {'Floppy': 'a', 'Hard Disk': 'c', 'CD-ROM': 'd', 'Network': 'n'}
         raw_boot_order = ''
         for order in boot_order:
             raw_boot_order += boot_order_lookup[order]
@@ -210,24 +199,22 @@ class VM():
         proxmox = connect_proxmox()
         port = str(int(port) - 5900)
         proxmox.nodes(self.node).qemu(self.id).monitor.post(
-            command='change vnc 127.0.0.1:{}'.format(port))
+            command='change vnc 127.0.0.1:{}'.format(port)
+        )
 
     @retry(wait=wait_fixed(2), stop=stop_after_attempt(5))
     def eject_iso(self):
         proxmox = connect_proxmox()
-        proxmox.nodes(self.node).qemu(
-            self.id).config.post(ide2='none,media=cdrom')
+        proxmox.nodes(self.node).qemu(self.id).config.post(ide2='none,media=cdrom')
 
     @retry(wait=wait_fixed(2), stop=stop_after_attempt(5))
     def mount_iso(self, iso):
         proxmox = connect_proxmox()
-        proxmox.nodes(self.node).qemu(
-            self.id).config.post(ide2='{},media=cdrom'.format(iso))
+        proxmox.nodes(self.node).qemu(self.id).config.post(ide2='{},media=cdrom'.format(iso))
 
     def resize_disk(self, disk, size):
         proxmox = connect_proxmox()
-        proxmox.nodes(self.node).qemu(self.id).resize.put(
-            disk=disk, size='+{}G'.format(size))
+        proxmox.nodes(self.node).qemu(self.id).resize.put(disk=disk, size='+{}G'.format(size))
 
     @lazy_property
     def expire(self):
@@ -267,7 +254,8 @@ def create_vm(proxmox, user, name, cores, memory, disk, iso):
         ide2='{},media=cdrom'.format(iso),
         net0='virtio,bridge=vmbr0',
         pool=user,
-        description='Managed by Proxstar')
+        description='Managed by Proxstar',
+    )
     return vmid
 
 
@@ -280,10 +268,6 @@ def clone_vm(proxmox, template_id, name, pool):
     delete_vm_expire(db, vmid)
     target = get_node_least_mem(proxmox)
     node.qemu(template_id).clone.post(
-        newid=vmid,
-        name=name,
-        pool=pool,
-        full=1,
-        description='Managed by Proxstar',
-        target=target)
+        newid=vmid, name=name, pool=pool, full=1, description='Managed by Proxstar', target=target
+    )
     return vmid