add ability to view pending vm status on vm list page

Jordan Rodgers 2018-03-06 16:28:17 -05:00
parent 3cc8268b9d
commit 786fd8ee2d
4 changed files with 69 additions and 3 deletions

View file

@@ -94,14 +94,29 @@ def list_vms(user_view=None):
     if user_view and not user.rtp:
         return '', 403
     elif user_view and user.rtp:
-        vms = User(user_view).vms
-        rtp_view = user_view
+        user_view = User(user_view)
+        vms = user_view.vms
+        for pending_vm in user_view.pending_vms:
+            vm = next((vm for vm in vms if vm['name'] == pending_vm['name']), None)
+            if vm:
+                vms[vms.index(vm)]['status'] = pending_vm['status']
+                vms[vms.index(vm)]['pending'] = True
+            else:
+                vms.append(pending_vm)
+        rtp_view = user_view.name
     elif user.rtp:
         vms = get_pool_cache(db)
         rtp_view = True
     else:
         if user.active:
             vms = user.vms
+            for pending_vm in user.pending_vms:
+                vm = next((vm for vm in vms if vm['name'] == pending_vm['name']), None)
+                if vm:
+                    vms[vms.index(vm)]['status'] = pending_vm['status']
+                    vms[vms.index(vm)]['pending'] = True
+                else:
+                    vms.append(pending_vm)
         else:
             vms = 'INACTIVE'
     return render_template(
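
Both branches of list_vms now overlay pending jobs onto the VM list the same way: a pending VM whose name matches an existing entry overwrites that entry's status and marks it pending; otherwise it is appended as a new card. A minimal sketch of that overlay as a standalone helper (the name merge_pending_vms is hypothetical; the commit keeps the loops inline in the view):

def merge_pending_vms(vms, pending_vms):
    # vms and pending_vms are lists of dicts keyed by 'name', matching the
    # shapes used in the diff above.
    for pending_vm in pending_vms:
        vm = next((vm for vm in vms if vm['name'] == pending_vm['name']), None)
        if vm:
            # vm is the same dict object that sits inside vms, so mutating it
            # directly is equivalent to the vms[vms.index(vm)] form above.
            vm['status'] = pending_vm['status']
            vm['pending'] = True
        else:
            vms.append(pending_vm)
    return vms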

View file

@@ -4,6 +4,7 @@ import requests
 import paramiko
 import psycopg2
 from flask import Flask
+from rq import get_current_job
 from sqlalchemy import create_engine
 from sqlalchemy.orm import sessionmaker
 from proxstar.db import *
@@ -43,13 +44,22 @@ def connect_starrs():
 
 def create_vm_task(user, name, cores, memory, disk, iso):
     with app.app_context():
+        job = get_current_job()
         proxmox = connect_proxmox()
         db = connect_db()
         starrs = connect_starrs()
+        job.meta['status'] = 'creating VM'
+        job.save_meta()
         vmid, mac = create_vm(proxmox, user, name, cores, memory, disk, iso)
+        job.meta['status'] = 'registering in STARRS'
+        job.save_meta()
         register_starrs(starrs, name, app.config['STARRS_USER'], mac,
                         get_next_ip(starrs, app.config['STARRS_IP_RANGE']))
+        job.meta['status'] = 'setting VM expiration'
+        job.save_meta()
         get_vm_expire(db, vmid, app.config['VM_EXPIRE_MONTHS'])
+        job.meta['status'] = 'complete'
+        job.save_meta()
 
 
 def delete_vm_task(vmid):
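
create_vm_task now records its progress by writing a short status string into the RQ job's meta dict and persisting it with save_meta() after every step. Any code with access to the queue can read that status back; a minimal sketch, assuming the queue q from proxstar's __init__ and a known job_id (the variable names here are assumptions):

job = q.fetch_job(job_id)   # rq returns None if the job no longer exists
if job is not None:
    job.refresh()           # re-read meta in case the worker updated it after the fetch
    print(job.meta.get('status', 'no status yet'))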
@@ -111,6 +121,7 @@ def generate_pool_cache_task():
 
 def setup_template_task(template_id, name, user, password, cores, memory):
     with app.app_context():
+        job = get_current_job()
         proxmox = connect_proxmox()
         starrs = connect_starrs()
         db = connect_db()
@@ -118,24 +129,38 @@ def setup_template_task(template_id, name, user, password, cores, memory):
             name, template_id))
         template = get_template(db, template_id)
         print("[{}] Cloning template {}.".format(name, template_id))
+        job.meta['status'] = 'cloning template'
+        job.save_meta()
         vmid, mac = clone_vm(proxmox, template_id, name, user)
         print("[{}] Registering in STARRS.".format(name))
+        job.meta['status'] = 'registering in STARRS'
+        job.save_meta()
         ip = get_next_ip(starrs, app.config['STARRS_IP_RANGE'])
         register_starrs(starrs, name, app.config['STARRS_USER'], mac, ip)
         get_vm_expire(db, vmid, app.config['VM_EXPIRE_MONTHS'])
         print("[{}] Setting CPU and memory.".format(name))
+        job.meta['status'] = 'setting CPU and memory'
+        job.save_meta()
         vm = VM(vmid)
         vm.set_cpu(cores)
         vm.set_mem(memory)
         print(
             "[{}] Waiting for STARRS to propogate before starting VM.".format(
                 name))
+        job.meta['status'] = 'waiting for STARRS'
+        job.save_meta()
         time.sleep(90)
         print("[{}] Starting VM.".format(name))
+        job.meta['status'] = 'starting VM'
+        job.save_meta()
         vm.start()
         print("[{}] Waiting for VM to start before SSHing.".format(name))
+        job.meta['status'] = 'waiting for VM to start'
+        job.save_meta()
         time.sleep(20)
         print("[{}] Creating SSH session.".format(name))
+        job.meta['status'] = 'creating SSH session'
+        job.save_meta()
         client = paramiko.SSHClient()
         client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
         retry = 0
@@ -150,6 +175,8 @@ def setup_template_task(template_id, name, user, password, cores, memory):
                 retry += 1
                 time.sleep(3)
         print("[{}] Running user creation commands.".format(name))
+        job.meta['status'] = 'running user creation commands'
+        job.save_meta()
         stdin, stdout, stderr = client.exec_command("useradd {}".format(user))
         exit_status = stdout.channel.recv_exit_status()
         root_password = gen_password(32)
@@ -168,6 +195,8 @@ def setup_template_task(template_id, name, user, password, cores, memory):
         exit_status = stdout.channel.recv_exit_status()
         client.close()
         print("[{}] Template successfully provisioned.".format(name))
+        job.meta['status'] = 'completed'
+        job.save_meta()
 
 
 def cleanup_vnc_task():
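
setup_template_task repeats the same two lines at every provisioning step: set job.meta['status'], then job.save_meta(). A tiny helper could remove that repetition; this is only a sketch, and set_job_status is a hypothetical name that is not part of the commit:

def set_job_status(job, status):
    # Persist a human-readable progress string so the VM list page can show it.
    job.meta['status'] = status
    job.save_meta()

# Usage inside the task would then read, e.g.:
#     set_job_status(job, 'cloning template')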

View file

@@ -33,9 +33,13 @@
         <div class="col-md-3 col-sm-4 col-xs-6">
             <div class="panel panel-default">
                 <div class="panel-body">
+                    {% if not vm.get('pending', False) %}
                     <a href="/vm/{{ vm['vmid'] }}">
                         <p>{{ vm['name'] }}</p>
                     </a>
+                    {% else %}
+                    <p>{{ vm['name'] }}</p>
+                    {% endif %}
                     <p>Status: {{ vm['status'] }}</p>
                 </div>
             </div>
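
The template change keys off vm.get('pending', False): entries built by User.pending_vms have no 'vmid' yet, so their name is shown without the usual /vm/<vmid> link. Roughly the two dict shapes involved (values here are illustrative; real entries from the pool cache carry more fields):

# A provisioned VM as the page normally receives it (illustrative values).
provisioned_vm = {'vmid': 104, 'name': 'example-vm', 'status': 'running'}

# A pending VM built by User.pending_vms in this commit: no vmid yet, so the
# template renders just the name and the job's status string.
pending_vm = {'name': 'example-vm', 'status': 'cloning template', 'pending': True}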

View file

@@ -1,8 +1,9 @@
-from proxstar import db
+from proxstar import db, q, redis_conn
 from proxstar.db import *
 from proxstar.vm import VM
 from proxstar.util import *
 from proxstar.proxmox import *
+from rq.registry import StartedJobRegistry
 
 
 class User(object):
@@ -30,6 +31,23 @@ class User(object):
         vms = sorted(vms, key=lambda k: k['name'])
         return vms
 
+    @lazy_property
+    def pending_vms(self):
+        jobs = StartedJobRegistry('default', connection=redis_conn).get_job_ids()
+        for job_id in q.job_ids:
+            jobs.append(job_id)
+        pending_vms = []
+        for job in jobs:
+            job = q.fetch_job(job)
+            if len(job.args) > 2:
+                if job.args[0] == self.name or job.args[2] == self.name:
+                    vm_dict = dict()
+                    vm_dict['name'] = job.args[1]
+                    vm_dict['status'] = job.meta.get('status', 'no status yet')
+                    vm_dict['pending'] = True
+                    pending_vms.append(vm_dict)
+        return pending_vms
+
     @lazy_property
     def allowed_vms(self):
         allowed_vms = []
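
pending_vms collects job ids from both the StartedJobRegistry (jobs a worker is currently running) and q.job_ids (jobs still waiting in the queue), then keeps the ones whose arguments name this user: create_vm_task passes the owner as args[0], setup_template_task as args[2], and both pass the VM name as args[1]. A standalone sketch of the same idea with one extra guard, since fetch_job can return None when a job finishes between listing and fetching (the function name and the guard are illustrative additions, not part of the commit):

from rq.registry import StartedJobRegistry

def pending_vms_for(username, queue, redis_conn):
    # Gather ids of running and queued jobs, then keep VM jobs owned by username.
    job_ids = StartedJobRegistry('default', connection=redis_conn).get_job_ids()
    job_ids += queue.job_ids
    pending = []
    for job_id in job_ids:
        job = queue.fetch_job(job_id)
        if job is None or len(job.args) <= 2:
            continue  # job already finished/expired, or not a VM creation/clone task
        if username in (job.args[0], job.args[2]):
            pending.append({
                'name': job.args[1],
                'status': job.meta.get('status', 'no status yet'),
                'pending': True,
            })
    return pending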