Mirror of https://github.com/ThomasGsp/HyperProxmox.git (synced 2025-03-09 15:40:18 +00:00)
Implement Thread

commit c77db71948 (parent ae9847c075)
1 changed file with 66 additions and 44 deletions
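The commit replaces the sequential per-cluster crawl in run() with one worker thread per cluster: a threading.Thread targeting the new threadcrawl() method is started for each entry in clusters_conf, and run() joins them all before updating the date key and releasing the walker lock. A minimal standalone sketch of that fan-out/join pattern, with a stub threadcrawl and a hypothetical two-cluster config in place of the real Proxmox crawl:

    import threading

    def threadcrawl(cluster):
        # Stub for the real per-cluster work (auth, nodes, instances, storages).
        print("crawling {0} on thread {1}".format(cluster["name"], threading.get_ident()))

    clusters_conf = [{"name": "cluster-a"}, {"name": "cluster-b"}]  # hypothetical config

    list_threads = []
    for cluster in clusters_conf:
        # One named worker per cluster, mirroring the diff's threading.Thread(...) call.
        thc = threading.Thread(name=cluster["name"], target=threadcrawl, args=(cluster,))
        list_threads.append(thc)
        thc.start()

    # Join every crawler before post-processing (update_datekey and unlock in the real code).
    for thc in list_threads:
        thc.join()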
@@ -48,6 +48,7 @@ class Analyse:
         """
         self.generalconf = generalconf
         self.logger = logger
+        self.idlist = []
 
         """ LOAD MONGODB """
         self.mongo = MongoDB(generalconf["mongodb"]["ip"])
@@ -57,10 +58,8 @@ class Analyse:
         self.clusters_conf = self.mongo.get_clusters_conf()["value"]
 
 
-    def threadcrawl(self):
-        return
 
     def run(self, instancetype="all"):
         """ Active logger"""
         self.logger.write({"thread":threading.get_ident(), "result": "INFO", "type": "HYPERPROXMOX", "value": "Start logger - Analyst Module"})
 
@@ -74,26 +73,49 @@ class Analyse:
         self.mongo.insert_datekey(insert_time, 'running')
 
         """ Init the ID list to detect the duplicates """
-        idlist = []
+        list_threads = []
         for cluster in self.clusters_conf:
-            self.logger.write({"thread":threading.get_ident(), "result": "DEBUG", "type": "HYPERPROXMOX", "value": "Start crawl on:"})
-            self.logger.write({"thread":threading.get_ident(), "result": "DEBUG", "type": "HYPERPROXMOX", "value": cluster})
-
-            """ Decode data """
-            proxmox_clusters_user = pdecrypt(base64.b64decode(cluster["user"]),
-                                             self.generalconf["keys"]["key_pvt"])["value"].decode('utf-8')
-
-            proxmox_clusters_pwd = pdecrypt(base64.b64decode(cluster["password"]),
-                                            self.generalconf["keys"]["key_pvt"])["value"].decode('utf-8')
-
-            """ AUTH """
-            proxmox = Proxmox("Analyse")
-            connection = proxmox.get_ticket("{0}:{1}".format(cluster["url"], int(cluster["port"])), proxmox_clusters_user, proxmox_clusters_pwd)
-
-            """ ByPass and log if connection has failed """
-            if connection["result"] != "OK":
-                self.logger.write({"thread": threading.get_ident(), "result": "ERROR", "type": "HYPERPROXMOX", "value": connection})
-                continue
+            thc = threading.Thread(name=cluster["name"], target=self.threadcrawl, args=(cluster, insert_time, instancetype))
+            list_threads.append(thc)
+            thc.start()
+
+        # Waiting for all threads
+        for thc in list_threads:
+            thc.join()
+
+        self.mongo.update_datekey(int(insert_time), "OK")
+
+        """ Unlock file """
+        locker.unlock(self.generalconf["analyst"]["walker_lock"], "analyst")
+
+        return
+
+    def threadcrawl(self, cluster, insert_time, instancetype="all"):
+        self.logger.write(
+            {"thread": threading.get_ident(), "result": "DEBUG", "type": "HYPERPROXMOX", "value": "Start crawl on:"})
+        self.logger.write(
+            {"thread": threading.get_ident(), "result": "DEBUG", "type": "HYPERPROXMOX", "value": cluster})
+
+        """ Decode data """
+        proxmox_clusters_user = pdecrypt(base64.b64decode(cluster["user"]),
+                                         self.generalconf["keys"]["key_pvt"])["value"].decode('utf-8')
+
+        proxmox_clusters_pwd = pdecrypt(base64.b64decode(cluster["password"]),
+                                        self.generalconf["keys"]["key_pvt"])["value"].decode('utf-8')
+
+        """ AUTH """
+        proxmox = Proxmox("Analyse")
+        connection = proxmox.get_ticket("{0}:{1}".format(cluster["url"], int(cluster["port"])), proxmox_clusters_user,
+                                        proxmox_clusters_pwd)
+
+        """ ByPass and log if connection has failed """
+        if connection["result"] != "OK":
+            self.logger.write(
+                {"thread": threading.get_ident(), "result": "ERROR", "type": "HYPERPROXMOX", "value": connection})
+
+        else:
 
             """
             ##############
@@ -103,8 +125,10 @@ class Analyse:
 
             """ Get excluded nodes """
             exclude_nodes = cluster["exclude_nodes"]
-            self.logger.write({"thread":threading.get_ident(), "result": "DEBUG", "type": "HYPERPROXMOX", "value": "List nodes excludes:"})
-            self.logger.write({"thread":threading.get_ident(), "result": "DEBUG", "type": "HYPERPROXMOX", "value": exclude_nodes})
+            self.logger.write({"thread": threading.get_ident(), "result": "DEBUG", "type": "HYPERPROXMOX",
+                               "value": "List nodes excludes:"})
+            self.logger.write(
+                {"thread": threading.get_ident(), "result": "DEBUG", "type": "HYPERPROXMOX", "value": exclude_nodes})
 
             """ UPDATE CLUSTER STATUS """
             clusters_status = proxmox.get_clusters("{0}:{1}".format(cluster["url"], int(cluster["port"])))
@@ -136,15 +160,15 @@ class Analyse:
 
                     for type in types:
                         list_instances["data"] = list_instances["data"] + \
-                                                 proxmox.get_instances("{0}:{1}".format(cluster["url"], int(cluster["port"])),
+                                                 proxmox.get_instances(
+                                                     "{0}:{1}".format(cluster["url"], int(cluster["port"])),
                                                                        value_nodes_list["node"], type)["value"]["data"]
 
 
                 else:
                     list_instances = \
                         proxmox.get_instances("{0}:{1}".format(cluster["url"], int(cluster["port"])),
                                               value_nodes_list["node"], instancetype)["value"]
 
 
                 totalcpu = 0
                 totalram = 0
@@ -168,8 +192,8 @@ class Analyse:
                             instance["type"] = "qemu"
 
                         config_av = proxmox.get_configs("{0}:{1}".format(cluster["url"], int(cluster["port"])),
-                                                        value_nodes_list["node"], instance["type"], instance["vmid"])["value"]
+                                                        value_nodes_list["node"], instance["type"], instance["vmid"])[
+                            "value"]
 
                         maclist = []
                         currentdesc = ""
@@ -191,30 +215,35 @@ class Analyse:
                             getidfromdesc = re.search("id=\"([A-Z\.\d\_]+)\"", currentdesc)
                             # Set unique id if not found
                             if getidfromdesc is None:
-                                # add a duplicate-id test
                                 """ General description """
                                 randtext = ''.join(random.choice('AZERTYUIOPQSDFGHJKLMWXCVBN') for i in range(8))
                                 uniqid = "-- Please, do not change or delete this ID -- \n" \
                                          "id=\"{0}_{1}\"\n------------------\n{2}".format(insert_time, randtext,
                                                                                           currentdesc)
                                 instance["description"] = uniqid
 
-                                idlist.append(uniqid)
+                                self.idlist.append(uniqid)
                                 """ INSTANCE DEFINITION """
                                 datadesc = {'description': uniqid}
-                                resultsetdesc = proxmox.change_instances("{0}:{1}".format(cluster["url"], int(cluster["port"])),
-                                                                         value_nodes_list["node"], instance["type"], instance["vmid"], datadesc)
+                                resultsetdesc = proxmox.change_instances(
+                                    "{0}:{1}".format(cluster["url"], int(cluster["port"])),
+                                    value_nodes_list["node"], instance["type"], instance["vmid"], datadesc)
                                 instance["uniqid"] = "{0}_{1}".format(insert_time, randtext)
 
                             else:
                                 instance["uniqid"] = getidfromdesc.group(1)
-                                if getidfromdesc.group(1) in idlist:
+                                if getidfromdesc.group(1) in self.idlist:
                                     self.logger.write(
-                                        {"thread":threading.get_ident(), "result": "WARNING", "type": "HYPERPROXMOX", "value": "Double ID detected: {0}".format(getidfromdesc.group(1))})
-                                    self.logger.write({"thread":threading.get_ident(), "result": "WARNING", "type": "HYPERPROXMOX", "value": json.dumps(instance)})
-                                    self.logger.write({"thread":threading.get_ident(), "result": "WARNING", "type": "HYPERPROXMOX", "value": "-------------------"})
+                                        {"thread": threading.get_ident(), "result": "WARNING", "type": "HYPERPROXMOX",
+                                         "value": "Double ID detected: {0}".format(getidfromdesc.group(1))})
+                                    self.logger.write(
+                                        {"thread": threading.get_ident(), "result": "WARNING", "type": "HYPERPROXMOX",
+                                         "value": json.dumps(instance)})
+                                    self.logger.write(
+                                        {"thread": threading.get_ident(), "result": "WARNING", "type": "HYPERPROXMOX",
+                                         "value": "-------------------"})
                                 else:
-                                    idlist.append(getidfromdesc.group(1))
+                                    self.idlist.append(getidfromdesc.group(1))
 
                                 self.mongo.insert_instances(instance)
 
@@ -269,9 +298,8 @@ class Analyse:
                             storage["date"] = int(insert_time)
                             storage["cluster"] = cluster["name"]
 
-
                             disks_list = proxmox.get_disks("{0}:{1}".format(cluster["url"], int(cluster["port"])),
                                                            value_nodes_list["node"], storage["storage"])
 
                             totalsize = 0
                             for disk in disks_list["value"]["data"]:
@@ -288,12 +316,6 @@ class Analyse:
                             storage["totalallocdisk"] = totalsize
                             self.mongo.insert_storages(storage)
 
-        self.mongo.update_datekey(int(insert_time), "OK")
-
-        """ Unlock file """
-        locker.unlock(self.generalconf["analyst"]["walker_lock"], "alanyst")
-
-        return
 
     def set_attribution(self, count):
         """ RETURN cluster and node"""
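The duplicate-ID list, previously a local idlist in run(), becomes the shared attribute self.idlist so every crawler thread can record the IDs it has seen. Under CPython a single list.append is atomic, but the membership test followed by an append in the diff is not one atomic step across threads. A minimal lock-guarded sketch of that check, assuming a hypothetical self.idlock that this commit does not add:

    import threading

    class Analyse:
        def __init__(self):
            self.idlist = []
            self.idlock = threading.Lock()  # hypothetical; not part of this commit

        def register_uniqid(self, uniqid):
            # Perform the duplicate test and the append as one atomic step.
            with self.idlock:
                if uniqid in self.idlist:
                    return False  # double ID detected
                self.idlist.append(uniqid)
                return True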