Mirror of https://github.com/ThomasGsp/HyperProxmox.git (synced 2025-03-09 15:40:18 +00:00)
Purge system

Commit 2f560c4257 (parent ed48b3227a)
2 changed files with 98 additions and 31 deletions
@@ -29,7 +29,7 @@ def RunAnalyse(clusters_conf, generalconf, logger):
 class Core:
     # def __init__(self, generalconf, Lredis):
     def __init__(self, generalconf, logger):
-
+        self.purge = False
         self.generalconf = generalconf
         self.logger = logger
         self.logger.write({"thread": threading.get_ident(), "result": "INFO", "type": "HYPERPROXMOX",
@@ -113,7 +113,7 @@ class Core:

         cache = self.redis_cache.get_message(hash_hex)

-        if cache is None or self.generalconf["logger"]["debug"] == True:
+        if cache is None or self.generalconf["logger"]["logs_level"] == 5 or self.purge:
             if dest == "instances":
                 resultmbrequest = self.mongo.get_instances(date, cluster, node, vmid)
             elif dest == "nodes":
@@ -130,7 +130,6 @@ class Core:
             self.redis_cache.insert_message(hash_hex, resultmbrequest, 3600)
             return resultmbrequest
         else:

             return json.loads(cache.replace("'", "\"").replace("None", "\"\""))

-
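Taken together, the two hunks above implement a cache-aside lookup: Redis is checked first, and MongoDB is only queried when the key is missing, when the most verbose log level (logs_level 5) is set, or while a purge is in progress, so stale entries are not served during a purge; the fresh result is then cached for 3600 seconds. A minimal standalone sketch of the same pattern with redis-py follows; the key name, query function and purge flag are illustrative, not part of the commit.

import json

import redis

r = redis.Redis(host="localhost", port=6379, decode_responses=True)

def cached_query(key, query_fn, bypass_cache=False, ttl=3600):
    # Cache-aside lookup: serve from Redis unless the entry is missing or a bypass
    # (debug logging or a running purge) forces a fresh database read.
    cached = r.get(key)
    if cached is None or bypass_cache:
        result = query_fn()                     # hit MongoDB (or any backend) on miss/bypass
        r.set(key, json.dumps(result), ex=ttl)  # keep the fresh result for one hour
        return result
    return json.loads(cached)                   # serve straight from the cache

# Illustrative call, mirroring the self.purge bypass above:
# data = cached_query("nodes:20190101", lambda: {"value": []}, bypass_cache=purge_running)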
@@ -467,40 +466,66 @@ class Core:

     def managedata(self, json):
         if json["action"] == "purge":
+            purgedata = {}
             if json["type"] == "strict":
-                listdate = self.mongo.get_all_datekey()
-                if json["date"] in listdate:
-                    for date in listdate:
-                        if date <= json["date"]:
-                            for instance in self.mongo.get_instances(date):
-                                self.mongo.deletedata("instances", instance["_id"])
-                            for node in self.mongo.get_nodes(date):
-                                self.mongo.deletedata("nodes", node["_id"])
-                            for storage in self.mongo.get_storages(date):
-                                self.mongo.deletedata("storages", storage["_id"])
-                            for disk in self.mongo.get_disks(date):
-                                self.mongo.deletedata("disks", disk["_id"])
-                else:
-                    purge = {
-                        "value": "This date is not available",
-                        "result": "WARNING",
-                        "type": "HYPERPROXMOX"
-                    }
-            elif json["type"] == "sequencial":
-                purge = {
-                    "value": "Not implemented",
-                    "result": "WARNING",
+                listdate = self.mongo.get_all_datekey()["value"]
+                self.logger.write({"thread": threading.get_ident(), "result": "INFO", "type": "HYPERPROXMOX",
+                                   "value": "Starting purge process"})
+                self.purge = True  # Purge Lock
+                totalinstances, totalnodes, totaldisks, totalstorares, totalclusters, totaldates = 0, 0, 0, 0, 0, 0
+                for date in listdate:
+                    if int(date['date']) <= int(json["date"]):
+                        for instance in self.generalquerycacheinfra("instances", date['date'])["value"]:
+                            purgedata[instance["_id"]["$oid"]] = self.mongo.generalmongodelete("instances", instance["_id"]["$oid"])
+                            totalinstances += 1
+
+                        for node in self.generalquerycacheinfra("nodes", date['date'])["value"]:
+                            purgedata[node["_id"]["$oid"]] = self.mongo.generalmongodelete("nodes", node["_id"]["$oid"])
+                            totalnodes += 1
+
+                        for storage in self.generalquerycacheinfra("storages", date['date'])["value"]:
+                            purgedata[storage["_id"]["$oid"]] = self.mongo.generalmongodelete("storages", storage["_id"]["$oid"])
+                            totalstorares += 1
+
+                        for disk in self.generalquerycacheinfra("disks", date['date'])["value"]:
+                            purgedata[disk["_id"]["$oid"]] = self.mongo.generalmongodelete("disks", disk["_id"]["$oid"])
+                            totaldisks += 1
+
+                        for cluster in self.generalquerycacheinfra("clusters", date['date'])["value"]:
+                            purgedata[cluster["_id"]["$oid"]] = self.mongo.generalmongodelete("clusters", cluster["_id"]["$oid"])
+                            totalclusters += 1
+
+                        purgedata[date["_id"]["$oid"]] = self.mongo.generalmongodelete("dates", date["_id"]["$oid"])
+                        totaldates += 1
+
+                self.purge = False  # Purge UnLock
+                valuedelete = {
+                    "Instances": totalinstances, "Disks": totaldisks, "Nodes": totalnodes,
+                    "Storages": totalstorares, "Dates": totaldates, "Clusters": totalclusters
+                }
+                self.logger.write({"thread": threading.get_ident(), "result": "INFO", "type": "HYPERPROXMOX",
+                                   "value": "{0} entries in your database deleted".format(valuedelete)})
+                self.logger.write({"thread": threading.get_ident(), "result": "INFO", "type": "HYPERPROXMOX",
+                                   "value": "Purge process terminated"})
+                data = {
+                    "value": "{0} entries in your database deleted".format(valuedelete),
+                    "result": "OK",
                     "type": "HYPERPROXMOX"
                 }
             else:
-                purge = {
-                    "value": "Unknown purging type",
-                    "result": "WARNING",
+                data = {
+                    "value": "Bad request",
+                    "result": "ERROR",
                     "type": "HYPERPROXMOX"
                 }
-            return purge
+        else:
+            data = {
+                "value": "Bad request",
+                "result": "ERROR",
+                "type": "HYPERPROXMOX"
+            }
+
+        return data

     """
     #######################
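With the rewrite above, a strict purge walks every recorded datekey and, for each key not newer than the requested cut-off, deletes the matching instances, nodes, storages, disks, clusters and finally the date document itself, counting what it removed. A hypothetical caller could look like the following; core is assumed to be an already constructed Core(generalconf, logger) instance, and the date value only illustrates the integer datekey format.

# Hypothetical usage of Core.managedata() after this commit; field names come from
# the diff, the concrete date value is an assumption about the datekey format.
request = {
    "action": "purge",    # anything other than "purge" yields the "Bad request" error
    "type": "strict",     # only "strict" is implemented after this commit
    "date": 201903011200  # purge every dataset whose datekey is <= this value
}
response = core.managedata(request)
# Expected shape of the response:
# {"value": "{...} entries in your database deleted", "result": "OK", "type": "HYPERPROXMOX"}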
@@ -69,6 +69,24 @@ class MongoDB:
         self.db = None
         self.client = None

+
+    def __mappingcol(self, col):
+        if col == "instances":
+            collection = self.collection_instances
+        elif col == "nodes":
+            collection = self.collection_nodes
+        elif col == "disks":
+            collection = self.collection_disks
+        elif col == "storages":
+            collection = self.collection_storages
+        elif col == "dates":
+            collection = self.collection_datekey
+        elif col == "clusters":
+            collection = self.collection_clusters
+        else:
+            collection = ""
+        return collection
+
     def connect(self):
         try:
             conn = MongoClient(self.server + ':' + str(self.port))
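The new __mappingcol helper resolves a short collection name through an if/elif chain. A dict lookup is a slightly more compact equivalent; the sketch below assumes the same six collection attributes and keeps the empty-string fallback for unknown names.

def __mappingcol(self, col):
    # Dict-based equivalent of the if/elif chain added in this commit.
    mapping = {
        "instances": self.collection_instances,
        "nodes": self.collection_nodes,
        "disks": self.collection_disks,
        "storages": self.collection_storages,
        "dates": self.collection_datekey,
        "clusters": self.collection_clusters,
    }
    return mapping.get(col, "")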
@@ -97,6 +115,30 @@ class MongoDB:
            }
        return result

+    def generalmongodelete(self, col, id):
+        try:
+            # mapping
+            collection = self.__mappingcol(col)
+            if collection:
+                print(col, id)
+                self.db[collection].remove({"_id": ObjectId(id)})
+                result = {
+                    "result": "OK",
+                    "value": "{0} has been deleted".format(id)
+                }
+            else:
+                result = {
+                    "result": "ERROR",
+                    "value": "{0} collection not found".format(collection)
+                }
+        except BaseException as e:
+            result = {
+                "result": "ERROR",
+                "type": "MongoDB - Request delete failed",
+                "value": "Invalid request: {0}".format(e)
+            }
+        return result
+
     """ CLUSTER """
     def get_clusters(self, date, cluster):
         try:
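generalmongodelete relies on Collection.remove, the legacy pymongo call (deprecated in pymongo 3.x and removed in 4.x). A functionally equivalent delete with the current API is delete_one, sketched here with the same ObjectId filter; the connection settings and database name are illustrative, not taken from the repository.

from bson.objectid import ObjectId
from pymongo import MongoClient

client = MongoClient("localhost", 27017)   # illustrative connection settings
db = client["hyperproxmox"]                # illustrative database name

def delete_by_oid(collection_name, oid_str):
    # Delete a single document by ObjectId, mirroring what generalmongodelete does.
    outcome = db[collection_name].delete_one({"_id": ObjectId(oid_str)})
    if outcome.deleted_count:
        return {"result": "OK", "value": "{0} has been deleted".format(oid_str)}
    return {"result": "ERROR", "value": "{0} not found".format(oid_str)}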
@@ -228,12 +270,12 @@ class MongoDB:
         self.db[self.collection_datekey].update({'date': int(date)}, {'$set': {'status': status}}, upsert=False)

     def get_last_datekey(self):
-        last_id = self.db[self.collection_datekey].find({'status': 'OK'}, {"date": 1, "_id": 0}).sort("date", -1)
+        last_id = self.db[self.collection_datekey].find({'status': 'OK'}, {"date": 1, "_id": 1}).sort("date", -1)
         return {"value": int(json.loads(dumps(last_id))[0]['date'])}

     def get_all_datekey(self):
         keylist = self.db[self.collection_datekey].find({'status': 'OK'},
-                                                        {"date": 1, "_id": 0}).sort("date", -1)
+                                                        {"date": 1, "_id": 1}).sort("date", -1)
         return {"value": json.loads(dumps(keylist))}

     """ NODES MANAGEMENT"""
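The projection change in get_last_datekey and get_all_datekey (from "_id": 0 to "_id": 1) is what allows the purge loop to delete the date documents themselves: bson.json_util.dumps serialises the returned ObjectId as an {"$oid": ...} entry, which generalmongodelete then receives. A small illustration of that serialisation, with made-up document values:

import json

from bson import ObjectId
from bson.json_util import dumps

doc = {"_id": ObjectId("5c8a6f2e1d41c81f6c9a0b12"), "date": 201903011200, "status": "OK"}
as_dict = json.loads(dumps([doc]))[0]
print(as_dict["_id"]["$oid"])  # -> "5c8a6f2e1d41c81f6c9a0b12"
print(as_dict["date"])         # -> 201903011200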