Mirror of https://github.com/ThomasGsp/HyperProxmox.git (synced 2025-02-12 11:12:10 +00:00)

Commit c6584dec25 ("general update"), parent 8c46b7b6e9
9 changed files with 218 additions and 147 deletions
@@ -31,6 +31,46 @@ class Auth:
        return


""" CLASS MONGO CACHE """


class General_Search:
    def GET(self, id):
        return core.generalsearch('{ "_id": {0}}'.format(id))


class QueryCache_Infra:
    def GET(self, dest, date, cluster=None, node=None, vmid=None):
        try:
            result = core.generalquerycacheinfra(dest, date, cluster, node, vmid)
        except BaseException as e:
            result = {
                "result": "ERROR",
                "type": "PYTHON - API",
                "value": "{0} {1}".format("Invalid request:", e)
            }
        return result


class Static_Nodes:
    def GET(self, date, cluster=None, node=None):
        if node and cluster:
            return core.generalsearch('{ "date": {0}, "cluster": {1}, "node": {2} }'.format(date, cluster, node))
        elif cluster:
            return core.generalsearch('{ "date": {0}, "cluster": {1} }'.format(date, cluster))
        else:
            return core.generalsearch('{ "date": {0}}'.format(date))


class Dates:
    def GET(self):
        return core.generalsearch('{ "_id": {id} }'.format(id=nodeid))


""" CLASS DIRECT """


class Cluster:
    def GET(self, cluster=None):
        try:
@@ -83,7 +123,7 @@ class Cluster:


class Instance:
-    def GET(self, vmid=None, status=None):
+    def GET(self, vmid, status=None):
        try:
            if status:
                """ GET INSTANCE STATUS """
@@ -117,11 +157,14 @@ class Instance:
            sorted_nodes = dict(select.set_attribution(count))

            """ START ALL Thread """
-            for target, count in sorted_nodes.items():
+            for nodeid, count in sorted_nodes.items():
+                """ Find information by id mongodb """
+                realnode = core.generalsearch('{ "_id": {id} }'.format(id=nodeid))

                # Limit to 5 instances per block
                thci = threading.Thread(name="Insert Instance",
                                        target=core.insert_instance,
-                                        args=(target, str(count), command_id,))
+                                        args=(realnode["name"], cluster["cluster"], str(count), command_id,))

                thci.start()
@@ -20,7 +20,7 @@ import base64


def RunAnalyse(clusters_conf, generalconf, delay=300):
-    play = Crawler(clusters_conf, generalconf)
+    play = Analyse(clusters_conf, generalconf)

    while True:
@@ -59,17 +59,49 @@ class Core:
                               args=(self.clusters_conf, self.generalconf))
        thc.start()

    """
    #######################
    # GENERAL FUNCTIONS #
    #######################
    """

    def is_json(myjson):
        try:
            json_object = json.loads(myjson)
        except ValueError:
            return False
        return True

    def generalsearch(self, data):
        if is_json(data):
            return self.mongo.generalmongosearch(data)
        else:
            return json_decode({"value": "Bad request"})

    def generalquerycacheinfra(self, dest, date, cluster, node, vmid):
        if dest == "instances":
            return self.mongo.get_instance(date, cluster, node, vmid)
        elif dest == "nodes":
            return self.mongo.get_nodes_informations(date, cluster, node)
        elif dest == "clusters":
            return self.mongo.get_clusters_conf(date, cluster)
        else:
            json_decode({"value": "Bad request"})

    """
    #######################
    # INSTANCE MANAGEMENT #
    #######################
    """
-    def insert_instance(self, target, count=1, command_id=000000, instancetype="lxc"):
+    def insert_instance(self, node, cluster, count=1, command_id=000000, instancetype="lxc"):

        """ Find cluster information from node """
        lastkeyvalid = self.mongo.get_last_datekey()
-        node_informations = self.mongo.get_nodes_informations((int(lastkeyvalid["value"])), target)
-        cluster_informations = self.mongo.get_clusters_conf(node_informations["cluster"])["value"]
+        node_informations = self.mongo.get_nodes_informations((int(lastkeyvalid["value"])), node, cluster)
+        cluster_informations = self.mongo.get_clusters_conf(cluster)["value"]

        proxmox_cluster_url = cluster_informations["url"]
        proxmox_cluster_port = cluster_informations["port"]
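To make the new cache dispatcher easier to follow, here is a minimal, self-contained sketch of the same routing expressed as a lookup table. The function name and the string labels are illustrative assumptions, not part of the commit; the real code calls the MongoDB wrapper methods shown elsewhere in this diff.

```python
# Table-driven equivalent of generalquerycacheinfra (illustration only).
def dispatch_cache_query(dest, date, cluster=None, node=None, vmid=None):
    routes = {
        "instances": lambda: ("mongo.get_instance", date, cluster, node, vmid),
        "nodes": lambda: ("mongo.get_nodes_informations", date, cluster, node),
        "clusters": lambda: ("mongo.get_clusters_conf", date, cluster),
    }
    route = routes.get(dest)
    if route is None:
        # Unknown destinations fall back to the "Bad request" payload, as above.
        return {"value": "Bad request"}
    return route()

print(dispatch_cache_query("storages", 1520000000))  # {'value': 'Bad request'}
```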
@@ -83,7 +115,7 @@ class Core:
        proxmox_storage_disk = cluster_informations["storage_disk"]

        """ LOAD PROXMOX """
-        proxmox = Proxmox(target)
+        proxmox = Proxmox(node)

        proxmox.get_ticket("{0}:{1}".format(proxmox_cluster_url,
                                            int(proxmox_cluster_port)),
@@ -139,12 +171,12 @@ class Core:

        """ INSTANCE INSERTION """
        result_new = proxmox.create_instance("{0}:{1}".format(proxmox_cluster_url,
-                                                              int(proxmox_cluster_port)), target, instancetype,
+                                                              int(proxmox_cluster_port)), node, instancetype,
                                             data)
        """ Get first digest """
        digest_init = proxmox.get_config("{0}:{1}".format(proxmox_cluster_url,
                                                           int(proxmox_cluster_port)),
-                                         target, instancetype, next_instance_id)['value']['data']['digest']
+                                         node, instancetype, next_instance_id)['value']['data']['digest']

        """ VERIFY THE RESULT BY PROXMOX STATUS REQUEST CODE """
@@ -164,7 +196,7 @@ class Core:
        """ Limit creation DDOS based on digest """
        while digest_init == proxmox.get_config("{0}:{1}".format(proxmox_cluster_url,
                                                                  int(proxmox_cluster_port)),
-                                                target, instancetype, next_instance_id)['value']['data']['digest']:
+                                                node, instancetype, next_instance_id)['value']['data']['digest']:
            time.sleep(5)

        returnlistresult.append(result_new)
@@ -374,6 +406,20 @@ class Core:
        return cluster_delete

    """
    #######################
    # STORAGES MANAGEMENT #
    #######################
    """

    """
    #######################
    # NODES MANAGEMENT #
    #######################
    """


def valid_cluster_data(data):
    key_required = ["name", "url", "port", "user", "password", "template", "storage_disk", "weight", "exclude_nodes"]
    result = []
@@ -9,6 +9,7 @@ import os
from Crypto.PublicKey import RSA
+import hashlib


def encodepassphrase(passphrase):
    return hashlib.sha512(passphrase.encode("UTF-8")).hexdigest()
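For reference, a standalone check of what the SHA-512 passphrase hashing above produces; the sample passphrase is invented and only the digest length is asserted.

```python
import hashlib

def encodepassphrase(passphrase):
    # Same approach as the helper above: UTF-8 encode, SHA-512, hex digest.
    return hashlib.sha512(passphrase.encode("UTF-8")).hexdigest()

digest = encodepassphrase("example-passphrase")  # sample value, not from the repo
print(len(digest))  # 128 hex characters
```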
@@ -124,5 +125,4 @@ class CryticalData:
                "type": "PYTHON",
                "error": "Data decryption failed: {0}".format(e)
            }
-            return result_decryption

        return result_decryption
@@ -16,6 +16,7 @@ import operator
import random
+import base64


def add_token(tokens_in_slots, slot_distributions):
    num_tokens = sum(tokens_in_slots)
    if not num_tokens:
@@ -74,44 +75,68 @@ class Analyse:
            nodes_list = proxmox.get_nodes("{0}:{1}".format(cluster["url"], int(cluster["port"])))
            if nodes_list["result"] == "OK":
                for value_nodes_list in nodes_list["value"]["data"]:
-                    if value_nodes_list["node"] not in exclude_nodes:
-                        """ TOTAL COUNT CPU and RAM allocate"""
-                        if (instancetype == "all"):
-                            types = ["lxc", "qemu"]  # vz...
-                            for type in types:
-                                list_instances.update(
-                                    proxmox.get_instance("{0}:{1}".format(cluster["url"], int(cluster["port"])),
-                                                         value_nodes_list["node"], type)["value"])
-                        else:
-                            list_instances = \
-                                proxmox.get_instance("{0}:{1}".format(cluster["url"], int(cluster["port"])),
-                                                     value_nodes_list["node"], instancetype)["value"]
+                    # if value_nodes_list["node"] not in exclude_nodes:
+                    """ TOTAL COUNT CPU and RAM allocate """
+                    if (instancetype == "all"):
+                        types = ["lxc", "qemu"]  # vz...
+                        for type in types:
+                            list_instances.update(
+                                proxmox.get_instance("{0}:{1}".format(cluster["url"], int(cluster["port"])),
+                                                     value_nodes_list["node"], type)["value"])
+                    else:
+                        list_instances = \
+                            proxmox.get_instance("{0}:{1}".format(cluster["url"], int(cluster["port"])),
+                                                 value_nodes_list["node"], instancetype)["value"]

-                        totalcpu = 0
-                        totalram = 0
-                        for key_list_instances, value_list_instances in list_instances.items():
-                            for instances in value_list_instances:
-                                totalcpu = totalcpu + instances["cpus"]
-                                totalram = totalram + instances["maxmem"]
+                    totalcpu = 0
+                    totalram = 0
+                    for key_list_instances, value_list_instances in list_instances.items():
+                        for instances in value_list_instances:
+                            """ Update cpu and ram for node """
+                            totalcpu = totalcpu + instances["cpus"]
+                            totalram = totalram + instances["maxmem"]

-                        value_nodes_list["totalalloccpu"] = totalcpu
-                        value_nodes_list["totalallocram"] = totalram
-                        value_nodes_list["vmcount"] = len(list_instances.items())
+                            """ Update instance list """
+                            instance["cluster"] = cluster["name"]
+                            instance["node"] = value_nodes_list["node"]
+                            instance["date"] = int(insert_time)
+                            self.mongo.insert_instance(instance)
+                            """
+                            # Check whether the instance already exists
+                            if not self.mongo.get_instance(instance["vmid"], instance["node"], instance["cluster"]):
+                                # If it does not exist, it is a manually created instance
+                                instance["commandid"] = "000000"
+                                self.mongo.insert_instance(instance)

+                            # If it already exists, update it:
+                            else:
+                                self.mongo.update_instance(instance, instance["vmid"], instance["node"], instance["cluster"])
+                            """

-                        percent_cpu_alloc = (totalcpu / value_nodes_list["maxcpu"]) * 100
-                        percent_ram_alloc = (totalram / value_nodes_list["mem"]) * 100
+                    value_nodes_list["totalalloccpu"] = totalcpu
+                    value_nodes_list["totalallocram"] = totalram
+                    value_nodes_list["vmcount"] = len(list_instances.items())

-                        """
-                        weight of node =
-                        (((Percent Alloc CPU x coef) + ( Percent Alloc RAM x coef)) / Total coef ) * Cluster weight
-                        """
-                        weight = (((percent_cpu_alloc * 2) + (percent_ram_alloc * 4)) / 6) * int(cluster["weight"])
+                    percent_cpu_alloc = (totalcpu / value_nodes_list["maxcpu"]) * 100
+                    percent_ram_alloc = (totalram / value_nodes_list["mem"]) * 100

-                        value_nodes_list["weight"] = int(weight)
-                        value_nodes_list["date"] = int(insert_time)
-                        value_nodes_list["cluster"] = cluster["name"]
+                    """
+                    weight of node =
+                    (((Percent Alloc CPU x coef) + ( Percent Alloc RAM x coef)) / Total coef ) * Cluster weight
+                    """
+                    weight = (((percent_cpu_alloc * 2) + (percent_ram_alloc * 4)) / 6) * int(cluster["weight"])

-                        self.mongo.insert_node(value_nodes_list)
+                    value_nodes_list["weight"] = int(weight)
+                    value_nodes_list["date"] = int(insert_time)
+                    value_nodes_list["cluster"] = cluster["name"]

+                    """ Mark the node as grata or non grata """
+                    if value_nodes_list["node"] in exclude_nodes:
+                        value_nodes_list["grata"] = 0
+                    else:
+                        value_nodes_list["grata"] = 1

+                    self.mongo.insert_node(value_nodes_list)

            else:
                print(nodes_list)
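A quick worked example of the node-weight formula above, with the coefficients used in the code (2 for CPU, 4 for RAM, total 6); the allocation percentages and cluster weight are invented for illustration.

```python
# Standalone restatement of the weight formula used in the Analyse loop above.
def node_weight(percent_cpu_alloc, percent_ram_alloc, cluster_weight):
    # (((CPU% * 2) + (RAM% * 4)) / 6) * cluster weight, truncated to int as in the code
    return int((((percent_cpu_alloc * 2) + (percent_ram_alloc * 4)) / 6) * cluster_weight)

# A node at 50% CPU and 75% RAM allocation on a cluster of weight 2:
print(node_weight(50, 75, 2))  # -> 133
```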
@@ -124,14 +149,14 @@ class Analyse:
        # Search the last valid key
        lastkeyvalid = self.mongo.get_last_datekey()

-        # Get nodes weight
-        nodes_availables = self.mongo.get_nodes_informations(int(lastkeyvalid["value"]))
+        # Get node weights (only nodes marked grata)
+        nodes_availables = self.mongo.get_nodes(int(lastkeyvalid["value"]), None, 1)

        if len(nodes_availables) > 1:
            # Select node name with weight
            nodes_values = {}
            for nodes in nodes_availables:
-                nodes_values[nodes["node"]] = nodes["weight"]
+                nodes_values[nodes["_id"]] = nodes["weight"]

            # Sort node by weight
            sorted_nodes = sorted(nodes_values.items(), key=operator.itemgetter(1))
@@ -159,6 +184,6 @@ class Analyse:
            final = {k: int(v) for k, v in zip(sorted_nodes_name, distrib_final)}

        else:
-            final = {nodes_availables[0]['node']: count}
+            final = {nodes_availables[0]['_id']: count}

        return final
@@ -1,76 +0,0 @@
"""
Author: Tlams
Language: Python
Minimum version required: 3.4

Module function:
Crawl all nodes to update the database statistics
"""

from core.modules.mod_proxmox import *
from core.modules.mod_database import *
from core.libs.hcrypt import *
import time
import base64


class Crawler:
    def __init__(self, clusters_conf, generalconf):
        """
        :param clusters_conf: Proxmox configurations
        :param generalconf : General configuration
        """
        self.generalconf = generalconf
        self.clusters_conf = clusters_conf

        """ LOAD MONGODB """
        self.mongo = MongoDB(generalconf["mongodb"]["ip"])
        self.mongo.client = self.mongo.connect()
        self.mongo.db = self.mongo.client.db

    """ Instance types available: lxc/qemu/all """
    def run(self, instancetype="lxc"):
        insert_time = time.time()

        self.mongo.insert_datekey(insert_time, 'running')

        for cluster in self.clusters_conf:
            """ Decode data """
            proxmox_cluster_user = pdecrypt(base64.b64decode(cluster["user"]),
                                            self.generalconf["keys"]["key_pvt"])["data"].decode('utf-8')

            proxmox_cluster_pwd = pdecrypt(base64.b64decode(cluster["password"]),
                                           self.generalconf["keys"]["key_pvt"])["data"].decode('utf-8')

            """ AUTH """
            proxmox = Proxmox("Analyse")
            proxmox.get_ticket("{0}:{1}".format(cluster["url"], int(cluster["port"])), proxmox_cluster_user, proxmox_cluster_pwd)

            """ UPDATE NODES LIST """
            nodes_list = proxmox.get_nodes("{0}:{1}".format(cluster["url"], int(cluster["port"])))
            if nodes_list["result"] == "OK":
                for value_nodes_list in nodes_list["value"]["data"]:
                    list_instances = ""
                    """ TOTAL COUNT CPU and RAM allocate """
                    if (instancetype == "all"):
                        types = ["lxc", "qemu"]  # vz...
                        for type in types:
                            list_instances.update(proxmox.get_instance("{0}:{1}".format(cluster["url"], int(cluster["port"])),
                                                                       value_nodes_list["node"], type)["value"])
                    else:
                        list_instances = proxmox.get_instance("{0}:{1}".format(cluster["url"], int(cluster["port"])),
                                                              value_nodes_list["node"], instancetype)["value"]

                    for key_list_instances, value_list_instances in list_instances.items():
                        for instance in value_list_instances:
                            instance["cluster"] = cluster["name"]
                            instance["node"] = value_nodes_list["node"]
                            # Check whether the instance already exists
                            if not self.mongo.get_instance(instance["vmid"], instance["node"], instance["cluster"]):
                                # If it does not exist, it is a manually created instance
                                instance["commandid"] = "000000"
                                self.mongo.insert_instance(instance)
                            # If it already exists, update it:
                            else:
                                self.mongo.update_instance(instance, instance["vmid"], instance["node"], instance["cluster"])
@@ -108,6 +108,8 @@ class MongoDB:
        except (TypeError, ValueError) as e:
            raise Exception("MongoDB authentication error on {0}:{1} ({2})".format(self.server, self.port, e))


    """ CLUSTER """
    def get_clusters_conf(self, cluster=None):
        try:
@@ -186,15 +188,7 @@ class MongoDB:
    def update_system_delete_ip(self, value):
        self.db[self.collection_system].update({'_id': "0"}, {'$pull': {'IP_free': value}}, upsert=False)

-    """ NODES MANAGEMENT"""
-    def insert_node(self, data):
-        return self.db[self.collection_nodes].insert(data)
-
-    def get_nodes_informations(self, time, node=None):
-        if node:
-            return json.loads(dumps(self.db[self.collection_nodes].find_one({'$and': [{'node': node, 'date': time}]})))
-        else:
-            return json.loads(dumps(self.db[self.collection_nodes].find({'date': time})))

    """ KEY DATE MANAGEMENT"""
    def insert_datekey(self, date, status):
@@ -207,10 +201,44 @@ class MongoDB:
        last_id = self.db[self.collection_datekey].find({'status': 'OK'}).sort("date", -1).limit(1)
        return {"value": int(json.loads(dumps(last_id))[0]['date'])}

    """ NODES MANAGEMENT"""
    def insert_node(self, data):
        try:
            return self.db[self.collection_nodes].insert(data)
        except BaseException as serr:
            raise Exception("MongoDB error on {0}:{1} ({2})".format(self.server, self.port, serr))

    def get_node(self, date, cluster, node, grata=None):
        try:
            if grata:
                return json.loads(
                    dumps(self.db[self.collection_nodes].find_one(
                        {'$and': [{'date': date, 'cluster': cluster, 'node': node, 'grata': 1}]})))
            else:
                return json.loads(
                    dumps(self.db[self.collection_nodes].find_one(
                        {'$and': [{'date': date, 'cluster': cluster, 'node': node}]})))
        except BaseException as serr:
            raise Exception("MongoDB error on {0}:{1} ({2})".format(self.server, self.port, serr))


    """ INSTANCE MANAGEMENT"""
    def insert_instance(self, data):
-        return self.db[self.collection_instance].insert(data)
+        try:
+            return self.db[self.collection_instance].insert(data)
+        except BaseException as serr:
+            raise Exception("MongoDB error on {0}:{1} ({2})".format(self.server, self.port, serr))

    # Review the multiplicity of instances/nodes
    def get_instance(self, date, cluster, node, vmid):
        try:
            return json.loads(dumps(
                self.db[self.collection_instance].find_one(
                    {'$and': [{"date": int(date), "cluster": cluster, "node": node, "vmid": int(vmid)}]})))
        except BaseException as serr:
            raise Exception("MongoDB error on {0}:{1} ({2})".format(self.server, self.port, serr))

    """
    def update_instance(self, data, vmid, node=None, cluster=None):
        if node and cluster:
            return self.db[self.collection_instance].update(
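For orientation, this is roughly the shape of the node documents that the Analyse loop writes and `get_node` reads back, based on the fields set in this commit; the concrete values are invented.

```python
# Example node document in the nodes collection (field names from the diff, values illustrative).
example_node = {
    "node": "node1",               # Proxmox node name
    "cluster": "cluster1",         # cluster name from the configuration
    "date": 1520000000,            # crawl timestamp key (int)
    "totalalloccpu": 24,           # CPUs allocated to instances on this node
    "totalallocram": 68719476736,  # RAM allocated to instances, in bytes
    "vmcount": 2,
    "weight": 133,                 # computed from allocation percentages and cluster weight
    "grata": 1,                    # 1 = schedulable, 0 = listed in exclude_nodes
}
```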
@@ -223,15 +251,4 @@ class MongoDB:
            self.db[self.collection_instance].remove({"vmid": int(vmid), "node": node, "cluster": cluster})
        else:
            self.db[self.collection_instance].remove({"_id": vmid})

-    def get_instance(self, vmid, node=None, cluster=None):
-        try:
-            if node and cluster:
-                return json.loads(dumps(
-                    self.db[self.collection_instance].find_one(
-                        {"vmid": int(vmid), "node": node, "cluster": cluster})))
-            else:
-                return json.loads(dumps(
-                    self.db[self.collection_instance].find_one({"_id": vmid})))
-        except BaseException as serr:
-            raise ("MongoDB error on {0}:{1} ({2})".format(self.server, self.port, serr))
+    """
@@ -2,6 +2,7 @@
Queue Management
"""


class Queue():
    def __init__(self):
        self.name = "command"
@@ -88,6 +88,19 @@ if __name__ == "__main__":
        # MANAGEMENT
-        '/api/v1/administration/cluster', 'Cluster',
+        '/api/v1/administration/cluster/new', 'Cluster',

+        # CACHE DATA (MONGO)
+        # date/cluster/node/vmid
+        '/api/v1/static/(instances|nodes|clusters)/([0-9]+)/([0-9a-zA-Z]+)/([0-9a-zA-Z]+)/([0-9]+)', 'QueryCache_Infra',
+        # date/cluster/node
+        # '/api/v1/static/nodes/([0-9]+)/([0-9a-zA-Z]+)/([0-9a-zA-Z]+)', 'Static_Nodes',
+        # cluster
+        # '/api/v1/static/clusters/([0-9]+)/([0-9a-zA-Z]+)', 'Static_Clusters',
+        # date
+        '/api/v1/static/dates', 'QueryCache_Dates',
+        # mongoid
+        '/api/v1/static/id/[a-z0-9]+', 'General_Search',

    )

    generalconf = {
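A hedged example of how the new cache route is meant to be queried; the host, port and path values are placeholders (they depend on how the web.py application is deployed), and `requests` is used only for brevity. The URL captures map onto `QueryCache_Infra.GET(dest, date, cluster, node, vmid)`, which dispatches through `core.generalquerycacheinfra`.

```python
import requests

# Placeholder base URL; adjust to the actual HyperProxmox API endpoint.
BASE = "http://127.0.0.1:8080"

# /api/v1/static/<dest>/<date>/<cluster>/<node>/<vmid> -> QueryCache_Infra.GET(...)
# The date key, cluster, node and vmid below are invented for illustration.
resp = requests.get(BASE + "/api/v1/static/instances/1520000000/cluster1/node1/100")
print(resp.status_code, resp.text)
```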
@@ -7,7 +7,9 @@
    "template": "local:vztmpl/debian-9.0-standard_9.0-2_amd64.tar.gz",
    "storage_disk": "disks",
    "exclude_nodes": [""],
-    "weight": 1
+    "weight": 1,
+    "groups" : ["group1", "group2..."],
+    "clients" : ["client1", "client2..."],
}
{
Loading…
Reference in a new issue