-
Notifications
You must be signed in to change notification settings - Fork 10
Expand file tree
/
Copy pathbuilder.py
More file actions
executable file
·378 lines (353 loc) · 20.4 KB
/
builder.py
File metadata and controls
executable file
·378 lines (353 loc) · 20.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
#!/usr/bin/env python3
from autobuild.utils import *
from autobuild.app import processcustom, processconfigs
from autobuild.utils.autoconfig import write_text_file, write_yaml_file, write_json_file, load_yaml_file, load_json_file, load_text_file
import autobuild.utils.autoconfig
import argparse, re, psutil, os, socket, logging
from datetime import datetime
from rich import print
from rich.table import Table
from rich import pretty
from dotenv import dotenv_values
from icecream import ic
import subprocess
# Names of the builder's on-disk state files.
KNOWN_HOSTS_FILE = '.known_hosts'
KNOWN_VOLUMES = '.known_volumes'
ENV_FILE = '.env'
CACHE = '.cache'
ABI = 'abi'
# Install rich's pretty-print hooks for nicer console output.
pretty.install()
xrouter_emoticon = ":twisted_rightwards_arrows:"
app_title = f'{xrouter_emoticon} [bold cyan]EXRPROXY-ENV[/bold cyan] [bold red]BUILDER[/bold red]'

# Snapshot system memory once and expose it as a plain dict.
_vm = psutil.virtual_memory()
virtual_memory = {
    'total': _vm.total,
    'used': _vm.used,
    'used%': _vm.percent,
    'free': _vm.total - _vm.used,
    'free%': 100 - _vm.percent,
}


def _fmt_ram_gb(num_bytes):
    # Render a byte count as whole gigabytes in the ".2f GB" style used by the table.
    return '{:.2f}GB'.format(round(num_bytes / (1024.0 **3)))


# Hardware summary table: one CPU row, one RAM row, one row per mounted disk.
hw_table = Table(title='', box=None)
hw_table.add_column('', justify='left', style='bold green', no_wrap=False)
for _header, _style in (
    ('Hardware', 'bold green'),
    ('Size', 'bold yellow'),
    ('Used', 'bold red'),
    ('%', 'bold red'),
    ('Free', 'bold green'),
    ('%', 'bold green'),
):
    hw_table.add_column(_header, justify='left', style=_style, no_wrap=True)
hw_table.add_row(app_title, 'CPU', str(psutil.cpu_count()))  # CPU cores
hw_table.add_row(
    '',
    'RAM',
    _fmt_ram_gb(virtual_memory['total']),
    _fmt_ram_gb(virtual_memory['used']),
    '{:.2f}%'.format(virtual_memory['used%']),
    _fmt_ram_gb(virtual_memory['free']),
    '{:.2f}%'.format(virtual_memory['free%']),
)
for _part in psutil.disk_partitions(all=False):
    _mp = _part.mountpoint
    if _mp.startswith('/snap/'):
        continue  # snap loop mounts are noise — skip them
    _usage = psutil.disk_usage(_mp)
    hw_table.add_row(
        _mp,
        'DISK',
        '{:.2f}GB'.format(_usage.total / (2**30)),
        '{:.2f}GB'.format(_usage.used / (2**30)),
        '{:.2f}%'.format(_usage.percent),
        '{:.2f}GB'.format(_usage.free / (2**30)),
        '{:.2f}%'.format(100-_usage.percent),
    )
# ---------------------------------------------------------------------------
# CLI arguments
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
# (flag, argparse kwargs) — tuple order matters: it fixes the --help listing.
_CLI_OPTIONS = (
    ('--nochecks', dict(help="Don't check docker requirements", action='store_true')),
    ('--noenv', dict(help="Don't check if .env file exists (only for advanced users)", action='store_true')),
    ('--deploy', dict(help='Autodeploy stack', default=False, action='store_true')),
    ('--prune', dict(help='Prune docker', default=False, action='store_true')),
    ('--source', dict(help='Source file', default='autobuild/sources.yaml')),
    ('--yaml', dict(help='Custom input yaml', default=False)),
    ('--branchpath', dict(default='https://raw.githubusercontent.com/blocknetdx/blockchain-configuration-files/master')),
    ('--xquerytag', dict(help="Override XQuery images tag", default='latest')),
    ('--prunecache', dict(help='Reinit .known_hosts, .known_volumes, .env and .cache files', action='store_true')),
    ('--subnet', dict(help='Subnet to configure docker-compose network', default="172.31.0.0/20")),
)
for _flag, _kwargs in _CLI_OPTIONS:
    parser.add_argument(_flag, **_kwargs)
args = parser.parse_args()

# Promote parsed options to the module-level constants used by the rest of the script.
SOURCE = args.source
YAML = args.yaml
CHECKS = args.nochecks  # True means *skip* the docker requirement checks
ENV = args.noenv  # True means *skip* the .env prompts
DEPLOY = args.deploy
PRUNE = args.prune
# Ensure the branch path ends with exactly one trailing slash.
BRANCHPATH = re.sub(r'(^(?!.*/$).*)', r'\1/', args.branchpath)
XQUERYTAG = args.xquerytag
PRUNE_CACHE = args.prunecache
SUBNET = args.subnet
# ---------------------------------------------------------------------------
# Cache bootstrap: (re)create the builder's state files, then load them.
# NOTE(review): `json` is not imported explicitly in this file — presumably it
# arrives via `from autobuild.utils import *`; confirm against autobuild.utils.
# ---------------------------------------------------------------------------
# Delete cache files when --prunecache was given.
if PRUNE_CACHE:
    for stale in (KNOWN_HOSTS_FILE, KNOWN_VOLUMES, ENV_FILE, CACHE):
        # Guard each removal: the previous unconditional os.remove() raised
        # FileNotFoundError and aborted the prune if any file was missing.
        if os.path.exists(stale):
            os.remove(stale)
# Create .env with empty placeholders for the operator to fill in.
if ENV_FILE not in os.listdir(os.getcwd()):
    data = "PUBLIC_IP=\nSN_NAME=\nSN_KEY=\nSN_ADDRESS=\nRPC_USER=\nRPC_PASSWORD="
    write_text_file(ENV_FILE, data)
# Create .known_hosts (maps EVM chain name -> list of remembered external hosts).
if KNOWN_HOSTS_FILE not in os.listdir(os.getcwd()):
    data = {'hosts': {}}
    write_text_file(KNOWN_HOSTS_FILE, json.dumps(data, indent=4, sort_keys=False))
# Create .known_volumes (maps daemon name -> chosen install/volume path).
if KNOWN_VOLUMES not in os.listdir(os.getcwd()):
    data = {'volumes': {}}
    write_text_file(KNOWN_VOLUMES, json.dumps(data, indent=4, sort_keys=False))
# Create .cache (remembers previous selections/prices so prompts default sensibly).
if CACHE not in os.listdir(os.getcwd()):
    data = {'version': '1', 'ticks': [], 'payment_xquery': None, 'payment_tier1': None, 'payment_tier2': None, 'discount_ablock': None, 'discount_aablock': None, 'discount_sysblock': None}
    write_text_file(CACHE, json.dumps(data, indent=4, sort_keys=False))
# Load config files
dirname = os.path.basename(os.getcwd())
print(f'Working Directory [bold red]{os.getcwd()}[/bold red]')
source = load_yaml_file(SOURCE)
known_hosts = json.loads(load_text_file(KNOWN_HOSTS_FILE))
known_volumes = json.loads(load_text_file(KNOWN_VOLUMES))
cache = json.loads(load_text_file(CACHE))
# Upgrade older caches in place so later code can rely on these keys existing.
# NOTE(review): a freshly created cache stores version as the string '1' while
# this upgrade path stores the int 1 — kept as-is to preserve behaviour;
# confirm nothing compares the version value strictly.
cache.setdefault('version', 1)
cache.setdefault('discount_sysblock', None)
cache.setdefault('payment_xquery', None)
if __name__ == '__main__':
    # Interactive builder entry point: print the hardware summary, then either
    # walk the operator through chain/app selection (no --yaml) or consume a
    # prebuilt input YAML, and finally render the docker-compose configuration.
    # NOTE(review): indentation of this block was reconstructed during review;
    # Snode, input_template and input_template_args are not defined in this
    # file — presumably exported by `from autobuild.utils import *`; confirm.
    print(hw_table)
    print()
    # Create Snode instance
    snode = Snode(dirname, ENV_FILE)
    try:
        # Prune docker and exit
        if PRUNE:
            print(f"[bold magenta]{'-'*50}[/bold magenta]")
            snode.docker_prune()
        # Requirements checks (skipped when --nochecks was given)
        if not CHECKS:
            print(f"[bold magenta]{'-'*50}[/bold magenta]")
            snode.checks()
        # Set env vars (skipped when --noenv was given)
        if not ENV:
            print(f"[bold magenta]{'-'*50}[/bold magenta]")
            snode.env_vars()
        dot_env_values = dotenv_values(".env")
        if not YAML:
            # Get sudo pass
            print(f"[bold magenta]{'-'*50}[/bold magenta]")
            snode.get_sudo()
            # Parse sources.yaml categories
            base = [x for x in source if x['type']=='base']
            snode_in_base = 'SNODE' in [x['name'] for x in base] # set this flag true if SNODE deployed (not TNODE, TESTSNODE or TESTTNODE)
            chains = [x for x in source if x['type'] in ['chain','hybrid']]
            utxo_plugins = [x for x in source if x['type'] == 'utxo_plugin']
            syschain = [x for x in chains if x['name']=='SYS']
            evm_chains = [x for x in source if x['type']=='evm_chain']
            apps = [x for x in source if x['type']=='app']
            apps_deployed = []
            print(f"[bold magenta]{'-'*50}[/bold magenta]")
            # Start inquirer; BLOCK is always included for the UTXO plugin
            utxo_plugins_todeploy = ['BLOCK']
            chains_todeploy = snode.inquirer.pick_checkbox("What chains for XBridge do you wish to support?",[{'name':f"{str(x['name']).ljust(5,' ')} | RAM {str(x['ram']).ljust(4,' ')} GB | CPU {str(x['cpu']).ljust(4,' ')} Cores | DISK {str(x['disk']).ljust(6,' ')} GB | {x['volume'] if x['name'] not in known_volumes['volumes'].keys() else known_volumes['volumes'][x['name']]}",'checked':True if x['name'] in cache['ticks'] else False} for x in chains])
            for cd in chains_todeploy:
                cd = cd.split(' ')[0]  # the ticker is the first token of the menu label
                for c in chains:
                    if cd == c['name']:
                        # Prefer a previously chosen install location, if any
                        if c['name'] in known_volumes['volumes'].keys():
                            c['volume'] = known_volumes['volumes'][c['name']]
                        input_template[0]['daemons'].append(c)
                        if c['name'] in snode.supported_utxo_plugin_chains and c['name'] not in utxo_plugins[0]['exclude_chains']:
                            utxo_plugins_todeploy.append(c['name'])
            print(f"[bold magenta]{'-'*50}[/bold magenta]")
            evm_chains_todeploy = snode.inquirer.pick_checkbox("What EVM chains do you wish to support?",[{'name':f"{str(x['name']).ljust(4,' ')} | RAM {str(x['ram']).ljust(4,' ')} GB | CPU {str(x['cpu']).ljust(4,' ')} Cores | DISK {str(x['disk']).ljust(6,' ')} GB | {x['volume'] if x['name'] not in known_volumes['volumes'].keys() else known_volumes['volumes'][x['name']]}",'checked':True if x['name'] in cache['ticks'] else False} for x in evm_chains])
            for evcd in evm_chains_todeploy:
                evcd = evcd.split(' ')[0]
                for evc in evm_chains:
                    if evcd == evc['name']:
                        if evcd in known_hosts['hosts'].keys():
                            # Known chain: offer the remembered external hosts first
                            hosts = known_hosts['hosts'][evcd]
                            location = snode.inquirer.pick_one(f'Select one host for {evcd}',hosts+['New external host','Internally'])
                            if location == 'New external host':
                                location = snode.inquirer.get_input(f"Press enter external IP Address for {evcd}:")
                                if location not in ['',' '] and location not in hosts:
                                    known_hosts['hosts'][evcd].append(location)
                        else:
                            location = snode.inquirer.get_input(f"Press enter to use {evcd} internally or type external IP Address:")
                        if location not in ['',' ','Internally']:
                            # External host chosen: remember it and add a host-only daemon entry
                            external = {"name":evcd,"type":"evm_chain","host":location}
                            if not evcd in known_hosts['hosts'].keys():
                                known_hosts['hosts'][evcd] = [location]
                            elif location not in known_hosts['hosts'][evcd]:
                                known_hosts['hosts'][evcd].append(location)
                            input_template[0]['daemons'].append(external)
                        else:
                            # Run the chain internally (inside the stack)
                            if evcd == 'AVAX':
                                evc['public_ip'] = socket.gethostbyname(dot_env_values['PUBLIC_IP'])
                            if evcd in known_volumes['volumes'].keys():
                                evc['volume'] = known_volumes['volumes'][evcd]
                            input_template[0]['daemons'].append(evc)
            write_text_file(KNOWN_HOSTS_FILE,json.dumps(known_hosts, indent=4, sort_keys=False))
            print(f"[bold magenta]{'-'*50}[/bold magenta]")
            eth_deployed_hydra = False
            if len(evm_chains_todeploy)>0:
                # Offer the EVM-backed apps (HYDRA / XQUERY) for the chosen chains
                for app in apps:
                    name = app['name']
                    app_check = snode.inquirer.ask_question(f"Do you wish to support {app['name']} | RAM {app['ram']} GB | CPU {app['cpu']} Cores | DISK {app['disk']} GB ?", default=True if app['name'] in cache['ticks'] else False)
                    if app_check == True:
                        app_chains = snode.inquirer.pick_checkbox(f"Select EVM chains to attach {name} to:",[{'name':x.split(' ')[0],'checked':True if name in cache['ticks'] else False} for x in evm_chains_todeploy])
                        # app_chains = [x.split(' ')[0] for x in app_chains]
                        if len(app_chains)==0:
                            print(f'{name} ignored... No selection.')
                        else:
                            if name == 'HYDRA':
                                if 'ETH' in app_chains:
                                    eth_deployed_hydra = True  # tier2 pricing only applies when HYDRA serves ETH
                                hydra_config = {'name':name,'free':False, 'type':app['type'], 'chains':[{'name':x} for x in app_chains]}
                                # free_access = snode.inquirer.ask_question(f"Do you wish to support FREE access to {name}?",default=False)
                                # if free_access != True:
                                #     hydra_config['free'] = True
                                apps_deployed.append('HYDRA')
                                input_template[0]['daemons'].append(hydra_config)
                            if name == 'XQUERY':
                                indices = []
                                for evm_chain in app_chains:
                                    if len([x for x in app['dexs'] if evm_chain in x['name']]) > 0:
                                        indexers = snode.inquirer.pick_checkbox(f'Select which indices you want for {evm_chain}:',[{'name':'_'.join(x['name'].split('_')[1::]),'checked':True if x['name'] in cache['ticks'] else False} for x in app['dexs'] if evm_chain in x['name']])
                                    else:
                                        indexers = []
                                    if len(indexers) == 0:
                                        print(f'{evm_chain} ignored for {name}...No selection.')
                                    else:
                                        for index in app['dexs']:
                                            for i in indexers:
                                                if i in index['name']:
                                                    indices.append(index)
                                if len(indices) == 0:
                                    print(f'{name} ignored... No selection.')
                                else:
                                    app['dexs'] = indices
                                    if name in known_volumes['volumes'].keys():
                                        app['volume'] = known_volumes['volumes'][name]
                                    apps_deployed.append('XQUERY')
                                    input_template[0]['daemons'].append(app)
            if len(apps_deployed) == 0:
                # No HYDRA/XQUERY configured: the EVM chains serve no purpose, drop them.
                # NOTE(review): removes from the list while iterating it — appears safe
                # here because each EVM name matches at most one entry, but fragile;
                # verify before touching.
                print('[bold red]No EVM chain app configured. Removing EVM chains...[/bold red]')
                for evm in evm_chains:
                    for entry in input_template[0]['daemons']:
                        if entry['name'] == evm['name']:
                            input_template[0]['daemons'].remove(entry)
                            print(f'Removed [bold red]{evm["name"]}[/bold red]')
            for b in base:
                if b['name'] == 'PAYMENT' and len(apps_deployed) > 0:
                    print(f"[bold magenta]{'-'*50}[/bold magenta]")
                    # Seed the prompts with previously cached prices/discounts
                    if cache["payment_xquery"] != None: b['payment_xquery'] = cache["payment_xquery"]
                    if cache["payment_tier1"] != None: b['payment_tier1'] = cache["payment_tier1"]
                    if cache["payment_tier2"] != None: b['payment_tier2'] = cache["payment_tier2"]
                    if cache["discount_ablock"] != None: b['discount_ablock'] = cache["discount_ablock"]
                    if cache["discount_aablock"] != None: b['discount_aablock'] = cache["discount_aablock"]
                    if cache["discount_sysblock"] != None: b['discount_sysblock'] = cache["discount_sysblock"]
                    if 'XQUERY' in apps_deployed:
                        xquery = snode.inquirer.get_input(f'Press enter to charge USD ${b["payment_xquery"]} for 6,000,000 XQuery API calls or type a new USD price:')
                        b['payment_xquery'] = float(xquery) if xquery !='' else b["payment_xquery"]
                    else:
                        b['payment_xquery'] = -1  # -1 marks "service not deployed"
                    if 'HYDRA' in apps_deployed:
                        tier1 = snode.inquirer.get_input(f'Press enter to charge USD ${b["payment_tier1"]} for 6,000,000 Hydra tier1 API calls or type a new USD price:')
                        b['payment_tier1'] = float(tier1) if tier1 !='' else b["payment_tier1"]
                        if eth_deployed_hydra:
                            tier2 = snode.inquirer.get_input(f'Press enter to charge USD ${b["payment_tier2"]} for 6,000,000 Hydra tier2 API calls or type a new USD price:')
                            b['payment_tier2'] = float(tier2) if tier2 !='' else b["payment_tier2"]
                        else:
                            b['payment_tier2'] = -1
                    else:
                        b['payment_tier1'] = -1
                        b['payment_tier2'] = -1
                    ablock_discount = snode.inquirer.get_input(f'Press enter for {b["discount_ablock"]}% aBLOCK discount or type a new discount (e.g. 15 for 15% aBLOCK discount):')
                    b['discount_ablock'] = float(ablock_discount) if ablock_discount !='' else b["discount_ablock"]
                    aablock_discount = snode.inquirer.get_input(f'Press enter for {b["discount_aablock"]}% aaBLOCK discount or type a new discount (e.g. 15 for 15% aaBLOCK discount):')
                    b['discount_aablock'] = float(aablock_discount) if aablock_discount !='' else b["discount_aablock"]
                    sysblock_discount = snode.inquirer.get_input(f'Press enter for {b["discount_sysblock"]}% sysBLOCK discount or type a new discount (e.g. 15 for 15% sysBLOCK discount):')
                    b['discount_sysblock'] = float(sysblock_discount) if sysblock_discount !='' else b["discount_sysblock"]
                if b['name'] in known_volumes['volumes'].keys():
                    b['volume'] = known_volumes['volumes'][b['name']]
                # PAYMENT is only deployed when at least one app needs it
                if b['name'] != 'PAYMENT' or len(apps_deployed) > 0:
                    input_template[0]['daemons'].append(b)
            if snode_in_base: # this flag true if SNODE deployed (not TNODE, TESTSNODE or TESTTNODE)
                # Add support for utxo plugins
                # This section must be AFTER SNODE is appended to input_template[0]['daemons'] so SNODE container gets assigned an IP address BEFORE utxo plugin containers are constructed
                print(f"[bold magenta]{'-'*50}[/bold magenta]")
                utxo_plugins_check = snode.inquirer.ask_question(f"Do you wish to support {utxo_plugins[0]['name']} | RAM {utxo_plugins[0]['ram']} GB | CPU {utxo_plugins[0]['cpu']} Cores | DISK {utxo_plugins[0]['disk']} GB ?", default=True if utxo_plugins[0]['name'] in cache['ticks'] else False)
                if utxo_plugins_check:
                    if utxo_plugins[0]['name'] in known_volumes['volumes'].keys():
                        utxo_plugins[0]['volume'] = known_volumes['volumes'][utxo_plugins[0]['name']]
                    utxo_plugins[0]['chains'] = [{'name':x} for x in utxo_plugins_todeploy]
                    print("Deploying ", utxo_plugins[0]['name'], " support for the following chains: ", utxo_plugins_todeploy)
                    input_template[0]['daemons'].append(utxo_plugins[0])
            print(f"[bold magenta]{'-'*50}[/bold magenta]")
            # Optionally let the operator relocate any daemon's data volume
            answer = snode.inquirer.ask_question('Do you wish to change install locations?', default=False)
            if answer == True:
                volumes = [{'name':x['name'],'volume':x['volume'],'disk':x['disk']} for x in input_template[0]['daemons'] if 'volume' in x.keys()]
                answer = snode.inquirer.pick_checkbox('Select to which entries you wish to change install location:',[{'name':f"{x['name']} {x['volume']}",'checked':False} for x in volumes])
                if len(answer) == 0:
                    print(f'Location change ignored... No selection.')
                    snode.comparedisksize([x['name'] for x in input_template[0]['daemons'] if 'volume' in x.keys()], [x['volume'] for x in input_template[0]['daemons'] if 'volume' in x.keys()], [x['disk'] for x in input_template[0]['daemons'] if 'volume' in x.keys()])
                else:
                    for entry in answer:
                        not_abs_path = False
                        entry_answer = ''
                        # Re-prompt until an absolute path (or empty = keep current) is given
                        while not not_abs_path:
                            entry_answer = snode.inquirer.get_input(f'Press enter to confirm {entry} or type a new absolute path:')
                            if entry_answer == '':
                                not_abs_path = True
                            elif os.path.isabs(entry_answer) == True:
                                not_abs_path = True
                            else:
                                print(f'[bold red]{entry_answer}[/bold red] is not a [bold yellow]absolute path[/bold yellow]... try again')
                        for i, e in enumerate(input_template[0]['daemons']):
                            if e['name'] == entry.split(' ')[0]:
                                if entry_answer == '':
                                    entry_answer = input_template[0]['daemons'][i]['volume']
                                input_template[0]['daemons'][i]['volume'] = entry_answer
                                known_volumes['volumes'][e['name']] = entry_answer
                                snode.comparedisksize([entry.split(' ')[0]], [entry_answer], [input_template[0]['daemons'][i]['disk']])
                    print('[bold yellow]Updated install locations[/bold yellow]')
                    snode.comparedisksize([x['name'] for x in input_template[0]['daemons'] if 'volume' in x.keys()], [x['volume'] for x in input_template[0]['daemons'] if 'volume' in x.keys()], [x['disk'] for x in input_template[0]['daemons'] if 'volume' in x.keys()])
                    # Per-volume total: sum the disk requirements sharing each mount
                    used_volumes = set([x['volume'] for x in input_template[0]['daemons'] if 'volume' in x.keys()])
                    for volume in used_volumes:
                        required = sum([x['disk'] for x in input_template[0]['daemons'] if 'volume' in x.keys() and x['volume']==volume])
                        snode.comparedisksize(['TOTAL'], [volume], [required])
            else:
                print('[bold yellow]Updated install locations[/bold yellow]')
                snode.comparedisksize([x['name'] for x in input_template[0]['daemons'] if 'volume' in x.keys()], [x['volume'] for x in input_template[0]['daemons'] if 'volume' in x.keys()], [x['disk'] for x in input_template[0]['daemons'] if 'volume' in x.keys()])
                used_volumes = set([x['volume'] for x in input_template[0]['daemons'] if 'volume' in x.keys()])
                for volume in used_volumes:
                    required = sum([x['disk'] for x in input_template[0]['daemons'] if 'volume' in x.keys() and x['volume']==volume])
                    snode.comparedisksize(['TOTAL'], [volume], [required])
            write_text_file(KNOWN_VOLUMES,json.dumps(known_volumes, indent=4, sort_keys=False))
            print(f"[bold magenta]{'-'*50}[/bold magenta]")
            # Save the assembled selection as a reusable input YAML
            now = datetime.now().strftime("%d-%m-%Y-%H:%M")
            config_name = snode.inquirer.get_input(f"Press enter to save config as {now} or enter name:")
            for app in input_template[0]['daemons']:
                # Resource-sizing keys were only needed for the prompts above
                for to_del in ['disk','ram','cpu']:
                    if to_del in app.keys():
                        del app[to_del]
            if config_name == '':
                write_yaml_file(f'inputs_yaml/{now}.yaml',input_template)
            else:
                write_yaml_file(f'inputs_yaml/{config_name}.yaml',input_template)
            # Rebuild the cache from what was actually selected this run
            cache = {'ticks':[],'payment_xquery':None,'payment_tier1':None,'payment_tier2':None,'discount_ablock':None,'discount_aablock':None,'discount_sysblock':None,'version':'1'}
            for daemon in input_template[0]['daemons']:
                cache['ticks'].append(daemon['name'])
                if daemon['name'] == 'PAYMENT':
                    # -1 means "not deployed" — stored back as None
                    cache['payment_xquery'] = daemon['payment_xquery'] if daemon['payment_xquery'] >= 0 else None
                    cache['payment_tier1'] = daemon['payment_tier1'] if daemon['payment_tier1'] >= 0 else None
                    cache['payment_tier2'] = daemon['payment_tier2'] if daemon['payment_tier2'] >= 0 else None
                    cache['discount_ablock'] = daemon['discount_ablock']
                    cache['discount_aablock'] = daemon['discount_aablock']
                    cache['discount_sysblock'] = daemon['discount_sysblock']
                if daemon['name'] == 'XQUERY':
                    for index in daemon['dexs']:
                        cache['ticks'].append(index['name'])
            write_text_file(CACHE,json.dumps(cache, indent=4, sort_keys=False))
        else:
            # Parse input yaml if given as CLI arg
            input_template = load_yaml_file(YAML)
            if input_template == "ERROR":
                print(f"Error loading {YAML}")
                # NOTE(review): exits with status 0 even though this is an error
                # path; also `sys` is not imported explicitly here — presumably
                # star-imported from autobuild.utils. Confirm both.
                sys.exit(0)
            input_template_args[0]['daemons'] = input_template[0]['daemons']
        # Common path: stamp runtime options and render the compose configs
        input_template_args[0]['xquery_tag'] = XQUERYTAG
        input_template_args[0]['subnet'] = SUBNET
        data_with_ips = processcustom(input_template_args, SUBNET, BRANCHPATH)
        processconfigs(data_with_ips, BRANCHPATH)
        # Deploy snode config
        if DEPLOY:
            snode.deploy()
        # NOTE(review): atexit placed on the normal path (not only under DEPLOY),
        # matching the cleanup calls in both except handlers — confirm against
        # the original indentation.
        snode.atexit()
    except Exception as e:
        logging.critical('Exception:',exc_info=True)
        snode.atexit()
    except KeyboardInterrupt:
        snode.atexit()