-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathpedibus_multi_solve.py
633 lines (489 loc) · 14.2 KB
/
pedibus_multi_solve.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
import numpy as np
import time
import math
import copy
import pprint as pp
import itertools
import operator
import threading
from itertools import chain
from collections import defaultdict
# Wall-clock reference used to report clustering/solving times at the end of the script.
start = time.time()
class SolverThread (threading.Thread):
    """Worker thread that solves the pedibus tree starting from one seed path.

    Each thread works on its own (deep-copied) clusters dictionary and, when
    done, publishes its solution into the shared ``solutions_list`` while
    holding ``threadLock``.
    """

    def __init__(self, clustersDict, first_path, threadCount):
        threading.Thread.__init__(self)
        self.first_path = first_path
        self.clusters = clustersDict
        self.threadCount = threadCount
        self.solution = []

    def run(self):
        # Solve greedily from the committed first path, then publish.
        self.solution = solve_thread_run(self.clusters, self.first_path, self.threadCount)
        with threadLock:
            solutions_list.append(self.solution)
############## FUNCTION DECLARATION ##############
#Parse the .dat file; note it returns 5 values, costs is a matrix with all the costs
def parse_dat_file(dat_file):
    """Parse a pedibus ``.dat`` instance file.

    Fills the module-level ``coord_x`` / ``coord_y`` dictionaries as a side
    effect and returns a 5-tuple:

        n      -- number of nodes (school/depot 0 excluded)
        ALPHA  -- path-length multiplier bound
        coord  -- dict node -> [x, y]
        danger -- (n+1)x(n+1) danger matrix read from the file
        costs  -- (n+1)x(n+1) Euclidean distance matrix (4-decimal rounded)

    BUGFIX: the original ignored its ``dat_file`` argument and read the
    module-level ``file`` variable instead; it now uses the argument.
    """
    file_dat = np.genfromtxt(dat_file, delimiter='\n', dtype=None)
    n = int(file_dat[1][11:])        # "param n :=" line
    ALPHA = float(file_dat[3][15:])  # "param alpha :=" line
    file_dat = file_dat[5:]
    raw_x = []
    raw_y = []
    raw_d = []
    costs = []
    # Section flags start False so rows before the first header are skipped
    # (the original left them unbound, risking a NameError on odd input).
    isX = isY = isD = False
    # Split coord x rows into raw_x, same for y and for the danger matrix d.
    for row in file_dat:
        if "coordX" in row:
            isX, isY, isD = True, False, False
        if "coordY" in row:
            isX, isY, isD = False, True, False
        if "d [*,*]" in row:
            # isX is already False by now in well-formed files.
            isY, isD = False, True
        if isX:
            raw_x.append(" ".join(row.split()))
        if isY:
            raw_y.append(" ".join(row.split()))
        if isD:
            raw_d.append(" ".join(row.split()))
    # Drop section header rows and the trailing ';' rows.
    raw_x.pop(0)
    raw_x.pop(len(raw_x) - 1)
    raw_y.pop(0)
    raw_y.pop(len(raw_y) - 1)
    raw_d.pop(0)
    raw_d.pop(0)
    raw_d.pop(len(raw_d) - 1)
    raw_d = ' '.join(raw_d)
    raw_d = raw_d.split(' ')
    raw_x = ' '.join(raw_x)
    raw_x = raw_x.split(' ')
    raw_y = ' '.join(raw_y)
    raw_y = raw_y.split(' ')
    # raw_x alternates index/value tokens: even positions carry the key.
    i = 0
    for column in raw_x:
        if i % 2 == 0:
            even = int(column)
        else:
            coord_x[even] = int(column)
        i = i + 1
    # Same alternation for the y coordinates.
    i = 0
    for column in raw_y:
        if i % 2 == 0:
            even = int(column)
        else:
            coord_y[even] = int(column)
        i = i + 1
    # Rebuild the danger matrix: each file row has n+2 tokens (row index +
    # n+1 values); the leading index token is skipped by the modulo test.
    row = []
    danger = []
    for i in range(0, len(raw_d) + 1):
        if (i % (n + 2)) != 0:
            row.append(float(raw_d[i]))
        else:
            if i != 0:
                danger.append(row)
            row = []
    # Symmetric Euclidean cost matrix, rounded to 4 decimal places.
    costs = [[0] * (n + 1) for _ in range(n + 1)]
    for i in range(0, n + 1):
        for j in range(0, n + 1):
            costs[i][j] = float("{0:.4f}".format(math.sqrt((coord_x[i] - coord_x[j]) ** 2 + (coord_y[i] - coord_y[j]) ** 2)))
    # Merge the two coordinate dictionaries into node -> [x, y].
    coord = defaultdict(list)
    for k, v in chain(coord_x.items(), coord_y.items()):
        coord[k].append(v)
    return n, ALPHA, coord, danger, costs
#Euclidean distance between two nodes
def node_dist(index_1, index_2):
    """Return the Euclidean distance between two nodes of the global ``node`` map."""
    dx = node[index_1][0] - node[index_2][0]
    dy = node[index_1][1] - node[index_2][1]
    return math.sqrt(dx * dx + dy * dy)
#build a dictionary with each node's distance to every other node
def node_distance():
    """Populate and return ``neighbor``: node -> {other_node: distance}.

    Reuses (and clears) the shared ``distance`` scratch dict each pass, as
    the rest of the module expects.
    """
    for src in node:
        distance.clear()
        for dst in node:
            if src != dst:
                distance[dst] = node_dist(src, dst)
        neighbor[src] = distance.copy()
    return neighbor
#seed the solution tree with the trivial base solution
def create_starting_solution():
    """Fill ``tree`` with the trivial solution: one direct path [i, 0] per node."""
    for leaf in range(1, n + 1):
        tree[leaf].extend([leaf, 0])
    return tree
#delete the node myNode from every path of the tree
def delete_node(myNode):
    """Remove the first occurrence of ``myNode`` from each path in ``tree``."""
    for key in range(1, len(tree) + 1):
        path = tree[key]
        if myNode in path:
            path.remove(myNode)
#remove entries whose path contains only the school (node 0)
def remove_zero_path(my_dict):
    """Delete every entry whose path has shrunk to a single element.

    BUGFIX: the original deleted at most one (the last-found) such entry and
    assumed keys were contiguous 1..len(my_dict), which breaks after any
    earlier deletion.  Now every length-1 entry is removed, iterating over a
    snapshot of the keys.  Mutates and returns ``my_dict``.
    """
    for key in list(my_dict.keys()):
        if len(my_dict[key]) == 1:
            del my_dict[key]
    return my_dict
#check the alpha condition (Python 2 print statements kept as-is)
def check_alpha(my_path, new_node):
    """Return True iff walking my_path and then new_node's leg to the school
    (node 0) stays within ALPHA times new_node's direct distance to node 0.
    """
    tot_dist = 0 #initialise the total distance to zero
    times_alpha = ALPHA*neighbor[new_node][0] #ALPHA times new_node's distance from node 0
    print "blblba ",times_alpha, neighbor[new_node][0]
    # Sum the consecutive legs of the existing path.
    for i in range (len(my_path)-1):
        tot_dist = tot_dist + node_dist(my_path[i], my_path[i+1])
    print "\nDistanza totale path: ", tot_dist + neighbor[new_node][0] #NOTE: this adds new_node's distance to node 0
    if tot_dist + neighbor[new_node][0] <= times_alpha:
        print "true"
        return True
    else:
        print "false"
        return False
def validate_path(path):
    """Check that ``path`` (ending at the school, node 0) is admissible.

    Two conditions must both hold:
      * the total walked length never exceeds ALPHA times the direct
        distance from path[0] to the final node;
      * when the path has more than two nodes, its tail path[1:] must
        already be stored in the cluster of the tail's first node (so only
        extensions of known-good paths survive).
    """
    budget = costs[path[0]][path[-1]] * ALPHA
    walked = 0
    for a, b in zip(path, path[1:]):
        walked += costs[a][b]
        if walked > budget:
            return False
    if len(path) > 2:
        tail = path[1:]
        head_cluster = clusters[tail[0]]
        # A tail of k nodes needs k-1 hops, stored at depth index k-2.
        if len(head_cluster) < len(tail) - 1 or tail not in head_cluster[len(tail) - 2]:
            return False
    return True
def is_reachable(center_node, other_node):
    """True iff a detour through other_node on the way from center_node to
    the school (node 0) fits center_node's ALPHA distance budget."""
    direct = costs[center_node][0]
    detour = costs[center_node][other_node] + costs[other_node][0]
    return detour <= direct * ALPHA
def compareLists(l1, l2):
    """Return True iff ``l2`` occurs as an element of list ``l1``.

    BUGFIX: the original body referenced undefined names ``array`` and
    ``element`` (copy-paste residue) and raised NameError on any call.  The
    scan now uses the actual parameters.  Semantics inferred from the body's
    shape (linear membership scan) -- TODO confirm against intended use.
    """
    for item in l1:
        if item == l2:
            return True
    return False
def clusterize(center_node, depth):
    """Build the depth-level path list for center_node's cluster.

    Extends every (depth-1)-level path by inserting one reachable node in
    second position, keeping only the ALPHA-valid results (validate_path).
    Returns the list of new paths, or [] when the cluster is complete or
    gets truncated (in which case the node is marked complete).
    """
    paths = []
    node_cluster = clusters[center_node]
    actual_depth = len(node_cluster)
    #if the cluster is complete, bail out
    if(center_node in complete_clusters):
        print "salto nodo ", center_node
        return paths
    #if the previous level does not exist, truncate and bail out
    if(actual_depth <= depth-1):
        print "tronco cluster per nodo", center_node
        complete_clusters.append(center_node);
        cluster_depth[center_node]=actual_depth
        return paths
    for i in range (0,len(clusters[center_node][depth-1])):
        old_path = clusters[center_node][depth-1][i]
        for j in range (0,len(reachables[center_node])):
            new_node = reachables[center_node][j]
            if(not new_node in old_path):
                #insert new_node into old_path in second position
                new_path = copy.copy(old_path);
                new_path.insert(1,new_node);
                if(validate_path(new_path)):
                    paths.append(new_path)
    return paths
def init_reachables(center_node):
    """Return the list of nodes (other than center_node) reachable within
    center_node's ALPHA budget.

    BUGFIX: the original scanned ``range(1, n)`` and silently excluded node
    n; every other loop in this module covers nodes 1..n inclusive.
    """
    node_list = []
    for candidate in range(1, n + 1):
        if candidate != center_node and is_reachable(center_node, candidate):
            node_list.append(candidate)
    return node_list
def init_cluster(center_node):
    """Create the depth-0 cluster ``{0: [[center_node, 0]]}`` for a node and
    reset its recorded maximum depth to MAX_DEPTH-1."""
    cluster_depth[center_node] = MAX_DEPTH - 1
    return {0: [[center_node, 0]]}
"""
def init_risk(center_node):
riskZero = {};
risk_lisk = [];
"""
def generate_cluster(depth):
    """Populate level ``depth`` of every node's cluster (nodes 1..n).

    A level is stored only when clusterize produced at least one path.
    """
    for center in range(1, n + 1):
        new_paths = clusterize(center, depth)
        if new_paths:
            clusters[center][depth] = new_paths
def solve_tree_multithread():
    """Spawn one SolverThread per candidate seed path, deepest levels first.

    Walks cluster depths from MAX_DEPTH-1 down to 0 and, for every stored
    path, starts a thread that solves the remaining tree greedily after
    committing to that path.  Threads are recorded in the global ``threads``
    list.  NOTE(review): the ``break`` below only exits the innermost path
    loop, so more than MAX_THREADS threads can be started before the outer
    while condition is re-checked -- confirm this is acceptable.
    """
    i=MAX_DEPTH-1;
    threadCount = 0;
    while (i>=0 and threadCount<=MAX_THREADS):
        #for every cluster
        for j in range (1,n+1):
            #look for the cluster of depth i
            #if it exists
            if(cluster_depth[j]>i):
                #take the array of paths
                pathList = clusters[j][i]
                #if it exists
                if(pathList):
                    #each stored path seeds its own solver thread
                    for path in pathList:
                        # deep copy: every thread mutates a private clusters dict
                        clusters_dict = copy.deepcopy(clusters)
                        solvingThread = SolverThread(clusters_dict,path,threadCount)
                        solvingThread.start()
                        threadCount=threadCount+1
                        threads.append(solvingThread)
                        if(threadCount>=MAX_THREADS):
                            break
        #print "\nClusters - SOLVE ITERATION ",MAX_DEPTH-i
        #pp.pprint(clusters)
        i=i-1
    threadLock.acquire()
    print "THREAD COUNT: ",threadCount
    threadLock.release()
def solve_thread_run(clusters_dict, first_path, threadCount):
    """Greedy solve executed inside each SolverThread.

    Commits to ``first_path``, strips its nodes from the thread-private
    ``clusters_dict``, then walks cluster levels from MAX_DEPTH-1 down to 0,
    always taking the first remaining path of each node's cluster and
    removing its nodes as it goes.  Returns the list of chosen paths
    (the solution's leaves).
    """
    tree_solution = [first_path]
    # Drop every stored path that reuses a node of the committed seed path.
    for picked in first_path:
        if picked != 0:
            removeAllOccurrencesMulti(picked, clusters_dict)
    for level in range(MAX_DEPTH - 1, -1, -1):
        # For every cluster that reaches this depth, greedily take the first
        # surviving path, if any.
        for j in range(1, n + 1):
            if cluster_depth[j] > level:
                candidates = clusters_dict[j][level]
                if candidates:
                    chosen = candidates[0]
                    tree_solution.append(chosen)
                    # Purge all paths touching the nodes we just consumed.
                    for picked in chosen:
                        if picked != 0:
                            removeAllOccurrencesMulti(picked, clusters_dict)
    return tree_solution
def solve_tree():
    """Single-threaded greedy solve over the shared global ``clusters``.

    Mirrors solve_thread_run but mutates the global clusters and appends the
    chosen paths to the module-level ``solution`` list.  NOTE(review):
    ``solution`` is never initialised anywhere in this file's visible code,
    so calling this raises NameError -- confirm (the call site is commented
    out in the script body).
    """
    i=MAX_DEPTH-1;
    while i>=0:
        #for every cluster
        for j in range (1,n+1):
            #look for the cluster of depth i
            #if it exists
            if(cluster_depth[j]>i):
                #take the array of paths
                pathList = clusters[j][i]
                #if it exists
                if(pathList):
                    #pick the first occurrence
                    path = pathList[0]
                    solution.append(path);
                    print "\n\nSelect path --->",path
                    #remove every path containing a node of the chosen path
                    for node in path:
                        if(node!=0):
                            #print "remove all ", node, " occurrencies"
                            removeAllOccurrences(node)
                    #
                    #print "\nClusters - SOLVE ITERATION ",MAX_DEPTH-i
                    #pp.pprint(clusters)
        i=i-1
def removeAllOccurrences(node):
    """Purge ``node`` from the global clusters: empty the levels of its own
    cluster and delete every stored path (any node, any depth) containing it.
    """
    for x in range (1,(n+1)):
        cluster=clusters[x];
        for y in range (0,MAX_DEPTH):
            # NOTE(review): this re-clears clusters[node][y] on every x
            # iteration (and may create depth keys the cluster never had);
            # clearing once outside the x loop looks sufficient -- confirm.
            clusters[node][y] = []
            if(cluster_depth[x]>y):
                pathList=cluster[y];
                # Iterate over a shallow copy so in-place removal does not
                # skip entries of the live list.
                pathListCopy=copy.copy(pathList)
                for path in pathListCopy:
                    if(node in path):
                        pathList.remove(path)
def removeAllOccurrencesMulti(node, clusters_dict):
    """Thread-local variant of removeAllOccurrences: purge ``node`` from the
    given ``clusters_dict`` instead of the shared global clusters.
    """
    for x in range (1,(n+1)):
        cluster=clusters_dict[x];
        for y in range (0,MAX_DEPTH):
            # NOTE(review): re-clears clusters_dict[node][y] on every x
            # iteration; clearing once outside the x loop looks sufficient
            # -- confirm.
            clusters_dict[node][y] = []
            if(cluster_depth[x]>y):
                pathList=cluster[y];
                # Copy first so in-place removal does not skip entries.
                pathListCopy=copy.copy(pathList)
                for path in pathListCopy:
                    if(node in path):
                        pathList.remove(path)
#compute the total danger of a path
def compute_danger(my_path):
    """Sum the ``danger`` matrix entries over the consecutive legs of my_path."""
    return sum(danger[a][b] for a, b in zip(my_path, my_path[1:]))
#among a vector of paths, return the least dangerous one
def min_dangerous(paths):
    """Return the path with the minimal compute_danger value ([] if empty).

    FIX: evaluates compute_danger once per path (the original called it
    twice per candidate) and uses +inf instead of the magic sentinel 9999,
    which would have rejected every path whose danger exceeded it.
    """
    min_danger = float('inf')
    min_danger_path = []
    for candidate in paths:
        candidate_danger = compute_danger(candidate)
        if candidate_danger < min_danger:
            min_danger = candidate_danger
            min_danger_path = candidate
    return min_danger_path
def print_solution_multi(solution):
    """Print the given solution as "node successor" lines.

    Every node 1..n maps to its successor along its path; nodes whose
    successor is the school (or that appear in no path) print 0.
    """
    sol = {};
    # Default every node's successor to the school (node 0).
    for i in range (1,(n+1)):
        sol[i] = 0;
    # Each path [a, b, ..., 0] sets a->b, b->..., etc.
    for path in solution:
        for j in range(0,(len(path)-1)):
            sol[path[j]]=path[j+1]
    for k in range (1,n+1):
        print k," ",sol[k]
def print_solution():
    """Print the module-level ``solution`` as "node successor" lines.

    Duplicate of print_solution_multi except that it reads the global
    ``solution``.  NOTE(review): ``solution`` is never initialised in this
    file's visible code, so calling this raises NameError -- confirm (the
    call site is commented out).
    """
    sol = {};
    for i in range (1,(n+1)):
        sol[i] = 0;
    for path in solution:
        for j in range(0,(len(path)-1)):
            sol[path[j]]=path[j+1]
    for k in range (1,n+1):
        print k," ",sol[k]
def check_best_solution(final_solution,new_solution):
    """Return the better of the two solutions (fewer leaf paths wins).

    BUGFIX: the original rebound its local ``final_solution`` parameter and
    returned None, so the caller could never observe the update; it now
    returns the winning solution.  Its second comparison also repeated ``<``
    where the comment says "same leaves" -- now ``==``, with the risk
    tie-break still left as a TODO.
    """
    # First solution ever found: take it.
    if len(final_solution) == 0:
        return copy.deepcopy(new_solution)
    # Strictly fewer leaves: the new solution wins.
    if len(new_solution) < len(final_solution):
        return copy.deepcopy(new_solution)
    # Same number of leaves: TODO compare total risk/danger as tie-break.
    if len(new_solution) == len(final_solution):
        pass
    return final_solution
def GetBestLeaves():
    """Accessor for the module-level ``best_leaves`` counter."""
    return best_leaves
def SetBestLeaves(x):
    """Set the module-level ``best_leaves`` counter.

    BUGFIX: without the ``global`` declaration the original assigned a
    function-local name, so the module value never changed.
    """
    global best_leaves
    best_leaves = x
############## VARIABLES ##############
#CLUSTERS holds one object per node X;
#inside each object, entry [i] is the array of feasible paths from X to 0 using [i] moves,
#already ALPHA-validated
#
# X: {0: [0], // reachable elements
# 1: [[2,0]] // feasible paths with 1 move
# 2: [[2,4,0],[2,8,0],[2,9,0]] // feasible paths with 2 moves
# 3: [[2,4,8,0] ... ] // feasible paths with 3 moves
# .
# .
# n: [...]
#}
clusters = {}
risks = {}
#for each node, the nodes reachable from it within the ALPHA budget
reachables = {}
#for each node, the maximum depth of its cluster
cluster_depth = {}
#nodes whose clusters are complete (no deeper levels possible)
complete_clusters = []
#one solution per solver thread, appended under threadLock
solutions_list = []
#initialize dictionary for bus stop coordinates
coord_x = {} #x coordinates, filled while parsing the .dat file
coord_y = {} #y coordinates, filled while parsing the .dat file
neighbor = {} #for each node, distances to every other node
distance = {} #scratch dict of distances from one node, copied into neighbor
danger = []
tree = defaultdict(list) #solution list (trivial starting solution)
# NOTE(review): ``file`` shadows the Python 2 builtin of the same name.
file = 'res/pedibus_40.dat'
############## BODY ##############
n, ALPHA, node, danger, costs = parse_dat_file(file)
best_leaves = n
#MAX_DEPTH -> depth limit used when generating each node's cluster
#can range from 1 to n; if set too high the program crashes (memory blow-up)
MAX_DEPTH = 6
MAX_THREADS = 300
threadLock = threading.Lock()
threads = []
#print parameters for check
print "n: ", n, "\n" "ALPHA: ", ALPHA, "\n\n"
#pp.pprint(danger)
#neighbor = node_distance()
#INITIALIZIATION
for i in range (1,n+1):
    reachables[i]=init_reachables(i)
    clusters[i] = init_cluster(i)
    #risks[i] = init_risk()
#CLUSTER GENERATION
for i in range (1,MAX_DEPTH):
    print "\nCALCOLO CLUSTER LEVEL : ",i;
    generate_cluster(i)
    print "Fatto.\n";
time_clustering = time.time()-start
'''
print "\nREACHABLES:"
pp.pprint(reachables)
print "\nCLUSTER DEPTHS:"
pp.pprint(cluster_depth)
print "\nCLUSTERS before solving:"
pp.pprint(clusters)
'''
#SOLUTION
#solve_tree()
solve_tree_multithread()
'''
print "\nSOLUTION PATHS:"
print solution
print "\nSOLUTION NEXT NODES:"
print_solution()
'''
# Wait for all threads to complete
for t in threads:
    t.join()
print "Exiting Main Thread"
#pick the thread solution with the fewest leaves
# NOTE(review): the strict ``<`` against l = n means a solution with exactly
# n leaves is never selected and final_solution can stay empty -- confirm.
l = n;
final_solution = []
for i in range (0,len(solutions_list)):
    if(len(solutions_list[i])<l):
        final_solution = solutions_list[i]
        l = len(solutions_list[i])
print '\n------------------------------------------------------'
print '\nBEST SOLUTION LEAVES -->',len(final_solution)
print final_solution
print_solution_multi(final_solution)
print '\n------------------------------------------------------'
#timing report
time_final = time.time()-start
print '\nClustering time:', round(time_clustering,3), 'seconds.'
print 'Solving time:', round(time_final-time_clustering,3), 'seconds.'
print 'TOTAL time:', round(time_final,3), 'seconds.\n\n'