Commit b3b529ff authored by Nicolas Seydoux

Updated result scripts

parent 27146eaa
@@ -10,12 +10,7 @@ fig = plt.figure()
ax = fig.add_subplot(111)
results = [
    "/home/nseydoux/dev/edr/results_rpi23sb_1",
    "/home/nseydoux/dev/edr/results_rpi23sb_2",
    "/home/nseydoux/dev/edr/results_rpi23sb_3",
    "/home/nseydoux/dev/edr/results_rpi23sb_4",
    "/home/nseydoux/dev/edr/results_rpi23sb_5"
    # "/home/nseydoux/dev/edr/results_tmp"
    "/home/nseydoux/dev/edr/results/coopis/clone_f_0_reproduce/results_clone_f_0_reproduce_8"
]
index = 0
@@ -26,37 +21,26 @@ processing = []
idle = []
for path in results:
    index += 1
    centralized = []
    with open(path+"/journeys_c.pkl", "rb") as f:
        centralized = pickle.load(f)
    centralized_transit = [j["transit"] for j in centralized if j["transit"] < 7000]
    centralized_idle = [j["idle"] for j in centralized if j["idle"] < 7000]
    centralized_processing = [j["processing"] for j in centralized if j["processing"] < 7000]
    distributed = []
    with open(path+"/journeys_d.pkl", "rb") as f:
        distributed = pickle.load(f)
    distributed_transit = [j["transit"] for j in distributed if j["transit"] < 7000]
    distributed_idle = [j["idle"] for j in distributed if j["idle"] < 7000]
    distributed_processing = [j["processing"] for j in distributed if j["processing"] < 7000]
    width = 0.35  # the width of the bars: can also be len(x) sequence
    # transit_mean = [np.average(centralized_transit), np.average(distributed_transit)]
    # processing_mean = [np.average(centralized_processing), np.average(distributed_processing)]
    # idle_mean = [np.average(centralized_idle), np.average(distributed_idle)]
    labels.append(str(index)+"_c")
    labels.append(str(index)+"_d")
    # Absolute
    transit.append(np.average(centralized_transit))
    transit.append(np.average(distributed_transit))
    processing.append(np.average(centralized_processing))
    processing.append(np.average(distributed_processing))
    idle.append(np.average(centralized_idle))
    idle.append(np.average(distributed_idle))
    for method in ["adp", "cdp", "cdr", "cir"]:
        index += 1
        journeys = []
        with open(path+"/journeys_{0}.pkl".format(method), "rb") as f:
            journeys = pickle.load(f)
        method_transit = [j["transit"] for j in journeys if j["transit"] < 7000]
        method_idle = [j["idle"] for j in journeys if j["idle"] < 7000]
        method_processing = [j["processing"] for j in journeys if j["processing"] < 7000]
        width = 0.35  # the width of the bars: can also be len(x) sequence
        # transit_mean = [np.average(centralized_transit), np.average(distributed_transit)]
        # processing_mean = [np.average(centralized_processing), np.average(distributed_processing)]
        # idle_mean = [np.average(centralized_idle), np.average(distributed_idle)]
        labels.append(str(index)+"_"+method)
        # Absolute
        transit.append(np.average(method_transit))
        processing.append(np.average(method_processing))
        idle.append(np.average(method_idle))
# p1 = plt.bar(["C", "D"], transit_mean, width, color="blue")
# p2 = plt.bar(["C", "D"], processing_mean, width, bottom=transit_mean, color="yellow")
......
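For context, a minimal standalone sketch of the stacked-bar construction used in the script above, assuming journeys_*.pkl files that hold lists of dicts with "transit", "processing" and "idle" keys; the example path, method list and the plt.show() call are placeholders, not part of the committed script:

import pickle
import numpy as np
import matplotlib.pyplot as plt

# Hypothetical inputs: one results folder and the method suffixes used above.
paths = ["/tmp/example_results"]
methods = ["c", "adp", "cdp", "cdr", "cir"]

labels, transit, processing, idle = [], [], [], []
for path in paths:
    for method in methods:
        with open("{0}/journeys_{1}.pkl".format(path, method), "rb") as f:
            journeys = pickle.load(f)
        # Same 7000 outlier cut-off as in the script above.
        labels.append(method)
        transit.append(np.average([j["transit"] for j in journeys if j["transit"] < 7000]))
        processing.append(np.average([j["processing"] for j in journeys if j["processing"] < 7000]))
        idle.append(np.average([j["idle"] for j in journeys if j["idle"] < 7000]))

# Stacked bars: each component is drawn on top of the previous ones via bottom=.
width = 0.35
p1 = plt.bar(labels, transit, width, color="blue")
p2 = plt.bar(labels, processing, width, bottom=transit, color="yellow")
p3 = plt.bar(labels, idle, width,
             bottom=[t + p for t, p in zip(transit, processing)], color="green")
plt.ylabel("average journey delay")
plt.show()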
import glob
import json
import argparse

def prepare_trace(logs_folder_path):
    trace = []
    for tracefile_path in glob.glob(logs_folder_path+"/*.trace"):
        with open(tracefile_path, "r") as tracefile:
            for line in tracefile:
                event = json.loads(line)
                trace.append(event)
    trace.sort(key=lambda x: x["time"])
    return trace

def count_messages(trace):
    counter = 0
    for event in trace:
        if event["event"] == "Sending data":
            counter += 1
    return counter

parser = argparse.ArgumentParser(description='Processes the traces of an edr execution.')
parser.add_argument('--adp', type=str)
parser.add_argument('--cir', type=str)
parser.add_argument('--cdp', type=str)
parser.add_argument('--cdr', type=str)
parser.add_argument('--cip', type=str)
parser.add_argument('-o', '--output', help="the folder to which results are sent", type=str)
args = parser.parse_args()

for method, folder in {"adp": args.adp,
                       "cdp": args.cdp,
                       "cip": args.cip,
                       "cdr": args.cdr,
                       "cir": args.cir}.items():
    print("Counting messages for "+method)
    trace = prepare_trace(folder)
    with open(args.output+"/messages_{0}.txt".format(method), "w") as f:
        f.write(str(count_messages(trace)))
    del(trace)
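As a sanity check, a small usage sketch of these helpers, assuming prepare_trace and count_messages from the script above are in scope and that each *.trace file holds one JSON event per line with "time" and "event" fields:

import json
import os
import tempfile

# Build a throwaway trace folder with two events; only one is a send.
folder = tempfile.mkdtemp()
events = [{"time": 2, "event": "Sending data"},
          {"time": 1, "event": "Receiving data"}]
with open(os.path.join(folder, "node0.trace"), "w") as f:
    for e in events:
        f.write(json.dumps(e) + "\n")

trace = prepare_trace(folder)   # events from all *.trace files, sorted by "time"
print(count_messages(trace))    # prints 1: only "Sending data" events are counted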
@@ -424,6 +424,13 @@ def histogram_reasonings(reasonings_list, folder):
        plt.savefig(folder+"/reasoning_"+rule.split("#")[1], dpi=300)
        plt.close()

def count_messages(trace):
    counter = 0
    for event in trace:
        if event["event"] == "Sending data":
            counter += 1
    return counter

parser = argparse.ArgumentParser(description='Processes the traces of an edr execution.')
parser.add_argument('-c', '--centralized', help="the folder containing all logs files for the centralized approach", type=str)
parser.add_argument('-d', '--distributed', help="the folder containing all logs files for the distributed approach", type=str)
@@ -511,6 +518,8 @@ if args.adp != None or args.cir != None or args.cdp != None or args.cip != None
        delay = extract_delays_details(journey)
        with open(args.output+"/delays_{0}.pkl".format(method), "wb") as f:
            pickle.dump(delay, f)
        with open(args.output+"/messages_{0}.txt".format(method), "w") as f:
            f.write(str(count_messages(trace)))
        del(trace)
        del(graph)
        del(delay)
......
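Downstream, the per-method outputs written above (delays_{method}.pkl alongside messages_{method}.txt) can be read back for inspection; a hedged sketch, where the output folder and the method list are assumptions to adjust to the actual run:

import pickle

output = "/tmp/edr_output"   # hypothetical folder passed as --output
for method in ["adp", "cdp", "cip", "cdr", "cir"]:
    with open(output + "/delays_{0}.pkl".format(method), "rb") as f:
        delays = pickle.load(f)
    with open(output + "/messages_{0}.txt".format(method), "r") as f:
        messages = int(f.read())
    # Report the message count and the type of the pickled delay details.
    print("{0}: {1} messages, delays object of type {2}".format(
        method, messages, type(delays).__name__))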
@@ -10,12 +10,7 @@ fig = plt.figure()
ax = fig.add_subplot(111)
results = [
    "/home/nseydoux/dev/edr/results_rpi23sb_1",
    "/home/nseydoux/dev/edr/results_rpi23sb_2",
    "/home/nseydoux/dev/edr/results_rpi23sb_3",
    "/home/nseydoux/dev/edr/results_rpi23sb_4",
    "/home/nseydoux/dev/edr/results_rpi23sb_5"
    # "/home/nseydoux/dev/edr/results_tmp"
    "/home/nseydoux/dev/edr/results/coopis/clone_f_0_reproduce/results_clone_f_0_reproduce_8"
]
index = 0
@@ -26,47 +21,29 @@ processing = []
idle = []
for path in results:
    index += 1
    centralized = []
    with open(path+"/journeys_c.pkl", "rb") as f:
        centralized = pickle.load(f)
    for method in ["adp", "cdp", "cdr", "cir"]:
        index += 1
        journeys = []
        with open(path+"/journeys_{0}.pkl".format(method), "rb") as f:
            journeys = pickle.load(f)
    centralized_transit = [j["transit"] for j in centralized if j["transit"] < 7000]
    centralized_idle = [j["idle"] for j in centralized if j["idle"] < 7000]
    centralized_processing = [j["processing"] for j in centralized if j["processing"] < 7000]
        method_transit = [j["transit"] for j in journeys if j["transit"] < 7000]
        method_idle = [j["idle"] for j in journeys if j["idle"] < 7000]
        method_processing = [j["processing"] for j in journeys if j["processing"] < 7000]
    distributed = []
    with open(path+"/journeys_d.pkl", "rb") as f:
        distributed = pickle.load(f)
        width = 0.35  # the width of the bars: can also be len(x) sequence
    distributed_transit = [j["transit"] for j in distributed if j["transit"] < 7000]
    distributed_idle = [j["idle"] for j in distributed if j["idle"] < 7000]
    distributed_processing = [j["processing"] for j in distributed if j["processing"] < 7000]
        labels.append(str(index)+"_"+method)
    width = 0.35  # the width of the bars: can also be len(x) sequence
    # transit_mean = [np.average(centralized_transit), np.average(distributed_transit)]
    # processing_mean = [np.average(centralized_processing), np.average(distributed_processing)]
    # idle_mean = [np.average(centralized_idle), np.average(distributed_idle)]
        # normalization
        avg_t = np.average(method_transit)
        avg_p = np.average(method_processing)
        avg_i = np.average(method_idle)
        avg_d = avg_t+avg_p+avg_i
    labels.append(str(index)+"_c")
    # labels.append(str(index)+"_d")
    # normalization
    avg_c_t = np.average(centralized_transit)
    # avg_d_t = np.average(distributed_transit)
    avg_c_p = np.average(centralized_processing)
    # avg_d_p = np.average(distributed_processing)
    avg_c_i = np.average(centralized_idle)
    # avg_d_i = np.average(distributed_idle)
    avg_c_d = avg_c_t+avg_c_p+avg_c_i
    # avg_d_d = avg_d_t+avg_d_p+avg_d_i
    transit.append(avg_c_t*100.0/avg_c_d)
    # transit.append(avg_d_t*100.0/avg_d_d)
    processing.append(avg_c_p*100.0/avg_c_d)
    # processing.append(avg_d_p*100.0/avg_d_d)
    idle.append(avg_c_i*100.0/avg_c_d)
    # idle.append(avg_d_i*100.0/avg_d_d)
        transit.append(avg_t*100.0/avg_d)
        processing.append(avg_p*100.0/avg_d)
        idle.append(avg_i*100.0/avg_d)
p1 = plt.bar(labels, transit, width, color="blue")
p2 = plt.bar(labels, processing, width, bottom=transit, color="yellow")
......
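The key change in this second plot script is the normalization: instead of plotting absolute averages, each method's transit/processing/idle averages are rescaled to percentages of their sum, so every stacked bar totals 100 and only the relative split is compared across methods. A small worked example with made-up averages (the numbers are illustrative only):

# Made-up averaged delays for one method (same roles as avg_t, avg_p, avg_i above).
avg_t, avg_p, avg_i = 120.0, 300.0, 80.0
avg_d = avg_t + avg_p + avg_i       # total = 500.0

shares = [avg_t * 100.0 / avg_d,    # transit    -> 24.0 %
          avg_p * 100.0 / avg_d,    # processing -> 60.0 %
          avg_i * 100.0 / avg_d]    # idle       -> 16.0 %
print(shares, sum(shares))          # the three shares always sum to 100.0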