Manager class to help store the output directory and handle calls from the FL framework.
Source code in nebula/addons/trustworthiness/metric.py
class TrustMetricManager:
    """
    Manager class to help store the output directory and handle calls from the FL framework.
    """

    def __init__(self):
        self.factsheet_file_nm = "factsheet.json"
        self.eval_metrics_file_nm = "eval_metrics.json"
        self.nebula_trust_results_nm = "nebula_trust_results.json"

    def evaluate(self, scenario, weights, use_weights=False):
        """
        Evaluates the trustworthiness score.

        Args:
            scenario (object): The scenario for which the trustworthiness will be calculated.
            weights (dict): The desired weights of the pillars.
            use_weights (bool): True to turn on the weights in the metric config file, defaults to False.
        """
        # Get scenario name
        scenario_name = scenario[0]

        factsheet_file = os.path.join(dirname, f"files/{scenario_name}/{self.factsheet_file_nm}")
        metrics_cfg_file = os.path.join(dirname, f"configs/{self.eval_metrics_file_nm}")
        results_file = os.path.join(dirname, f"files/{scenario_name}/{self.nebula_trust_results_nm}")

        if not os.path.exists(factsheet_file):
            logger.error(f"{factsheet_file} is missing! Please check documentation.")
            return

        if not os.path.exists(metrics_cfg_file):
            logger.error(f"{metrics_cfg_file} is missing! Please check documentation.")
            return

        with open(factsheet_file) as f, open(metrics_cfg_file) as m:
            factsheet = json.load(f)
            metrics_cfg = json.load(m)
            metrics = metrics_cfg.items()
            input_docs = {"factsheet": factsheet}

            result_json = {"trust_score": 0, "pillars": []}
            final_score = 0
            result_print = []
            for key, value in metrics:
                pillar = TrustPillar(key, value, input_docs, use_weights)
                score, result = pillar.evaluate()
                weight = weights.get(key)
                final_score += weight * score
                result_print.append([key, score])
                result_json["pillars"].append(result)
            final_score = round(final_score, 2)
            result_json["trust_score"] = final_score
            write_results_json(results_file, result_json)
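A minimal usage sketch, assuming the factsheet has already been generated under files/<scenario_name>/ relative to the module directory, and that the pillar keys below match the top-level keys of configs/eval_metrics.json (the pillar names used here are illustrative placeholders):

    # Hypothetical example; pillar names are placeholders.
    from nebula.addons.trustworthiness.metric import TrustMetricManager

    manager = TrustMetricManager()

    # evaluate() only reads scenario[0], so any indexable whose first element
    # is the scenario name is enough here.
    scenario = ("my_scenario",)

    # One weight per pillar; weights that sum to 1 keep the final trust score
    # on the same scale as the individual pillar scores.
    weights = {
        "robustness": 0.25,
        "privacy": 0.25,
        "fairness": 0.25,
        "accountability": 0.25,
    }

    manager.evaluate(scenario, weights, use_weights=True)
    # On success, the results are written to
    # files/my_scenario/nebula_trust_results.json (relative to the module directory).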
evaluate(scenario, weights, use_weights=False)
Evaluates the trustworthiness score.
Parameters:

Name        | Type   | Description                                                    | Default
------------|--------|----------------------------------------------------------------|---------
scenario    | object | The scenario for which the trustworthiness will be calculated. | required
weights     | dict   | The desired weights of the pillars.                            | required
use_weights | bool   | True to turn on the weights in the metric config file.         | False
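As the source below shows, the final trust score is the weighted sum of the per-pillar scores, rounded to two decimals. A small worked example with made-up pillar names, scores, and weights:

    # Hypothetical per-pillar scores, as TrustPillar.evaluate() would return them.
    scores = {"robustness": 0.80, "privacy": 0.60, "fairness": 0.90}
    weights = {"robustness": 0.50, "privacy": 0.30, "fairness": 0.20}

    trust_score = round(sum(weights[k] * scores[k] for k in scores), 2)
    print(trust_score)  # 0.5*0.80 + 0.3*0.60 + 0.2*0.90 = 0.76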
Source code in nebula/addons/trustworthiness/metric.py
def evaluate(self, scenario, weights, use_weights=False):
    """
    Evaluates the trustworthiness score.

    Args:
        scenario (object): The scenario for which the trustworthiness will be calculated.
        weights (dict): The desired weights of the pillars.
        use_weights (bool): True to turn on the weights in the metric config file, defaults to False.
    """
    # Get scenario name
    scenario_name = scenario[0]

    factsheet_file = os.path.join(dirname, f"files/{scenario_name}/{self.factsheet_file_nm}")
    metrics_cfg_file = os.path.join(dirname, f"configs/{self.eval_metrics_file_nm}")
    results_file = os.path.join(dirname, f"files/{scenario_name}/{self.nebula_trust_results_nm}")

    if not os.path.exists(factsheet_file):
        logger.error(f"{factsheet_file} is missing! Please check documentation.")
        return

    if not os.path.exists(metrics_cfg_file):
        logger.error(f"{metrics_cfg_file} is missing! Please check documentation.")
        return

    with open(factsheet_file) as f, open(metrics_cfg_file) as m:
        factsheet = json.load(f)
        metrics_cfg = json.load(m)
        metrics = metrics_cfg.items()
        input_docs = {"factsheet": factsheet}

        result_json = {"trust_score": 0, "pillars": []}
        final_score = 0
        result_print = []
        for key, value in metrics:
            pillar = TrustPillar(key, value, input_docs, use_weights)
            score, result = pillar.evaluate()
            weight = weights.get(key)
            final_score += weight * score
            result_print.append([key, score])
            result_json["pillars"].append(result)
        final_score = round(final_score, 2)
        result_json["trust_score"] = final_score
        write_results_json(results_file, result_json)
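evaluate() returns nothing; its output is the results file written by write_results_json(). A hedged sketch of reading that file back (the exact location depends on the module-level dirname, so the path below is illustrative):

    import json
    import os

    # Illustrative path: metric.py resolves it as
    # <module dir>/files/<scenario_name>/nebula_trust_results.json
    results_file = os.path.join("files", "my_scenario", "nebula_trust_results.json")

    with open(results_file) as f:
        results = json.load(f)

    print(results["trust_score"])      # overall weighted score, rounded to 2 decimals
    for pillar in results["pillars"]:  # one entry per pillar in eval_metrics.json
        print(pillar)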