pillar

TrustPillar

Class to represent a trust pillar.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `name` | `str` | Name of the pillar. | *required* |
| `metrics` | `dict` | Metric definitions for the pillar. | *required* |
| `input_docs` | `dict` | Input documents. | *required* |
| `use_weights` | `bool` | If `True`, use the weights defined in the metric configuration file; otherwise weight all metrics equally. | `False` |
Source code in `nebula/addons/trustworthiness/pillar.py`

```python
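# NOTE: logger, calculation, and get_input_value are imported at the top of
# pillar.py (imports omitted from this excerpt).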
class TrustPillar:
    """
    Class to represent a trust pillar.

    Args:
        name (string): Name of the pillar.
        metrics (dict): Metric definitions for the pillar.
        input_docs (dict): Input documents.
        use_weights (bool): If True, use the weights defined in the metric configuration file; otherwise weight all metrics equally.

    """

    def __init__(self, name, metrics, input_docs, use_weights=False):
        self.name = name
        self.input_docs = input_docs
        self.metrics = metrics
        self.result = []
        self.use_weights = use_weights

    def evaluate(self):
        """
        Evaluate the trust score for the pillar.

        Returns:
            tuple[float, dict]: The pillar score in [0, 1] and a result dictionary with the per-notion scores.
        """
        score = 0
        avg_weight = 1 / len(self.metrics)
        for key, value in self.metrics.items():
            weight = value.get("weight", avg_weight) if self.use_weights else avg_weight
            score += weight * self.get_notion_score(key, value.get("metrics"))
        score = round(score, 2)
        return score, {self.name: {"score": score, "notions": self.result}}

    def get_notion_score(self, name, metrics):
        """
        Evaluate the trust score for the notion.

        Args:
            name (string): Name of the notion.
            metrics (dict): Metric definitions of the notion.

        Returns:
            float: Score in [0, 1].
        """

        notion_score = 0
        avg_weight = 1 / len(metrics)
        metrics_result = []
        for key, value in metrics.items():
            metric_score = self.get_metric_score(metrics_result, key, value)
            weight = value.get("weight", avg_weight) if self.use_weights else avg_weight
            notion_score += weight * float(metric_score)
        self.result.append({name: {"score": notion_score, "metrics": metrics_result}})
        return notion_score

    def get_metric_score(self, result, name, metric):
        """
        Evaluate the trust score for the metric.

        Args:
            result (list): List collecting per-metric results; this metric's result is appended to it.
            name (string): Name of the metric.
            metric (dict): The metric definition.

        Returns:
            float: Score in [0, 1].
        """

        score = 0
        try:
            input_value = get_input_value(self.input_docs, metric.get("inputs"), metric.get("operation"))

            score_type = metric.get("type")
            if input_value is None:
                logger.warning(f"{name} input value is null")
            else:
                if score_type == "true_score":
                    score = calculation.get_true_score(input_value, metric.get("direction"))
                elif score_type == "score_mapping":
                    score = calculation.get_mapped_score(input_value, metric.get("score_map"))
                elif score_type == "ranges":
                    score = calculation.get_range_score(input_value, metric.get("ranges"), metric.get("direction"))
                elif score_type == "score_map_value":
                    score = calculation.get_map_value_score(input_value, metric.get("score_map"))
                elif score_type == "scaled_score":
                    score = calculation.get_scaled_score(input_value, metric.get("scale"), metric.get("direction"))
                elif score_type == "property_check":
                    score = 0 if input_value is None else input_value
                else:
                    logger.warning(f"The score type {score_type} is not yet implemented.")
        except KeyError:
            logger.warning(f"Null input for {name} metric")
        score = round(score, 2)
        result.append({name: {"score": score}})
        return score
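```

A minimal construction sketch. The `TrustPillar` signature and the nesting (notions under the pillar, each with an optional `weight` and a `metrics` dict) follow the source above; the notion and metric names are invented, and `evaluate()` needs real input documents to produce a score:

```python
from nebula.addons.trustworthiness.pillar import TrustPillar

# Hypothetical configuration: one notion holding one metric definition.
# evaluate() iterates the notions; get_notion_score() iterates each notion's "metrics".
metrics = {
    "some_notion": {                  # notion name (invented)
        "weight": 1.0,                # only honored when use_weights=True
        "metrics": {
            "some_metric": {          # metric name (invented)
                "type": "true_score", # dispatched in get_metric_score
                "inputs": ...,        # placeholders: real values depend on
                "operation": ...,     # get_input_value, which is not shown here
                "direction": ...,
            },
        },
    },
}

pillar = TrustPillar("robustness", metrics, input_docs={}, use_weights=True)
# score, report = pillar.evaluate()  # needs real inputs/input_docs to run
# report would look like {"robustness": {"score": <float>, "notions": [...]}}
```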

evaluate()

Evaluate the trust score for the pillar.

Returns:

| Type | Description |
| --- | --- |
| `tuple[float, dict]` | The pillar score in [0, 1] and a result dictionary with the per-notion scores. |
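As a concrete illustration of the aggregation (made-up notion scores; with `use_weights=False` every notion receives the equal fallback weight `1 / len(self.metrics)`):

```python
# Two hypothetical notion scores aggregated exactly as evaluate() does
# when weights are disabled.
notion_scores = [0.8, 0.5]
avg_weight = 1 / len(notion_scores)               # 0.5 each
score = round(sum(avg_weight * s for s in notion_scores), 2)
print(score)                                      # 0.65
```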


get_metric_score(result, name, metric)

Evaluate the trust score for the metric.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `result` | `list` | List collecting per-metric results; this metric's result is appended to it. | *required* |
| `name` | `str` | Name of the metric. | *required* |
| `metric` | `dict` | The metric definition. | *required* |

Returns:

| Type | Description |
| --- | --- |
| `float` | Score in [0, 1]. |
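The `type` field selects the scoring function. In the sketch below, the key names come from the dispatch code in the source; the values are placeholders, since the expected shapes of `inputs`, `ranges`, and `score_map` depend on `get_input_value` and the `calculation` module, which are not documented on this page:

```python
# Hypothetical metric definitions, one per dispatch branch.
range_metric = {
    "inputs": ...,             # consumed by get_input_value together with "operation"
    "operation": ...,
    "type": "ranges",          # -> calculation.get_range_score(value, ranges, direction)
    "ranges": ...,
    "direction": ...,
}
mapped_metric = {
    "inputs": ...,
    "operation": ...,
    "type": "score_mapping",   # -> calculation.get_mapped_score(value, score_map)
    "score_map": ...,
}
```

The remaining branches (`true_score`, `score_map_value`, `scaled_score`, `property_check`) follow the same pattern, using `direction`, `score_map`, or `scale` as their extra fields.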


get_notion_score(name, metrics)

Evaluate the trust score for the notion.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `name` | `str` | Name of the notion. | *required* |
| `metrics` | `dict` | Metric definitions of the notion. | *required* |

Returns:

| Type | Description |
| --- | --- |
| `float` | Score in [0, 1]. |
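One behavior worth noting from the loop above: with `use_weights=True`, a metric that omits `weight` silently falls back to the equal share `1 / len(metrics)`, so the effective weights need not sum to 1. A runnable sketch with invented metric names:

```python
# Hypothetical notion config: one explicit weight, one missing.
metrics = {
    "metric_a": {"weight": 0.7},   # explicit weight, honored because use_weights=True
    "metric_b": {},                # no "weight" key -> falls back to avg_weight
}
use_weights = True
avg_weight = 1 / len(metrics)      # 0.5
for name, definition in metrics.items():
    weight = definition.get("weight", avg_weight) if use_weights else avg_weight
    print(name, weight)            # metric_a 0.7, then metric_b 0.5 (sum = 1.2)
```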
