Metrics

Accuracy

Bases: RunEvaluator

Evaluates Accuracy metric

Source code in supermat/langchain/metrics.py (lines 37-63)
class Accuracy(RunEvaluator):
    """Evaluates Accuracy metric"""

    def __init__(self, llm: BaseChatModel) -> None:
        self.llm = llm
        self.evaluator_accuracy = load_evaluator(
            "labeled_score_string",  # type: ignore
            criteria={
                "accuracy": """
                Score 1: The answer is completely unrelated to the reference.
                Score 3: The answer has minor relevance but does not align with the reference.
                Score 5: The answer has moderate relevance but contains inaccuracies.
                Score 7: The answer aligns with the reference but has minor errors or omissions.
                Score 10: The answer is completely accurate and aligns perfectly with the reference.""",
            },
            llm=self.llm,
            normalize_by=10,
        )

    def evaluate_run(self, run: Run, example: Example) -> EvaluationResult | EvaluationResults:
        res = self.evaluator_accuracy.evaluate_strings(
            prediction=next(iter(run.outputs.values())),
            input=run.inputs["Question"],
            # The dataset's ground-truth answer serves as the reference here.
            reference=example.outputs["Answer"],
        )
        return EvaluationResult(key="labeled_criteria:accuracy", **res)
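
For context, here is a minimal sketch of the call that Accuracy delegates to, using LangChain's load_evaluator with the labeled_score_string evaluator; the judge model, the abbreviated criteria text, and the question, prediction, and reference strings are placeholder assumptions.

from langchain.evaluation import load_evaluator
from langchain_openai import ChatOpenAI  # assumption: any BaseChatModel can serve as the judge

llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)  # hypothetical judge model

# Same evaluator type and normalization that Accuracy configures in __init__,
# with an abbreviated criteria description.
evaluator = load_evaluator(
    "labeled_score_string",
    criteria={"accuracy": "Score 1: unrelated to the reference. Score 10: fully accurate."},
    llm=llm,
    normalize_by=10,
)

result = evaluator.evaluate_strings(
    prediction="Paris is the capital of France.",
    input="What is the capital of France?",
    reference="The capital of France is Paris.",
)
print(result["score"])      # normalized to the 0-1 range by normalize_by=10
print(result["reasoning"])  # the judge model's rationale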

CosineSimilarity

Bases: RunEvaluator

Evaluates cosine similarity metric

Source code in supermat/langchain/metrics.py (lines 66-88)
class CosineSimilarity(RunEvaluator):
    """Evaluates cosine similarity metric"""

    def __init__(self) -> None:
        self.embedding_model = HuggingFaceEmbeddings(
            model_name="thenlper/gte-base",
        )

    def evaluate_run(self, run: Run, example: Example | None = None) -> EvaluationResult | EvaluationResults:
        response = run.outputs["output"]
        reference = example.outputs["Answer"]

        response_embedding = np.array(self.embedding_model.embed_query(response))
        reference_embedding = np.array(self.embedding_model.embed_query(reference))

        dot_product = np.dot(response_embedding, reference_embedding)
        cosine_similarity = dot_product / (np.linalg.norm(response_embedding) * np.linalg.norm(reference_embedding))
        return EvaluationResult(
            **{
                "key": "cosine_similarity",
                "score": cosine_similarity,
            }
        )
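
A quick check of the cosine-similarity computation above using plain numpy; the vectors are toy values standing in for real embeddings.

import numpy as np

def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    # Dot product divided by the product of the two vector norms.
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

a = np.array([1.0, 2.0, 3.0])
b = np.array([2.0, 4.0, 6.0])   # same direction as a
c = np.array([3.0, -1.5, 0.0])  # orthogonal to a (dot product is zero)

print(cosine_similarity(a, b))  # 1.0
print(cosine_similarity(a, c))  # 0.0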

FaithfullnessMetrics

Bases: RunEvaluator

Evaluates Faithfulness metric

Source code in supermat/langchain/metrics.py (lines 14-34)
class FaithfullnessMetrics(RunEvaluator):
    """Evaluates Faithfullness metric"""

    def __init__(self, llm: BaseChatModel) -> None:
        self.llm = llm
        self.evaluator_faithfullness = load_evaluator(
            "labeled_score_string",
            criteria={
                "faithful": "How faithful is the submission to the reference context?",
            },
            llm=self.llm,
            normalize_by=10,
        )

    def evaluate_run(self, run: Run, example: Example) -> EvaluationResult | EvaluationResults:
        res = self.evaluator_faithfullness.evaluate_strings(
            prediction=next(iter(run.outputs.values())),
            input=run.inputs["Question"],
            reference=example.inputs["documents"],
        )
        return EvaluationResult(key="labeled_criteria:faithful", **res)

Rouge1

Bases: RunEvaluator

Evaluates ROUGE-1 F1 metric

Source code in supermat/langchain/metrics.py (lines 150-171)
class Rouge1(RunEvaluator):
    """Evaluates ROGUE1 F1 metric"""

    def __init__(self) -> None:
        # "ROUGE-Lsum splits the text into sentences based on newlines
        # and computes the LCS for each pair of sentences and take the average score for all sentences
        self.scorer = rouge_scorer.RougeScorer(["rouge1"], use_stemmer=True)
        self.score_func = cache(self.scorer.score)

    def evaluate_run(self, run: Run, example: Example | None = None) -> EvaluationResult | EvaluationResults:
        response = run.outputs["output"]
        reference = example.outputs["Answer"]
        rouge_score = self.scorer.score(target=reference, prediction=response)

        result = EvaluationResult(
            **{
                "key": "rouge1_f1_score",
                "score": rouge_score["rouge1"].fmeasure,
                "comment": f"precision:{rouge_score['rouge1'].precision}, recall:{rouge_score['rouge1'].recall}",
            },
        )
        return result
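
For reference, the precision, recall, and F1 figures these evaluators report can be reproduced by calling the scorer directly; a minimal sketch, assuming rouge_scorer comes from Google's rouge_score package and using two toy sentences.

from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rouge1"], use_stemmer=True)
scores = scorer.score(
    target="The quick brown fox jumps over the lazy dog",
    prediction="A quick brown fox jumped over a dog",
)

r1 = scores["rouge1"]
print(r1.precision)  # fraction of predicted unigrams that appear in the target
print(r1.recall)     # fraction of target unigrams that appear in the prediction
print(r1.fmeasure)   # harmonic mean of precision and recall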

Rouge1Precision

Bases: Rouge1

Evaluates ROUGE-1 precision metric

Source code in supermat/langchain/metrics.py (lines 174-188)
class Rouge1Precision(Rouge1):
    """Evaluates ROGUE1 precision metric"""

    def evaluate_run(self, run: Run, example: Example | None = None) -> EvaluationResult | EvaluationResults:
        response = run.outputs["output"]
        reference = example.outputs["Answer"]
        rouge_score = self.scorer.score(target=reference, prediction=response)

        result = EvaluationResult(
            **{
                "key": "rouge1_precision",
                "score": rouge_score["rouge1"].precision,
            },
        )
        return result

Rouge1Recall

Bases: Rouge1

Evaluates ROUGE-1 recall metric

Source code in supermat/langchain/metrics.py (lines 191-205)
class Rouge1Recall(Rouge1):
    """Evaluates ROGUE1 recall metric"""

    def evaluate_run(self, run: Run, example: Example | None = None) -> EvaluationResult | EvaluationResults:
        response = run.outputs["output"]
        reference = example.outputs["Answer"]
        rouge_score = self.scorer.score(target=reference, prediction=response)

        result = EvaluationResult(
            **{
                "key": "rouge1_recall",
                "score": rouge_score["rouge1"].recall,
            },
        )
        return result

Rouge2

Bases: RunEvaluator

Evaluates ROUGE-2 F1 metric

Source code in supermat/langchain/metrics.py (lines 208-228)
class Rouge2(RunEvaluator):
    """Evaluates ROGUE2 F1 metric"""

    def __init__(self) -> None:
        # "ROUGE-Lsum splits the text into sentences based on newlines
        # and computes the LCS for each pair of sentences and take the average score for all sentences
        self.scorer = rouge_scorer.RougeScorer(["rouge2"], use_stemmer=True)
        self.score_func = cache(self.scorer.score)

    def evaluate_run(self, run: Run, example: Example | None = None) -> EvaluationResult | EvaluationResults:
        response = run.outputs["output"]
        reference = example.outputs["Answer"]
        rouge_score = self.scorer.score(target=reference, prediction=response)

        result = EvaluationResult(
            **{
                "key": "rouge2_f1_score",
                "score": rouge_score["rouge2"].fmeasure,
            },
        )
        return result

Rouge2Precision

Bases: Rouge2

Evaluates ROUGE-2 precision metric

Source code in supermat/langchain/metrics.py (lines 231-245)
class Rouge2Precision(Rouge2):
    """Evaluates ROGUE2 precision metric"""

    def evaluate_run(self, run: Run, example: Example | None = None) -> EvaluationResult | EvaluationResults:
        response = run.outputs["output"]
        reference = example.outputs["Answer"]
        rouge_score = self.scorer.score(target=reference, prediction=response)

        result = EvaluationResult(
            **{
                "key": "rouge2_precision",
                "score": rouge_score["rouge2"].precision,
            },
        )
        return result

Rouge2Recall

Bases: Rouge2

Evaluates ROUGE-2 recall metric

Source code in supermat/langchain/metrics.py (lines 248-262)
class Rouge2Recall(Rouge2):
    """Evaluates ROGUE2 recall metric"""

    def evaluate_run(self, run: Run, example: Example | None = None) -> EvaluationResult | EvaluationResults:
        response = run.outputs["output"]
        reference = example.outputs["Answer"]
        rouge_score = self.scorer.score(target=reference, prediction=response)

        result = EvaluationResult(
            **{
                "key": "rouge2_recall",
                "score": rouge_score["rouge2"].recall,
            },
        )
        return result

RougeLsum

Bases: RunEvaluator

Evaluates ROUGE-Lsum F1 metric

Source code in supermat/langchain/metrics.py (lines 91-113)
class RougeLsum(RunEvaluator):
    """Evaluates ROGUE-L F1 metric"""

    def __init__(self) -> None:
        # "ROUGE-Lsum splits the text into sentences based on newlines
        # and computes the LCS for each pair of sentences and take the average score for all sentences
        self.scorer = rouge_scorer.RougeScorer(["rougeLsum"], use_stemmer=True)
        self.score_func = cache(self.scorer.score)

    def evaluate_run(self, run: Run, example: Example | None = None) -> EvaluationResult | EvaluationResults:
        response = run.outputs["output"]
        reference = example.outputs["Answer"]
        rouge_score = self.scorer.score(target=reference, prediction=response)

        result = EvaluationResult(
            **{
                "key": "rougeLsum_f1_score",
                "score": rouge_score["rougeLsum"].fmeasure,
                "comment": f"precision:{rouge_score['rougeLsum'].precision}, recall:{rouge_score['rougeLsum'].recall}",
            },
        )

        return result
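
A small illustration of the newline-based sentence splitting noted in the comment above; a sketch assuming the same rouge_score package, with made-up two-sentence summaries.

from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rougeL", "rougeLsum"], use_stemmer=True)

target = "The model retrieves documents.\nIt then answers the question."
prediction = "It then answers the question.\nThe model retrieves documents."

scores = scorer.score(target=target, prediction=prediction)
# rougeL treats each text as a single token sequence, so reordering the
# sentences shortens the longest common subsequence; rougeLsum scores the
# newline-separated sentences individually and is unaffected by the reorder.
print(scores["rougeL"].fmeasure)     # lower than 1.0
print(scores["rougeLsum"].fmeasure)  # 1.0 for this pair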

RougeLsumPrecision

Bases: RougeLsum

Evaluates ROUGE-Lsum precision metric

Source code in supermat/langchain/metrics.py (lines 116-130)
class RougeLsumPrecision(RougeLsum):
    """Evaluates ROGUE-L sum precision metric"""

    def evaluate_run(self, run: Run, example: Example | None = None) -> EvaluationResult | EvaluationResults:
        response = run.outputs["output"]
        reference = example.outputs["Answer"]
        rouge_score = self.scorer.score(target=reference, prediction=response)

        result = EvaluationResult(
            **{
                "key": "rougeLsum_precision",
                "score": rouge_score["rougeLsum"].precision,
            },
        )
        return result

RougeLsumRecall

Bases: RougeLsum

Evaluates ROUGE-Lsum recall metric

Source code in supermat/langchain/metrics.py (lines 133-147)
class RougeLsumRecall(RougeLsum):
    """Evaluates ROGUE-L sum Recall metric"""

    def evaluate_run(self, run: Run, example: Example | None = None) -> EvaluationResult | EvaluationResults:
        response = run.outputs["output"]
        reference = example.outputs["Answer"]
        rouge_score = self.scorer.score(target=reference, prediction=response)

        result = EvaluationResult(
            **{
                "key": "rougeLsum_recall",
                "score": rouge_score["rougeLsum"].recall,
            },
        )
        return result
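
Finally, a hedged sketch of how these evaluators might be wired into a LangSmith evaluation run via LangChain's legacy run_on_dataset API; the dataset name, the placeholder chain, and the judge model are assumptions, and the dataset examples are assumed to carry Question and documents inputs plus an Answer output, matching the keys the evaluators above read.

from langchain.smith import RunEvalConfig, run_on_dataset
from langchain_core.runnables import RunnableLambda
from langchain_openai import ChatOpenAI  # assumption: any BaseChatModel can act as the judge
from langsmith import Client

from supermat.langchain.metrics import (
    Accuracy,
    CosineSimilarity,
    FaithfullnessMetrics,
    Rouge1,
    Rouge2,
    RougeLsum,
)

judge_llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)  # hypothetical judge model

# Placeholder for the chain under test; it must return an "output" key,
# since the evaluators read run.outputs["output"].
qa_chain = RunnableLambda(lambda inputs: {"output": "placeholder answer"})

eval_config = RunEvalConfig(
    custom_evaluators=[
        Accuracy(judge_llm),
        FaithfullnessMetrics(judge_llm),
        CosineSimilarity(),
        Rouge1(),
        Rouge2(),
        RougeLsum(),
    ],
)

run_on_dataset(
    client=Client(),
    dataset_name="my-qa-dataset",  # hypothetical dataset name
    llm_or_chain_factory=qa_chain,
    evaluation=eval_config,
)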