# Some implementations are adapted from
# https://github.com/ncoop57/cute_ranking/blob/main/cute_ranking/core.py
# The original code is licensed under Apache-2.0.
from typing import List, Optional

import numpy as np


def _check_k(k):
    """Validate that `k`, if given, is a positive cut-off."""
    if k is not None and k < 1:
        raise ValueError('`k` must be >=1 or `None`')
def r_precision(binary_relevance: List[int], **kwargs) -> float:
    """R-precision: precision after all relevant documents have been retrieved.

    Relevance is binary (nonzero is relevant).

    .. seealso::
        https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#R-precision

    :param binary_relevance: binary relevancy in rank order
    :return: R-precision score
    """
    binary_relevance = np.array(binary_relevance) != 0
    z = binary_relevance.nonzero()[0]
    if not z.size:
        return 0.0
    return float(np.mean(binary_relevance[: z[-1] + 1]))
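
# Illustrative usage (an addition; `_example_r_precision` is not part of the
# original module): in [1, 0, 1] the last relevant item sits at rank 3, so
# precision is averaged over the first three items, giving 2/3.
def _example_r_precision():
    assert abs(r_precision([1, 0, 1]) - 2 / 3) < 1e-9
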
def precision_at_k(
    binary_relevance: List[int], k: Optional[int] = None, **kwargs
) -> float:
    """Precision @k.

    :param binary_relevance: binary relevancy in rank order
    :param k: measured on top-k
    :return: precision @k
    """
    _check_k(k)
    binary_relevance = np.array(binary_relevance)[:k] != 0
    return float(np.mean(binary_relevance))
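
# A minimal check (an addition; the helper name is hypothetical): one of the
# top-2 items in [1, 0, 1, 0] is relevant, so precision @2 is 0.5.
def _example_precision_at_k():
    assert precision_at_k([1, 0, 1, 0], k=2) == 0.5
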
def hit_at_k(binary_relevance: List[int], k: Optional[int] = None, **kwargs) -> int:
    """Hit @k: whether any relevant item occurs in the top-k results.

    :param binary_relevance: binary relevancy in rank order
    :param k: measured on top-k
    :return: 1 if there is at least one hit in the top-k, else 0
    """
    _check_k(k)
    return 1 if np.sum(binary_relevance[:k]) > 0 else 0
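
# Added sketch (helper name is an addition): the only relevant item sits at
# rank 3, so it is missed at k=2 and hit at k=3.
def _example_hit_at_k():
    assert hit_at_k([0, 0, 1], k=2) == 0
    assert hit_at_k([0, 0, 1], k=3) == 1
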
def average_precision(binary_relevance: List[int], **kwargs) -> float:
    """Score is average precision (area under the PR curve).

    Relevance is binary (nonzero is relevant).

    :param binary_relevance: binary relevancy in rank order
    :return: average precision
    """
    r = np.array(binary_relevance) != 0
    out = [precision_at_k(r, k + 1) for k in range(r.size) if r[k]]
    if not out:
        return 0.0
    return float(np.mean(out))
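
# Added worked example (helper name is an addition): relevant items at ranks
# 1 and 3 contribute precisions 1.0 and 2/3, whose mean is 5/6.
def _example_average_precision():
    assert abs(average_precision([1, 0, 1]) - 5 / 6) < 1e-9
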
def reciprocal_rank(binary_relevance: List[int], **kwargs) -> float:
    """Score is the reciprocal of the rank of the first relevant item.

    :param binary_relevance: binary relevancy in rank order
    :return: reciprocal rank
    """
    rs = np.array(binary_relevance).nonzero()[0]
    return 1.0 / (rs[0] + 1) if rs.size else 0.0
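
# Added example (helper name is an addition): the first relevant item is at
# rank 3, so the reciprocal rank is 1/3.
def _example_reciprocal_rank():
    assert abs(reciprocal_rank([0, 0, 1, 1]) - 1 / 3) < 1e-9
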
def recall_at_k(
    binary_relevance: List[int], max_rel: int, k: Optional[int] = None, **kwargs
) -> float:
    """Recall @k.

    Relevance is binary (nonzero is relevant).

    :param binary_relevance: binary relevancy in rank order
    :param k: measured on top-k
    :param max_rel: maximum number of documents that can be relevant
    :return: recall @k
    """
    _check_k(k)
    binary_relevance = np.array(binary_relevance[:k]) != 0
    if np.sum(binary_relevance) > max_rel:
        raise ValueError(f'Number of relevant documents retrieved > {max_rel}')
    return float(np.sum(binary_relevance) / max_rel)
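
# Added example (helper name is an addition): 2 of the 3 relevant documents
# appear in the top-4, so recall @4 is 2/3.
def _example_recall_at_k():
    assert abs(recall_at_k([1, 0, 1, 0], max_rel=3, k=4) - 2 / 3) < 1e-9
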
def f1_score_at_k(
    binary_relevance: List[int], max_rel: int, k: Optional[int] = None, **kwargs
) -> float:
    """Score is the harmonic mean of precision @k and recall @k.

    Relevance is binary (nonzero is relevant).

    :param binary_relevance: binary relevancy in rank order
    :param k: measured on top-k
    :param max_rel: maximum number of documents that can be relevant
    :return: F1 score @k
    """
    _check_k(k)
    p = precision_at_k(binary_relevance, k)
    r = recall_at_k(binary_relevance, max_rel, k)
    if (p + r) > 0:
        return 2 * p * r / (p + r)
    else:
        return 0.0
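
# Added example (helper name is an addition): here precision @2 and recall @2
# are both 0.5, and the harmonic mean of equal values is that value.
def _example_f1_score_at_k():
    assert abs(f1_score_at_k([1, 0, 1, 0], max_rel=2, k=2) - 0.5) < 1e-9
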
def dcg_at_k(
    relevance: List[float], method: int = 0, k: Optional[int] = None, **kwargs
) -> float:
    """Score is discounted cumulative gain (DCG).

    Relevance is positive real values; binary values work as in the
    previous methods.

    Example from
    http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf

    :param relevance: relevance scores (list or numpy) in rank order
        (first element is the first item)
    :param k: measured on top-k
    :param method: if 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...];
        if 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]
    :return: discounted cumulative gain
    """
    _check_k(k)
    r = np.asarray(relevance, dtype=float)[:k]
    if r.size:
        if method == 0:
            return float(r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1))))
        elif method == 1:
            return float(np.sum(r / np.log2(np.arange(2, r.size + 2))))
        else:
            raise ValueError('method must be 0 or 1.')
    return 0.0
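
# Added check of the CS276 handout example (helper name is an addition): with
# method=1, DCG = 3 + 2/log2(3) + 3/2 + 0 + 1/log2(6) + 2/log2(7) ~= 6.861.
def _example_dcg_at_k():
    assert abs(dcg_at_k([3, 2, 3, 0, 1, 2], method=1) - 6.861) < 1e-3
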
def ndcg_at_k(
    relevance: List[float], method: int = 0, k: Optional[int] = None, **kwargs
) -> float:
    """Score is normalized discounted cumulative gain (NDCG).

    Relevance is positive real values; binary values work as in the
    previous methods.

    Example from
    http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf

    :param relevance: relevance scores (list or numpy) in rank order
        (first element is the first item)
    :param k: measured on top-k
    :param method: if 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...];
        if 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]
    :return: normalized discounted cumulative gain
    """
    _check_k(k)
    dcg_max = dcg_at_k(sorted(relevance, reverse=True), method=method, k=k)
    if not dcg_max:
        return 0.0
    return dcg_at_k(relevance, method=method, k=k) / dcg_max
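
# Added check continuing the example above (helper name is an addition): the
# DCG of ~6.861 divided by the ideal-ordering DCG of ~7.141 gives ~0.961.
def _example_ndcg_at_k():
    assert abs(ndcg_at_k([3, 2, 3, 0, 1, 2], method=1) - 0.961) < 1e-3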