#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
DetEval evaluation algorithm.
It differs slightly from the original implementation (see https://perso.liris.cnrs.fr/christian.wolf/software/deteval/index.html).
See "Object Count / Area Graphs for the Evaluation of Object Detection and Segmentation Algorithms" for details.
'''
from collections import namedtuple
import rrc_evaluation_funcs
import importlib
def evaluation_imports():
"""
evaluation_imports: Dictionary ( key = module name , value = alias ) with python modules used in the evaluation.
"""
return {
'math': 'math',
'numpy': 'np'
}
def default_evaluation_params():
"""
default_evaluation_params: Default parameters to use for the validation and evaluation.
"""
return {
'AREA_RECALL_CONSTRAINT': 0.8,
'AREA_PRECISION_CONSTRAINT': 0.4,
'EV_PARAM_IND_CENTER_DIFF_THR': 1,
'MTYPE_OO_O': 1.,
'MTYPE_OM_O': 0.8,
'MTYPE_OM_M': 1.,
'GT_SAMPLE_NAME_2_ID': 'gt_img_([0-9]+).txt',
'DET_SAMPLE_NAME_2_ID': 'res_img_([0-9]+).txt',
        'CRLF': False  # Whether lines are delimited by Windows-style CRLF line endings
}
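# In the notation of the DetEval paper, the AREA_RECALL_CONSTRAINT and
# AREA_PRECISION_CONSTRAINT above are the area thresholds t_r and t_p, and the
# MTYPE_* values are the credits given to one-to-one matches (OO), one-to-many
# matches (one GT box split across several detections, OM) and many-to-one
# matches (several GT boxes merged into one detection, MO).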
def validate_data(gtFilePath, submFilePath, evaluationParams):
"""
Method validate_data: validates that all files in the results folder are correct (have the correct name contents).
Validates also that there are no missing files in the folder.
If some error detected, the method raises the error
"""
gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])
subm = rrc_evaluation_funcs.load_zip_file(submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)
# Validate format of GroundTruth
for k in gt:
rrc_evaluation_funcs.validate_lines_in_file(k, gt[k], evaluationParams['CRLF'], True, True)
# Validate format of results
for k in subm:
        if k not in gt:
            raise Exception("The sample %s is not present in GT" % k)
rrc_evaluation_funcs.validate_lines_in_file(k, subm[k], evaluationParams['CRLF'], True, True)
def evaluate_method(gtFilePath, submFilePath, evaluationParams):
"""
Method evaluate_method: evaluate method and returns the results
Results. Dictionary with the following values:
- method (required) Global method metrics. Ex: { 'Precision':0.8,'Recall':0.9 }
- samples (optional) Per sample metrics. Ex: {'sample1' : { 'Precision':0.8,'Recall':0.9 } , 'sample2' : { 'Precision':0.8,'Recall':0.9 }
"""
    # Import the modules declared in evaluation_imports() under their aliases
    # (e.g. numpy as np) into this module's globals.
    for module, alias in evaluation_imports().items():  # .iteritems() was Python 2 only
        globals()[alias] = importlib.import_module(module)
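    # A GT box (row) and a detection (col) match one-to-one when each passes
    # the area-recall and area-precision thresholds against the other and
    # against no other counterpart.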
def one_to_one_match(row, col):
cont = 0
for j in range(len(recallMat[0])):
if recallMat[row, j] >= evaluationParams['AREA_RECALL_CONSTRAINT'] and precisionMat[row, j] >= \
evaluationParams['AREA_PRECISION_CONSTRAINT']:
cont = cont + 1
        if cont != 1:
return False
cont = 0
for i in range(len(recallMat)):
if recallMat[i, col] >= evaluationParams['AREA_RECALL_CONSTRAINT'] and precisionMat[i, col] >= \
evaluationParams['AREA_PRECISION_CONSTRAINT']:
cont = cont + 1
        if cont != 1:
return False
if recallMat[row, col] >= evaluationParams['AREA_RECALL_CONSTRAINT'] and precisionMat[row, col] >= \
evaluationParams['AREA_PRECISION_CONSTRAINT']:
return True
return False
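    # One GT box matches many detections when every matched detection passes
    # the area-precision threshold and their summed recall against the GT box
    # reaches the area-recall threshold.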
def one_to_many_match(gtNum):
many_sum = 0
detRects = []
for detNum in range(len(recallMat[0])):
if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and detNum not in detDontCareRectsNum:
if precisionMat[gtNum, detNum] >= evaluationParams['AREA_PRECISION_CONSTRAINT']:
many_sum += recallMat[gtNum, detNum]
detRects.append(detNum)
if many_sum >= evaluationParams['AREA_RECALL_CONSTRAINT']:
return True, detRects
else:
return False, []
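    # One detection matches many GT boxes when every matched GT box passes the
    # area-recall threshold and their summed precision against the detection
    # reaches the area-precision threshold.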
def many_to_one_match(detNum):
many_sum = 0
gtRects = []
for gtNum in range(len(recallMat)):
if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCareRectsNum:
if recallMat[gtNum, detNum] >= evaluationParams['AREA_RECALL_CONSTRAINT']:
many_sum += precisionMat[gtNum, detNum]
gtRects.append(gtNum)
if many_sum >= evaluationParams['AREA_PRECISION_CONSTRAINT']:
return True, gtRects
else:
return False, []
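    # Geometry helpers. Coordinates are treated as inclusive pixel indices,
    # hence the +1 when computing widths and heights.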
def area(a, b):
dx = min(a.xmax, b.xmax) - max(a.xmin, b.xmin) + 1
dy = min(a.ymax, b.ymax) - max(a.ymin, b.ymin) + 1
if (dx >= 0) and (dy >= 0):
return dx * dy
else:
return 0.
def center(r):
x = float(r.xmin) + float(r.xmax - r.xmin + 1) / 2.
y = float(r.ymin) + float(r.ymax - r.ymin + 1) / 2.
return Point(x, y)
def point_distance(r1, r2):
distx = math.fabs(r1.x - r2.x)
disty = math.fabs(r1.y - r2.y)
return math.sqrt(distx * distx + disty * disty)
def center_distance(r1, r2):
return point_distance(center(r1), center(r2))
def diag(r):
w = (r.xmax - r.xmin + 1)
h = (r.ymax - r.ymin + 1)
return math.sqrt(h * h + w * w)
perSampleMetrics = {}
methodRecallSum = 0
methodPrecisionSum = 0
Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')
Point = namedtuple('Point', 'x y')
gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])
subm = rrc_evaluation_funcs.load_zip_file(submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)
numGt = 0
numDet = 0
for resFile in gt:
gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile])
recall = 0
precision = 0
hmean = 0
recallAccum = 0.
precisionAccum = 0.
gtRects = []
detRects = []
gtPolPoints = []
detPolPoints = []
        gtDontCareRectsNum = []  # Indices of ground-truth rectangles marked as don't care
        detDontCareRectsNum = []  # Indices of detected rectangles matched with a don't-care GT
pairs = []
evaluationLog = ""
recallMat = np.empty([1, 1])
precisionMat = np.empty([1, 1])
        pointsList, _, transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(
            gtFile, evaluationParams['CRLF'], True, True, False)
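        # Build the GT rectangle list; a transcription of "###" marks a box as
        # don't care, and 8-value entries (x1,y1,...,x4,y4 quadrilaterals) are
        # reduced to their axis-aligned bounding box.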
for n in range(len(pointsList)):
points = pointsList[n]
transcription = transcriptionsList[n]
dontCare = transcription == "###"
# convert x1,y1,x2,y2,x3,y3,x4,y4 to xmin,ymin,xmax,ymax
if len(points) == 8:
points_tmp = np.array(points).reshape(4, 2)
points_x = points_tmp[:, 0]
points_y = points_tmp[:, 1]
xmin = points_x[np.argmin(points_x)]
xmax = points_x[np.argmax(points_x)]
ymin = points_y[np.argmin(points_y)]
ymax = points_y[np.argmax(points_y)]
points = [xmin, ymin, xmax, ymax]
gtRect = Rectangle(*points)
gtRects.append(gtRect)
gtPolPoints.append(points)
if dontCare:
gtDontCareRectsNum.append(len(gtRects) - 1)
evaluationLog += "GT rectangles: " + str(len(gtRects)) + (
" (" + str(len(gtDontCareRectsNum)) + " don't care)\n" if len(gtDontCareRectsNum) > 0 else "\n")
if resFile in subm:
detFile = rrc_evaluation_funcs.decode_utf8(subm[resFile])
            pointsList, _, _ = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(
                detFile, evaluationParams['CRLF'], True, True, False)
for n in range(len(pointsList)):
points = pointsList[n]
detRect = Rectangle(*points)
detRects.append(detRect)
detPolPoints.append(points)
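                # A detection whose area is mostly covered by a don't-care GT
                # box is itself flagged as don't care and excluded from scoring.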
if len(gtDontCareRectsNum) > 0:
for dontCareRectNum in gtDontCareRectsNum:
dontCareRect = gtRects[dontCareRectNum]
intersected_area = area(dontCareRect, detRect)
rdDimensions = ((detRect.xmax - detRect.xmin + 1) * (detRect.ymax - detRect.ymin + 1))
                        precision = 0 if rdDimensions == 0 else intersected_area / rdDimensions
                        if precision > evaluationParams['AREA_PRECISION_CONSTRAINT']:
detDontCareRectsNum.append(len(detRects) - 1)
break
evaluationLog += "DET rectangles: " + str(len(detRects)) + (
" (" + str(len(detDontCareRectsNum)) + " don't care)\n" if len(detDontCareRectsNum) > 0 else "\n")
if len(gtRects) == 0:
recall = 1
precision = 0 if len(detRects) > 0 else 1
if len(detRects) > 0:
                # Calculate the recall and precision matrices
outputShape = [len(gtRects), len(detRects)]
recallMat = np.empty(outputShape)
precisionMat = np.empty(outputShape)
gtRectMat = np.zeros(len(gtRects), np.int8)
detRectMat = np.zeros(len(detRects), np.int8)
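                # recallMat[i, j]    = area(G_i ∩ D_j) / area(G_i)
                # precisionMat[i, j] = area(G_i ∩ D_j) / area(D_j)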
for gtNum in range(len(gtRects)):
for detNum in range(len(detRects)):
rG = gtRects[gtNum]
rD = detRects[detNum]
intersected_area = area(rG, rD)
rgDimensions = ((rG.xmax - rG.xmin + 1) * (rG.ymax - rG.ymin + 1))
rdDimensions = ((rD.xmax - rD.xmin + 1) * (rD.ymax - rD.ymin + 1))
recallMat[gtNum, detNum] = 0 if rgDimensions == 0 else intersected_area / rgDimensions
precisionMat[gtNum, detNum] = 0 if rdDimensions == 0 else intersected_area / rdDimensions
# Find one-to-one matches
evaluationLog += "Find one-to-one matches\n"
for gtNum in range(len(gtRects)):
for detNum in range(len(detRects)):
                        if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 \
                                and gtNum not in gtDontCareRectsNum and detNum not in detDontCareRectsNum:
match = one_to_one_match(gtNum, detNum)
if match is True:
rG = gtRects[gtNum]
rD = detRects[detNum]
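                                # Accept the one-to-one match only when the box centers
                                # are close: the center distance, normalized by the mean
                                # of the two diagonals, must stay below the threshold.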
normDist = center_distance(rG, rD)
normDist /= diag(rG) + diag(rD)
normDist *= 2.0
if normDist < evaluationParams['EV_PARAM_IND_CENTER_DIFF_THR']:
gtRectMat[gtNum] = 1
detRectMat[detNum] = 1
recallAccum += evaluationParams['MTYPE_OO_O']
precisionAccum += evaluationParams['MTYPE_OO_O']
pairs.append({'gt': gtNum, 'det': detNum, 'type': 'OO'})
evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(detNum) + "\n"
else:
evaluationLog += "Match Discarded GT #" + str(gtNum) + " with Det #" + str(
detNum) + " normDist: " + str(normDist) + " \n"
# Find one-to-many matches
evaluationLog += "Find one-to-many matches\n"
for gtNum in range(len(gtRects)):
if gtNum not in gtDontCareRectsNum:
match, matchesDet = one_to_many_match(gtNum)
if match is True:
gtRectMat[gtNum] = 1
recallAccum += evaluationParams['MTYPE_OM_O']
precisionAccum += evaluationParams['MTYPE_OM_O'] * len(matchesDet)
pairs.append({'gt': gtNum, 'det': matchesDet, 'type': 'OM'})
for detNum in matchesDet:
detRectMat[detNum] = 1
evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(matchesDet) + "\n"
# Find many-to-one matches
evaluationLog += "Find many-to-one matches\n"
for detNum in range(len(detRects)):
if detNum not in detDontCareRectsNum:
match, matchesGt = many_to_one_match(detNum)
if match is True:
detRectMat[detNum] = 1
recallAccum += evaluationParams['MTYPE_OM_M'] * len(matchesGt)
precisionAccum += evaluationParams['MTYPE_OM_M']
pairs.append({'gt': matchesGt, 'det': detNum, 'type': 'MO'})
for gtNum in matchesGt:
gtRectMat[gtNum] = 1
evaluationLog += "Match GT #" + str(matchesGt) + " with Det #" + str(detNum) + "\n"
numGtCare = (len(gtRects) - len(gtDontCareRectsNum))
            if numGtCare == 0:
                recall = 1.0
                precision = 0.0 if len(detRects) > 0 else 1.0
            else:
                recall = float(recallAccum) / numGtCare
                numDetCare = len(detRects) - len(detDontCareRectsNum)
                precision = 0.0 if numDetCare == 0 else float(precisionAccum) / numDetCare
hmean = 0 if (precision + recall) == 0 else 2.0 * precision * recall / (precision + recall)
evaluationLog += "Recall = " + str(recall) + "\n"
evaluationLog += "Precision = " + str(precision) + "\n"
methodRecallSum += recallAccum
methodPrecisionSum += precisionAccum
numGt += len(gtRects) - len(gtDontCareRectsNum)
numDet += len(detRects) - len(detDontCareRectsNum)
perSampleMetrics[resFile] = {
'precision': precision,
'recall': recall,
'hmean': hmean,
'pairs': pairs,
'recallMat': [] if len(detRects) > 100 else recallMat.tolist(),
'precisionMat': [] if len(detRects) > 100 else precisionMat.tolist(),
'gtPolPoints': gtPolPoints,
'detPolPoints': detPolPoints,
'gtDontCare': gtDontCareRectsNum,
'detDontCare': detDontCareRectsNum,
'evaluationParams': evaluationParams,
'evaluationLog': evaluationLog
}
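    # Dataset-level (method) metrics are micro-averaged: accumulated match
    # credits are divided by the totals of care GT boxes and care detections
    # over all samples, rather than averaging the per-sample values.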
methodRecall = 0 if numGt == 0 else methodRecallSum / numGt
methodPrecision = 0 if numDet == 0 else methodPrecisionSum / numDet
methodHmean = 0 if methodRecall + methodPrecision == 0 else 2 * methodRecall * methodPrecision / (
methodRecall + methodPrecision)
methodMetrics = {'precision': methodPrecision, 'recall': methodRecall, 'hmean': methodHmean}
resDict = {'calculated': True, 'Message': '', 'method': methodMetrics, 'per_sample': perSampleMetrics}
return resDict
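# Minimal usage sketch (an assumption, not part of the original file): the
# standard RRC evaluation scripts expose rrc_evaluation_funcs.main_evaluation,
# which wires the three callbacks above together and reads the ground-truth
# and submission zip paths from the -g and -s command-line arguments.
if __name__ == '__main__':
    rrc_evaluation_funcs.main_evaluation(None, default_evaluation_params, validate_data, evaluate_method)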