Coverage for bob/ip/common/test/test_significance.py: 17% (168 statements)
coverage.py v7.0.5, created at 2023-01-17 15:03 +0000

#!/usr/bin/env python
# coding=utf-8

"""Tests for significance tools"""


import numpy
import pytest
import torch

from ...binseg.engine.significance import (
    PERFORMANCE_FIGURES,
    _performance_summary,
    _winperf_measures,
)
from ..utils.measure import base_measures
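# Note (assumption, for readability): ``base_measures(tp, fp, tn, fn)`` is
# expected to return one scalar per entry of ``PERFORMANCE_FIGURES`` (e.g.
# precision, recall, specificity, accuracy, jaccard, f1-score), which is why
# the helpers below reshape the expected values to
# ``(len(PERFORMANCE_FIGURES),) + <window grid shape>``.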

def _check_window_measures(pred, gt, mask, threshold, size, stride, expected):
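    """Checks ``_winperf_measures()`` against hand-computed expectations.

    ``expected`` is a 2D grid with one entry per sliding window, each entry a
    ``(tp, fp, tn, fn)`` tuple; this helper converts every tuple through
    ``base_measures()`` before comparing against the actual output.
    """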

    pred = torch.tensor(pred)
    gt = torch.tensor(gt)
    if mask is None:
        mask = torch.ones_like(gt)
    actual = _winperf_measures(pred, gt, mask, threshold, size, stride)

    # transform each expected (tp, fp, tn, fn) tuple through base_measures()
    expected_shape = numpy.array(expected).shape[:2]
    expected = numpy.array([base_measures(*c) for r in expected for c in r]).T
    expected = expected.reshape((len(PERFORMANCE_FIGURES),) + expected_shape)

    assert numpy.allclose(
        actual, expected
    ), f"Actual output:\n{actual}\n **!=** Expected output:\n{expected}"
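# Window-grid geometry assumed by the ``expected`` grids below (a sketch of
# how the test data was laid out, not a statement about the library API):
# with ceil-mode padding, an HxW image scanned with windows of ``size`` and
# ``stride`` yields a grid of
#
#   n_rows = ceil((H - size[0]) / stride[0]) + 1
#   n_cols = ceil((W - size[1]) / stride[1]) + 1
#
# windows, e.g. a 4x4 image with 2x2 windows and stride 1 gives a 3x3 grid,
# while a 3x3 image with 2x2 windows and stride 2 gives a 2x2 grid (the last
# windows being padded).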

@pytest.mark.skip
def test_winperf_measures_alltrue():

    pred = numpy.ones((4, 4), dtype=float)
    gt = numpy.ones((4, 4), dtype=bool)
    mask = None
    threshold = 0.5
    size = (2, 2)
    stride = (1, 1)

    expected = [
        # tp, fp, tn, fn
        [(4, 0, 0, 0), (4, 0, 0, 0), (4, 0, 0, 0)],
        [(4, 0, 0, 0), (4, 0, 0, 0), (4, 0, 0, 0)],
        [(4, 0, 0, 0), (4, 0, 0, 0), (4, 0, 0, 0)],
    ]
    _check_window_measures(pred, gt, mask, threshold, size, stride, expected)

@pytest.mark.skip
def test_winperf_measures_alltrue_with_padding():

    pred = numpy.ones((3, 3), dtype=float)
    gt = numpy.ones((3, 3), dtype=bool)
    mask = None
    threshold = 0.5
    size = (2, 2)
    stride = (2, 2)

    expected = [
        # tp, fp, tn, fn
        [(4, 0, 0, 0), (2, 0, 2, 0)],
        [(2, 0, 2, 0), (1, 0, 3, 0)],
    ]
    _check_window_measures(pred, gt, mask, threshold, size, stride, expected)

@pytest.mark.skip
def test_winperf_measures_dot_with_padding():

    pred = numpy.ones((3, 3), dtype=float)
    gt = numpy.zeros((3, 3), dtype=bool)
    gt[1, 1] = 1.0  # white dot pattern
    mask = None
    threshold = 0.5
    size = (2, 2)
    stride = (2, 2)

    expected = [
        # tp, fp, tn, fn
        [(1, 3, 0, 0), (0, 2, 2, 0)],
        [(0, 2, 2, 0), (0, 1, 3, 0)],
    ]
    _check_window_measures(pred, gt, mask, threshold, size, stride, expected)

@pytest.mark.skip
def test_winperf_measures_cross():

    pred = numpy.zeros((5, 5), dtype=float)
    pred[2, :] = 1.0
    pred[:, 2] = 1.0
    pred[2, 2] = 0.0  # make one mistake at the center of the cross
    gt = numpy.zeros((5, 5), dtype=bool)
    gt[2, :] = 1.0
    gt[:, 2] = 1.0  # white cross pattern
    mask = None
    threshold = 0.5
    size = (3, 3)
    stride = (1, 1)

    expected = [
        # tp, fp, tn, fn
        [(4, 0, 4, 1), (4, 0, 4, 1), (4, 0, 4, 1)],
        [(4, 0, 4, 1), (4, 0, 4, 1), (4, 0, 4, 1)],
        [(4, 0, 4, 1), (4, 0, 4, 1), (4, 0, 4, 1)],
    ]
    _check_window_measures(pred, gt, mask, threshold, size, stride, expected)

@pytest.mark.skip
def test_winperf_measures_cross_with_padding():

    pred = numpy.zeros((5, 5), dtype=float)
    gt = numpy.zeros((5, 5), dtype=bool)
    gt[2, :] = 1.0
    gt[:, 2] = 1.0  # white cross pattern
    mask = None
    threshold = 0.5
    size = (4, 4)
    stride = (2, 2)

    expected = [
        # tp, fp, tn, fn
        [(0, 0, 9, 7), (0, 0, 10, 6)],
        [(0, 0, 10, 6), (0, 0, 11, 5)],
    ]
    _check_window_measures(pred, gt, mask, threshold, size, stride, expected)

@pytest.mark.skip
def test_winperf_measures_cross_with_padding_2():

    pred = numpy.zeros((5, 5), dtype=float)
    pred[2, :] = 1.0
    pred[:, 2] = 1.0
    pred[2, 2] = 0.0  # make one mistake at the center of the cross
    gt = numpy.zeros((5, 5), dtype=bool)
    gt[2, :] = 1.0
    gt[:, 2] = 1.0  # white cross pattern
    mask = None
    threshold = 0.5
    size = (4, 4)
    stride = (2, 2)

    expected = [
        # tp, fp, tn, fn
        [(6, 0, 9, 1), (5, 0, 10, 1)],
        [(5, 0, 10, 1), (4, 0, 11, 1)],
    ]
    _check_window_measures(pred, gt, mask, threshold, size, stride, expected)

def _check_performance_summary(
    pred, gt, mask, threshold, size, stride, s, figure
):
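    """Checks ``_performance_summary()`` against hand-computed expectations.

    ``s`` lists, for every pixel of the original image, the window indexes
    (row, column) whose measures should be accumulated for that pixel; the
    per-pixel count, average and sample standard deviation are recomputed
    here and compared against the actual output.
    """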

    figsize = pred.shape
    pred = torch.tensor(pred)
    gt = torch.tensor(gt)
    if mask is None:
        mask = torch.ones_like(gt)

    # notice _winperf_measures() was previously tested (above)
    measures = _winperf_measures(pred, gt, mask, threshold, size, stride)

    n_actual, avg_actual, std_actual = _performance_summary(
        figsize, measures, size, stride, figure
    )

    n_expected = numpy.zeros_like(n_actual)
    avg_expected = numpy.zeros_like(avg_actual)
    std_expected = numpy.zeros_like(std_actual)
    figindex = PERFORMANCE_FIGURES.index(figure)
    for y, row in enumerate(s):
        for x, cell in enumerate(row):
            n_expected[y, x] = len(cell)
            entries = tuple(numpy.array(cell).T)  # convert indexing to numpy
            avg_expected[y, x] = measures[figindex][entries].mean()
            if len(cell) == 1:
                std_expected[y, x] = 0
            else:
                std_expected[y, x] = measures[figindex][entries].std(ddof=1)

    assert (n_actual == n_expected).all(), (
        f"Actual N output:\n{n_actual}\n "
        f"**!=** Expected N output:\n{n_expected}"
    )

    assert numpy.allclose(avg_actual, avg_expected), (
        f"Actual average output:\n{avg_actual}\n "
        f"**!=** Expected average output:\n{avg_expected}"
    )

    assert numpy.allclose(std_actual, std_expected), (
        f"Actual std.deviation output:\n{std_actual}\n "
        f"**!=** Expected std.deviation output:\n{std_expected}"
    )
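# A concrete reading of the ``stats`` grids below (derived from the test data,
# not from the library): with a 4x4 image, 2x2 windows and stride 1, pixel
# (1, 1) is covered by the windows starting at (0, 0), (0, 1), (1, 0) and
# (1, 1); its expected N is therefore 4, and its expected average/std are
# computed over those four windows' performance figures.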

@pytest.mark.skip
def test_performance_summary_alltrue_accuracy():

    pred = numpy.ones((4, 4), dtype=float)
    gt = numpy.ones((4, 4), dtype=bool)
    mask = None
    threshold = 0.5
    size = (2, 2)
    stride = (1, 1)

    # What we expect to happen when statistics are accumulated: each entry
    # lists the indexes in ``measures`` that need to be accumulated for that
    # particular pixel of the original image.
    stats = [
        # first row of image
        [[(0, 0)], [(0, 0), (0, 1)], [(0, 1), (0, 2)], [(0, 2)]],
        # second row of image
        [
            [(0, 0), (1, 0)],
            [(0, 0), (0, 1), (1, 0), (1, 1)],
            [(0, 1), (0, 2), (1, 1), (1, 2)],
            [(0, 2), (1, 2)],
        ],
        # third row of image
        [
            [(1, 0), (2, 0)],
            [(1, 0), (1, 1), (2, 0), (2, 1)],
            [(1, 1), (1, 2), (2, 1), (2, 2)],
            [(1, 2), (2, 2)],
        ],
        # fourth row of image
        [[(2, 0)], [(2, 0), (2, 1)], [(2, 1), (2, 2)], [(2, 2)]],
    ]

    for fig in PERFORMANCE_FIGURES:
        _check_performance_summary(
            pred,
            gt,
            mask,
            threshold,
            size,
            stride,
            stats,
            fig,
        )

@pytest.mark.skip
def test_performance_summary_cross():

    pred = numpy.zeros((5, 5), dtype=float)
    pred[2, :] = 1.0
    pred[:, 2] = 1.0
    pred[2, 2] = 0.0  # make one mistake at the center of the cross
    gt = numpy.zeros((5, 5), dtype=bool)
    gt[2, :] = 1.0
    gt[:, 2] = 1.0  # white cross pattern
    mask = None
    threshold = 0.5
    size = (3, 3)
    stride = (1, 1)

    # What we expect to happen when statistics are accumulated: each entry
    # lists the indexes in ``measures`` that need to be accumulated for that
    # particular pixel of the original image.
    stats = [
        # first row of image
        [
            [(0, 0)],
            [(0, 0), (0, 1)],
            [(0, 0), (0, 1), (0, 2)],
            [(0, 1), (0, 2)],
            [(0, 2)],
        ],
        # second row of image
        [
            [(0, 0), (1, 0)],
            [(0, 0), (0, 1), (1, 0), (1, 1)],
            [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)],
            [(0, 1), (0, 2), (1, 1), (1, 2)],
            [(0, 2), (1, 2)],
        ],
        # third row of image
        [
            [(0, 0), (1, 0), (2, 0)],
            [(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)],
            [
                (0, 0),
                (0, 1),
                (0, 2),
                (1, 0),
                (1, 1),
                (1, 2),
                (2, 0),
                (2, 1),
                (2, 2),
            ],
            [(0, 1), (0, 2), (1, 1), (1, 2), (2, 1), (2, 2)],
            [(0, 2), (1, 2), (2, 2)],
        ],
        # fourth row of image
        [
            [(1, 0), (2, 0)],
            [(1, 0), (1, 1), (2, 0), (2, 1)],
            [(1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)],
            [(1, 1), (1, 2), (2, 1), (2, 2)],
            [(1, 2), (2, 2)],
        ],
        # fifth row of image
        [
            [(2, 0)],
            [(2, 0), (2, 1)],
            [(2, 0), (2, 1), (2, 2)],
            [(2, 1), (2, 2)],
            [(2, 2)],
        ],
    ]

    for fig in PERFORMANCE_FIGURES:
        _check_performance_summary(
            pred,
            gt,
            mask,
            threshold,
            size,
            stride,
            stats,
            fig,
        )

@pytest.mark.skip
def test_performance_summary_cross_with_padding():

    pred = numpy.zeros((5, 5), dtype=float)
    gt = numpy.zeros((5, 5), dtype=bool)
    gt[2, :] = 1.0
    gt[:, 2] = 1.0  # white cross pattern
    mask = None
    threshold = 0.5
    size = (4, 4)
    stride = (2, 2)

    # What we expect to happen when statistics are accumulated: each entry
    # lists the indexes in ``measures`` that need to be accumulated for that
    # particular pixel of the original image.
    stats = [
        # first row of image
        [[(0, 0)], [(0, 0)], [(0, 0), (0, 1)], [(0, 0), (0, 1)], [(0, 1)]],
        # second row of image
        [[(0, 0)], [(0, 0)], [(0, 0), (0, 1)], [(0, 0), (0, 1)], [(0, 1)]],
        # third row of image
        [
            [(0, 0), (1, 0)],
            [(0, 0), (1, 0)],
            [(0, 0), (0, 1), (1, 0), (1, 1)],
            [(0, 0), (0, 1), (1, 0), (1, 1)],
            [(0, 1), (1, 1)],
        ],
        # fourth row of image
        [
            [(0, 0), (1, 0)],
            [(0, 0), (1, 0)],
            [(0, 0), (0, 1), (1, 0), (1, 1)],
            [(0, 0), (0, 1), (1, 0), (1, 1)],
            [(0, 1), (1, 1)],
        ],
        # fifth row of image
        [[(1, 0)], [(1, 0)], [(1, 0), (1, 1)], [(1, 0), (1, 1)], [(1, 1)]],
    ]

    for fig in PERFORMANCE_FIGURES:
        _check_performance_summary(
            pred,
            gt,
            mask,
            threshold,
            size,
            stride,
            stats,
            fig,
        )

@pytest.mark.skip
def test_performance_summary_cross_with_padding_2():

    pred = numpy.zeros((5, 5), dtype=float)
    pred[2, :] = 1.0
    pred[:, 2] = 1.0
    pred[2, 2] = 0.0  # make one mistake at the center of the cross
    gt = numpy.zeros((5, 5), dtype=bool)
    gt[2, :] = 1.0
    gt[:, 2] = 1.0  # white cross pattern
    mask = None
    threshold = 0.5
    size = (4, 4)
    stride = (2, 2)

    # What we expect to happen when statistics are accumulated: each entry
    # lists the indexes in ``measures`` that need to be accumulated for that
    # particular pixel of the original image.
    stats = [
        # first row of image
        [[(0, 0)], [(0, 0)], [(0, 0), (0, 1)], [(0, 0), (0, 1)], [(0, 1)]],
        # second row of image
        [[(0, 0)], [(0, 0)], [(0, 0), (0, 1)], [(0, 0), (0, 1)], [(0, 1)]],
        # third row of image
        [
            [(0, 0), (1, 0)],
            [(0, 0), (1, 0)],
            [(0, 0), (0, 1), (1, 0), (1, 1)],
            [(0, 0), (0, 1), (1, 0), (1, 1)],
            [(0, 1), (1, 1)],
        ],
        # fourth row of image
        [
            [(0, 0), (1, 0)],
            [(0, 0), (1, 0)],
            [(0, 0), (0, 1), (1, 0), (1, 1)],
            [(0, 0), (0, 1), (1, 0), (1, 1)],
            [(0, 1), (1, 1)],
        ],
        # fifth row of image
        [[(1, 0)], [(1, 0)], [(1, 0), (1, 1)], [(1, 0), (1, 1)], [(1, 1)]],
    ]

    for fig in PERFORMANCE_FIGURES:
        _check_performance_summary(
            pred,
            gt,
            mask,
            threshold,
            size,
            stride,
            stats,
            fig,
        )