Coverage for src/bob/pad/face/preprocessor/Patch.py: 30%

44 statements  

« prev     ^ index     » next       coverage.py v7.6.5, created at 2024-11-14 23:14 +0100

1from collections import OrderedDict 

2 

3from sklearn.base import BaseEstimator, TransformerMixin 

4 

5from bob.bio.base.annotator.FailSafe import translate_kwargs 

6from bob.bio.video import VideoLikeContainer 

7 

8from ..utils import extract_patches 

9 

10 

class ImagePatches(TransformerMixin, BaseEstimator):
    """Extract fixed-size patches from each input image and return them
    wrapped in a VideoLikeContainer.

    Because the output is video-like, the further blocks (extractor and
    algorithm) that come after this one must be wrapped in bob.bio.video
    wrappers.
    """

    def __init__(
        self, block_size, block_overlap=(0, 0), n_random_patches=None, **kwargs
    ):
        super().__init__(**kwargs)
        # Patch geometry: size and overlap of the sliding blocks, plus an
        # optional cap that switches to random patch sampling.
        self.block_size = block_size
        self.block_overlap = block_overlap
        self.n_random_patches = n_random_patches

    def transform(self, images):
        """Apply patch extraction to every image in the input sequence."""
        return list(map(self.transform_one_image, images))

    def transform_one_image(self, image):
        """Split a single image into patches, indexed 0..n-1."""
        extracted = extract_patches(
            image, self.block_size, self.block_overlap, self.n_random_patches
        )
        return VideoLikeContainer(extracted, range(len(extracted)))

35 

36 

class VideoPatches(TransformerMixin, BaseEstimator):
    """Extract patches from the frames of video containers and return them
    in a VideoLikeContainer.

    Each frame is first preprocessed (by default, face-cropped) using the
    configured ``face_cropper`` before patches are extracted from it.
    """

    def __init__(
        self,
        face_cropper,
        block_size,
        block_overlap=(0, 0),
        n_random_patches=None,
        normalizer=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Per-frame preprocessor applied before patch extraction.
        self.face_cropper = face_cropper
        # Patch geometry and optional random-sampling cap.
        self.block_size = block_size
        self.block_overlap = block_overlap
        self.n_random_patches = n_random_patches
        # Optional callable that rewrites the annotations mapping.
        self.normalizer = normalizer

    def transform(self, videos, annotations=None):
        """Apply patch extraction to each video with its own annotations."""
        per_video_kwargs = translate_kwargs(dict(annotations=annotations), len(videos))
        results = []
        for video, kw in zip(videos, per_video_kwargs):
            results.append(self.transform_one_video(video, **kw))
        return results

    def transform_one_video(self, frames, annotations=None):
        """Crop every frame, extract its patches, and gather them all.

        Returns ``None`` when no frame produced any patch (e.g. every
        crop failed), otherwise a VideoLikeContainer of all patches.
        """
        annotations = annotations or {}
        if self.normalizer is not None:
            annotations = OrderedDict(self.normalizer(annotations))

        collected = []
        for frame, frame_index in zip(frames, frames.indices):
            # Use this frame's annotations when present (keys are the
            # stringified frame indices).
            frame_annotations = annotations.get(str(frame_index))

            # Preprocess the frame (by default: crop a face); frames that
            # fail preprocessing are skipped entirely.
            cropped = self.face_cropper(frame, frame_annotations)
            if cropped is None:
                continue

            collected.extend(
                extract_patches(
                    cropped,
                    self.block_size,
                    self.block_overlap,
                    self.n_random_patches,
                )
            )

        container = VideoLikeContainer(collected, range(len(collected)))

        # An empty result signals the caller that nothing usable was found.
        return container if len(container) else None