path: root/apps/CameraITS/tests/scene2_d/test_autoframing.py
# Copyright 2023 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Verifies that autoframing can adjust fov to include all faces with different
skin tones."""


import logging
import os.path

from mobly import test_runner

import its_base_test
import camera_properties_utils
import capture_request_utils
import image_processing_utils
import its_session_utils
import opencv_processing_utils

_AUTOFRAMING_CONVERGED = 2  # android.control.autoframingState CONVERGED value
_CV2_FACE_SCALE_FACTOR = 1.05  # 5% step for resizing image to find face
_CV2_FACE_MIN_NEIGHBORS = 4  # recommended 3-6; higher detects fewer faces
_NAME = os.path.splitext(os.path.basename(__file__))[0]
_NUM_TEST_FRAMES = 150
_NUM_FACES = 3
_W, _H = 640, 480  # YUV capture width and height in pixels


class AutoframingTest(its_base_test.ItsBaseTest):
  """Test autoframing for faces with different skin tones."""

  def test_autoframing(self):
    """Test if fov gets adjusted to accommodate all the faces in the frame.

    Do a large zoom on scene2_a using do_3a so that none of that faces are
    visible initially, trigger autoframing, wait for the state to converge and
    make sure all the faces are found.
    """
    with its_session_utils.ItsSession(
        device_id=self.dut.serial,
        camera_id=self.camera_id,
        hidden_physical_id=self.hidden_physical_id) as cam:
      props = cam.get_camera_properties()
      props = cam.override_with_hidden_physical_camera_props(props)

      # Load chart for scene
      its_session_utils.load_scene(
          cam, props, self.scene, self.tablet, self.chart_distance,
          log_path=self.log_path)

      # Check SKIP conditions
      # Don't run the test if face detection or autoframing is not supported
      camera_properties_utils.skip_unless(
          camera_properties_utils.face_detect(props) and
          camera_properties_utils.autoframing(props))

      # Do max-ish zoom with the help of do_3a, keeping all the 'A's off. This
      # zooms into the scene so that none of the faces are in the view
      # initially - which gives room for autoframing to take place.
      max_zoom_ratio = camera_properties_utils.get_max_digital_zoom(props)
      cam.do_3a(zoom_ratio=max_zoom_ratio)

      req = capture_request_utils.auto_capture_request(
          do_autoframing=True, zoom_ratio=max_zoom_ratio)
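      # Enable face detection so face rectangles are reported in the result
      # metadata (1 == STATISTICS_FACE_DETECT_MODE_SIMPLE).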
      req['android.statistics.faceDetectMode'] = 1  # Simple
      fmt = {'format': 'yuv', 'width': _W, 'height': _H}
      caps = cam.do_capture([req]*_NUM_TEST_FRAMES, fmt)
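      # Iterate over the captures until autoframing converges; fail if it does
      # not converge by the last frame.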
      for i, cap in enumerate(caps):
        faces = cap['metadata']['android.statistics.faces']
        autoframing_state = cap['metadata']['android.control.autoframingState']
        logging.debug('Frame %d faces: %d, autoframingState: %d', i, len(faces),
                      autoframing_state)

        # Face detection and autoframing could take several frames to warm up,
        # but should detect the correct number of faces before the last frame
        if autoframing_state == _AUTOFRAMING_CONVERGED:
          # Save image when autoframing state converges
          control_zoom_ratio = cap['metadata']['android.control.zoomRatio']
          logging.debug('Control zoom ratio: %.2f', control_zoom_ratio)
          img = image_processing_utils.convert_capture_to_rgb_image(
              cap, props=props)
          file_name_stem = os.path.join(self.log_path, _NAME)
          img_name = f'{file_name_stem}.jpg'

          # Save images with green boxes around faces
          crop_region = cap['metadata']['android.scaler.cropRegion']
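          # Face rectangles are reported in active-array coordinates; map them
          # into image coordinates using the crop region before drawing boxes.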
          faces_cropped = opencv_processing_utils.correct_faces_for_crop(
              faces, img, crop_region)
          opencv_processing_utils.draw_green_boxes_around_faces(
              img, faces_cropped, img_name)

          num_faces_found = len(faces)
          if num_faces_found != _NUM_FACES:
            raise AssertionError('Wrong number of faces found! Found: '
                                 f'{num_faces_found}, expected: {_NUM_FACES}')

          # Also check the faces with OpenCV to make sure the scene is not
          # distorted.
          opencv_faces = opencv_processing_utils.find_opencv_faces(
              img, _CV2_FACE_SCALE_FACTOR, _CV2_FACE_MIN_NEIGHBORS)
          opencv_processing_utils.match_face_locations(
              faces_cropped, opencv_faces, img, img_name)
          break

        # Autoframing did not converge by the last frame
        elif i == _NUM_TEST_FRAMES - 1:
          raise AssertionError('Autoframing failed to converge')

        logging.debug('Faces: %s', str(faces))


if __name__ == '__main__':
  test_runner.main()