summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorXin Li <delphij@google.com>2019-07-01 20:59:53 +0000
committerXin Li <delphij@google.com>2019-07-01 20:59:53 +0000
commitcbea0e7da4f53a1c17e73bf201d17ccb2ce75ecc (patch)
tree4b39e887c11aad4d30d5859f274acb38dd5e7df8
parentc58c13980109262b47f3149a2d3f302d0a589055 (diff)
parent0133ca96adc566f803657c1cc74a5d2e87a81f21 (diff)
downloadlibhardware-cbea0e7da4f53a1c17e73bf201d17ccb2ce75ecc.tar.gz
DO NOT MERGE - Merge qt-dev-plus-aosp-without-vendor (5699924) into stage-aosp-master
Bug: 134405016 Change-Id: I236395150bd85dabab3266f6dcd14a0c8888b725
-rw-r--r--hardware.c26
-rw-r--r--include/hardware/audio.h30
-rw-r--r--include/hardware/camera3.h342
-rw-r--r--include/hardware/camera_common.h304
-rw-r--r--include/hardware/gralloc.h35
-rw-r--r--include/hardware/gralloc1.h2
-rw-r--r--include/hardware/hwcomposer2.h403
-rw-r--r--include/hardware/sound_trigger.h11
-rw-r--r--modules/audio_remote_submix/audio_hw.cpp20
-rw-r--r--modules/camera/3_4/Android.mk1
-rw-r--r--modules/camera/3_4/v4l2_camera_hal.cpp3
-rw-r--r--modules/soundtrigger/sound_trigger_hw.c37
-rw-r--r--modules/usbaudio/audio_hal.c231
-rw-r--r--tests/hardware/struct-offset.cpp14
14 files changed, 1328 insertions, 131 deletions
diff --git a/hardware.c b/hardware.c
index 40ae1d13..6e72ce9f 100644
--- a/hardware.c
+++ b/hardware.c
@@ -24,6 +24,7 @@
#include <errno.h>
#include <limits.h>
#include <stdio.h>
+#include <stdlib.h>
#include <unistd.h>
#define LOG_TAG "HAL"
@@ -148,6 +149,25 @@ static int load(const char *id,
}
/*
+ * Returns true if path resolves to a location strictly inside in_path.
+ */
+static bool path_in_path(const char *path, const char *in_path) {
+ char real_path[PATH_MAX];
+ if (realpath(path, real_path) == NULL) return false;
+
+ char real_in_path[PATH_MAX];
+ if (realpath(in_path, real_in_path) == NULL) return false;
+
+ const size_t real_in_path_len = strlen(real_in_path);
+ if (strncmp(real_path, real_in_path, real_in_path_len) != 0) {
+ return false;
+ }
+
+ return strlen(real_path) > real_in_path_len &&
+ real_path[real_in_path_len] == '/';
+}
+
+/*
 * Check if a HAL with given name and subname exists, if so return 0,
 * otherwise return negative. On success path will contain the path to the HAL.
*/
@@ -156,18 +176,18 @@ static int hw_module_exists(char *path, size_t path_len, const char *name,
{
snprintf(path, path_len, "%s/%s.%s.so",
HAL_LIBRARY_PATH3, name, subname);
- if (access(path, R_OK) == 0)
+ if (path_in_path(path, HAL_LIBRARY_PATH3) && access(path, R_OK) == 0)
return 0;
snprintf(path, path_len, "%s/%s.%s.so",
HAL_LIBRARY_PATH2, name, subname);
- if (access(path, R_OK) == 0)
+ if (path_in_path(path, HAL_LIBRARY_PATH2) && access(path, R_OK) == 0)
return 0;
#ifndef __ANDROID_VNDK__
snprintf(path, path_len, "%s/%s.%s.so",
HAL_LIBRARY_PATH1, name, subname);
- if (access(path, R_OK) == 0)
+ if (path_in_path(path, HAL_LIBRARY_PATH1) && access(path, R_OK) == 0)
return 0;
#endif
diff --git a/include/hardware/audio.h b/include/hardware/audio.h
index 10a8789a..feebd23e 100644
--- a/include/hardware/audio.h
+++ b/include/hardware/audio.h
@@ -550,6 +550,36 @@ struct audio_stream_in {
size_t *mic_count);
/**
+ * Called by the framework to instruct the HAL to optimize the capture stream in the
+ * specified direction.
+ *
+ * \param[in] stream the stream object.
+ * \param[in] direction The direction constant (from audio-base.h)
+ * MIC_DIRECTION_UNSPECIFIED Don't do any directionality processing of the
+ * activated microphone(s).
+ * MIC_DIRECTION_FRONT Optimize capture for audio coming from the screen-side
+ * of the device.
+ * MIC_DIRECTION_BACK Optimize capture for audio coming from the side of the
+ * device opposite the screen.
+ * MIC_DIRECTION_EXTERNAL Optimize capture for audio coming from an off-device
+ * microphone.
+ * \return OK if the call is successful, an error code otherwise.
+ */
+ int (*set_microphone_direction)(const struct audio_stream_in *stream,
+ audio_microphone_direction_t direction);
+
+ /**
+ * Called by the framework to specify to the HAL the desired zoom factor for the selected
+ * microphone(s).
+ *
+ * \param[in] stream the stream object.
+ * \param[in] zoom the zoom factor.
+ * \return OK if the call is successful, an error code otherwise.
+ */
+ int (*set_microphone_field_dimension)(const struct audio_stream_in *stream,
+ float zoom);
+
+ /**
* Called when the metadata of the stream's sink has been changed.
* @param sink_metadata Description of the audio that is recorded by the clients.
*/
diff --git a/include/hardware/camera3.h b/include/hardware/camera3.h
index c3ea0aa7..7fb86dfb 100644
--- a/include/hardware/camera3.h
+++ b/include/hardware/camera3.h
@@ -21,7 +21,7 @@
#include "camera_common.h"
/**
- * Camera device HAL 3.5[ CAMERA_DEVICE_API_VERSION_3_5 ]
+ * Camera device HAL 3.6[ CAMERA_DEVICE_API_VERSION_3_6 ]
*
* This is the current recommended version of the camera device HAL.
*
@@ -29,7 +29,7 @@
* android.hardware.camera2 API as LIMITED or above hardware level.
*
* Camera devices that support this version of the HAL must return
- * CAMERA_DEVICE_API_VERSION_3_5 in camera_device_t.common.version and in
+ * CAMERA_DEVICE_API_VERSION_3_6 in camera_device_t.common.version and in
* camera_info_t.device_version (from camera_module_t.get_camera_info).
*
* CAMERA_DEVICE_API_VERSION_3_3 and above:
@@ -183,6 +183,25 @@
* for a logical multi camera, the application has the option to specify individual
* settings for a particular physical device.
*
+ * 3.6: Minor revisions to support HAL buffer management APIs:
+ *
+ * - Add ANDROID_INFO_SUPPORTED_BUFFER_MANAGEMENT_VERSION static metadata, which allows HAL to
+ * opt in to the new buffer management APIs described below.
+ *
+ * - Add request_stream_buffers() and return_stream_buffers() to camera3_callback_ops_t for HAL to
+ * request and return output buffers from camera service.
+ *
+ * - Add signal_stream_flush() to camera3_device_ops_t for camera service to notify HAL an
+ * upcoming configure_streams() call requires HAL to return buffers of certain streams.
+ *
+ * - Add CAMERA3_JPEG_APP_SEGMENTS_BLOB_ID to support BLOB with only JPEG apps
+ * segments and thumbnail (without main image bitstream). Camera framework
+ * uses such stream together with a HAL YUV_420_888/IMPLEMENTATION_DEFINED
+ * stream to encode HEIC (ISO/IEC 23008-12) image.
+ *
+ * - Add is_reconfiguration_required() to camera3_device_ops_t to enable HAL to skip or
+ * trigger stream reconfiguration depending on new session parameter values.
+ *
*/
/**
@@ -1313,7 +1332,7 @@
* To avoid excessive amount of noise reduction and insufficient amount of edge enhancement
* being applied to the input buffer, the application can hint the HAL how much effective
* exposure time improvement has been done by the application, then the HAL can adjust the
- * noise reduction and edge enhancement paramters to get best reprocessed image quality.
+ * noise reduction and edge enhancement parameters to get best reprocessed image quality.
* Below tag can be used for this purpose:
* - android.reprocess.effectiveExposureFactor
* The value would be exposure time increase factor applied to the original output image,
@@ -1941,21 +1960,25 @@ typedef struct camera3_stream_buffer_set {
/**
* camera3_jpeg_blob:
*
- * Transport header for compressed JPEG buffers in output streams.
+ * Transport header for compressed JPEG or JPEG_APP_SEGMENTS buffers in output streams.
*
- * To capture JPEG images, a stream is created using the pixel format
+ * To capture JPEG or JPEG_APP_SEGMENTS images, a stream is created using the pixel format
* HAL_PIXEL_FORMAT_BLOB. The buffer size for the stream is calculated by the
- * framework, based on the static metadata field android.jpeg.maxSize. Since
- * compressed JPEG images are of variable size, the HAL needs to include the
- * final size of the compressed image using this structure inside the output
- * stream buffer. The JPEG blob ID field must be set to CAMERA3_JPEG_BLOB_ID.
+ * framework, based on the static metadata field android.jpeg.maxSize for JPEG,
+ * and android.jpeg.maxAppsSegments for JPEG_APP_SEGMENTS.
*
- * Transport header should be at the end of the JPEG output stream buffer. That
+ * Since compressed JPEG/JPEG_APP_SEGMENTS images are of variable size, the HAL needs to
+ * include the final size of the image using this structure inside the output
+ * stream buffer. The JPEG blob ID field must be set to CAMERA3_JPEG_BLOB_ID for
+ * JPEG and CAMERA3_JPEG_APP_SEGMENTS_BLOB_ID for APP segments.
+ *
+ * Transport header should be at the end of the output stream buffer. That
* means the jpeg_blob_id must start at byte[buffer_size -
* sizeof(camera3_jpeg_blob)], where the buffer_size is the size of gralloc buffer.
- * Any HAL using this transport header must account for it in android.jpeg.maxSize
- * The JPEG data itself starts at the beginning of the buffer and should be
- * jpeg_size bytes long.
+ * The blob data itself starts at the beginning of the buffer and should be
+ * jpeg_size bytes long. HAL using this transport header for JPEG must account for
+ * it in android.jpeg.maxSize. For JPEG APP segments, camera framework makes
+ * sure that the output stream buffer is large enough for the transport header.
*/
typedef struct camera3_jpeg_blob {
uint16_t jpeg_blob_id;
@@ -1963,7 +1986,8 @@ typedef struct camera3_jpeg_blob {
} camera3_jpeg_blob_t;
enum {
- CAMERA3_JPEG_BLOB_ID = 0x00FF
+ CAMERA3_JPEG_BLOB_ID = 0x00FF,
+ CAMERA3_JPEG_APP_SEGMENTS_BLOB_ID = 0x0100,
};
/**********************************************************************
@@ -2029,6 +2053,14 @@ typedef enum camera3_error_msg_code {
* available. Subsequent requests are unaffected, and the device remains
* operational. The frame_number field specifies the request for which
* result metadata won't be available.
+ *
+ * >= CAMERA_DEVICE_API_VERSION_3_6:
+ *
+ * In case the result metadata is absent for a logical camera device, then the
+ * error_stream pointer must be set to NULL.
+ * If the result metadata cannot be produced for a physical camera device, then
+ * error_stream must contain a pointer to a respective stream associated with
+ * that physical device.
*/
CAMERA3_MSG_ERROR_RESULT = 3,
@@ -2126,6 +2158,153 @@ typedef struct camera3_notify_msg {
} camera3_notify_msg_t;
+
+/**********************************************************************
+ *
+ * Types definition for request_stream_buffers() callback.
+ *
+ */
+
+/**
+ * camera3_buffer_request_status_t:
+ *
+ * The overall buffer request status returned by request_stream_buffers()
+ */
+typedef enum camera3_buffer_request_status {
+ /**
+ * request_stream_buffers() call succeeded and all requested buffers are
+ * returned.
+ */
+ CAMERA3_BUF_REQ_OK = 0,
+
+ /**
+ * request_stream_buffers() call failed for some streams.
+ * Check per stream status for each returned camera3_stream_buffer_ret_t.
+ */
+ CAMERA3_BUF_REQ_FAILED_PARTIAL = 1,
+
+ /**
+ * request_stream_buffers() call failed for all streams and no buffers are
+ * returned at all. Camera service is about to or is performing
+ * configure_streams() call. HAL must wait until next configure_streams()
+ * call is finished before requesting buffers again.
+ */
+ CAMERA3_BUF_REQ_FAILED_CONFIGURING = 2,
+
+ /**
+ * request_stream_buffers() call failed for all streams and no buffers are
+ * returned at all. Failure due to bad camera3_buffer_request input, eg:
+ * unknown stream or repeated stream in the list of buffer requests.
+ */
+ CAMERA3_BUF_REQ_FAILED_ILLEGAL_ARGUMENTS = 3,
+
+ /**
+ * request_stream_buffers() call failed for all streams and no buffers are
+ * returned at all. This can happen for unknown reasons or a combination
+ * of different failure reasons per stream. For the latter case, caller can
+ * check per stream failure reason returned in camera3_stream_buffer_ret.
+ */
+ CAMERA3_BUF_REQ_FAILED_UNKNOWN = 4,
+
+ /**
+ * Number of buffer request status
+ */
+ CAMERA3_BUF_REQ_NUM_STATUS
+
+} camera3_buffer_request_status_t;
+
+/**
+ * camera3_stream_buffer_req_status_t:
+ *
+ * The per stream buffer request status returned by request_stream_buffers()
+ */
+typedef enum camera3_stream_buffer_req_status {
+ /**
+ * Get buffer succeeds and all requested buffers are returned.
+ */
+ CAMERA3_PS_BUF_REQ_OK = 0,
+
+ /**
+ * Get buffer failed due to timeout waiting for an available buffer. This is
+ * likely due to the client application holding too many buffers, or the
+ * system is under memory pressure.
+ * This is not a fatal error. HAL can try to request buffer for this stream
+ * later. If HAL cannot get a buffer for certain capture request in time
+ * due to this error, HAL can send an ERROR_REQUEST to camera service and
+ * drop processing that request.
+ */
+ CAMERA3_PS_BUF_REQ_NO_BUFFER_AVAILABLE = 1,
+
+ /**
+ * Get buffer failed due to HAL has reached its maxBuffer count. This is not
+ * a fatal error. HAL can try to request buffer for this stream again after
+ * it returns at least one buffer of that stream to camera service.
+ */
+ CAMERA3_PS_BUF_REQ_MAX_BUFFER_EXCEEDED = 2,
+
+ /**
+ * Get buffer failed due to the stream is disconnected by client
+ * application, has been removed, or not recognized by camera service.
+ * This means application is no longer interested in this stream.
+ * Requesting buffer for this stream will never succeed after this error is
+ * returned. HAL must safely return all buffers of this stream after
+ * getting this error. If HAL gets another capture request later targeting
+ * a disconnected stream, HAL must send an ERROR_REQUEST to camera service
+ * and drop processing that request.
+ */
+ CAMERA3_PS_BUF_REQ_STREAM_DISCONNECTED = 3,
+
+ /**
+ * Get buffer failed for unknown reason. This is a fatal error and HAL must
+ * send ERROR_DEVICE to camera service and be ready to be closed.
+ */
+ CAMERA3_PS_BUF_REQ_UNKNOWN_ERROR = 4,
+
+ /**
+ * Number of buffer request status
+ */
+ CAMERA3_PS_BUF_REQ_NUM_STATUS
+} camera3_stream_buffer_req_status_t;
+
+typedef struct camera3_buffer_request {
+ /**
+ * The stream HAL wants to request buffer from
+ */
+ camera3_stream_t *stream;
+
+ /**
+ * The number of buffers HAL requested
+ */
+ uint32_t num_buffers_requested;
+} camera3_buffer_request_t;
+
+typedef struct camera3_stream_buffer_ret {
+ /**
+ * The stream HAL wants to request buffer from
+ */
+ camera3_stream_t *stream;
+
+ /**
+ * The status of buffer request of this stream
+ */
+ camera3_stream_buffer_req_status_t status;
+
+ /**
+ * Number of output buffers returned. Must be 0 when above status is not
+ * CAMERA3_PS_BUF_REQ_OK; otherwise the value must be equal to
+ * num_buffers_requested in the corresponding camera3_buffer_request_t
+ */
+ uint32_t num_output_buffers;
+
+ /**
+ * The returned output buffers for the stream.
+ * Caller of request_stream_buffers() should supply this with enough memory
+ * (num_buffers_requested * sizeof(camera3_stream_buffer_t))
+ */
+ camera3_stream_buffer_t *output_buffers;
+} camera3_stream_buffer_ret_t;
+
+
/**********************************************************************
*
* Capture request/result definitions for the HAL process_capture_request()
@@ -2643,6 +2822,65 @@ typedef struct camera3_callback_ops {
void (*notify)(const struct camera3_callback_ops *,
const camera3_notify_msg_t *msg);
+ /**
+ * request_stream_buffers:
+ *
+ * <= CAMERA_DEVICE_API_VERSION_3_5:
+ *
+ * DO NOT USE: not defined and must be NULL.
+ *
+ * >= CAMERA_DEVICE_API_VERSION_3_6:
+ *
+ * Synchronous callback for HAL to ask for output buffer from camera service.
+ *
+ * This call may be serialized in camera service so it is strongly
+ * recommended to only call this method from one thread.
+ *
+ * When camera device advertises
+ * (android.info.supportedBufferManagementVersion ==
+ * ANDROID_INFO_SUPPORTED_BUFFER_MANAGEMENT_VERSION_HIDL_DEVICE_3_5), HAL
+ * can use this method to request buffers from camera service.
+ *
+ * Caller is responsible for allocating enough memory for returned_buf_reqs
+ * argument (num_buffer_reqs * sizeof(camera3_stream_buffer_ret_t)) bytes
+ * and also the memory for the output_buffers field in each
+ * camera3_stream_buffer_ret_t
+ * (num_buffers_requested * sizeof(camera3_stream_buffer_t)) bytes
+ *
+ * Performance requirements:
+ * This is a blocking call that takes more time with more buffers requested.
+ * HAL should not request large amount of buffers on a latency critical code
+ * path. It is highly recommended to use a dedicated thread to perform
+ * all requestStreamBuffer calls, and adjust the thread priority and/or
+ * timing of making the call in order for buffers to arrive before HAL is
+ * ready to fill the buffer.
+ */
+ camera3_buffer_request_status_t (*request_stream_buffers)(
+ const struct camera3_callback_ops *,
+ uint32_t num_buffer_reqs,
+ const camera3_buffer_request_t *buffer_reqs,
+ /*out*/uint32_t *num_returned_buf_reqs,
+ /*out*/camera3_stream_buffer_ret_t *returned_buf_reqs);
+
+ /**
+ * return_stream_buffers:
+ *
+ * <= CAMERA_DEVICE_API_VERSION_3_5:
+ *
+ * DO NOT USE: not defined and must be NULL.
+ *
+ * >= CAMERA_DEVICE_API_VERSION_3_6:
+ *
+ * Synchronous callback for HAL to return output buffers to camera service.
+ *
+ * If this method is called during a configure_streams() call, it will be
+ * blocked until camera service finishes the ongoing configure_streams() call.
+ */
+ void (*return_stream_buffers)(
+ const struct camera3_callback_ops *,
+ uint32_t num_buffers,
+ const camera3_stream_buffer_t* const* buffers);
+
} camera3_callback_ops_t;
/**********************************************************************
@@ -3221,8 +3459,82 @@ typedef struct camera3_device_ops {
*/
int (*flush)(const struct camera3_device *);
+ /**
+ * signal_stream_flush:
+ *
+ * <= CAMERA_DEVICE_API_VERSION_3_5:
+ *
+ * Not defined and must be NULL
+ *
+ * >= CAMERA_DEVICE_API_VERSION_3_6:
+ *
+ * Signals to the HAL that camera service is about to perform a configure_streams() call
+ * and HAL must return all buffers of designated streams. HAL must finish
+ * inflight requests normally and return all buffers belonging to the
+ * designated streams through process_capture_result() or
+ * return_stream_buffers() API in a timely manner, or camera service will run
+ * into a fatal error.
+ *
+ * Note that this call serves as an optional hint and camera service may
+ * skip calling this if all buffers are already returned.
+ *
+ */
+ void (*signal_stream_flush)(const struct camera3_device*,
+ uint32_t num_streams,
+ const camera3_stream_t* const* streams);
+
+ /**
+ * is_reconfiguration_required:
+ *
+ * <= CAMERA_DEVICE_API_VERSION_3_5:
+ *
+ * Not defined and must be NULL
+ *
+ * >= CAMERA_DEVICE_API_VERSION_3_6:
+ *
+ * Check whether complete stream reconfiguration is required for possible new session
+ * parameter values.
+ *
+ * This method must be called by the camera framework in case the client changes
+ * the value of any advertised session parameters. Depending on the specific values
+ * the HAL can decide whether a complete stream reconfiguration is required. In case
+ * the HAL returns -EINVAL, the camera framework must skip the internal reconfiguration.
+ * In case the HAL returns 0, the framework must reconfigure the streams and pass the
+ * new session parameter values accordingly.
+ * This call may be done by the framework some time before the request with new parameters
+ * is submitted to the HAL, and the request may be cancelled before it ever gets submitted.
+ * Therefore, the HAL must not use this query as an indication to change its behavior in any
+ * way.
+ * ------------------------------------------------------------------------
+ *
+ * Preconditions:
+ *
+ * The framework can call this method at any time after active
+ * session configuration. There must be no impact on the performance of
+ * pending camera requests in any way. In particular there must not be
+ * any glitches or delays during normal camera streaming.
+ *
+ * Performance requirements:
+ * HW and SW camera settings must not be changed and there must not be
+ * a user-visible impact on camera performance.
+ *
+ * @param oldSessionParams The currently applied session parameters.
+ * @param newSessionParams The new session parameters set by client.
+ *
+ * @return Status Status code for the operation, one of:
+ * 0: In case the stream reconfiguration is required
+ *
+ * -EINVAL: In case the stream reconfiguration is not required.
+ *
+ * -ENOSYS: In case the camera device does not support the
+ * reconfiguration query.
+ */
+ int (*is_reconfiguration_required)(const struct camera3_device*,
+ const camera_metadata_t* old_session_params,
+ const camera_metadata_t* new_session_params);
+
/* reserved for future use */
- void *reserved[8];
+ void *reserved[6];
} camera3_device_ops_t;
/**********************************************************************
diff --git a/include/hardware/camera_common.h b/include/hardware/camera_common.h
index edd1ada3..16651a94 100644
--- a/include/hardware/camera_common.h
+++ b/include/hardware/camera_common.h
@@ -117,6 +117,22 @@ __BEGIN_DECLS
* 4. Module initialization method. This will be called by the camera service
* right after the HAL module is loaded, to allow for one-time initialization
* of the HAL. It is called before any other module methods are invoked.
+ *
+ *******************************************************************************
+ * Version: 2.5 [CAMERA_MODULE_API_VERSION_2_5]
+ *
+ * This camera module version adds below API changes:
+ *
+ * 1. Support to query characteristics of a non-standalone physical camera, which can
+ * only be accessed as part of a logical camera. It also adds camera stream combination
+ * query.
+ *
+ * 2. Ability to query whether a particular camera stream combination is
+ * supported by the camera device.
+ *
+ * 3. Device state change notification. This module version also supports
+ * notification about the overall device state change, such as
+ * folding/unfolding, or covering/uncovering of shutter.
*/
/**
@@ -133,8 +149,9 @@ __BEGIN_DECLS
#define CAMERA_MODULE_API_VERSION_2_2 HARDWARE_MODULE_API_VERSION(2, 2)
#define CAMERA_MODULE_API_VERSION_2_3 HARDWARE_MODULE_API_VERSION(2, 3)
#define CAMERA_MODULE_API_VERSION_2_4 HARDWARE_MODULE_API_VERSION(2, 4)
+#define CAMERA_MODULE_API_VERSION_2_5 HARDWARE_MODULE_API_VERSION(2, 5)
-#define CAMERA_MODULE_API_VERSION_CURRENT CAMERA_MODULE_API_VERSION_2_4
+#define CAMERA_MODULE_API_VERSION_CURRENT CAMERA_MODULE_API_VERSION_2_5
/**
* All device versions <= HARDWARE_DEVICE_API_VERSION(1, 0xFF) must be treated
@@ -149,6 +166,7 @@ __BEGIN_DECLS
#define CAMERA_DEVICE_API_VERSION_3_3 HARDWARE_DEVICE_API_VERSION(3, 3)
#define CAMERA_DEVICE_API_VERSION_3_4 HARDWARE_DEVICE_API_VERSION(3, 4)
#define CAMERA_DEVICE_API_VERSION_3_5 HARDWARE_DEVICE_API_VERSION(3, 5)
+#define CAMERA_DEVICE_API_VERSION_3_6 HARDWARE_DEVICE_API_VERSION(3, 6)
// Device version 3.5 is current, older HAL camera device versions are not
// recommended for new devices.
@@ -644,6 +662,192 @@ typedef struct camera_module_callbacks {
} camera_module_callbacks_t;
+/**
+ * camera_stream_t:
+ *
+ * A handle to a single camera input or output stream. A stream is defined by
+ * the framework by its buffer resolution and format and gralloc usage flags.
+ *
+ * The stream structures are owned by the framework and pointers to a
+ * camera_stream passed into the HAL by is_stream_combination_supported() are
+ * only valid within the scope of the call.
+ *
+ * All camera_stream members are immutable.
+ */
+typedef struct camera_stream {
+ /**
+ * The type of the stream, one of the camera3_stream_type_t values.
+ */
+ int stream_type;
+
+ /**
+ * The width in pixels of the buffers in this stream
+ */
+ uint32_t width;
+
+ /**
+ * The height in pixels of the buffers in this stream
+ */
+ uint32_t height;
+
+ /**
+ * The pixel format for the buffers in this stream. Format is a value from
+ * the HAL_PIXEL_FORMAT_* list in system/core/include/system/graphics.h, or
+ * from device-specific headers.
+ *
+ * If HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED is used, then the platform
+ * gralloc module will select a format based on the usage flags provided by
+ * the camera device and the other endpoint of the stream.
+ *
+ */
+ int format;
+
+ /**
+ * The gralloc usage flags for this stream, as needed by the HAL. The usage
+ * flags are defined in gralloc.h (GRALLOC_USAGE_*), or in device-specific
+ * headers.
+ *
+ * For output streams, these are the HAL's producer usage flags. For input
+ * streams, these are the HAL's consumer usage flags. The usage flags from
+ * the producer and the consumer will be combined together and then passed
+ * to the platform gralloc HAL module for allocating the gralloc buffers for
+ * each stream.
+ *
+ * The usage flag for an output stream may be bitwise
+ * combination of usage flags for multiple consumers, for the purpose of
+ * sharing one camera stream between those consumers. The HAL must fail
+ * the stream combination query call with -EINVAL if the combined flags cannot be
+ * supported due to incompatible buffer format, dataSpace, or other hardware
+ * limitations.
+ */
+ uint32_t usage;
+
+ /**
+ * A field that describes the contents of the buffer. The format and buffer
+ * dimensions define the memory layout and structure of the stream buffers,
+ * while dataSpace defines the meaning of the data within the buffer.
+ *
+ * For most formats, dataSpace defines the color space of the image data.
+ * In addition, for some formats, dataSpace indicates whether image- or
+ * depth-based data is requested. See system/core/include/system/graphics.h
+ * for details of formats and valid dataSpace values for each format.
+ *
+ * Always set by the camera service. The dataspace values are set
+ * using the V0 dataspace definitions in graphics.h
+ */
+ android_dataspace_t data_space;
+
+ /**
+ * The required output rotation of the stream, one of
+ * the camera3_stream_rotation_t values. This must be inspected by HAL along
+ * with stream width and height. For example, if the rotation is 90 degree
+ * and the stream width and height is 720 and 1280 respectively, camera service
+ * will supply buffers of size 720x1280, and HAL should capture a 1280x720 image
+ * and rotate the image by 90 degree counterclockwise. The rotation field is
+ * no-op when the stream type is input. Camera HAL must ignore the rotation
+ * field for an input stream.
+ *
+ * Always set by camera service. HAL must inspect this field during stream
+ * combination query and return -EINVAL if it cannot perform such rotation.
+ * HAL must always support CAMERA3_STREAM_ROTATION_0, so a
+ * is_stream_combination_supported() call must not fail for unsupported rotation if
+ * rotation field of all streams is CAMERA3_STREAM_ROTATION_0.
+ *
+ */
+ int rotation;
+
+ /**
+ * The physical camera id this stream belongs to.
+ * Always set by camera service. If the camera device is not a logical
+ * multi camera, or if the camera is a logical multi camera but the stream
+ * is not a physical output stream, this field will point to a 0-length
+ * string.
+ *
+ * A logical multi camera is a camera device backed by multiple physical
+ * cameras that are also exposed to the application. And for a logical
+ * multi camera, a physical output stream is an output stream specifically
+ * requested on an underlying physical camera.
+ *
+ * For an input stream, this field is guaranteed to be a 0-length string.
+ */
+ const char* physical_camera_id;
+
+} camera_stream_t;
+
+/**
+ * camera_stream_combination_t:
+ *
+ * A structure of stream definitions, used by is_stream_combination_supported(). This
+ * structure defines all the input & output streams for specific camera use case.
+ */
+typedef struct camera_stream_combination {
+ /**
+ * The total number of streams by the framework. This includes
+ * both input and output streams. The number of streams will be at least 1,
+ * and there will be at least one output-capable stream.
+ */
+ uint32_t num_streams;
+
+ /**
+ * An array of camera streams, defining the input/output
+ * stream combination for the camera HAL device.
+ *
+ * At most one input-capable stream may be defined.
+ *
+ * At least one output-capable stream must be defined.
+ */
+ camera_stream_t *streams;
+
+ /**
+ * The operation mode of streams in this stream combination, one of the value
+ * defined in camera3_stream_configuration_mode_t.
+ *
+ */
+ uint32_t operation_mode;
+
+} camera_stream_combination_t;
+
+/**
+ * device_state_t:
+ *
+ * Possible physical states of the overall device, for use with
+ * notify_device_state_change.
+ */
+typedef enum device_state {
+ /**
+ * The device is in its normal physical configuration. This is the default if the
+ * device does not support multiple different states.
+ */
+ NORMAL = 0,
+
+ /**
+ * Camera device(s) facing backward are covered.
+ */
+ BACK_COVERED = 1 << 0,
+
+ /**
+ * Camera device(s) facing forward are covered.
+ */
+ FRONT_COVERED = 1 << 1,
+
+ /**
+ * The device is folded. If not set, the device is unfolded or does not
+ * support folding.
+ *
+ * The exact point when this status change happens during the folding
+ * operation is device-specific.
+ */
+ FOLDED = 1 << 2,
+
+ /**
+ * First vendor-specific device state. All bits above and including this one
+ * are for vendor state values. Values below this one must only be used
+ * for framework-defined states.
+ */
+ VENDOR_STATE_START = 1LL << 32
+
+} device_state_t;
+
typedef struct camera_module {
/**
* Common methods of the camera module. This *must* be the first member of
@@ -909,8 +1113,104 @@ typedef struct camera_module {
*/
int (*init)();
+ /**
+ * get_physical_camera_info:
+ *
+ * Return the static metadata for a physical camera as a part of a logical
+ * camera device. This function is only called for those physical camera
+ * ID(s) that are not exposed independently. In other words, camera_id will
+ * be greater or equal to the return value of get_number_of_cameras().
+ *
+ * Return values:
+ *
+ * 0: On a successful operation
+ *
+ * -ENODEV: The information cannot be provided due to an internal
+ * error.
+ *
+ * -EINVAL: The input arguments are invalid, i.e. the id is invalid,
+ * and/or the module is invalid.
+ *
+ * Version information (based on camera_module_t.common.module_api_version):
+ *
+ * CAMERA_MODULE_API_VERSION_1_x/2_0/2_1/2_2/2_3/2_4:
+ * Not provided by HAL module. Framework will not call this function.
+ *
+ * CAMERA_MODULE_API_VERSION_2_5 or higher:
+ * If any of the camera devices accessible through this camera module is
+ * a logical multi-camera, and at least one of the physical cameras isn't
+ * a stand-alone camera device, this function will be called by the camera
+ * framework. Calling this function with invalid physical_camera_id will
+ * get -EINVAL, and NULL static_metadata.
+ */
+ int (*get_physical_camera_info)(int physical_camera_id,
+ camera_metadata_t **static_metadata);
+
+ /**
+ * is_stream_combination_supported:
+ *
+ * Check for device support of specific camera stream combination.
+ *
+ * Return values:
+ *
+ * 0: In case the stream combination is supported.
+ *
+ * -EINVAL: In case the stream combination is not supported.
+ *
+ * -ENOSYS: In case stream combination query is not supported.
+ *
+ * Version information (based on camera_module_t.common.module_api_version):
+ *
+ * CAMERA_MODULE_API_VERSION_1_x/2_0/2_1/2_2/2_3/2_4:
+ * Not provided by HAL module. Framework will not call this function.
+ *
+ * CAMERA_MODULE_API_VERSION_2_5 or higher:
+ * Valid to be called by the framework.
+ */
+ int (*is_stream_combination_supported)(int camera_id,
+ const camera_stream_combination_t *streams);
+
+ /**
+ * notify_device_state_change:
+ *
+ * Notify the camera module that the state of the overall device has
+ * changed in some way that the HAL may want to know about.
+ *
+ * For example, a physical shutter may have been uncovered or covered,
+ * or a camera may have been covered or uncovered by an add-on keyboard
+ * or other accessory.
+ *
+ * The state is a bitfield of potential states, and some physical configurations
+ * could plausibly correspond to multiple different combinations of state bits.
+ * The HAL must ignore any state bits it is not actively using to determine
+ * the appropriate camera configuration.
+ *
+ * For example, on some devices the FOLDED state could mean that
+ * backward-facing cameras are covered by the fold, so FOLDED by itself implies
+ * BACK_COVERED. But other devices may support folding but not cover any cameras
+ * when folded, so for those FOLDED would not imply any of the other flags.
+ * Since these relationships are very device-specific, it is difficult to specify
+ * a comprehensive policy. But as a recommendation, it is suggested that if a flag
+ * necessarily implies other flags are set as well, then those flags should be set.
+ * So even though FOLDED would be enough to infer BACK_COVERED on some devices, the
+ * BACK_COVERED flag should also be set for clarity.
+ *
+ * This method may be invoked by the HAL client at any time. It must not
+ * cause any active camera device sessions to be closed, but may dynamically
+ * change which physical camera a logical multi-camera is using for its
+ * active and future output.
+ *
+ * The method must be invoked by the HAL client at least once before the
+ * client calls ICameraDevice::open on any camera device interfaces listed
+ * by this provider, to establish the initial device state.
+ *
+ * Note that the deviceState is 64-bit bitmask, with system defined states in
+ * lower 32-bit and vendor defined states in upper 32-bit.
+ */
+ void (*notify_device_state_change)(uint64_t deviceState);
+
/* reserved for future use */
- void* reserved[5];
+ void* reserved[2];
} camera_module_t;
__END_DECLS
diff --git a/include/hardware/gralloc.h b/include/hardware/gralloc.h
index 5dafea02..10a153c0 100644
--- a/include/hardware/gralloc.h
+++ b/include/hardware/gralloc.h
@@ -131,6 +131,9 @@ enum {
* handle this flag. */
GRALLOC_USAGE_FOREIGN_BUFFERS = 0x00200000U,
+ /* buffer will be used as input to HW HEIC image encoder */
+ GRALLOC_USAGE_HW_IMAGE_ENCODER = 0x08000000U,
+
/* Mask of all flags which could be passed to a gralloc module for buffer
* allocation. Any flags not in this mask do not need to be handled by
* gralloc modules. */
@@ -311,8 +314,38 @@ typedef struct gralloc_module_t {
int l, int t, int w, int h,
struct android_ycbcr *ycbcr, int fenceFd);
+ /* getTransportSize(..., outNumFds, outNumInts)
+ * This function is mandatory on devices running IMapper2.1 or higher.
+ *
+ * Get the transport size of a buffer. An imported buffer handle is a raw
+ * buffer handle with the process-local runtime data appended. This
+ * function, for example, allows a caller to omit the process-local
+ * runtime data at the tail when serializing the imported buffer handle.
+ *
+ * Note that a client might or might not omit the process-local runtime
+ * data when sending an imported buffer handle. The mapper must support
+ * both cases on the receiving end.
+ */
+ int32_t (*getTransportSize)(
+ struct gralloc_module_t const* module, buffer_handle_t handle, uint32_t *outNumFds,
+ uint32_t *outNumInts);
+
+ /* validateBufferSize(..., w, h, format, usage, stride)
+ * This function is mandatory on devices running IMapper2.1 or higher.
+ *
+ * Validate that the buffer can be safely accessed by a caller who assumes
+ * the specified width, height, format, usage, and stride. This must at least validate
+ * that the buffer size is large enough. Validating the buffer against
+ * individual buffer attributes is optional.
+ */
+ int32_t (*validateBufferSize)(
+ struct gralloc_module_t const* device, buffer_handle_t handle,
+ uint32_t w, uint32_t h, int32_t format, int usage,
+ uint32_t stride);
+
/* reserved for future use */
- void* reserved_proc[3];
+ void* reserved_proc[1];
+
} gralloc_module_t;
/*****************************************************************************/
diff --git a/include/hardware/gralloc1.h b/include/hardware/gralloc1.h
index 03e84322..c211029a 100644
--- a/include/hardware/gralloc1.h
+++ b/include/hardware/gralloc1.h
@@ -675,7 +675,6 @@ typedef int32_t /*gralloc1_error_t*/ (*GRALLOC1_PFN_GET_STRIDE)(
/* getTransportSize(..., outNumFds, outNumInts)
* Function descriptor: GRALLOC1_FUNCTION_GET_TRANSPORT_SIZE
- * This function is optional for all gralloc1 devices.
*
* Get the transport size of a buffer. An imported buffer handle is a raw
* buffer handle with the process-local runtime data appended. This
@@ -710,7 +709,6 @@ typedef struct gralloc1_buffer_descriptor_info {
/* validateBufferSize(..., )
* Function descriptor: GRALLOC1_FUNCTION_VALIDATE_BUFFER_SIZE
- * This function is optional for all gralloc1 devices.
*
* Validate that the buffer can be safely accessed by a caller who assumes
* the specified descriptorInfo and stride. This must at least validate
diff --git a/include/hardware/hwcomposer2.h b/include/hardware/hwcomposer2.h
index c9809ce8..c70aef64 100644
--- a/include/hardware/hwcomposer2.h
+++ b/include/hardware/hwcomposer2.h
@@ -270,7 +270,18 @@ typedef enum {
HWC2_FUNCTION_GET_READBACK_BUFFER_FENCE,
HWC2_FUNCTION_GET_RENDER_INTENTS,
HWC2_FUNCTION_SET_COLOR_MODE_WITH_RENDER_INTENT,
- HWC2_FUNCTION_GET_DATASPACE_SATURATION_MATRIX
+ HWC2_FUNCTION_GET_DATASPACE_SATURATION_MATRIX,
+
+ // composer 2.3
+ HWC2_FUNCTION_GET_DISPLAY_IDENTIFICATION_DATA,
+ HWC2_FUNCTION_GET_DISPLAY_CAPABILITIES,
+ HWC2_FUNCTION_SET_LAYER_COLOR_TRANSFORM,
+ HWC2_FUNCTION_GET_DISPLAYED_CONTENT_SAMPLING_ATTRIBUTES,
+ HWC2_FUNCTION_SET_DISPLAYED_CONTENT_SAMPLING_ENABLED,
+ HWC2_FUNCTION_GET_DISPLAYED_CONTENT_SAMPLE,
+ HWC2_FUNCTION_SET_LAYER_PER_FRAME_METADATA_BLOBS,
+ HWC2_FUNCTION_GET_DISPLAY_BRIGHTNESS_SUPPORT,
+ HWC2_FUNCTION_SET_DISPLAY_BRIGHTNESS,
} hwc2_function_descriptor_t;
/* Layer requests returned from getDisplayRequests */
@@ -347,6 +358,62 @@ typedef enum {
HWC2_MAX_FRAME_AVERAGE_LIGHT_LEVEL = 11,
} hwc2_per_frame_metadata_key_t;
+/* SetDisplayedContentSampling values passed to setDisplayedContentSamplingEnabled */
+typedef enum {
+ HWC2_DISPLAYED_CONTENT_SAMPLING_INVALID = 0,
+
+ /* Enable displayed content sampling */
+ HWC2_DISPLAYED_CONTENT_SAMPLING_ENABLE = 1,
+
+ /* Disable displayed content sampling */
+ HWC2_DISPLAYED_CONTENT_SAMPLING_DISABLE = 2,
+} hwc2_displayed_content_sampling_t;
+
+typedef enum {
+ HWC2_FORMAT_COMPONENT_0 = 1 << 0, /* The first component (eg, for RGBA_8888, this is R) */
+ HWC2_FORMAT_COMPONENT_1 = 1 << 1, /* The second component (eg, for RGBA_8888, this is G) */
+ HWC2_FORMAT_COMPONENT_2 = 1 << 2, /* The third component (eg, for RGBA_8888, this is B) */
+ HWC2_FORMAT_COMPONENT_3 = 1 << 3, /* The fourth component (eg, for RGBA_8888, this is A) */
+} hwc2_format_color_component_t;
+
+/* Optional display capabilities which may be supported by some displays.
+ * The particular set of supported capabilities for a given display may be
+ * retrieved using getDisplayCapabilities. */
+typedef enum {
+ HWC2_DISPLAY_CAPABILITY_INVALID = 0,
+
+ /**
+ * Specifies that the display must apply a color transform even when either
+ * the client or the device has chosen that all layers should be composed by
+ * the client. This prevents the client from applying the color transform
+ * during its composition step.
+ * If getDisplayCapabilities is supported, the global capability
+ * HWC2_CAPABILITY_SKIP_CLIENT_COLOR_TRANSFORM is ignored.
+ * If getDisplayCapabilities is not supported, and the global capability
+ * HWC2_CAPABILITY_SKIP_CLIENT_COLOR_TRANSFORM is returned by getCapabilities,
+ * then all displays must be treated as having
+ * HWC2_DISPLAY_CAPABILITY_SKIP_CLIENT_COLOR_TRANSFORM.
+ */
+ HWC2_DISPLAY_CAPABILITY_SKIP_CLIENT_COLOR_TRANSFORM = 1,
+
+ /**
+ * Specifies that the display supports PowerMode::DOZE and
+ * PowerMode::DOZE_SUSPEND. DOZE_SUSPEND may not provide any benefit
+ * over DOZE (see the definition of PowerMode for more information),
+ * but if both DOZE and DOZE_SUSPEND are no different from
+ * PowerMode::ON, the device must not claim support.
+ * HWC2_DISPLAY_CAPABILITY_DOZE must be returned by getDisplayCapabilities
+ * when getDozeSupport indicates the display supports PowerMode::DOZE and
+ * PowerMode::DOZE_SUSPEND.
+ */
+ HWC2_DISPLAY_CAPABILITY_DOZE = 2,
+
+ /**
+ * Specifies that the display supports brightness operations.
+ */
+ HWC2_DISPLAY_CAPABILITY_BRIGHTNESS = 3,
+} hwc2_display_capability_t;
+
/*
* Stringification Functions
*/
@@ -524,6 +591,17 @@ static inline const char* getFunctionDescriptorName(
case HWC2_FUNCTION_GET_RENDER_INTENTS: return "GetRenderIntents";
case HWC2_FUNCTION_SET_COLOR_MODE_WITH_RENDER_INTENT: return "SetColorModeWithRenderIntent";
case HWC2_FUNCTION_GET_DATASPACE_SATURATION_MATRIX: return "GetDataspaceSaturationMatrix";
+
+ // composer 2.3
+ case HWC2_FUNCTION_GET_DISPLAY_IDENTIFICATION_DATA: return "GetDisplayIdentificationData";
+ case HWC2_FUNCTION_GET_DISPLAY_CAPABILITIES: return "GetDisplayCapabilities";
+ case HWC2_FUNCTION_SET_LAYER_COLOR_TRANSFORM: return "SetLayerColorTransform";
+ case HWC2_FUNCTION_GET_DISPLAYED_CONTENT_SAMPLING_ATTRIBUTES: return "GetDisplayedContentSamplingAttributes";
+ case HWC2_FUNCTION_SET_DISPLAYED_CONTENT_SAMPLING_ENABLED: return "SetDisplayedContentSamplingEnabled";
+ case HWC2_FUNCTION_GET_DISPLAYED_CONTENT_SAMPLE: return "GetDisplayedContentSample";
+ case HWC2_FUNCTION_SET_LAYER_PER_FRAME_METADATA_BLOBS: return "SetLayerPerFrameMetadataBlobs";
+ case HWC2_FUNCTION_GET_DISPLAY_BRIGHTNESS_SUPPORT: return "GetDisplayBrightnessSupport";
+ case HWC2_FUNCTION_SET_DISPLAY_BRIGHTNESS: return "SetDisplayBrightness";
default: return "Unknown";
}
}
@@ -569,6 +647,40 @@ static inline const char* getVsyncName(hwc2_vsync_t vsync) {
}
}
+static inline const char* getDisplayedContentSamplingName(
+ hwc2_displayed_content_sampling_t sampling) {
+ switch (sampling) {
+ case HWC2_DISPLAYED_CONTENT_SAMPLING_INVALID: return "Invalid";
+ case HWC2_DISPLAYED_CONTENT_SAMPLING_ENABLE: return "Enable";
+ case HWC2_DISPLAYED_CONTENT_SAMPLING_DISABLE: return "Disable";
+ default: return "Unknown";
+ }
+}
+
+static inline const char* getFormatColorComponentName(hwc2_format_color_component_t component) {
+ switch (component) {
+ case HWC2_FORMAT_COMPONENT_0: return "FirstComponent";
+ case HWC2_FORMAT_COMPONENT_1: return "SecondComponent";
+ case HWC2_FORMAT_COMPONENT_2: return "ThirdComponent";
+ case HWC2_FORMAT_COMPONENT_3: return "FourthComponent";
+ default: return "Unknown";
+ }
+}
+
+static inline const char* getDisplayCapabilityName(hwc2_display_capability_t capability) {
+ switch (capability) {
+ case HWC2_DISPLAY_CAPABILITY_INVALID: return "Invalid";
+ case HWC2_DISPLAY_CAPABILITY_SKIP_CLIENT_COLOR_TRANSFORM:
+ return "SkipClientColorTransform";
+ case HWC2_DISPLAY_CAPABILITY_DOZE:
+ return "Doze";
+ case HWC2_DISPLAY_CAPABILITY_BRIGHTNESS:
+ return "Brightness";
+ default:
+ return "Unknown";
+ }
+}
+
#define TO_STRING(E, T, printer) \
inline std::string to_string(E value) { return printer(value); } \
inline std::string to_string(T value) { return to_string(static_cast<E>(value)); }
@@ -722,6 +834,17 @@ enum class FunctionDescriptor : int32_t {
GetRenderIntents = HWC2_FUNCTION_GET_RENDER_INTENTS,
SetColorModeWithRenderIntent = HWC2_FUNCTION_SET_COLOR_MODE_WITH_RENDER_INTENT,
GetDataspaceSaturationMatrix = HWC2_FUNCTION_GET_DATASPACE_SATURATION_MATRIX,
+
+ // composer 2.3
+ GetDisplayIdentificationData = HWC2_FUNCTION_GET_DISPLAY_IDENTIFICATION_DATA,
+ GetDisplayCapabilities = HWC2_FUNCTION_GET_DISPLAY_CAPABILITIES,
+ SetLayerColorTransform = HWC2_FUNCTION_SET_LAYER_COLOR_TRANSFORM,
+ GetDisplayedContentSamplingAttributes = HWC2_FUNCTION_GET_DISPLAYED_CONTENT_SAMPLING_ATTRIBUTES,
+ SetDisplayedContentSamplingEnabled = HWC2_FUNCTION_SET_DISPLAYED_CONTENT_SAMPLING_ENABLED,
+ GetDisplayedContentSample = HWC2_FUNCTION_GET_DISPLAYED_CONTENT_SAMPLE,
+ SetLayerPerFrameMetadataBlobs = HWC2_FUNCTION_SET_LAYER_PER_FRAME_METADATA_BLOBS,
+ GetDisplayBrightnessSupport = HWC2_FUNCTION_GET_DISPLAY_BRIGHTNESS_SUPPORT,
+ SetDisplayBrightness = HWC2_FUNCTION_SET_DISPLAY_BRIGHTNESS,
};
TO_STRING(hwc2_function_descriptor_t, FunctionDescriptor,
getFunctionDescriptorName)
@@ -758,6 +881,14 @@ enum class Vsync : int32_t {
};
TO_STRING(hwc2_vsync_t, Vsync, getVsyncName)
+enum class DisplayCapability : int32_t {
+ Invalid = HWC2_DISPLAY_CAPABILITY_INVALID,
+ SkipClientColorTransform = HWC2_DISPLAY_CAPABILITY_SKIP_CLIENT_COLOR_TRANSFORM,
+ Doze = HWC2_DISPLAY_CAPABILITY_DOZE,
+ Brightness = HWC2_DISPLAY_CAPABILITY_BRIGHTNESS,
+};
+TO_STRING(hwc2_display_capability_t, DisplayCapability, getDisplayCapabilityName)
+
} // namespace HWC2
__BEGIN_DECLS
@@ -1374,6 +1505,35 @@ typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_GET_DISPLAY_TYPE)(
hwc2_device_t* device, hwc2_display_t display,
int32_t* /*hwc2_display_type_t*/ outType);
+/* getDisplayIdentificationData(..., outPort, outDataSize, outData)
+ * Descriptor: HWC2_FUNCTION_GET_DISPLAY_IDENTIFICATION_DATA
+ * Optional for HWC2 devices
+ *
+ * If supported, getDisplayIdentificationData returns the port and data that
+ * describe a physical display. The port is a unique number that identifies a
+ * physical connector (e.g. eDP, HDMI) for display output. The data blob is
+ * parsed to determine its format, typically EDID 1.3 as specified in VESA
+ * E-EDID Standard Release A Revision 1.
+ *
+ * Devices for which display identification is unsupported must return null when
+ * getFunction is called with HWC2_FUNCTION_GET_DISPLAY_IDENTIFICATION_DATA.
+ *
+ * Parameters:
+ * outPort - the connector to which the display is connected;
+ * pointer will be non-NULL
+ * outDataSize - if outData is NULL, the size in bytes of the data which would
+ * have been returned; if outData is not NULL, the size of outData, which
+ * must not exceed the value stored in outDataSize prior to the call;
+ * pointer will be non-NULL
+ * outData - the EDID 1.3 blob identifying the display
+ *
+ * Returns HWC2_ERROR_NONE or one of the following errors:
+ * HWC2_ERROR_BAD_DISPLAY - an invalid display handle was passed in
+ */
+typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_GET_DISPLAY_IDENTIFICATION_DATA)(
+ hwc2_device_t* device, hwc2_display_t display, uint8_t* outPort,
+ uint32_t* outDataSize, uint8_t* outData);
+
/* getDozeSupport(..., outSupport)
* Descriptor: HWC2_FUNCTION_GET_DOZE_SUPPORT
* Must be provided by all HWC2 devices
@@ -2073,6 +2233,42 @@ typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_SET_LAYER_PER_FRAME_METADATA)(
uint32_t numElements, const int32_t* /*hw2_per_frame_metadata_key_t*/ keys,
const float* metadata);
+/* setLayerPerFrameMetadataBlobs(...,numElements, keys, sizes, blobs)
+ * Descriptor: HWC2_FUNCTION_SET_LAYER_PER_FRAME_METADATA_BLOBS
+ * Optional for HWC2 devices
+ *
+ * If supported, (getFunction(HWC2_FUNCTION_SET_LAYER_PER_FRAME_METADATA_BLOBS)
+ * is non-null), sets the metadata for the given display and layer.
+ *
+ * Upon returning from this function, the metadata change must have fully taken
+ * effect.
+ *
+ * This function must only be called if getPerFrameMetadataKeys is non-NULL
+ * and returns at least one key that corresponds to a blob type.
+ *
+ * Current valid blob type keys are: HDR10_PLUS_SEI
+ *
+ * Parameters:
+ * numElements is the number of elements in each of the keys, sizes, and
+ * metadata arrays
+ * keys is a pointer to an array of keys. Current valid keys are those listed
+ * above as valid blob type keys.
+ * sizes is a pointer to an array of unsigned ints specifying the sizes of
+ * each metadata blob
+ * metadata is a pointer to a blob of data holding all blobs contiguously in
+ * memory
+ *
+ * Returns HWC2_ERROR_NONE or one of the following errors:
+ * HWC2_ERROR_BAD_DISPLAY - an invalid display handle was passed in
+ * HWC2_ERROR_BAD_PARAMETER - sizes of keys and metadata parameters does
+ * not match numElements, numElements < 0, or keys contains a
+ * non-valid key (see above for current valid blob type keys).
+ * HWC2_ERROR_UNSUPPORTED - metadata is not supported on this display
+ */
+typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_SET_LAYER_PER_FRAME_METADATA_BLOBS)(
+ hwc2_device_t* device, hwc2_display_t display, hwc2_layer_t layer,
+ uint32_t numElements, const int32_t* keys, const uint32_t* sizes,
+ const uint8_t* metadata);
/*
* Layer State Functions
*
@@ -2326,6 +2522,211 @@ typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_SET_LAYER_Z_ORDER)(
hwc2_device_t* device, hwc2_display_t display, hwc2_layer_t layer,
uint32_t z);
+/* setLayerColorTransform(..., matrix)
+ * Descriptor: HWC2_FUNCTION_SET_LAYER_COLOR_TRANSFORM
+ * Optional by all HWC2 devices
+ *
+ * Sets a matrix for color transform which will be applied on this layer
+ * before composition.
+ *
+ * If the device is not capable of applying the matrix on this layer, it must force
+ * this layer to client composition during VALIDATE_DISPLAY.
+ *
+ * The matrix provided is an affine color transformation of the following form:
+ *
+ * |r.r r.g r.b 0|
+ * |g.r g.g g.b 0|
+ * |b.r b.g b.b 0|
+ * |Tr Tg Tb 1|
+ *
+ * This matrix must be provided in row-major form:
+ *
+ * {r.r, r.g, r.b, 0, g.r, ...}.
+ *
+ * Given a matrix of this form and an input color [R_in, G_in, B_in],
+ * the input color must first be converted to linear space
+ * [R_linear, G_linear, B_linear], then the output linear color
+ * [R_out_linear, G_out_linear, B_out_linear] will be:
+ *
+ * R_out_linear = R_linear * r.r + G_linear * g.r + B_linear * b.r + Tr
+ * G_out_linear = R_linear * r.g + G_linear * g.g + B_linear * b.g + Tg
+ * B_out_linear = R_linear * r.b + G_linear * g.b + B_linear * b.b + Tb
+ *
+ * [R_out_linear, G_out_linear, B_out_linear] must then be converted to
+ * gamma space: [R_out, G_out, B_out] before blending.
+ *
+ * Parameters:
+ * matrix - a 4x4 transform matrix (16 floats) as described above
+ *
+ * Returns HWC2_ERROR_NONE or one of the following errors:
+ * HWC2_ERROR_BAD_DISPLAY - an invalid display handle was passed in
+ * HWC2_ERROR_BAD_LAYER - an invalid layer handle was passed in
+ */
+typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_SET_LAYER_COLOR_TRANSFORM)(
+ hwc2_device_t* device, hwc2_display_t display, hwc2_layer_t layer,
+ const float* matrix);
+
+/* getDisplayedContentSamplingAttributes(...,
+ * format, dataspace, supported_components, max_frames)
+ * Descriptor: HWC2_FUNCTION_GET_DISPLAYED_CONTENT_SAMPLING_ATTRIBUTES
+ * Optional by all HWC2 devices
+ *
+ * Query for what types of color sampling the hardware supports.
+ *
+ * Parameters:
+ * format - The format of the sampled pixels; pointer will be non-NULL
+ * dataspace - The dataspace of the sampled pixels; pointer will be non-NULL
+ * supported_components - The mask of which components can be sampled; pointer
+ * will be non-NULL
+ *
+ * Returns HWC2_ERROR_NONE or one of the following errors:
+ * HWC2_ERROR_BAD_DISPLAY when an invalid display was passed in, or
+ * HWC2_ERROR_UNSUPPORTED when there is no efficient way to sample.
+ */
+typedef int32_t (*HWC2_PFN_GET_DISPLAYED_CONTENT_SAMPLING_ATTRIBUTES)(
+ hwc2_device_t* device, hwc2_display_t display,
+ int32_t* /* android_pixel_format_t */ format,
+ int32_t* /* android_dataspace_t */ dataspace,
+ uint8_t* /* mask of android_component_t */ supported_components);
+
+/* setDisplayedContentSamplingEnabled(..., enabled)
+ * Descriptor: HWC2_FUNCTION_SET_DISPLAYED_CONTENT_SAMPLING_ENABLED
+ * Optional by all HWC2 devices
+ *
+ * Enables or disables the collection of color content statistics
+ * on this display.
+ *
+ * Sampling occurs on the contents of the final composition on this display
+ * (i.e., the contents presented on screen).
+ *
+ * Sampling support is optional, and is set to DISABLE by default.
+ * On each call to ENABLE, all collected statistics will be reset.
+ *
+ * Sample data can be queried via getDisplayedContentSample().
+ *
+ * Parameters:
+ * enabled - indicates whether to enable or disable sampling.
+ * component_mask - The mask of which components should be sampled.
+ * If zero, all supported components are to be enabled.
+ * max_frames - is the maximum number of frames that should be stored before
+ * discard. The sample represents the most-recently posted frames.
+ *
+ * Returns HWC2_ERROR_NONE or one of the following errors:
+ * HWC2_ERROR_BAD_DISPLAY when an invalid display handle was passed in,
+ * HWC2_ERROR_BAD_PARAMETER when enabled was an invalid value, or
+ * HWC2_ERROR_NO_RESOURCES when the requested ringbuffer size via max_frames
+ * was not available.
+ * HWC2_ERROR_UNSUPPORTED when there is no efficient way to sample.
+ */
+typedef int32_t (*HWC2_PFN_SET_DISPLAYED_CONTENT_SAMPLING_ENABLED)(
+ hwc2_device_t* device, hwc2_display_t display,
+ int32_t /*hwc2_displayed_content_sampling_t*/ enabled,
+ uint8_t /* mask of android_component_t */ component_mask,
+ uint64_t max_frames);
+
+/* getDisplayedContentSample(..., component, max_frames, timestamp,
+ * samples_size, samples, frame_count)
+ * Descriptor: HWC2_FUNCTION_GET_DISPLAYED_CONTENT_SAMPLE
+ * Optional by all HWC2 devices
+ *
+ * Collects the results of display content color sampling for display.
+ *
+ * Collection of data can occur whether the sampling is in ENABLE or
+ * DISABLE state.
+ *
+ * Parameters:
+ * max_frames - is the maximum number of frames that should be represented in
+ * the sample. The sample represents the most-recently posted frames.
+ * If max_frames is 0, all frames are to be represented by the sample.
+ * timestamp - is the timestamp after which any frames were posted that should
+ * be included in the sample. Timestamp is CLOCK_MONOTONIC.
+ * If timestamp is 0, do not filter from the sample by time.
+ * frame_count - The number of frames represented by this sample; pointer will
+ * be non-NULL.
+ * samples_size - The sizes of the color histogram representing the color
+ * sampling. Sample_sizes are indexed in the same order as
+ * HWC2_FORMAT_COMPONENT_.
+ * samples - The arrays of data corresponding to the sampling data. Samples are
+ * indexed in the same order as HWC2_FORMAT_COMPONENT_.
+ * The size of each sample is the samples_size for the same index.
+ * Each component's sample is an array that is to be filled with the
+ * evenly-weighted buckets of a histogram counting how many times a pixel
+ * of the given component was displayed onscreen. Caller owns the data and
+ * pointer may be NULL to query samples_size.
+ *
+ * Returns HWC2_ERROR_NONE or one of the following errors:
+ * HWC2_ERROR_BAD_DISPLAY when an invalid display was passed in, or
+ * HWC2_ERROR_UNSUPPORTED when there is no efficient way to sample, or
+ * HWC2_ERROR_BAD_PARAMETER when the component is not supported by the hardware.
+ */
+typedef int32_t (*HWC2_PFN_GET_DISPLAYED_CONTENT_SAMPLE)(
+ hwc2_device_t* device, hwc2_display_t display,
+ uint64_t max_frames, uint64_t timestamp,
+ uint64_t* frame_count, int32_t samples_size[4], uint64_t* samples[4]);
+
+/* getDisplayCapabilities(..., outCapabilities)
+ * Descriptor: HWC2_FUNCTION_GET_DISPLAY_CAPABILITIES
+ * Required for HWC2 devices for composer 2.3
+ * Optional for HWC2 devices for composer 2.1 and 2.2
+ *
+ * getDisplayCapabilities returns a list of supported capabilities
+ * (as described in the definition of Capability above).
+ * This list must not change after initialization.
+ *
+ * Parameters:
+ * outNumCapabilities - if outCapabilities was nullptr, returns the number of capabilities
+ * if outCapabilities was not nullptr, returns the number of capabilities stored in
+ * outCapabilities, which must not exceed the value stored in outNumCapabilities prior
+ * to the call; pointer will be non-NULL
+ * outCapabilities - a list of supported capabilities.
+ *
+ * Returns HWC2_ERROR_NONE or one of the following errors:
+ * HWC2_ERROR_BAD_DISPLAY - an invalid display handle was passed in
+ */
+typedef int32_t /*hwc2_error_t*/ (*HWC2_PFN_GET_DISPLAY_CAPABILITIES)(
+ hwc2_device_t* device, hwc2_display_t display, uint32_t* outNumCapabilities,
+ uint32_t* outCapabilities);
+
+/* Use getDisplayCapabilities instead. If brightness is supported, must return
+ * DisplayCapability::BRIGHTNESS as one of the display capabilities via getDisplayCapabilities.
+ * Only use getDisplayCapabilities as the source of truth to query brightness support.
+ *
+ * getDisplayBrightnessSupport(displayToken)
+ * Descriptor: HWC2_FUNCTION_GET_DISPLAY_BRIGHTNESS_SUPPORT
+ * Required for HWC2 devices for composer 2.3
+ * Optional for HWC2 devices for composer 2.1 and 2.2
+ *
+ * getDisplayBrightnessSupport returns whether brightness operations are supported on a display.
+ *
+ * Parameters:
+ * outSupport - whether the display supports brightness operations.
+ *
+ * Returns HWC2_ERROR_NONE or one of the following errors:
+ * HWC2_ERROR_BAD_DISPLAY when the display is invalid.
+ */
+typedef int32_t /*hwc_error_t*/ (*HWC2_PFN_GET_DISPLAY_BRIGHTNESS_SUPPORT)(hwc2_device_t* device,
+ hwc2_display_t display, bool* outSupport);
+
+/* setDisplayBrightness(displayToken, brightness)
+ * Descriptor: HWC2_FUNCTION_SET_DISPLAY_BRIGHTNESS
+ * Required for HWC2 devices for composer 2.3
+ * Optional for HWC2 devices for composer 2.1 and 2.2
+ *
+ * setDisplayBrightness sets the brightness of a display.
+ *
+ * Parameters:
+ * brightness - a number between 0.0f (minimum brightness) and 1.0f (maximum brightness), or
+ * -1.0f to turn the backlight off.
+ *
+ * Returns HWC2_ERROR_NONE or one of the following errors:
+ * HWC2_ERROR_BAD_DISPLAY when the display is invalid, or
+ * HWC2_ERROR_UNSUPPORTED when brightness operations are not supported, or
+ * HWC2_ERROR_BAD_PARAMETER when the brightness is invalid, or
+ * HWC2_ERROR_NO_RESOURCES when the brightness cannot be applied.
+ */
+typedef int32_t /*hwc_error_t*/ (*HWC2_PFN_SET_DISPLAY_BRIGHTNESS)(hwc2_device_t* device,
+ hwc2_display_t display, float brightness);
+
__END_DECLS
#endif
diff --git a/include/hardware/sound_trigger.h b/include/hardware/sound_trigger.h
index d7828acd..99346ef7 100644
--- a/include/hardware/sound_trigger.h
+++ b/include/hardware/sound_trigger.h
@@ -40,7 +40,8 @@ __BEGIN_DECLS
#define SOUND_TRIGGER_DEVICE_API_VERSION_1_0 HARDWARE_DEVICE_API_VERSION(1, 0)
#define SOUND_TRIGGER_DEVICE_API_VERSION_1_1 HARDWARE_DEVICE_API_VERSION(1, 1)
-#define SOUND_TRIGGER_DEVICE_API_VERSION_CURRENT SOUND_TRIGGER_DEVICE_API_VERSION_1_1
+#define SOUND_TRIGGER_DEVICE_API_VERSION_1_2 HARDWARE_DEVICE_API_VERSION(1, 2)
+#define SOUND_TRIGGER_DEVICE_API_VERSION_CURRENT SOUND_TRIGGER_DEVICE_API_VERSION_1_2
/**
* List of known sound trigger HAL modules. This is the base name of the sound_trigger HAL
@@ -114,6 +115,14 @@ struct sound_trigger_hw_device {
* If no implementation is provided, stop_recognition will be called for each running model.
*/
int (*stop_all_recognitions)(const struct sound_trigger_hw_device* dev);
+
+ /* Get the current state of a given model.
+ * The state will be returned as a recognition event, via the callback that was registered
+ * in the start_recognition method.
+ * Only supported for device api versions SOUND_TRIGGER_DEVICE_API_VERSION_1_2 or above.
+ */
+ int (*get_model_state)(const struct sound_trigger_hw_device *dev,
+ sound_model_handle_t sound_model_handle);
};
typedef struct sound_trigger_hw_device sound_trigger_hw_device_t;
diff --git a/modules/audio_remote_submix/audio_hw.cpp b/modules/audio_remote_submix/audio_hw.cpp
index 8c0c0971..833c12bb 100644
--- a/modules/audio_remote_submix/audio_hw.cpp
+++ b/modules/audio_remote_submix/audio_hw.cpp
@@ -488,17 +488,26 @@ static void submix_audio_device_destroy_pipe_l(struct submix_audio_device * cons
ALOGV("submix_audio_device_destroy_pipe_l()");
int route_idx = -1;
if (in != NULL) {
+ bool shut_down = false;
#if ENABLE_LEGACY_INPUT_OPEN
const_cast<struct submix_stream_in*>(in)->ref_count--;
route_idx = in->route_handle;
ALOG_ASSERT(rsxadev->routes[route_idx].input == in);
if (in->ref_count == 0) {
rsxadev->routes[route_idx].input = NULL;
+ shut_down = true;
}
ALOGV("submix_audio_device_destroy_pipe_l(): input ref_count %d", in->ref_count);
#else
rsxadev->input = NULL;
+ shut_down = true;
#endif // ENABLE_LEGACY_INPUT_OPEN
+ if (shut_down) {
+ sp <MonoPipe> sink = rsxadev->routes[in->route_handle].rsxSink;
+ if (sink != NULL) {
+ sink->shutdown(true);
+ }
+ }
}
if (out != NULL) {
route_idx = out->route_handle;
@@ -796,6 +805,11 @@ static ssize_t out_write(struct audio_stream_out *stream, const void* buffer,
// the pipe has already been shutdown, this buffer will be lost but we must
// simulate timing so we don't drain the output faster than realtime
usleep(frames * 1000000 / out_get_sample_rate(&stream->common));
+
+ pthread_mutex_lock(&rsxadev->lock);
+ out->frames_written += frames;
+ out->frames_written_since_standby += frames;
+ pthread_mutex_unlock(&rsxadev->lock);
return bytes;
}
} else {
@@ -1648,6 +1662,12 @@ static int adev_open_input_stream(struct audio_hw_device *dev,
ALOGV("adev_open_input_stream(): about to create pipe");
submix_audio_device_create_pipe_l(rsxadev, config, DEFAULT_PIPE_SIZE_IN_FRAMES,
DEFAULT_PIPE_PERIOD_COUNT, in, NULL, address, route_idx);
+
+ sp <MonoPipe> sink = rsxadev->routes[route_idx].rsxSink;
+ if (sink != NULL) {
+ sink->shutdown(false);
+ }
+
#if LOG_STREAMS_TO_FILES
if (in->log_fd >= 0) close(in->log_fd);
in->log_fd = open(LOG_STREAM_IN_FILENAME, O_CREAT | O_TRUNC | O_WRONLY,
diff --git a/modules/camera/3_4/Android.mk b/modules/camera/3_4/Android.mk
index 3395b5bd..0a11f684 100644
--- a/modules/camera/3_4/Android.mk
+++ b/modules/camera/3_4/Android.mk
@@ -106,7 +106,6 @@ LOCAL_MODULE := camera.v4l2_test
LOCAL_CFLAGS += $(v4l2_cflags)
LOCAL_SHARED_LIBRARIES := $(v4l2_shared_libs)
LOCAL_STATIC_LIBRARIES := \
- libBionicGtestMain \
libgmock \
$(v4l2_static_libs) \
diff --git a/modules/camera/3_4/v4l2_camera_hal.cpp b/modules/camera/3_4/v4l2_camera_hal.cpp
index 386160f4..17fac1a8 100644
--- a/modules/camera/3_4/v4l2_camera_hal.cpp
+++ b/modules/camera/3_4/v4l2_camera_hal.cpp
@@ -238,4 +238,5 @@ camera_module_t HAL_MODULE_INFO_SYM __attribute__((visibility("default"))) = {
.open_legacy = v4l2_camera_hal::open_legacy,
.set_torch_mode = v4l2_camera_hal::set_torch_mode,
.init = nullptr,
- .reserved = {nullptr, nullptr, nullptr, nullptr, nullptr}};
+ .get_physical_camera_info = nullptr,
+ .reserved = {nullptr, nullptr}};
diff --git a/modules/soundtrigger/sound_trigger_hw.c b/modules/soundtrigger/sound_trigger_hw.c
index 0089f980..38212c45 100644
--- a/modules/soundtrigger/sound_trigger_hw.c
+++ b/modules/soundtrigger/sound_trigger_hw.c
@@ -811,6 +811,41 @@ static int stdev_stop_all_recognitions(const struct sound_trigger_hw_device *dev
return 0;
}
+static int stdev_get_model_state(const struct sound_trigger_hw_device *dev,
+ sound_model_handle_t handle) {
+ int ret = 0;
+ struct stub_sound_trigger_device *stdev = (struct stub_sound_trigger_device *)dev;
+ ALOGI("%s", __func__);
+ pthread_mutex_lock(&stdev->lock);
+
+ struct recognition_context *model_context = get_model_context(stdev, handle);
+ if (!model_context) {
+ ALOGW("Can't find sound model handle %d in registered list", handle);
+ ret = -ENOSYS;
+ goto exit;
+ }
+
+ if (!model_context->model_started) {
+ ALOGW("Sound model %d not started", handle);
+ ret = -ENOSYS;
+ goto exit;
+ }
+
+ if (model_context->recognition_callback == NULL) {
+ ALOGW("Sound model %d not initialized", handle);
+ ret = -ENOSYS;
+ goto exit;
+ }
+
+ // TODO(mdooley): trigger recognition event
+
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+ ALOGI("%s done for handle %d", __func__, handle);
+
+ return ret;
+}
+
__attribute__ ((visibility ("default")))
int sound_trigger_open_for_streaming() {
int ret = 0;
@@ -863,6 +898,7 @@ static int stdev_open(const hw_module_t* module, const char* name,
stdev->device.start_recognition = stdev_start_recognition;
stdev->device.stop_recognition = stdev_stop_recognition;
stdev->device.stop_all_recognitions = stdev_stop_all_recognitions;
+ stdev->device.get_model_state = stdev_get_model_state;
pthread_mutex_init(&stdev->lock, (const pthread_mutexattr_t *) NULL);
@@ -890,4 +926,3 @@ struct sound_trigger_module HAL_MODULE_INFO_SYM = {
.methods = &hal_module_methods,
},
};
-
diff --git a/modules/usbaudio/audio_hal.c b/modules/usbaudio/audio_hal.c
index 81c9fd61..f0ea015a 100644
--- a/modules/usbaudio/audio_hal.c
+++ b/modules/usbaudio/audio_hal.c
@@ -15,7 +15,7 @@
*/
#define LOG_TAG "modules.usbaudio.audio_hal"
-/*#define LOG_NDEBUG 0*/
+/* #define LOG_NDEBUG 0 */
#include <errno.h>
#include <inttypes.h>
@@ -47,27 +47,26 @@
/* Lock play & record samples rates at or above this threshold */
#define RATELOCK_THRESHOLD 96000
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
struct audio_device {
struct audio_hw_device hw_device;
pthread_mutex_t lock; /* see note below on mutex acquisition order */
/* output */
- alsa_device_profile out_profile;
struct listnode output_stream_list;
/* input */
- alsa_device_profile in_profile;
struct listnode input_stream_list;
/* lock input & output sample rates */
/*FIXME - How do we address multiple output streams? */
- uint32_t device_sample_rate;
+ uint32_t device_sample_rate; // this should be a rate that is common to both input & output
bool mic_muted;
- bool standby;
-
int32_t inputs_open; /* number of input streams currently open. */
};
@@ -79,16 +78,13 @@ struct stream_lock {
struct stream_out {
struct audio_stream_out stream;
- struct stream_lock lock;
+ struct stream_lock lock;
bool standby;
struct audio_device *adev; /* hardware information - only using this for the lock */
- const alsa_device_profile *profile; /* Points to the alsa_device_profile in the audio_device.
- * Const, so modifications go through adev->out_profile
- * and thus should have the hardware lock and ensure
- * stream is not active and no other open output streams.
+ alsa_device_profile profile; /* The profile of the ALSA device connected to the stream.
*/
alsa_device_proxy proxy; /* state of the stream */
@@ -121,10 +117,7 @@ struct stream_in {
struct audio_device *adev; /* hardware information - only using this for the lock */
- const alsa_device_profile *profile; /* Points to the alsa_device_profile in the audio_device.
- * Const, so modifications go through adev->out_profile
- * and thus should have the hardware lock and ensure
- * stream is not active and no other open input streams.
+ alsa_device_profile profile; /* The profile of the ALSA device connected to the stream.
*/
alsa_device_proxy proxy; /* state of the stream */
@@ -344,9 +337,7 @@ static int out_standby(struct audio_stream *stream)
stream_lock(&out->lock);
if (!out->standby) {
- device_lock(out->adev);
proxy_close(&out->proxy);
- device_unlock(out->adev);
out->standby = true;
}
stream_unlock(&out->lock);
@@ -358,7 +349,7 @@ static int out_dump(const struct audio_stream *stream, int fd) {
if (out_stream != NULL) {
dprintf(fd, "Output Profile:\n");
- profile_dump(out_stream->profile, fd);
+ profile_dump(&out_stream->profile, fd);
dprintf(fd, "Output Proxy:\n");
proxy_dump(&out_stream->proxy, fd);
@@ -383,27 +374,23 @@ static int out_set_parameters(struct audio_stream *stream, const char *kvpairs)
}
stream_lock(&out->lock);
- /* Lock the device because that is where the profile lives */
- device_lock(out->adev);
-
- if (!profile_is_cached_for(out->profile, card, device)) {
+ if (!profile_is_cached_for(&out->profile, card, device)) {
/* cannot read pcm device info if playback is active */
if (!out->standby)
ret_value = -ENOSYS;
else {
- int saved_card = out->profile->card;
- int saved_device = out->profile->device;
- out->adev->out_profile.card = card;
- out->adev->out_profile.device = device;
- ret_value = profile_read_device_info(&out->adev->out_profile) ? 0 : -EINVAL;
+ int saved_card = out->profile.card;
+ int saved_device = out->profile.device;
+ out->profile.card = card;
+ out->profile.device = device;
+ ret_value = profile_read_device_info(&out->profile) ? 0 : -EINVAL;
if (ret_value != 0) {
- out->adev->out_profile.card = saved_card;
- out->adev->out_profile.device = saved_device;
+ out->profile.card = saved_card;
+ out->profile.device = saved_device;
}
}
}
- device_unlock(out->adev);
stream_unlock(&out->lock);
return ret_value;
@@ -413,11 +400,7 @@ static char * out_get_parameters(const struct audio_stream *stream, const char *
{
struct stream_out *out = (struct stream_out *)stream;
stream_lock(&out->lock);
- device_lock(out->adev);
-
- char * params_str = device_get_parameters(out->profile, keys);
-
- device_unlock(out->adev);
+ char * params_str = device_get_parameters(&out->profile, keys);
stream_unlock(&out->lock);
return params_str;
}
@@ -436,7 +419,7 @@ static int out_set_volume(struct audio_stream_out *stream, float left, float rig
/* must be called with hw device and output stream mutexes locked */
static int start_output_stream(struct stream_out *out)
{
- ALOGV("start_output_stream(card:%d device:%d)", out->profile->card, out->profile->device);
+ ALOGV("start_output_stream(card:%d device:%d)", out->profile.card, out->profile.device);
return proxy_open(&out->proxy);
}
@@ -448,9 +431,7 @@ static ssize_t out_write(struct audio_stream_out *stream, const void* buffer, si
stream_lock(&out->lock);
if (out->standby) {
- device_lock(out->adev);
ret = start_output_stream(out);
- device_unlock(out->adev);
if (ret != 0) {
goto err;
}
@@ -573,43 +554,45 @@ static int adev_open_output_stream(struct audio_hw_device *hw_dev,
stream_lock_init(&out->lock);
out->adev = (struct audio_device *)hw_dev;
- device_lock(out->adev);
- out->profile = &out->adev->out_profile;
+
+ profile_init(&out->profile, PCM_OUT);
// build this to hand to the alsa_device_proxy
struct pcm_config proxy_config;
memset(&proxy_config, 0, sizeof(proxy_config));
/* Pull out the card/device pair */
- parse_card_device_params(address, &out->adev->out_profile.card, &out->adev->out_profile.device);
+ parse_card_device_params(address, &out->profile.card, &out->profile.device);
- profile_read_device_info(&out->adev->out_profile);
+ profile_read_device_info(&out->profile);
int ret = 0;
/* Rate */
if (config->sample_rate == 0) {
- proxy_config.rate = config->sample_rate = profile_get_default_sample_rate(out->profile);
- } else if (profile_is_sample_rate_valid(out->profile, config->sample_rate)) {
+ proxy_config.rate = config->sample_rate = profile_get_default_sample_rate(&out->profile);
+ } else if (profile_is_sample_rate_valid(&out->profile, config->sample_rate)) {
proxy_config.rate = config->sample_rate;
} else {
- proxy_config.rate = config->sample_rate = profile_get_default_sample_rate(out->profile);
+ proxy_config.rate = config->sample_rate = profile_get_default_sample_rate(&out->profile);
ret = -EINVAL;
}
+ /* TODO: This is a problem if the input does not support this rate */
+ device_lock(out->adev);
out->adev->device_sample_rate = config->sample_rate;
device_unlock(out->adev);
/* Format */
if (config->format == AUDIO_FORMAT_DEFAULT) {
- proxy_config.format = profile_get_default_format(out->profile);
+ proxy_config.format = profile_get_default_format(&out->profile);
config->format = audio_format_from_pcm_format(proxy_config.format);
} else {
enum pcm_format fmt = pcm_format_from_audio_format(config->format);
- if (profile_is_format_valid(out->profile, fmt)) {
+ if (profile_is_format_valid(&out->profile, fmt)) {
proxy_config.format = fmt;
} else {
- proxy_config.format = profile_get_default_format(out->profile);
+ proxy_config.format = profile_get_default_format(&out->profile);
config->format = audio_format_from_pcm_format(proxy_config.format);
ret = -EINVAL;
}
@@ -619,7 +602,7 @@ static int adev_open_output_stream(struct audio_hw_device *hw_dev,
bool calc_mask = false;
if (config->channel_mask == AUDIO_CHANNEL_NONE) {
/* query case */
- out->hal_channel_count = profile_get_default_channel_count(out->profile);
+ out->hal_channel_count = profile_get_default_channel_count(&out->profile);
calc_mask = true;
} else {
/* explicit case */
@@ -647,8 +630,9 @@ static int adev_open_output_stream(struct audio_hw_device *hw_dev,
// Validate the "logical" channel count against support in the "actual" profile.
// if they differ, choose the "actual" number of channels *closest* to the "logical".
// and store THAT in proxy_config.channels
- proxy_config.channels = profile_get_closest_channel_count(out->profile, out->hal_channel_count);
- proxy_prepare(&out->proxy, out->profile, &proxy_config);
+ proxy_config.channels =
+ profile_get_closest_channel_count(&out->profile, out->hal_channel_count);
+ proxy_prepare(&out->proxy, &out->profile, &proxy_config);
/* TODO The retry mechanism isn't implemented in AudioPolicyManager/AudioFlinger
* So clear any errors that may have occurred above.
@@ -672,9 +656,7 @@ static void adev_close_output_stream(struct audio_hw_device *hw_dev,
struct audio_stream_out *stream)
{
struct stream_out *out = (struct stream_out *)stream;
- ALOGV("adev_close_output_stream(c:%d d:%d)", out->profile->card, out->profile->device);
-
- adev_remove_stream_from_list(out->adev, &out->list_node);
+ ALOGV("adev_close_output_stream(c:%d d:%d)", out->profile.card, out->profile.device);
/* Close the pcm device */
out_standby(&stream->common);
@@ -684,6 +666,8 @@ static void adev_close_output_stream(struct audio_hw_device *hw_dev,
out->conversion_buffer = NULL;
out->conversion_buffer_size = 0;
+ adev_remove_stream_from_list(out->adev, &out->list_node);
+
device_lock(out->adev);
out->adev->device_sample_rate = 0;
device_unlock(out->adev);
@@ -746,12 +730,9 @@ static int in_standby(struct audio_stream *stream)
stream_lock(&in->lock);
if (!in->standby) {
- device_lock(in->adev);
proxy_close(&in->proxy);
- device_unlock(in->adev);
in->standby = true;
}
-
stream_unlock(&in->lock);
return 0;
@@ -762,7 +743,7 @@ static int in_dump(const struct audio_stream *stream, int fd)
const struct stream_in* in_stream = (const struct stream_in*)stream;
if (in_stream != NULL) {
dprintf(fd, "Input Profile:\n");
- profile_dump(in_stream->profile, fd);
+ profile_dump(&in_stream->profile, fd);
dprintf(fd, "Input Proxy:\n");
proxy_dump(&in_stream->proxy, fd);
@@ -789,19 +770,20 @@ static int in_set_parameters(struct audio_stream *stream, const char *kvpairs)
stream_lock(&in->lock);
device_lock(in->adev);
- if (card >= 0 && device >= 0 && !profile_is_cached_for(in->profile, card, device)) {
- /* cannot read pcm device info if playback is active, or more than one open stream */
+ if (card >= 0 && device >= 0 && !profile_is_cached_for(&in->profile, card, device)) {
+ /* cannot read pcm device info if capture is active, or more than one open stream */
if (!in->standby || in->adev->inputs_open > 1)
ret_value = -ENOSYS;
else {
- int saved_card = in->profile->card;
- int saved_device = in->profile->device;
- in->adev->in_profile.card = card;
- in->adev->in_profile.device = device;
- ret_value = profile_read_device_info(&in->adev->in_profile) ? 0 : -EINVAL;
+ int saved_card = in->profile.card;
+ int saved_device = in->profile.device;
+ in->profile.card = card;
+ in->profile.device = device;
+ ret_value = profile_read_device_info(&in->profile) ? 0 : -EINVAL;
if (ret_value != 0) {
- in->adev->in_profile.card = saved_card;
- in->adev->in_profile.device = saved_device;
+ ALOGE("Can't read device profile. card:%d, device:%d", card, device);
+ in->profile.card = saved_card;
+ in->profile.device = saved_device;
}
}
}
@@ -817,11 +799,7 @@ static char * in_get_parameters(const struct audio_stream *stream, const char *k
struct stream_in *in = (struct stream_in *)stream;
stream_lock(&in->lock);
- device_lock(in->adev);
-
- char * params_str = device_get_parameters(in->profile, keys);
-
- device_unlock(in->adev);
+ char * params_str = device_get_parameters(&in->profile, keys);
stream_unlock(&in->lock);
return params_str;
@@ -845,7 +823,7 @@ static int in_set_gain(struct audio_stream_in *stream, float gain)
/* must be called with hw device and output stream mutexes locked */
static int start_input_stream(struct stream_in *in)
{
- ALOGV("start_input_stream(card:%d device:%d)", in->profile->card, in->profile->device);
+ ALOGV("start_input_stream(card:%d device:%d)", in->profile.card, in->profile.device);
return proxy_open(&in->proxy);
}
@@ -862,9 +840,7 @@ static ssize_t in_read(struct audio_stream_in *stream, void* buffer, size_t byte
stream_lock(&in->lock);
if (in->standby) {
- device_lock(in->adev);
ret = start_input_stream(in);
- device_unlock(in->adev);
if (ret != 0) {
goto err;
}
@@ -929,6 +905,43 @@ static uint32_t in_get_input_frames_lost(struct audio_stream_in *stream)
return 0;
}
+static int in_get_capture_position(const struct audio_stream_in *stream,
+ int64_t *frames, int64_t *time)
+{
+ struct stream_in *in = (struct stream_in *)stream; // discard const qualifier
+ stream_lock(&in->lock);
+
+ const alsa_device_proxy *proxy = &in->proxy;
+ const int ret = proxy_get_capture_position(proxy, frames, time);
+
+ stream_unlock(&in->lock);
+ return ret;
+}
+
+static int in_get_active_microphones(const struct audio_stream_in *stream,
+ struct audio_microphone_characteristic_t *mic_array,
+ size_t *mic_count) {
+ (void)stream;
+ (void)mic_array;
+ (void)mic_count;
+
+ return -ENOSYS;
+}
+
+static int in_set_microphone_direction(const struct audio_stream_in *stream,
+ audio_microphone_direction_t dir) {
+ (void)stream;
+ (void)dir;
+ ALOGV("---- in_set_microphone_direction()");
+ return -ENOSYS;
+}
+
+static int in_set_microphone_field_dimension(const struct audio_stream_in *stream, float zoom) {
+ (void)zoom;
+ ALOGV("---- in_set_microphone_field_dimension()");
+ return -ENOSYS;
+}
+
static int adev_open_input_stream(struct audio_hw_device *hw_dev,
audio_io_handle_t handle,
audio_devices_t devicesSpec __unused,
@@ -972,68 +985,89 @@ static int adev_open_input_stream(struct audio_hw_device *hw_dev,
in->stream.set_gain = in_set_gain;
in->stream.read = in_read;
in->stream.get_input_frames_lost = in_get_input_frames_lost;
+ in->stream.get_capture_position = in_get_capture_position;
+
+ in->stream.get_active_microphones = in_get_active_microphones;
+ in->stream.set_microphone_direction = in_set_microphone_direction;
+ in->stream.set_microphone_field_dimension = in_set_microphone_field_dimension;
stream_lock_init(&in->lock);
in->adev = (struct audio_device *)hw_dev;
- device_lock(in->adev);
- in->profile = &in->adev->in_profile;
+ profile_init(&in->profile, PCM_IN);
struct pcm_config proxy_config;
memset(&proxy_config, 0, sizeof(proxy_config));
int ret = 0;
+ device_lock(in->adev);
+ int num_open_inputs = in->adev->inputs_open;
+ device_unlock(in->adev);
+
/* Check if an input stream is already open */
- if (in->adev->inputs_open > 0) {
- if (!profile_is_cached_for(in->profile, card, device)) {
+ if (num_open_inputs > 0) {
+ if (!profile_is_cached_for(&in->profile, card, device)) {
ALOGW("%s fail - address card:%d device:%d doesn't match existing profile",
__func__, card, device);
ret = -EINVAL;
}
} else {
/* Read input profile only if necessary */
- in->adev->in_profile.card = card;
- in->adev->in_profile.device = device;
- if (!profile_read_device_info(&in->adev->in_profile)) {
+ in->profile.card = card;
+ in->profile.device = device;
+ if (!profile_read_device_info(&in->profile)) {
ALOGW("%s fail - cannot read profile", __func__);
ret = -EINVAL;
}
}
if (ret != 0) {
- device_unlock(in->adev);
free(in);
*stream_in = NULL;
return ret;
}
/* Rate */
+ int request_config_rate = config->sample_rate;
if (config->sample_rate == 0) {
- config->sample_rate = profile_get_default_sample_rate(in->profile);
+ config->sample_rate = profile_get_default_sample_rate(&in->profile);
}
- if (in->adev->device_sample_rate != 0 && /* we are playing, so lock the rate */
+ if (in->adev->device_sample_rate != 0 && /* we are playing, so lock the rate if possible */
in->adev->device_sample_rate >= RATELOCK_THRESHOLD) {/* but only for high sample rates */
- ret = config->sample_rate != in->adev->device_sample_rate ? -EINVAL : 0;
- proxy_config.rate = config->sample_rate = in->adev->device_sample_rate;
- } else if (profile_is_sample_rate_valid(in->profile, config->sample_rate)) {
+ if (config->sample_rate != in->adev->device_sample_rate) {
+ unsigned highest_rate = profile_get_highest_sample_rate(&in->profile);
+ if (highest_rate == 0) {
+ ret = -EINVAL; /* error with device */
+ } else {
+ proxy_config.rate = config->sample_rate =
+ min(highest_rate, in->adev->device_sample_rate);
+ if (request_config_rate != 0 && proxy_config.rate != config->sample_rate) {
+ /* Changing the requested rate */
+ ret = -EINVAL;
+ } else {
+ /* Everything AOK! */
+ ret = 0;
+ }
+ }
+ }
+ } else if (profile_is_sample_rate_valid(&in->profile, config->sample_rate)) {
proxy_config.rate = config->sample_rate;
} else {
- proxy_config.rate = config->sample_rate = profile_get_default_sample_rate(in->profile);
+ proxy_config.rate = config->sample_rate = profile_get_default_sample_rate(&in->profile);
ret = -EINVAL;
}
- device_unlock(in->adev);
/* Format */
if (config->format == AUDIO_FORMAT_DEFAULT) {
- proxy_config.format = profile_get_default_format(in->profile);
+ proxy_config.format = profile_get_default_format(&in->profile);
config->format = audio_format_from_pcm_format(proxy_config.format);
} else {
enum pcm_format fmt = pcm_format_from_audio_format(config->format);
- if (profile_is_format_valid(in->profile, fmt)) {
+ if (profile_is_format_valid(&in->profile, fmt)) {
proxy_config.format = fmt;
} else {
- proxy_config.format = profile_get_default_format(in->profile);
+ proxy_config.format = profile_get_default_format(&in->profile);
config->format = audio_format_from_pcm_format(proxy_config.format);
ret = -EINVAL;
}
@@ -1043,7 +1077,7 @@ static int adev_open_input_stream(struct audio_hw_device *hw_dev,
bool calc_mask = false;
if (config->channel_mask == AUDIO_CHANNEL_NONE) {
/* query case */
- in->hal_channel_count = profile_get_default_channel_count(in->profile);
+ in->hal_channel_count = profile_get_default_channel_count(&in->profile);
calc_mask = true;
} else {
/* explicit case */
@@ -1080,8 +1114,8 @@ static int adev_open_input_stream(struct audio_hw_device *hw_dev,
// if they differ, choose the "actual" number of channels *closest* to the "logical".
// and store THAT in proxy_config.channels
proxy_config.channels =
- profile_get_closest_channel_count(in->profile, in->hal_channel_count);
- ret = proxy_prepare(&in->proxy, in->profile, &proxy_config);
+ profile_get_closest_channel_count(&in->profile, in->hal_channel_count);
+ ret = proxy_prepare(&in->proxy, &in->profile, &proxy_config);
if (ret == 0) {
in->standby = true;
@@ -1121,7 +1155,7 @@ static void adev_close_input_stream(struct audio_hw_device *hw_dev,
struct audio_stream_in *stream)
{
struct stream_in *in = (struct stream_in *)stream;
- ALOGV("adev_close_input_stream(c:%d d:%d)", in->profile->card, in->profile->device);
+ ALOGV("adev_close_input_stream(c:%d d:%d)", in->profile.card, in->profile.device);
adev_remove_stream_from_list(in->adev, &in->list_node);
@@ -1249,8 +1283,7 @@ static int adev_open(const hw_module_t* module, const char* name, hw_device_t**
if (!adev)
return -ENOMEM;
- profile_init(&adev->out_profile, PCM_OUT);
- profile_init(&adev->in_profile, PCM_IN);
+ pthread_mutex_init(&adev->lock, (const pthread_mutexattr_t *) NULL);
list_init(&adev->output_stream_list);
list_init(&adev->input_stream_list);
diff --git a/tests/hardware/struct-offset.cpp b/tests/hardware/struct-offset.cpp
index 6f86f030..82411ada 100644
--- a/tests/hardware/struct-offset.cpp
+++ b/tests/hardware/struct-offset.cpp
@@ -185,7 +185,9 @@ void CheckOffsets(void) {
CHECK_MEMBER_AT(gralloc_module_t, lockAsync, 152, 296);
CHECK_MEMBER_AT(gralloc_module_t, unlockAsync, 156, 304);
CHECK_MEMBER_AT(gralloc_module_t, lockAsync_ycbcr, 160, 312);
- CHECK_MEMBER_AT(gralloc_module_t, reserved_proc, 164, 320);
+ CHECK_MEMBER_AT(gralloc_module_t, getTransportSize, 164, 320);
+ CHECK_MEMBER_AT(gralloc_module_t, validateBufferSize, 168, 328);
+ CHECK_MEMBER_AT(gralloc_module_t, reserved_proc, 172, 336);
CHECK_MEMBER_AT(alloc_device_t, common, 0, 0);
CHECK_MEMBER_AT(alloc_device_t, alloc, 64, 120);
@@ -216,7 +218,10 @@ void CheckOffsets(void) {
CHECK_MEMBER_AT(camera_module_t, open_legacy, 144, 280);
CHECK_MEMBER_AT(camera_module_t, set_torch_mode, 148, 288);
CHECK_MEMBER_AT(camera_module_t, init, 152, 296);
- CHECK_MEMBER_AT(camera_module_t, reserved, 156, 304);
+ CHECK_MEMBER_AT(camera_module_t, get_physical_camera_info, 156, 304);
+ CHECK_MEMBER_AT(camera_module_t, is_stream_combination_supported, 160, 312);
+ CHECK_MEMBER_AT(camera_module_t, notify_device_state_change, 164, 320);
+ CHECK_MEMBER_AT(camera_module_t, reserved, 168, 328);
//Types defined in camera3.h
CHECK_MEMBER_AT(camera3_device_ops_t, initialize, 0, 0);
@@ -227,6 +232,7 @@ void CheckOffsets(void) {
CHECK_MEMBER_AT(camera3_device_ops_t, get_metadata_vendor_tag_ops, 20, 40);
CHECK_MEMBER_AT(camera3_device_ops_t, dump, 24, 48);
CHECK_MEMBER_AT(camera3_device_ops_t, flush, 28, 56);
- CHECK_MEMBER_AT(camera3_device_ops_t, reserved, 32, 64);
+ CHECK_MEMBER_AT(camera3_device_ops_t, signal_stream_flush, 32, 64);
+ CHECK_MEMBER_AT(camera3_device_ops_t, is_reconfiguration_required, 36, 72);
+ CHECK_MEMBER_AT(camera3_device_ops_t, reserved, 40, 80);
}
-