Revert "land: Shim old camera client to ims-camera"

This reverts commit d07a59e651.
Darshan Manjrekar 2018-01-11 18:39:10 +05:30
parent 1b6f0496a2
commit efa789bf52
42 changed files with 1 addition and 8306 deletions

@@ -28,45 +28,3 @@ LOCAL_MODULE_TAGS := optional
LOCAL_32_BIT_ONLY := true
include $(BUILD_SHARED_LIBRARY)
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
camera/Camera.cpp \
camera/CameraMetadata.cpp \
camera/CaptureResult.cpp \
camera/CameraParameters2.cpp \
camera/ICamera.cpp \
camera/ICameraClient.cpp \
camera/ICameraService.cpp \
camera/ICameraServiceListener.cpp \
camera/ICameraServiceProxy.cpp \
camera/ICameraRecordingProxy.cpp \
camera/ICameraRecordingProxyListener.cpp \
camera/camera2/ICameraDeviceUser.cpp \
camera/camera2/ICameraDeviceCallbacks.cpp \
camera/camera2/CaptureRequest.cpp \
camera/camera2/OutputConfiguration.cpp \
camera/CameraBase.cpp \
camera/CameraUtils.cpp \
camera/VendorTagDescriptor.cpp \
camera/CameraParameters.cpp
LOCAL_SHARED_LIBRARIES := \
libcutils \
libutils \
liblog \
libbinder \
libhardware \
libui \
libgui \
libcamera_metadata
LOCAL_C_INCLUDES += \
$(LOCAL_PATH)/camera/include \
system/media/camera/include \
system/media/private/camera/include
LOCAL_MODULE:= libshim_ims_camera
include $(BUILD_SHARED_LIBRARY)

@@ -1,376 +0,0 @@
/*
**
** Copyright (C) 2008, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
//#define LOG_NDEBUG 0
#define LOG_TAG "Camera"
#include <utils/Log.h>
#include <utils/threads.h>
#include <utils/String16.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/IMemory.h>
#include <camera/Camera.h>
#include <camera/ICameraRecordingProxyListener.h>
#include <camera/ICameraService.h>
#include <camera/ICamera.h>
#include <gui/IGraphicBufferProducer.h>
#include <gui/Surface.h>
namespace android {
Camera::Camera(int cameraId)
: CameraBase(cameraId)
{
}
CameraTraits<Camera>::TCamConnectService CameraTraits<Camera>::fnConnectService =
&ICameraService::connect;
// construct a camera client from an existing camera remote
sp<Camera> Camera::create(const sp<ICamera>& camera)
{
ALOGV("create");
if (camera == 0) {
ALOGE("camera remote is a NULL pointer");
return 0;
}
sp<Camera> c = new Camera(-1);
if (camera->connect(c) == NO_ERROR) {
c->mStatus = NO_ERROR;
c->mCamera = camera;
IInterface::asBinder(camera)->linkToDeath(c);
return c;
}
return 0;
}
Camera::~Camera()
{
// We don't need to call disconnect() here because if the CameraService
// thinks we are the owner of the hardware, it will hold a (strong)
// reference to us, and we can't possibly be here. We also don't want to
// call disconnect() here if we are in the same process as mediaserver,
// because we may be invoked by CameraService::Client::connect() and will
// deadlock if we call any method of ICamera here.
}
sp<Camera> Camera::connect(int cameraId, const String16& clientPackageName,
int clientUid)
{
return CameraBaseT::connect(cameraId, clientPackageName, clientUid);
}
status_t Camera::connectLegacy(int cameraId, int halVersion,
const String16& clientPackageName,
int clientUid,
sp<Camera>& camera)
{
ALOGV("%s: connect legacy camera device", __FUNCTION__);
sp<Camera> c = new Camera(cameraId);
sp<ICameraClient> cl = c;
status_t status = NO_ERROR;
const sp<ICameraService>& cs = CameraBaseT::getCameraService();
if (cs != 0) {
status = cs.get()->connectLegacy(cl, cameraId, halVersion, clientPackageName,
clientUid, /*out*/c->mCamera);
}
if (status == OK && c->mCamera != 0) {
IInterface::asBinder(c->mCamera)->linkToDeath(c);
c->mStatus = NO_ERROR;
camera = c;
} else {
ALOGW("An error occurred while connecting to camera %d: %d (%s)",
cameraId, status, strerror(-status));
c.clear();
}
return status;
}
status_t Camera::reconnect()
{
ALOGV("reconnect");
sp <ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->connect(this);
}
status_t Camera::lock()
{
sp <ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->lock();
}
status_t Camera::unlock()
{
sp <ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->unlock();
}
// pass the buffered IGraphicBufferProducer to the camera service
status_t Camera::setPreviewTarget(const sp<IGraphicBufferProducer>& bufferProducer)
{
ALOGV("setPreviewTarget(%p)", bufferProducer.get());
sp <ICamera> c = mCamera;
if (c == 0) return NO_INIT;
ALOGD_IF(bufferProducer == 0, "app passed NULL surface");
return c->setPreviewTarget(bufferProducer);
}
// start preview mode
status_t Camera::startPreview()
{
ALOGV("startPreview");
sp <ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->startPreview();
}
status_t Camera::storeMetaDataInBuffers(bool enabled)
{
ALOGV("storeMetaDataInBuffers: %s",
enabled? "true": "false");
sp <ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->storeMetaDataInBuffers(enabled);
}
// start recording mode, must call setPreviewTarget first
status_t Camera::startRecording()
{
ALOGV("startRecording");
sp <ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->startRecording();
}
// stop preview mode
void Camera::stopPreview()
{
ALOGV("stopPreview");
sp <ICamera> c = mCamera;
if (c == 0) return;
c->stopPreview();
}
// stop recording mode
void Camera::stopRecording()
{
ALOGV("stopRecording");
{
Mutex::Autolock _l(mLock);
mRecordingProxyListener.clear();
}
sp <ICamera> c = mCamera;
if (c == 0) return;
c->stopRecording();
}
// release a recording frame
void Camera::releaseRecordingFrame(const sp<IMemory>& mem)
{
ALOGV("releaseRecordingFrame");
sp <ICamera> c = mCamera;
if (c == 0) return;
c->releaseRecordingFrame(mem);
}
// get preview state
bool Camera::previewEnabled()
{
ALOGV("previewEnabled");
sp <ICamera> c = mCamera;
if (c == 0) return false;
return c->previewEnabled();
}
// get recording state
bool Camera::recordingEnabled()
{
ALOGV("recordingEnabled");
sp <ICamera> c = mCamera;
if (c == 0) return false;
return c->recordingEnabled();
}
status_t Camera::autoFocus()
{
ALOGV("autoFocus");
sp <ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->autoFocus();
}
status_t Camera::cancelAutoFocus()
{
ALOGV("cancelAutoFocus");
sp <ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->cancelAutoFocus();
}
// take a picture
status_t Camera::takePicture(int msgType)
{
ALOGV("takePicture: 0x%x", msgType);
sp <ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->takePicture(msgType);
}
// set preview/capture parameters - key/value pairs
status_t Camera::setParameters(const String8& params)
{
ALOGV("setParameters");
sp <ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->setParameters(params);
}
// get preview/capture parameters - key/value pairs
String8 Camera::getParameters() const
{
ALOGV("getParameters");
String8 params;
sp <ICamera> c = mCamera;
if (c != 0) params = mCamera->getParameters();
return params;
}
// send command to camera driver
status_t Camera::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2)
{
ALOGV("sendCommand");
sp <ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->sendCommand(cmd, arg1, arg2);
}
void Camera::setListener(const sp<CameraListener>& listener)
{
Mutex::Autolock _l(mLock);
mListener = listener;
}
void Camera::setRecordingProxyListener(const sp<ICameraRecordingProxyListener>& listener)
{
Mutex::Autolock _l(mLock);
mRecordingProxyListener = listener;
}
void Camera::setPreviewCallbackFlags(int flag)
{
ALOGV("setPreviewCallbackFlags");
sp <ICamera> c = mCamera;
if (c == 0) return;
mCamera->setPreviewCallbackFlag(flag);
}
status_t Camera::setPreviewCallbackTarget(
const sp<IGraphicBufferProducer>& callbackProducer)
{
sp <ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->setPreviewCallbackTarget(callbackProducer);
}
// callback from camera service
void Camera::notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2)
{
return CameraBaseT::notifyCallback(msgType, ext1, ext2);
}
// callback from camera service when frame or image is ready
void Camera::dataCallback(int32_t msgType, const sp<IMemory>& dataPtr,
camera_frame_metadata_t *metadata)
{
sp<CameraListener> listener;
{
Mutex::Autolock _l(mLock);
listener = mListener;
}
if (listener != NULL) {
listener->postData(msgType, dataPtr, metadata);
}
}
// callback from camera service when timestamped frame is ready
void Camera::dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr)
{
// If recording proxy listener is registered, forward the frame and return.
// The other listener (mListener) is ignored because the receiver needs to
// call releaseRecordingFrame.
sp<ICameraRecordingProxyListener> proxylistener;
{
Mutex::Autolock _l(mLock);
proxylistener = mRecordingProxyListener;
}
if (proxylistener != NULL) {
proxylistener->dataCallbackTimestamp(timestamp, msgType, dataPtr);
return;
}
sp<CameraListener> listener;
{
Mutex::Autolock _l(mLock);
listener = mListener;
}
if (listener != NULL) {
listener->postDataTimestamp(timestamp, msgType, dataPtr);
} else {
ALOGW("No listener was set. Drop a recording frame.");
releaseRecordingFrame(dataPtr);
}
}
sp<ICameraRecordingProxy> Camera::getRecordingProxy() {
ALOGV("getProxy");
return new RecordingProxy(this);
}
status_t Camera::RecordingProxy::startRecording(const sp<ICameraRecordingProxyListener>& listener)
{
ALOGV("RecordingProxy::startRecording");
mCamera->setRecordingProxyListener(listener);
mCamera->reconnect();
return mCamera->startRecording();
}
void Camera::RecordingProxy::stopRecording()
{
ALOGV("RecordingProxy::stopRecording");
mCamera->stopRecording();
}
void Camera::RecordingProxy::releaseRecordingFrame(const sp<IMemory>& mem)
{
ALOGV("RecordingProxy::releaseRecordingFrame");
mCamera->releaseRecordingFrame(mem);
}
Camera::RecordingProxy::RecordingProxy(const sp<Camera>& camera)
{
mCamera = camera;
}
}; // namespace android
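
The file above is the legacy (pre-Camera2) client: every call funnels through the remote ICamera binder that connect() obtains from the camera service. A minimal sketch of how a caller would drive it, using only the API shown above; the package name, the producer argument, and the -1 "use calling UID" value are placeholder assumptions, not part of this commit.

#define LOG_TAG "CameraShimExample"
#include <utils/Errors.h>
#include <utils/Log.h>
#include <utils/String16.h>
#include <camera/Camera.h>
#include <gui/IGraphicBufferProducer.h>

using namespace android;

// Connect to camera 0, aim the preview at a caller-supplied buffer producer,
// and start preview, following the NO_INIT/NO_ERROR conventions used above.
static status_t startPreviewSketch(const sp<IGraphicBufferProducer>& producer) {
    sp<Camera> cam = Camera::connect(/*cameraId*/ 0,
                                     String16("com.example.shimclient"),
                                     /*clientUid*/ -1);  // -1: assumed "use calling UID"
    if (cam == 0 || cam->getStatus() != NO_ERROR) {
        ALOGE("connect failed");
        return NO_INIT;
    }
    status_t res = cam->setPreviewTarget(producer);
    if (res != NO_ERROR) {
        return res;
    }
    return cam->startPreview();
}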

@@ -1,221 +0,0 @@
/*
**
** Copyright (C) 2013, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
//#define LOG_NDEBUG 0
#define LOG_TAG "CameraBase"
#include <utils/Log.h>
#include <utils/threads.h>
#include <utils/Mutex.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/IMemory.h>
#include <camera/CameraBase.h>
#include <camera/ICameraService.h>
// needed to instantiate
#include <camera/Camera.h>
#include <system/camera_metadata.h>
namespace android {
namespace {
sp<ICameraService> gCameraService;
const int kCameraServicePollDelay = 500000; // 0.5s
const char* kCameraServiceName = "media.camera";
Mutex gLock;
class DeathNotifier : public IBinder::DeathRecipient
{
public:
DeathNotifier() {
}
virtual void binderDied(const wp<IBinder>& /*who*/) {
ALOGV("binderDied");
Mutex::Autolock _l(gLock);
gCameraService.clear();
ALOGW("Camera service died!");
}
};
sp<DeathNotifier> gDeathNotifier;
}; // namespace anonymous
///////////////////////////////////////////////////////////
// CameraBase definition
///////////////////////////////////////////////////////////
// establish binder interface to camera service
template <typename TCam, typename TCamTraits>
const sp<ICameraService>& CameraBase<TCam, TCamTraits>::getCameraService()
{
Mutex::Autolock _l(gLock);
if (gCameraService.get() == 0) {
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder;
do {
binder = sm->getService(String16(kCameraServiceName));
if (binder != 0) {
break;
}
ALOGW("CameraService not published, waiting...");
usleep(kCameraServicePollDelay);
} while(true);
if (gDeathNotifier == NULL) {
gDeathNotifier = new DeathNotifier();
}
binder->linkToDeath(gDeathNotifier);
gCameraService = interface_cast<ICameraService>(binder);
}
ALOGE_IF(gCameraService == 0, "no CameraService!?");
return gCameraService;
}
template <typename TCam, typename TCamTraits>
sp<TCam> CameraBase<TCam, TCamTraits>::connect(int cameraId,
const String16& clientPackageName,
int clientUid)
{
ALOGV("%s: connect", __FUNCTION__);
sp<TCam> c = new TCam(cameraId);
sp<TCamCallbacks> cl = c;
status_t status = NO_ERROR;
const sp<ICameraService>& cs = getCameraService();
if (cs != 0) {
TCamConnectService fnConnectService = TCamTraits::fnConnectService;
status = (cs.get()->*fnConnectService)(cl, cameraId, clientPackageName, clientUid,
/*out*/ c->mCamera);
}
if (status == OK && c->mCamera != 0) {
IInterface::asBinder(c->mCamera)->linkToDeath(c);
c->mStatus = NO_ERROR;
} else {
ALOGW("An error occurred while connecting to camera: %d", cameraId);
c.clear();
}
return c;
}
template <typename TCam, typename TCamTraits>
void CameraBase<TCam, TCamTraits>::disconnect()
{
ALOGV("%s: disconnect", __FUNCTION__);
if (mCamera != 0) {
mCamera->disconnect();
IInterface::asBinder(mCamera)->unlinkToDeath(this);
mCamera = 0;
}
ALOGV("%s: disconnect (done)", __FUNCTION__);
}
template <typename TCam, typename TCamTraits>
CameraBase<TCam, TCamTraits>::CameraBase(int cameraId) :
mStatus(UNKNOWN_ERROR),
mCameraId(cameraId)
{
}
template <typename TCam, typename TCamTraits>
CameraBase<TCam, TCamTraits>::~CameraBase()
{
}
template <typename TCam, typename TCamTraits>
sp<typename TCamTraits::TCamUser> CameraBase<TCam, TCamTraits>::remote()
{
return mCamera;
}
template <typename TCam, typename TCamTraits>
status_t CameraBase<TCam, TCamTraits>::getStatus()
{
return mStatus;
}
template <typename TCam, typename TCamTraits>
void CameraBase<TCam, TCamTraits>::binderDied(const wp<IBinder>& /*who*/) {
ALOGW("mediaserver's remote binder Camera object died");
notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_SERVER_DIED, /*ext2*/0);
}
template <typename TCam, typename TCamTraits>
void CameraBase<TCam, TCamTraits>::setListener(const sp<TCamListener>& listener)
{
Mutex::Autolock _l(mLock);
mListener = listener;
}
// callback from camera service
template <typename TCam, typename TCamTraits>
void CameraBase<TCam, TCamTraits>::notifyCallback(int32_t msgType,
int32_t ext1,
int32_t ext2)
{
sp<TCamListener> listener;
{
Mutex::Autolock _l(mLock);
listener = mListener;
}
if (listener != NULL) {
listener->notify(msgType, ext1, ext2);
}
}
template <typename TCam, typename TCamTraits>
int CameraBase<TCam, TCamTraits>::getNumberOfCameras() {
const sp<ICameraService> cs = getCameraService();
if (!cs.get()) {
// as required by the public Java APIs
return 0;
}
return cs->getNumberOfCameras();
}
// this can be in BaseCamera but it should be an instance method
template <typename TCam, typename TCamTraits>
status_t CameraBase<TCam, TCamTraits>::getCameraInfo(int cameraId,
struct CameraInfo* cameraInfo) {
const sp<ICameraService>& cs = getCameraService();
if (cs == 0) return UNKNOWN_ERROR;
return cs->getCameraInfo(cameraId, cameraInfo);
}
template <typename TCam, typename TCamTraits>
status_t CameraBase<TCam, TCamTraits>::addServiceListener(
const sp<ICameraServiceListener>& listener) {
const sp<ICameraService>& cs = getCameraService();
if (cs == 0) return UNKNOWN_ERROR;
return cs->addListener(listener);
}
template <typename TCam, typename TCamTraits>
status_t CameraBase<TCam, TCamTraits>::removeServiceListener(
const sp<ICameraServiceListener>& listener) {
const sp<ICameraService>& cs = getCameraService();
if (cs == 0) return UNKNOWN_ERROR;
return cs->removeListener(listener);
}
template class CameraBase<Camera>;
} // namespace android
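
CameraBase supplies the static service helpers (getNumberOfCameras, getCameraInfo, listener registration) that the explicit `template class CameraBase<Camera>` instantiation above exposes on Camera. A short enumeration sketch, assuming the CameraInfo struct declared in CameraBase.h; the function and tag names are illustrative.

#define LOG_TAG "CameraShimExample"
#include <utils/Log.h>
#include <camera/Camera.h>

using namespace android;

// List every camera the service reports. getCameraService() blocks, polling
// for "media.camera" every 0.5s, until the service is registered.
static void listCamerasSketch() {
    int count = Camera::getNumberOfCameras();
    for (int i = 0; i < count; i++) {
        CameraInfo info;
        if (Camera::getCameraInfo(i, &info) == OK) {
            ALOGI("camera %d: facing=%d orientation=%d",
                  i, info.facing, info.orientation);
        }
    }
}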

@@ -1,684 +0,0 @@
/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// #define LOG_NDEBUG 0
#define LOG_TAG "Camera2-Metadata"
#include <utils/Log.h>
#include <utils/Errors.h>
#include <camera/CameraMetadata.h>
#include <binder/Parcel.h>
namespace android {
#define ALIGN_TO(val, alignment) \
(((uintptr_t)(val) + ((alignment) - 1)) & ~((alignment) - 1))
typedef Parcel::WritableBlob WritableBlob;
typedef Parcel::ReadableBlob ReadableBlob;
CameraMetadata::CameraMetadata() :
mBuffer(NULL), mLocked(false) {
}
CameraMetadata::CameraMetadata(size_t entryCapacity, size_t dataCapacity) :
mLocked(false)
{
mBuffer = allocate_camera_metadata(entryCapacity, dataCapacity);
}
CameraMetadata::CameraMetadata(const CameraMetadata &other) :
mLocked(false) {
mBuffer = clone_camera_metadata(other.mBuffer);
}
CameraMetadata::CameraMetadata(camera_metadata_t *buffer) :
mBuffer(NULL), mLocked(false) {
acquire(buffer);
}
CameraMetadata &CameraMetadata::operator=(const CameraMetadata &other) {
return operator=(other.mBuffer);
}
CameraMetadata &CameraMetadata::operator=(const camera_metadata_t *buffer) {
if (mLocked) {
ALOGE("%s: Assignment to a locked CameraMetadata!", __FUNCTION__);
return *this;
}
if (CC_LIKELY(buffer != mBuffer)) {
camera_metadata_t *newBuffer = clone_camera_metadata(buffer);
clear();
mBuffer = newBuffer;
}
return *this;
}
CameraMetadata::~CameraMetadata() {
mLocked = false;
clear();
}
const camera_metadata_t* CameraMetadata::getAndLock() const {
mLocked = true;
return mBuffer;
}
status_t CameraMetadata::unlock(const camera_metadata_t *buffer) {
if (!mLocked) {
ALOGE("%s: Can't unlock a non-locked CameraMetadata!", __FUNCTION__);
return INVALID_OPERATION;
}
if (buffer != mBuffer) {
ALOGE("%s: Can't unlock CameraMetadata with wrong pointer!",
__FUNCTION__);
return BAD_VALUE;
}
mLocked = false;
return OK;
}
camera_metadata_t* CameraMetadata::release() {
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return NULL;
}
camera_metadata_t *released = mBuffer;
mBuffer = NULL;
return released;
}
void CameraMetadata::clear() {
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return;
}
if (mBuffer) {
free_camera_metadata(mBuffer);
mBuffer = NULL;
}
}
void CameraMetadata::acquire(camera_metadata_t *buffer) {
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return;
}
clear();
mBuffer = buffer;
ALOGE_IF(validate_camera_metadata_structure(mBuffer, /*size*/NULL) != OK,
"%s: Failed to validate metadata structure %p",
__FUNCTION__, buffer);
}
void CameraMetadata::acquire(CameraMetadata &other) {
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return;
}
acquire(other.release());
}
status_t CameraMetadata::append(const CameraMetadata &other) {
return append(other.mBuffer);
}
status_t CameraMetadata::append(const camera_metadata_t* other) {
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return INVALID_OPERATION;
}
size_t extraEntries = get_camera_metadata_entry_count(other);
size_t extraData = get_camera_metadata_data_count(other);
resizeIfNeeded(extraEntries, extraData);
return append_camera_metadata(mBuffer, other);
}
size_t CameraMetadata::entryCount() const {
return (mBuffer == NULL) ? 0 :
get_camera_metadata_entry_count(mBuffer);
}
bool CameraMetadata::isEmpty() const {
return entryCount() == 0;
}
status_t CameraMetadata::sort() {
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return INVALID_OPERATION;
}
return sort_camera_metadata(mBuffer);
}
status_t CameraMetadata::checkType(uint32_t tag, uint8_t expectedType) {
int tagType = get_camera_metadata_tag_type(tag);
if ( CC_UNLIKELY(tagType == -1)) {
ALOGE("Update metadata entry: Unknown tag %d", tag);
return INVALID_OPERATION;
}
if ( CC_UNLIKELY(tagType != expectedType) ) {
ALOGE("Mismatched tag type when updating entry %s (%d) of type %s; "
"got type %s data instead ",
get_camera_metadata_tag_name(tag), tag,
camera_metadata_type_names[tagType],
camera_metadata_type_names[expectedType]);
return INVALID_OPERATION;
}
return OK;
}
status_t CameraMetadata::update(uint32_t tag,
const int32_t *data, size_t data_count) {
status_t res;
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return INVALID_OPERATION;
}
if ( (res = checkType(tag, TYPE_INT32)) != OK) {
return res;
}
return updateImpl(tag, (const void*)data, data_count);
}
status_t CameraMetadata::update(uint32_t tag,
const uint8_t *data, size_t data_count) {
status_t res;
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return INVALID_OPERATION;
}
if ( (res = checkType(tag, TYPE_BYTE)) != OK) {
return res;
}
return updateImpl(tag, (const void*)data, data_count);
}
status_t CameraMetadata::update(uint32_t tag,
const float *data, size_t data_count) {
status_t res;
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return INVALID_OPERATION;
}
if ( (res = checkType(tag, TYPE_FLOAT)) != OK) {
return res;
}
return updateImpl(tag, (const void*)data, data_count);
}
status_t CameraMetadata::update(uint32_t tag,
const int64_t *data, size_t data_count) {
status_t res;
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return INVALID_OPERATION;
}
if ( (res = checkType(tag, TYPE_INT64)) != OK) {
return res;
}
return updateImpl(tag, (const void*)data, data_count);
}
status_t CameraMetadata::update(uint32_t tag,
const double *data, size_t data_count) {
status_t res;
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return INVALID_OPERATION;
}
if ( (res = checkType(tag, TYPE_DOUBLE)) != OK) {
return res;
}
return updateImpl(tag, (const void*)data, data_count);
}
status_t CameraMetadata::update(uint32_t tag,
const camera_metadata_rational_t *data, size_t data_count) {
status_t res;
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return INVALID_OPERATION;
}
if ( (res = checkType(tag, TYPE_RATIONAL)) != OK) {
return res;
}
return updateImpl(tag, (const void*)data, data_count);
}
status_t CameraMetadata::update(uint32_t tag,
const String8 &string) {
status_t res;
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return INVALID_OPERATION;
}
if ( (res = checkType(tag, TYPE_BYTE)) != OK) {
return res;
}
// string.size() doesn't count the null termination character.
return updateImpl(tag, (const void*)string.string(), string.size() + 1);
}
status_t CameraMetadata::updateImpl(uint32_t tag, const void *data,
size_t data_count) {
status_t res;
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return INVALID_OPERATION;
}
int type = get_camera_metadata_tag_type(tag);
if (type == -1) {
ALOGE("%s: Tag %d not found", __FUNCTION__, tag);
return BAD_VALUE;
}
// Safety check - ensure that data isn't pointing to this metadata, since
// that would get invalidated if a resize is needed
size_t bufferSize = get_camera_metadata_size(mBuffer);
uintptr_t bufAddr = reinterpret_cast<uintptr_t>(mBuffer);
uintptr_t dataAddr = reinterpret_cast<uintptr_t>(data);
if (dataAddr > bufAddr && dataAddr < (bufAddr + bufferSize)) {
ALOGE("%s: Update attempted with data from the same metadata buffer!",
__FUNCTION__);
return INVALID_OPERATION;
}
size_t data_size = calculate_camera_metadata_entry_data_size(type,
data_count);
res = resizeIfNeeded(1, data_size);
if (res == OK) {
camera_metadata_entry_t entry;
res = find_camera_metadata_entry(mBuffer, tag, &entry);
if (res == NAME_NOT_FOUND) {
res = add_camera_metadata_entry(mBuffer,
tag, data, data_count);
} else if (res == OK) {
res = update_camera_metadata_entry(mBuffer,
entry.index, data, data_count, NULL);
}
}
if (res != OK) {
ALOGE("%s: Unable to update metadata entry %s.%s (%x): %s (%d)",
__FUNCTION__, get_camera_metadata_section_name(tag),
get_camera_metadata_tag_name(tag), tag, strerror(-res), res);
}
IF_ALOGV() {
ALOGE_IF(validate_camera_metadata_structure(mBuffer, /*size*/NULL) !=
OK,
"%s: Failed to validate metadata structure after update %p",
__FUNCTION__, mBuffer);
}
return res;
}
bool CameraMetadata::exists(uint32_t tag) const {
camera_metadata_ro_entry entry;
return find_camera_metadata_ro_entry(mBuffer, tag, &entry) == 0;
}
camera_metadata_entry_t CameraMetadata::find(uint32_t tag) {
status_t res;
camera_metadata_entry entry;
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
entry.count = 0;
return entry;
}
res = find_camera_metadata_entry(mBuffer, tag, &entry);
if (CC_UNLIKELY( res != OK )) {
entry.count = 0;
entry.data.u8 = NULL;
}
return entry;
}
camera_metadata_ro_entry_t CameraMetadata::find(uint32_t tag) const {
status_t res;
camera_metadata_ro_entry entry;
res = find_camera_metadata_ro_entry(mBuffer, tag, &entry);
if (CC_UNLIKELY( res != OK )) {
entry.count = 0;
entry.data.u8 = NULL;
}
return entry;
}
status_t CameraMetadata::erase(uint32_t tag) {
camera_metadata_entry_t entry;
status_t res;
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return INVALID_OPERATION;
}
res = find_camera_metadata_entry(mBuffer, tag, &entry);
if (res == NAME_NOT_FOUND) {
return OK;
} else if (res != OK) {
ALOGE("%s: Error looking for entry %s.%s (%x): %s %d",
__FUNCTION__,
get_camera_metadata_section_name(tag),
get_camera_metadata_tag_name(tag), tag, strerror(-res), res);
return res;
}
res = delete_camera_metadata_entry(mBuffer, entry.index);
if (res != OK) {
ALOGE("%s: Error deleting entry %s.%s (%x): %s %d",
__FUNCTION__,
get_camera_metadata_section_name(tag),
get_camera_metadata_tag_name(tag), tag, strerror(-res), res);
}
return res;
}
void CameraMetadata::dump(int fd, int verbosity, int indentation) const {
dump_indented_camera_metadata(mBuffer, fd, verbosity, indentation);
}
status_t CameraMetadata::resizeIfNeeded(size_t extraEntries, size_t extraData) {
if (mBuffer == NULL) {
mBuffer = allocate_camera_metadata(extraEntries * 2, extraData * 2);
if (mBuffer == NULL) {
ALOGE("%s: Can't allocate larger metadata buffer", __FUNCTION__);
return NO_MEMORY;
}
} else {
size_t currentEntryCount = get_camera_metadata_entry_count(mBuffer);
size_t currentEntryCap = get_camera_metadata_entry_capacity(mBuffer);
size_t newEntryCount = currentEntryCount +
extraEntries;
newEntryCount = (newEntryCount > currentEntryCap) ?
newEntryCount * 2 : currentEntryCap;
size_t currentDataCount = get_camera_metadata_data_count(mBuffer);
size_t currentDataCap = get_camera_metadata_data_capacity(mBuffer);
size_t newDataCount = currentDataCount +
extraData;
newDataCount = (newDataCount > currentDataCap) ?
newDataCount * 2 : currentDataCap;
if (newEntryCount > currentEntryCap ||
newDataCount > currentDataCap) {
camera_metadata_t *oldBuffer = mBuffer;
mBuffer = allocate_camera_metadata(newEntryCount,
newDataCount);
if (mBuffer == NULL) {
ALOGE("%s: Can't allocate larger metadata buffer", __FUNCTION__);
return NO_MEMORY;
}
append_camera_metadata(mBuffer, oldBuffer);
free_camera_metadata(oldBuffer);
}
}
return OK;
}
status_t CameraMetadata::readFromParcel(const Parcel& data,
camera_metadata_t** out) {
status_t err = OK;
camera_metadata_t* metadata = NULL;
if (out) {
*out = NULL;
}
// See CameraMetadata::writeToParcel for parcel data layout diagram and explanation.
// arg0 = blobSize (int32)
int32_t blobSizeTmp = -1;
if ((err = data.readInt32(&blobSizeTmp)) != OK) {
ALOGE("%s: Failed to read metadata size (error %d %s)",
__FUNCTION__, err, strerror(-err));
return err;
}
const size_t blobSize = static_cast<size_t>(blobSizeTmp);
const size_t alignment = get_camera_metadata_alignment();
// Special case: zero blob size means zero sized (NULL) metadata.
if (blobSize == 0) {
ALOGV("%s: Read 0-sized metadata", __FUNCTION__);
return OK;
}
if (blobSize <= alignment) {
ALOGE("%s: metadata blob is malformed, blobSize(%zu) should be larger than alignment(%zu)",
__FUNCTION__, blobSize, alignment);
return BAD_VALUE;
}
const size_t metadataSize = blobSize - alignment;
// NOTE: this doesn't make sense to me. shouldn't the blob
// know how big it is? why do we have to specify the size
// to Parcel::readBlob ?
ReadableBlob blob;
// arg1 = metadata (blob)
do {
if ((err = data.readBlob(blobSize, &blob)) != OK) {
ALOGE("%s: Failed to read metadata blob (sized %zu). Possible "
" serialization bug. Error %d %s",
__FUNCTION__, blobSize, err, strerror(-err));
break;
}
// arg2 = offset (blob)
// Must be after blob since we don't know offset until after writeBlob.
int32_t offsetTmp;
if ((err = data.readInt32(&offsetTmp)) != OK) {
ALOGE("%s: Failed to read metadata offsetTmp (error %d %s)",
__FUNCTION__, err, strerror(-err));
break;
}
const size_t offset = static_cast<size_t>(offsetTmp);
if (offset >= alignment) {
ALOGE("%s: metadata offset(%zu) should be less than alignment(%zu)",
__FUNCTION__, blobSize, alignment);
err = BAD_VALUE;
break;
}
const uintptr_t metadataStart = reinterpret_cast<uintptr_t>(blob.data()) + offset;
const camera_metadata_t* tmp =
reinterpret_cast<const camera_metadata_t*>(metadataStart);
ALOGV("%s: alignment is: %zu, metadata start: %p, offset: %zu",
__FUNCTION__, alignment, tmp, offset);
metadata = allocate_copy_camera_metadata_checked(tmp, metadataSize);
if (metadata == NULL) {
// We consider that allocation only fails if the validation
// also failed, therefore the readFromParcel was a failure.
ALOGE("%s: metadata allocation and copy failed", __FUNCTION__);
err = BAD_VALUE;
}
} while(0);
blob.release();
if (out) {
ALOGV("%s: Set out metadata to %p", __FUNCTION__, metadata);
*out = metadata;
} else if (metadata != NULL) {
ALOGV("%s: Freed camera metadata at %p", __FUNCTION__, metadata);
free_camera_metadata(metadata);
}
return err;
}
status_t CameraMetadata::writeToParcel(Parcel& data,
const camera_metadata_t* metadata) {
status_t res = OK;
/**
* Below is the camera metadata parcel layout:
*
* |--------------------------------------------|
* | arg0: blobSize |
* | (length = 4) |
* |--------------------------------------------|<--Skip the rest if blobSize == 0.
* | |
* | |
* | arg1: blob |
* | (length = variable, see arg1 layout below) |
* | |
* | |
* |--------------------------------------------|
* | arg2: offset |
* | (length = 4) |
* |--------------------------------------------|
*/
// arg0 = blobSize (int32)
if (metadata == NULL) {
// Write zero blobSize for null metadata.
return data.writeInt32(0);
}
/**
* Always make the blob size sufficiently larger, as we need put alignment
* padding and metadata into the blob. Since we don't know the alignment
* offset before writeBlob. Then write the metadata to aligned offset.
*/
const size_t metadataSize = get_camera_metadata_compact_size(metadata);
const size_t alignment = get_camera_metadata_alignment();
const size_t blobSize = metadataSize + alignment;
res = data.writeInt32(static_cast<int32_t>(blobSize));
if (res != OK) {
return res;
}
size_t offset = 0;
/**
* arg1 = metadata (blob).
*
* The blob size is the sum of front padding size, metadata size and back padding
* size, which is equal to metadataSize + alignment.
*
* The blob layout is:
* |------------------------------------|<----Start address of the blob (unaligned).
* | front padding |
* | (size = offset) |
* |------------------------------------|<----Aligned start address of metadata.
* | |
* | |
* | metadata |
* | (size = metadataSize) |
* | |
* | |
* |------------------------------------|
* | back padding |
* | (size = alignment - offset) |
* |------------------------------------|<----End address of blob.
* (Blob start address + blob size).
*/
WritableBlob blob;
do {
res = data.writeBlob(blobSize, false, &blob);
if (res != OK) {
break;
}
const uintptr_t metadataStart = ALIGN_TO(blob.data(), alignment);
offset = metadataStart - reinterpret_cast<uintptr_t>(blob.data());
ALOGV("%s: alignment is: %zu, metadata start: %p, offset: %zu",
__FUNCTION__, alignment,
reinterpret_cast<const void *>(metadataStart), offset);
copy_camera_metadata(reinterpret_cast<void*>(metadataStart), metadataSize, metadata);
// Not too big of a problem since receiving side does hard validation
// Don't check the size since the compact size could be larger
if (validate_camera_metadata_structure(metadata, /*size*/NULL) != OK) {
ALOGW("%s: Failed to validate metadata %p before writing blob",
__FUNCTION__, metadata);
}
} while(false);
blob.release();
// arg2 = offset (int32)
res = data.writeInt32(static_cast<int32_t>(offset));
return res;
}
status_t CameraMetadata::readFromParcel(Parcel *parcel) {
ALOGV("%s: parcel = %p", __FUNCTION__, parcel);
status_t res = OK;
if (parcel == NULL) {
ALOGE("%s: parcel is null", __FUNCTION__);
return BAD_VALUE;
}
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return INVALID_OPERATION;
}
camera_metadata *buffer = NULL;
// TODO: reading should return a status code, in case validation fails
res = CameraMetadata::readFromParcel(*parcel, &buffer);
if (res != NO_ERROR) {
ALOGE("%s: Failed to read from parcel. Metadata is unchanged.",
__FUNCTION__);
return res;
}
clear();
mBuffer = buffer;
return OK;
}
status_t CameraMetadata::writeToParcel(Parcel *parcel) const {
ALOGV("%s: parcel = %p", __FUNCTION__, parcel);
if (parcel == NULL) {
ALOGE("%s: parcel is null", __FUNCTION__);
return BAD_VALUE;
}
return CameraMetadata::writeToParcel(*parcel, mBuffer);
}
void CameraMetadata::swap(CameraMetadata& other) {
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return;
} else if (other.mLocked) {
ALOGE("%s: Other CameraMetadata is locked", __FUNCTION__);
return;
}
camera_metadata* thisBuf = mBuffer;
camera_metadata* otherBuf = other.mBuffer;
other.mBuffer = thisBuf;
mBuffer = otherBuf;
}
}; // namespace android
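
The readFromParcel/writeToParcel pair above implements the blobSize/blob/offset layout documented in the writeToParcel comment. A round-trip sketch of that path; ANDROID_SENSOR_ORIENTATION is assumed to be available from system/media's camera_metadata_tags.h, and the capacities are arbitrary.

#include <binder/Parcel.h>
#include <camera/CameraMetadata.h>
#include <system/camera_metadata.h>

using namespace android;

// Write one int32 tag, serialize it with the blob layout above, and read it back.
static status_t metadataRoundTripSketch() {
    CameraMetadata in(/*entryCapacity*/ 4, /*dataCapacity*/ 32);
    int32_t orientation = 90;
    status_t res = in.update(ANDROID_SENSOR_ORIENTATION, &orientation, /*data_count*/ 1);
    if (res != OK) return res;

    Parcel parcel;
    res = in.writeToParcel(&parcel);
    if (res != OK) return res;

    parcel.setDataPosition(0);           // rewind before deserializing
    CameraMetadata out;
    res = out.readFromParcel(&parcel);
    if (res != OK) return res;

    camera_metadata_entry_t entry = out.find(ANDROID_SENSOR_ORIENTATION);
    return (entry.count == 1 && entry.data.i32[0] == orientation) ? OK : BAD_VALUE;
}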

@@ -1,555 +0,0 @@
/*
**
** Copyright 2008, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
#define LOG_TAG "CameraParams"
#include <utils/Log.h>
#include <string.h>
#include <stdlib.h>
#include <camera/CameraParameters.h>
#include <camera/CameraParametersExtra.h>
#include <system/graphics.h>
namespace android {
// Parameter keys to communicate between camera application and driver.
const char CameraParameters::KEY_PREVIEW_SIZE[] = "preview-size";
const char CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES[] = "preview-size-values";
const char CameraParameters::KEY_PREVIEW_FORMAT[] = "preview-format";
const char CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS[] = "preview-format-values";
const char CameraParameters::KEY_PREVIEW_FRAME_RATE[] = "preview-frame-rate";
const char CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES[] = "preview-frame-rate-values";
const char CameraParameters::KEY_PREVIEW_FPS_RANGE[] = "preview-fps-range";
const char CameraParameters::KEY_SUPPORTED_PREVIEW_FPS_RANGE[] = "preview-fps-range-values";
const char CameraParameters::KEY_PICTURE_SIZE[] = "picture-size";
const char CameraParameters::KEY_SUPPORTED_PICTURE_SIZES[] = "picture-size-values";
const char CameraParameters::KEY_PICTURE_FORMAT[] = "picture-format";
const char CameraParameters::KEY_SUPPORTED_PICTURE_FORMATS[] = "picture-format-values";
const char CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH[] = "jpeg-thumbnail-width";
const char CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT[] = "jpeg-thumbnail-height";
const char CameraParameters::KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES[] = "jpeg-thumbnail-size-values";
const char CameraParameters::KEY_JPEG_THUMBNAIL_QUALITY[] = "jpeg-thumbnail-quality";
const char CameraParameters::KEY_JPEG_QUALITY[] = "jpeg-quality";
const char CameraParameters::KEY_ROTATION[] = "rotation";
const char CameraParameters::KEY_GPS_LATITUDE[] = "gps-latitude";
const char CameraParameters::KEY_GPS_LONGITUDE[] = "gps-longitude";
const char CameraParameters::KEY_GPS_ALTITUDE[] = "gps-altitude";
const char CameraParameters::KEY_GPS_TIMESTAMP[] = "gps-timestamp";
const char CameraParameters::KEY_GPS_PROCESSING_METHOD[] = "gps-processing-method";
const char CameraParameters::KEY_WHITE_BALANCE[] = "whitebalance";
const char CameraParameters::KEY_SUPPORTED_WHITE_BALANCE[] = "whitebalance-values";
const char CameraParameters::KEY_EFFECT[] = "effect";
const char CameraParameters::KEY_SUPPORTED_EFFECTS[] = "effect-values";
const char CameraParameters::KEY_ANTIBANDING[] = "antibanding";
const char CameraParameters::KEY_SUPPORTED_ANTIBANDING[] = "antibanding-values";
const char CameraParameters::KEY_SCENE_MODE[] = "scene-mode";
const char CameraParameters::KEY_SUPPORTED_SCENE_MODES[] = "scene-mode-values";
const char CameraParameters::KEY_FLASH_MODE[] = "flash-mode";
const char CameraParameters::KEY_SUPPORTED_FLASH_MODES[] = "flash-mode-values";
const char CameraParameters::KEY_FOCUS_MODE[] = "focus-mode";
const char CameraParameters::KEY_SUPPORTED_FOCUS_MODES[] = "focus-mode-values";
const char CameraParameters::KEY_MAX_NUM_FOCUS_AREAS[] = "max-num-focus-areas";
const char CameraParameters::KEY_FOCUS_AREAS[] = "focus-areas";
const char CameraParameters::KEY_FOCAL_LENGTH[] = "focal-length";
const char CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE[] = "horizontal-view-angle";
const char CameraParameters::KEY_VERTICAL_VIEW_ANGLE[] = "vertical-view-angle";
const char CameraParameters::KEY_EXPOSURE_COMPENSATION[] = "exposure-compensation";
const char CameraParameters::KEY_MAX_EXPOSURE_COMPENSATION[] = "max-exposure-compensation";
const char CameraParameters::KEY_MIN_EXPOSURE_COMPENSATION[] = "min-exposure-compensation";
const char CameraParameters::KEY_EXPOSURE_COMPENSATION_STEP[] = "exposure-compensation-step";
const char CameraParameters::KEY_AUTO_EXPOSURE_LOCK[] = "auto-exposure-lock";
const char CameraParameters::KEY_AUTO_EXPOSURE_LOCK_SUPPORTED[] = "auto-exposure-lock-supported";
const char CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK[] = "auto-whitebalance-lock";
const char CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED[] = "auto-whitebalance-lock-supported";
const char CameraParameters::KEY_MAX_NUM_METERING_AREAS[] = "max-num-metering-areas";
const char CameraParameters::KEY_METERING_AREAS[] = "metering-areas";
const char CameraParameters::KEY_ZOOM[] = "zoom";
const char CameraParameters::KEY_MAX_ZOOM[] = "max-zoom";
const char CameraParameters::KEY_ZOOM_RATIOS[] = "zoom-ratios";
const char CameraParameters::KEY_ZOOM_SUPPORTED[] = "zoom-supported";
const char CameraParameters::KEY_SMOOTH_ZOOM_SUPPORTED[] = "smooth-zoom-supported";
const char CameraParameters::KEY_FOCUS_DISTANCES[] = "focus-distances";
const char CameraParameters::KEY_VIDEO_FRAME_FORMAT[] = "video-frame-format";
const char CameraParameters::KEY_VIDEO_SIZE[] = "video-size";
const char CameraParameters::KEY_SUPPORTED_VIDEO_SIZES[] = "video-size-values";
const char CameraParameters::KEY_PREFERRED_PREVIEW_SIZE_FOR_VIDEO[] = "preferred-preview-size-for-video";
const char CameraParameters::KEY_MAX_NUM_DETECTED_FACES_HW[] = "max-num-detected-faces-hw";
const char CameraParameters::KEY_MAX_NUM_DETECTED_FACES_SW[] = "max-num-detected-faces-sw";
const char CameraParameters::KEY_RECORDING_HINT[] = "recording-hint";
const char CameraParameters::KEY_VIDEO_SNAPSHOT_SUPPORTED[] = "video-snapshot-supported";
const char CameraParameters::KEY_VIDEO_STABILIZATION[] = "video-stabilization";
const char CameraParameters::KEY_VIDEO_STABILIZATION_SUPPORTED[] = "video-stabilization-supported";
const char CameraParameters::KEY_LIGHTFX[] = "light-fx";
const char CameraParameters::TRUE[] = "true";
const char CameraParameters::FALSE[] = "false";
const char CameraParameters::FOCUS_DISTANCE_INFINITY[] = "Infinity";
// Values for white balance settings.
const char CameraParameters::WHITE_BALANCE_AUTO[] = "auto";
const char CameraParameters::WHITE_BALANCE_INCANDESCENT[] = "incandescent";
const char CameraParameters::WHITE_BALANCE_FLUORESCENT[] = "fluorescent";
const char CameraParameters::WHITE_BALANCE_WARM_FLUORESCENT[] = "warm-fluorescent";
const char CameraParameters::WHITE_BALANCE_DAYLIGHT[] = "daylight";
const char CameraParameters::WHITE_BALANCE_CLOUDY_DAYLIGHT[] = "cloudy-daylight";
const char CameraParameters::WHITE_BALANCE_TWILIGHT[] = "twilight";
const char CameraParameters::WHITE_BALANCE_SHADE[] = "shade";
const char CameraParameters::WHITE_BALANCE_MANUAL_CCT[] = "manual-cct";
// Values for effect settings.
const char CameraParameters::EFFECT_NONE[] = "none";
const char CameraParameters::EFFECT_MONO[] = "mono";
const char CameraParameters::EFFECT_NEGATIVE[] = "negative";
const char CameraParameters::EFFECT_SOLARIZE[] = "solarize";
const char CameraParameters::EFFECT_SEPIA[] = "sepia";
const char CameraParameters::EFFECT_POSTERIZE[] = "posterize";
const char CameraParameters::EFFECT_WHITEBOARD[] = "whiteboard";
const char CameraParameters::EFFECT_BLACKBOARD[] = "blackboard";
const char CameraParameters::EFFECT_AQUA[] = "aqua";
// Values for antibanding settings.
const char CameraParameters::ANTIBANDING_AUTO[] = "auto";
const char CameraParameters::ANTIBANDING_50HZ[] = "50hz";
const char CameraParameters::ANTIBANDING_60HZ[] = "60hz";
const char CameraParameters::ANTIBANDING_OFF[] = "off";
// Values for flash mode settings.
const char CameraParameters::FLASH_MODE_OFF[] = "off";
const char CameraParameters::FLASH_MODE_AUTO[] = "auto";
const char CameraParameters::FLASH_MODE_ON[] = "on";
const char CameraParameters::FLASH_MODE_RED_EYE[] = "red-eye";
const char CameraParameters::FLASH_MODE_TORCH[] = "torch";
// Values for scene mode settings.
const char CameraParameters::SCENE_MODE_AUTO[] = "auto";
const char CameraParameters::SCENE_MODE_ACTION[] = "action";
const char CameraParameters::SCENE_MODE_PORTRAIT[] = "portrait";
const char CameraParameters::SCENE_MODE_LANDSCAPE[] = "landscape";
const char CameraParameters::SCENE_MODE_NIGHT[] = "night";
const char CameraParameters::SCENE_MODE_NIGHT_PORTRAIT[] = "night-portrait";
const char CameraParameters::SCENE_MODE_THEATRE[] = "theatre";
const char CameraParameters::SCENE_MODE_BEACH[] = "beach";
const char CameraParameters::SCENE_MODE_SNOW[] = "snow";
const char CameraParameters::SCENE_MODE_SUNSET[] = "sunset";
const char CameraParameters::SCENE_MODE_STEADYPHOTO[] = "steadyphoto";
const char CameraParameters::SCENE_MODE_FIREWORKS[] = "fireworks";
const char CameraParameters::SCENE_MODE_SPORTS[] = "sports";
const char CameraParameters::SCENE_MODE_PARTY[] = "party";
const char CameraParameters::SCENE_MODE_CANDLELIGHT[] = "candlelight";
const char CameraParameters::SCENE_MODE_BARCODE[] = "barcode";
const char CameraParameters::SCENE_MODE_HDR[] = "hdr";
const char CameraParameters::PIXEL_FORMAT_YUV422SP[] = "yuv422sp";
const char CameraParameters::PIXEL_FORMAT_YUV420SP[] = "yuv420sp";
const char CameraParameters::PIXEL_FORMAT_YUV422I[] = "yuv422i-yuyv";
const char CameraParameters::PIXEL_FORMAT_YUV420P[] = "yuv420p";
const char CameraParameters::PIXEL_FORMAT_RGB565[] = "rgb565";
const char CameraParameters::PIXEL_FORMAT_RGBA8888[] = "rgba8888";
const char CameraParameters::PIXEL_FORMAT_JPEG[] = "jpeg";
const char CameraParameters::PIXEL_FORMAT_BAYER_RGGB[] = "bayer-rggb";
const char CameraParameters::PIXEL_FORMAT_ANDROID_OPAQUE[] = "android-opaque";
// Values for focus mode settings.
const char CameraParameters::FOCUS_MODE_AUTO[] = "auto";
const char CameraParameters::FOCUS_MODE_INFINITY[] = "infinity";
const char CameraParameters::FOCUS_MODE_MACRO[] = "macro";
const char CameraParameters::FOCUS_MODE_FIXED[] = "fixed";
const char CameraParameters::FOCUS_MODE_EDOF[] = "edof";
const char CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO[] = "continuous-video";
const char CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE[] = "continuous-picture";
const char CameraParameters::FOCUS_MODE_MANUAL_POSITION[] = "manual";
// Values for light fx settings
const char CameraParameters::LIGHTFX_LOWLIGHT[] = "low-light";
const char CameraParameters::LIGHTFX_HDR[] = "high-dynamic-range";
#ifdef CAMERA_PARAMETERS_EXTRA_C
CAMERA_PARAMETERS_EXTRA_C
#endif
CameraParameters::CameraParameters()
: mMap()
{
}
CameraParameters::~CameraParameters()
{
}
String8 CameraParameters::flatten() const
{
String8 flattened("");
size_t size = mMap.size();
for (size_t i = 0; i < size; i++) {
String8 k, v;
k = mMap.keyAt(i);
v = mMap.valueAt(i);
flattened += k;
flattened += "=";
flattened += v;
if (i != size-1)
flattened += ";";
}
return flattened;
}
void CameraParameters::unflatten(const String8 &params)
{
const char *a = params.string();
const char *b;
mMap.clear();
for (;;) {
// Find the bounds of the key name.
b = strchr(a, '=');
if (b == 0)
break;
// Create the key string.
String8 k(a, (size_t)(b-a));
// Find the value.
a = b+1;
b = strchr(a, ';');
if (b == 0) {
// If there's no semicolon, this is the last item.
String8 v(a);
mMap.add(k, v);
break;
}
String8 v(a, (size_t)(b-a));
mMap.add(k, v);
a = b+1;
}
}
void CameraParameters::set(const char *key, const char *value)
{
if (key == NULL || value == NULL)
return;
// XXX i think i can do this with strspn()
if (strchr(key, '=') || strchr(key, ';')) {
//XXX ALOGE("Key \"%s\"contains invalid character (= or ;)", key);
return;
}
if (strchr(value, '=') || strchr(value, ';')) {
//XXX ALOGE("Value \"%s\"contains invalid character (= or ;)", value);
return;
}
#ifdef QCOM_HARDWARE
// qcom cameras default to delivering an extra zero-exposure frame on HDR.
// The android SDK only wants one frame, so disable this unless the app
// explicitly asks for it
if (!get("hdr-need-1x")) {
mMap.replaceValueFor(String8("hdr-need-1x"), String8("false"));
}
#endif
mMap.replaceValueFor(String8(key), String8(value));
}
void CameraParameters::set(const char *key, int value)
{
char str[16];
sprintf(str, "%d", value);
set(key, str);
}
void CameraParameters::setFloat(const char *key, float value)
{
char str[16]; // 14 should be enough. We overestimate to be safe.
snprintf(str, sizeof(str), "%g", value);
set(key, str);
}
const char *CameraParameters::get(const char *key) const
{
String8 v = mMap.valueFor(String8(key));
if (v.length() == 0)
return 0;
return v.string();
}
int CameraParameters::getInt(const char *key) const
{
const char *v = get(key);
if (v == 0)
return -1;
return strtol(v, 0, 0);
}
float CameraParameters::getFloat(const char *key) const
{
const char *v = get(key);
if (v == 0) return -1;
return strtof(v, 0);
}
void CameraParameters::remove(const char *key)
{
mMap.removeItem(String8(key));
}
// Parse string like "640x480" or "10000,20000"
static int parse_pair(const char *str, int *first, int *second, char delim,
char **endptr = NULL)
{
// Find the first integer.
char *end;
int w = (int)strtol(str, &end, 10);
// If a delimeter does not immediately follow, give up.
if (*end != delim) {
ALOGE("Cannot find delimeter (%c) in str=%s", delim, str);
return -1;
}
// Find the second integer, immediately after the delimeter.
int h = (int)strtol(end+1, &end, 10);
*first = w;
*second = h;
if (endptr) {
*endptr = end;
}
return 0;
}
static void parseSizesList(const char *sizesStr, Vector<Size> &sizes)
{
if (sizesStr == 0) {
return;
}
char *sizeStartPtr = (char *)sizesStr;
while (true) {
int width, height;
int success = parse_pair(sizeStartPtr, &width, &height, 'x',
&sizeStartPtr);
if (success == -1 || (*sizeStartPtr != ',' && *sizeStartPtr != '\0')) {
ALOGE("Picture sizes string \"%s\" contains invalid character.", sizesStr);
return;
}
sizes.push(Size(width, height));
if (*sizeStartPtr == '\0') {
return;
}
sizeStartPtr++;
}
}
void CameraParameters::setPreviewSize(int width, int height)
{
char str[32];
sprintf(str, "%dx%d", width, height);
set(KEY_PREVIEW_SIZE, str);
}
void CameraParameters::getPreviewSize(int *width, int *height) const
{
*width = *height = -1;
// Get the current string, if it doesn't exist, leave the -1x-1
const char *p = get(KEY_PREVIEW_SIZE);
if (p == 0) return;
parse_pair(p, width, height, 'x');
}
void CameraParameters::getPreferredPreviewSizeForVideo(int *width, int *height) const
{
*width = *height = -1;
const char *p = get(KEY_PREFERRED_PREVIEW_SIZE_FOR_VIDEO);
if (p == 0) return;
parse_pair(p, width, height, 'x');
}
void CameraParameters::getSupportedPreviewSizes(Vector<Size> &sizes) const
{
const char *previewSizesStr = get(KEY_SUPPORTED_PREVIEW_SIZES);
parseSizesList(previewSizesStr, sizes);
}
void CameraParameters::setVideoSize(int width, int height)
{
char str[32];
sprintf(str, "%dx%d", width, height);
set(KEY_VIDEO_SIZE, str);
}
void CameraParameters::getVideoSize(int *width, int *height) const
{
*width = *height = -1;
const char *p = get(KEY_VIDEO_SIZE);
if (p == 0) return;
parse_pair(p, width, height, 'x');
}
void CameraParameters::getSupportedVideoSizes(Vector<Size> &sizes) const
{
const char *videoSizesStr = get(KEY_SUPPORTED_VIDEO_SIZES);
parseSizesList(videoSizesStr, sizes);
}
void CameraParameters::setPreviewFrameRate(int fps)
{
set(KEY_PREVIEW_FRAME_RATE, fps);
}
int CameraParameters::getPreviewFrameRate() const
{
return getInt(KEY_PREVIEW_FRAME_RATE);
}
void CameraParameters::getPreviewFpsRange(int *min_fps, int *max_fps) const
{
*min_fps = *max_fps = -1;
const char *p = get(KEY_PREVIEW_FPS_RANGE);
if (p == 0) return;
parse_pair(p, min_fps, max_fps, ',');
}
void CameraParameters::setPreviewFormat(const char *format)
{
set(KEY_PREVIEW_FORMAT, format);
}
const char *CameraParameters::getPreviewFormat() const
{
return get(KEY_PREVIEW_FORMAT);
}
void CameraParameters::setPictureSize(int width, int height)
{
char str[32];
sprintf(str, "%dx%d", width, height);
set(KEY_PICTURE_SIZE, str);
}
void CameraParameters::getPictureSize(int *width, int *height) const
{
*width = *height = -1;
// Get the current string, if it doesn't exist, leave the -1x-1
const char *p = get(KEY_PICTURE_SIZE);
if (p == 0) return;
parse_pair(p, width, height, 'x');
}
void CameraParameters::getSupportedPictureSizes(Vector<Size> &sizes) const
{
const char *pictureSizesStr = get(KEY_SUPPORTED_PICTURE_SIZES);
parseSizesList(pictureSizesStr, sizes);
}
void CameraParameters::setPictureFormat(const char *format)
{
set(KEY_PICTURE_FORMAT, format);
}
const char *CameraParameters::getPictureFormat() const
{
return get(KEY_PICTURE_FORMAT);
}
void CameraParameters::dump() const
{
ALOGD("dump: mMap.size = %zu", mMap.size());
for (size_t i = 0; i < mMap.size(); i++) {
String8 k, v;
k = mMap.keyAt(i);
v = mMap.valueAt(i);
ALOGD("%s: %s\n", k.string(), v.string());
}
}
status_t CameraParameters::dump(int fd, const Vector<String16>& /*args*/) const
{
const size_t SIZE = 256;
char buffer[SIZE];
String8 result;
snprintf(buffer, 255, "CameraParameters::dump: mMap.size = %zu\n", mMap.size());
result.append(buffer);
for (size_t i = 0; i < mMap.size(); i++) {
String8 k, v;
k = mMap.keyAt(i);
v = mMap.valueAt(i);
snprintf(buffer, 255, "\t%s: %s\n", k.string(), v.string());
result.append(buffer);
}
write(fd, result.string(), result.size());
return NO_ERROR;
}
void CameraParameters::getSupportedPreviewFormats(Vector<int>& formats) const {
const char* supportedPreviewFormats =
get(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS);
if (supportedPreviewFormats == NULL) {
ALOGW("%s: No supported preview formats.", __FUNCTION__);
return;
}
String8 fmtStr(supportedPreviewFormats);
char* prevFmts = fmtStr.lockBuffer(fmtStr.size());
char* savePtr;
char* fmt = strtok_r(prevFmts, ",", &savePtr);
while (fmt) {
int actual = previewFormatToEnum(fmt);
if (actual != -1) {
formats.add(actual);
}
fmt = strtok_r(NULL, ",", &savePtr);
}
fmtStr.unlockBuffer(fmtStr.size());
}
int CameraParameters::previewFormatToEnum(const char* format) {
return
!format ?
HAL_PIXEL_FORMAT_YCrCb_420_SP :
!strcmp(format, PIXEL_FORMAT_YUV422SP) ?
HAL_PIXEL_FORMAT_YCbCr_422_SP : // NV16
!strcmp(format, PIXEL_FORMAT_YUV420SP) ?
HAL_PIXEL_FORMAT_YCrCb_420_SP : // NV21
!strcmp(format, PIXEL_FORMAT_YUV422I) ?
HAL_PIXEL_FORMAT_YCbCr_422_I : // YUY2
!strcmp(format, PIXEL_FORMAT_YUV420P) ?
HAL_PIXEL_FORMAT_YV12 : // YV12
!strcmp(format, PIXEL_FORMAT_RGB565) ?
HAL_PIXEL_FORMAT_RGB_565 : // RGB565
!strcmp(format, PIXEL_FORMAT_RGBA8888) ?
HAL_PIXEL_FORMAT_RGBA_8888 : // RGB8888
!strcmp(format, PIXEL_FORMAT_BAYER_RGGB) ?
HAL_PIXEL_FORMAT_RAW16 : // Raw sensor data
-1;
}
bool CameraParameters::isEmpty() const {
return mMap.isEmpty();
}
}; // namespace android
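
flatten() and unflatten() above serialize the parameter map as a single "key=value;key=value" string, which is what Camera::setParameters()/getParameters() carry across binder. A small round-trip sketch with illustrative values:

#include <string.h>
#include <camera/CameraParameters.h>
#include <utils/String8.h>

using namespace android;

// Build a parameter set, flatten it to the wire format, and parse it back.
static bool parametersRoundTripSketch() {
    CameraParameters p;
    p.setPreviewSize(1280, 720);
    p.set(CameraParameters::KEY_PREVIEW_FORMAT,
          CameraParameters::PIXEL_FORMAT_YUV420SP);

    String8 flat = p.flatten();  // e.g. "preview-format=yuv420sp;preview-size=1280x720"

    CameraParameters q;
    q.unflatten(flat);
    int w = -1, h = -1;
    q.getPreviewSize(&w, &h);
    return w == 1280 && h == 720 &&
           strcmp(q.getPreviewFormat(), CameraParameters::PIXEL_FORMAT_YUV420SP) == 0;
}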

@@ -1,382 +0,0 @@
/*
**
** Copyright 2008, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
#define LOG_TAG "CameraParams2"
// #define LOG_NDEBUG 0
#include <utils/Log.h>
#include <string.h>
#include <stdlib.h>
#include <camera/CameraParameters2.h>
namespace android {
CameraParameters2::CameraParameters2()
: mMap()
{
}
CameraParameters2::~CameraParameters2()
{
}
String8 CameraParameters2::flatten() const
{
String8 flattened("");
size_t size = mMap.size();
for (size_t i = 0; i < size; i++) {
String8 k, v;
k = mMap.keyAt(i);
v = mMap.valueAt(i);
flattened += k;
flattened += "=";
flattened += v;
if (i != size-1)
flattened += ";";
}
ALOGV("%s: Flattened params = %s", __FUNCTION__, flattened.string());
return flattened;
}
void CameraParameters2::unflatten(const String8 &params)
{
const char *a = params.string();
const char *b;
mMap.clear();
for (;;) {
// Find the bounds of the key name.
b = strchr(a, '=');
if (b == 0)
break;
// Create the key string.
String8 k(a, (size_t)(b-a));
// Find the value.
a = b+1;
b = strchr(a, ';');
if (b == 0) {
// If there's no semicolon, this is the last item.
String8 v(a);
mMap.add(k, v);
break;
}
String8 v(a, (size_t)(b-a));
mMap.add(k, v);
a = b+1;
}
}
void CameraParameters2::set(const char *key, const char *value)
{
// XXX i think i can do this with strspn()
if (strchr(key, '=') || strchr(key, ';')) {
//XXX ALOGE("Key \"%s\"contains invalid character (= or ;)", key);
return;
}
if (strchr(value, '=') || strchr(value, ';')) {
//XXX ALOGE("Value \"%s\"contains invalid character (= or ;)", value);
return;
}
// Replacing a value updates the key's order to be the new largest order
ssize_t res = mMap.replaceValueFor(String8(key), String8(value));
LOG_ALWAYS_FATAL_IF(res < 0, "replaceValueFor(%s,%s) failed", key, value);
}
void CameraParameters2::set(const char *key, int value)
{
char str[16];
sprintf(str, "%d", value);
set(key, str);
}
void CameraParameters2::setFloat(const char *key, float value)
{
char str[16]; // 14 should be enough. We overestimate to be safe.
snprintf(str, sizeof(str), "%g", value);
set(key, str);
}
const char *CameraParameters2::get(const char *key) const
{
ssize_t idx = mMap.indexOfKey(String8(key));
if (idx < 0) {
return NULL;
} else {
return mMap.valueAt(idx).string();
}
}
int CameraParameters2::getInt(const char *key) const
{
const char *v = get(key);
if (v == 0)
return -1;
return strtol(v, 0, 0);
}
float CameraParameters2::getFloat(const char *key) const
{
const char *v = get(key);
if (v == 0) return -1;
return strtof(v, 0);
}
status_t CameraParameters2::compareSetOrder(const char *key1, const char *key2,
int *order) const {
if (key1 == NULL) {
ALOGE("%s: key1 must not be NULL", __FUNCTION__);
return BAD_VALUE;
} else if (key2 == NULL) {
ALOGE("%s: key2 must not be NULL", __FUNCTION__);
return BAD_VALUE;
} else if (order == NULL) {
ALOGE("%s: order must not be NULL", __FUNCTION__);
return BAD_VALUE;
}
ssize_t index1 = mMap.indexOfKey(String8(key1));
ssize_t index2 = mMap.indexOfKey(String8(key2));
if (index1 < 0) {
ALOGW("%s: Key1 (%s) was not set", __FUNCTION__, key1);
return NAME_NOT_FOUND;
} else if (index2 < 0) {
ALOGW("%s: Key2 (%s) was not set", __FUNCTION__, key2);
return NAME_NOT_FOUND;
}
*order = (index1 == index2) ? 0 :
(index1 < index2) ? -1 :
1;
return OK;
}
void CameraParameters2::remove(const char *key)
{
mMap.removeItem(String8(key));
}
// Parse string like "640x480" or "10000,20000"
static int parse_pair(const char *str, int *first, int *second, char delim,
char **endptr = NULL)
{
// Find the first integer.
char *end;
int w = (int)strtol(str, &end, 10);
// If a delimiter does not immediately follow, give up.
if (*end != delim) {
ALOGE("Cannot find delimiter (%c) in str=%s", delim, str);
return -1;
}
// Find the second integer, immediately after the delimiter.
int h = (int)strtol(end+1, &end, 10);
*first = w;
*second = h;
if (endptr) {
*endptr = end;
}
return 0;
}
static void parseSizesList(const char *sizesStr, Vector<Size> &sizes)
{
if (sizesStr == 0) {
return;
}
char *sizeStartPtr = (char *)sizesStr;
while (true) {
int width, height;
int success = parse_pair(sizeStartPtr, &width, &height, 'x',
&sizeStartPtr);
if (success == -1 || (*sizeStartPtr != ',' && *sizeStartPtr != '\0')) {
ALOGE("Picture sizes string \"%s\" contains invalid character.", sizesStr);
return;
}
sizes.push(Size(width, height));
if (*sizeStartPtr == '\0') {
return;
}
sizeStartPtr++;
}
}
void CameraParameters2::setPreviewSize(int width, int height)
{
char str[32];
sprintf(str, "%dx%d", width, height);
set(CameraParameters::KEY_PREVIEW_SIZE, str);
}
void CameraParameters2::getPreviewSize(int *width, int *height) const
{
*width = *height = -1;
// Get the current string, if it doesn't exist, leave the -1x-1
const char *p = get(CameraParameters::KEY_PREVIEW_SIZE);
if (p == 0) return;
parse_pair(p, width, height, 'x');
}
void CameraParameters2::getPreferredPreviewSizeForVideo(int *width, int *height) const
{
*width = *height = -1;
const char *p = get(CameraParameters::KEY_PREFERRED_PREVIEW_SIZE_FOR_VIDEO);
if (p == 0) return;
parse_pair(p, width, height, 'x');
}
void CameraParameters2::getSupportedPreviewSizes(Vector<Size> &sizes) const
{
const char *previewSizesStr = get(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES);
parseSizesList(previewSizesStr, sizes);
}
void CameraParameters2::setVideoSize(int width, int height)
{
char str[32];
sprintf(str, "%dx%d", width, height);
set(CameraParameters::KEY_VIDEO_SIZE, str);
}
void CameraParameters2::getVideoSize(int *width, int *height) const
{
*width = *height = -1;
const char *p = get(CameraParameters::KEY_VIDEO_SIZE);
if (p == 0) return;
parse_pair(p, width, height, 'x');
}
void CameraParameters2::getSupportedVideoSizes(Vector<Size> &sizes) const
{
const char *videoSizesStr = get(CameraParameters::KEY_SUPPORTED_VIDEO_SIZES);
parseSizesList(videoSizesStr, sizes);
}
void CameraParameters2::setPreviewFrameRate(int fps)
{
set(CameraParameters::KEY_PREVIEW_FRAME_RATE, fps);
}
int CameraParameters2::getPreviewFrameRate() const
{
return getInt(CameraParameters::KEY_PREVIEW_FRAME_RATE);
}
void CameraParameters2::getPreviewFpsRange(int *min_fps, int *max_fps) const
{
*min_fps = *max_fps = -1;
const char *p = get(CameraParameters::KEY_PREVIEW_FPS_RANGE);
if (p == 0) return;
parse_pair(p, min_fps, max_fps, ',');
}
void CameraParameters2::setPreviewFpsRange(int min_fps, int max_fps)
{
String8 str = String8::format("%d,%d", min_fps, max_fps);
set(CameraParameters::KEY_PREVIEW_FPS_RANGE, str.string());
}
void CameraParameters2::setPreviewFormat(const char *format)
{
set(CameraParameters::KEY_PREVIEW_FORMAT, format);
}
const char *CameraParameters2::getPreviewFormat() const
{
return get(CameraParameters::KEY_PREVIEW_FORMAT);
}
void CameraParameters2::setPictureSize(int width, int height)
{
char str[32];
sprintf(str, "%dx%d", width, height);
set(CameraParameters::KEY_PICTURE_SIZE, str);
}
void CameraParameters2::getPictureSize(int *width, int *height) const
{
*width = *height = -1;
// Get the current string, if it doesn't exist, leave the -1x-1
const char *p = get(CameraParameters::KEY_PICTURE_SIZE);
if (p == 0) return;
parse_pair(p, width, height, 'x');
}
void CameraParameters2::getSupportedPictureSizes(Vector<Size> &sizes) const
{
const char *pictureSizesStr = get(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES);
parseSizesList(pictureSizesStr, sizes);
}
void CameraParameters2::setPictureFormat(const char *format)
{
set(CameraParameters::KEY_PICTURE_FORMAT, format);
}
const char *CameraParameters2::getPictureFormat() const
{
return get(CameraParameters::KEY_PICTURE_FORMAT);
}
void CameraParameters2::dump() const
{
ALOGD("dump: mMap.size = %d", mMap.size());
for (size_t i = 0; i < mMap.size(); i++) {
String8 k, v;
k = mMap.keyAt(i);
v = mMap.valueAt(i);
ALOGD("%s: %s\n", k.string(), v.string());
}
}
status_t CameraParameters2::dump(int fd, const Vector<String16>& args) const
{
(void)args;
const size_t SIZE = 256;
char buffer[SIZE];
String8 result;
snprintf(buffer, 255, "CameraParameters2::dump: mMap.size = %zu\n", mMap.size());
result.append(buffer);
for (size_t i = 0; i < mMap.size(); i++) {
String8 k, v;
k = mMap.keyAt(i);
v = mMap.valueAt(i);
snprintf(buffer, 255, "\t%s: %s\n", k.string(), v.string());
result.append(buffer);
}
write(fd, result.string(), result.size());
return NO_ERROR;
}
}; // namespace android
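Side note on the file above: flatten() and unflatten() round-trip the parameter map through a semicolon-separated key=value wire format. A minimal usage sketch (illustrative only, assuming the framework headers shown above are on the include path; not part of the reverted shim):

#include <camera/CameraParameters2.h>
#include <utils/String8.h>

// Round-trips a parameter map through the "key=value;key=value" string
// that flatten() produces and unflatten() parses.
static void flattenRoundTripExample() {
    android::CameraParameters2 params;
    params.set("preview-size", "640x480");    // literal key, for illustration only
    params.setPreviewFpsRange(15000, 30000);  // stored as "15000,30000"

    android::String8 wire = params.flatten(); // semicolon-separated pairs
    android::CameraParameters2 copy;
    copy.unflatten(wire);                     // rebuilds the same key/value map
}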


@ -1,125 +0,0 @@
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "CameraUtils"
//#define LOG_NDEBUG 0
#include <camera/CameraUtils.h>
#include <system/window.h>
#include <system/graphics.h>
#include <utils/Log.h>
namespace android {
status_t CameraUtils::getRotationTransform(const CameraMetadata& staticInfo,
/*out*/int32_t* transform) {
ALOGV("%s", __FUNCTION__);
if (transform == NULL) {
ALOGW("%s: null transform", __FUNCTION__);
return BAD_VALUE;
}
*transform = 0;
camera_metadata_ro_entry_t entry = staticInfo.find(ANDROID_SENSOR_ORIENTATION);
if (entry.count == 0) {
ALOGE("%s: Can't find android.sensor.orientation in static metadata!", __FUNCTION__);
return INVALID_OPERATION;
}
camera_metadata_ro_entry_t entryFacing = staticInfo.find(ANDROID_LENS_FACING);
if (entryFacing.count == 0) {
ALOGE("%s: Can't find android.lens.facing in static metadata!", __FUNCTION__);
return INVALID_OPERATION;
}
int32_t& flags = *transform;
bool mirror = (entryFacing.data.u8[0] == ANDROID_LENS_FACING_FRONT);
int orientation = entry.data.i32[0];
if (!mirror) {
switch (orientation) {
case 0:
flags = 0;
break;
case 90:
flags = NATIVE_WINDOW_TRANSFORM_ROT_90;
break;
case 180:
flags = NATIVE_WINDOW_TRANSFORM_ROT_180;
break;
case 270:
flags = NATIVE_WINDOW_TRANSFORM_ROT_270;
break;
default:
ALOGE("%s: Invalid HAL android.sensor.orientation value: %d",
__FUNCTION__, orientation);
return INVALID_OPERATION;
}
} else {
// Front camera needs to be horizontally flipped for mirror-like behavior.
// Note: Flips are applied before rotates; using XOR here as some of these flags are
// composed in terms of other flip/rotation flags, and are not bitwise-ORable.
switch (orientation) {
case 0:
flags = NATIVE_WINDOW_TRANSFORM_FLIP_H;
break;
case 90:
flags = NATIVE_WINDOW_TRANSFORM_FLIP_H ^
NATIVE_WINDOW_TRANSFORM_ROT_270;
break;
case 180:
flags = NATIVE_WINDOW_TRANSFORM_FLIP_H ^
NATIVE_WINDOW_TRANSFORM_ROT_180;
break;
case 270:
flags = NATIVE_WINDOW_TRANSFORM_FLIP_H ^
NATIVE_WINDOW_TRANSFORM_ROT_90;
break;
default:
ALOGE("%s: Invalid HAL android.sensor.orientation value: %d",
__FUNCTION__, orientation);
return INVALID_OPERATION;
}
}
/**
* This magic flag makes surfaceflinger un-rotate the buffers
* to counter the extra global device UI rotation whenever the user
* physically rotates the device.
*
* By doing this, the camera buffer always ends up aligned
* with the physical camera for a "see through" effect.
*
* In essence, the buffer only gets rotated during preview use-cases.
* The user is still responsible to re-create streams of the proper
* aspect ratio, or the preview will end up looking non-uniformly
* stretched.
*/
flags |= NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY;
ALOGV("%s: final transform = 0x%x", __FUNCTION__, flags);
return OK;
}
} /* namespace android */
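For context, the flags computed by getRotationTransform() are meant to be handed to the preview output. A hedged sketch of a typical caller (the window and metadata arguments are placeholders, not part of the reverted code):

#include <camera/CameraUtils.h>
#include <system/window.h>

// Applies the flip/rotate flags (plus INVERSE_DISPLAY) computed above to a
// preview ANativeWindow. Purely illustrative.
static android::status_t applyPreviewTransform(
        const android::CameraMetadata& staticInfo, ANativeWindow* window) {
    int32_t transform = 0;
    android::status_t res =
            android::CameraUtils::getRotationTransform(staticInfo, &transform);
    if (res != android::OK) {
        return res;
    }
    return native_window_set_buffers_transform(window, transform);
}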


@ -1,129 +0,0 @@
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "Camera-CaptureResult"
#include <utils/Log.h>
#include <camera/CaptureResult.h>
#include <binder/Parcel.h>
namespace android {
bool CaptureResultExtras::isValid() {
return requestId >= 0;
}
status_t CaptureResultExtras::readFromParcel(Parcel *parcel) {
if (parcel == NULL) {
ALOGE("%s: Null parcel", __FUNCTION__);
return BAD_VALUE;
}
parcel->readInt32(&requestId);
parcel->readInt32(&burstId);
parcel->readInt32(&afTriggerId);
parcel->readInt32(&precaptureTriggerId);
parcel->readInt64(&frameNumber);
parcel->readInt32(&partialResultCount);
return OK;
}
status_t CaptureResultExtras::writeToParcel(Parcel *parcel) const {
if (parcel == NULL) {
ALOGE("%s: Null parcel", __FUNCTION__);
return BAD_VALUE;
}
parcel->writeInt32(requestId);
parcel->writeInt32(burstId);
parcel->writeInt32(afTriggerId);
parcel->writeInt32(precaptureTriggerId);
parcel->writeInt64(frameNumber);
parcel->writeInt32(partialResultCount);
return OK;
}
CaptureResult::CaptureResult() :
mMetadata(), mResultExtras() {
}
CaptureResult::CaptureResult(const CaptureResult &otherResult) {
mResultExtras = otherResult.mResultExtras;
mMetadata = otherResult.mMetadata;
}
status_t CaptureResult::readFromParcel(Parcel *parcel) {
ALOGV("%s: parcel = %p", __FUNCTION__, parcel);
if (parcel == NULL) {
ALOGE("%s: parcel is null", __FUNCTION__);
return BAD_VALUE;
}
mMetadata.clear();
status_t res = OK;
res = mMetadata.readFromParcel(parcel);
if (res != OK) {
ALOGE("%s: Failed to read metadata from parcel.",
__FUNCTION__);
return res;
}
ALOGV("%s: Read metadata from parcel", __FUNCTION__);
res = mResultExtras.readFromParcel(parcel);
if (res != OK) {
ALOGE("%s: Failed to read result extras from parcel.",
__FUNCTION__);
return res;
}
ALOGV("%s: Read result extras from parcel", __FUNCTION__);
return OK;
}
status_t CaptureResult::writeToParcel(Parcel *parcel) const {
ALOGV("%s: parcel = %p", __FUNCTION__, parcel);
if (parcel == NULL) {
ALOGE("%s: parcel is null", __FUNCTION__);
return BAD_VALUE;
}
status_t res;
res = mMetadata.writeToParcel(parcel);
if (res != OK) {
ALOGE("%s: Failed to write metadata to parcel", __FUNCTION__);
return res;
}
ALOGV("%s: Wrote metadata to parcel", __FUNCTION__);
res = mResultExtras.writeToParcel(parcel);
if (res != OK) {
ALOGE("%s: Failed to write result extras to parcel", __FUNCTION__);
return res;
}
ALOGV("%s: Wrote result extras to parcel", __FUNCTION__);
return OK;
}
}
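The write and read paths above use the same field order (metadata first, then the result extras), so a CaptureResult survives a Parcel round trip. A small illustrative helper, assuming the Android binder headers are available:

#include <binder/Parcel.h>
#include <camera/CaptureResult.h>

// Serializes a CaptureResult and reads it back in the same order.
static android::status_t roundTripCaptureResult(const android::CaptureResult& in,
                                                android::CaptureResult* out) {
    android::Parcel parcel;
    android::status_t res = in.writeToParcel(&parcel); // metadata, then extras
    if (res != android::OK) return res;
    parcel.setDataPosition(0);           // rewind before reading back
    return out->readFromParcel(&parcel); // reads metadata, then extras
}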


@ -1,425 +0,0 @@
/*
**
** Copyright 2008, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
//#define LOG_NDEBUG 0
#define LOG_TAG "ICamera"
#include <utils/Log.h>
#include <stdint.h>
#include <sys/types.h>
#include <binder/Parcel.h>
#include <camera/ICamera.h>
#include <gui/IGraphicBufferProducer.h>
#include <gui/Surface.h>
namespace android {
enum {
DISCONNECT = IBinder::FIRST_CALL_TRANSACTION,
SET_PREVIEW_TARGET,
SET_PREVIEW_CALLBACK_FLAG,
SET_PREVIEW_CALLBACK_TARGET,
START_PREVIEW,
STOP_PREVIEW,
AUTO_FOCUS,
CANCEL_AUTO_FOCUS,
TAKE_PICTURE,
SET_PARAMETERS,
GET_PARAMETERS,
SEND_COMMAND,
CONNECT,
LOCK,
UNLOCK,
PREVIEW_ENABLED,
START_RECORDING,
STOP_RECORDING,
RECORDING_ENABLED,
RELEASE_RECORDING_FRAME,
STORE_META_DATA_IN_BUFFERS,
};
class BpCamera: public BpInterface<ICamera>
{
public:
BpCamera(const sp<IBinder>& impl)
: BpInterface<ICamera>(impl)
{
}
// disconnect from camera service
void disconnect()
{
ALOGV("disconnect");
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
remote()->transact(DISCONNECT, data, &reply);
reply.readExceptionCode();
}
// pass the buffered IGraphicBufferProducer to the camera service
status_t setPreviewTarget(const sp<IGraphicBufferProducer>& bufferProducer)
{
ALOGV("setPreviewTarget");
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
sp<IBinder> b(IInterface::asBinder(bufferProducer));
data.writeStrongBinder(b);
remote()->transact(SET_PREVIEW_TARGET, data, &reply);
return reply.readInt32();
}
// set the preview callback flag to affect how the received frames from
// preview are handled. See Camera.h for details.
void setPreviewCallbackFlag(int flag)
{
ALOGV("setPreviewCallbackFlag(%d)", flag);
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
data.writeInt32(flag);
remote()->transact(SET_PREVIEW_CALLBACK_FLAG, data, &reply);
}
status_t setPreviewCallbackTarget(
const sp<IGraphicBufferProducer>& callbackProducer)
{
ALOGV("setPreviewCallbackTarget");
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
sp<IBinder> b(IInterface::asBinder(callbackProducer));
data.writeStrongBinder(b);
remote()->transact(SET_PREVIEW_CALLBACK_TARGET, data, &reply);
return reply.readInt32();
}
// start preview mode, must call setPreviewTarget first
status_t startPreview()
{
ALOGV("startPreview");
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
remote()->transact(START_PREVIEW, data, &reply);
return reply.readInt32();
}
// start recording mode, must call setPreviewTarget first
status_t startRecording()
{
ALOGV("startRecording");
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
remote()->transact(START_RECORDING, data, &reply);
return reply.readInt32();
}
// stop preview mode
void stopPreview()
{
ALOGV("stopPreview");
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
remote()->transact(STOP_PREVIEW, data, &reply);
}
// stop recording mode
void stopRecording()
{
ALOGV("stopRecording");
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
remote()->transact(STOP_RECORDING, data, &reply);
}
void releaseRecordingFrame(const sp<IMemory>& mem)
{
ALOGV("releaseRecordingFrame");
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
data.writeStrongBinder(IInterface::asBinder(mem));
remote()->transact(RELEASE_RECORDING_FRAME, data, &reply);
}
status_t storeMetaDataInBuffers(bool enabled)
{
ALOGV("storeMetaDataInBuffers: %s", enabled? "true": "false");
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
data.writeInt32(enabled);
remote()->transact(STORE_META_DATA_IN_BUFFERS, data, &reply);
return reply.readInt32();
}
// check preview state
bool previewEnabled()
{
ALOGV("previewEnabled");
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
remote()->transact(PREVIEW_ENABLED, data, &reply);
return reply.readInt32();
}
// check recording state
bool recordingEnabled()
{
ALOGV("recordingEnabled");
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
remote()->transact(RECORDING_ENABLED, data, &reply);
return reply.readInt32();
}
// auto focus
status_t autoFocus()
{
ALOGV("autoFocus");
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
remote()->transact(AUTO_FOCUS, data, &reply);
status_t ret = reply.readInt32();
return ret;
}
// cancel focus
status_t cancelAutoFocus()
{
ALOGV("cancelAutoFocus");
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
remote()->transact(CANCEL_AUTO_FOCUS, data, &reply);
status_t ret = reply.readInt32();
return ret;
}
// take a picture - returns an IMemory (ref-counted mmap)
status_t takePicture(int msgType)
{
ALOGV("takePicture: 0x%x", msgType);
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
data.writeInt32(msgType);
remote()->transact(TAKE_PICTURE, data, &reply);
status_t ret = reply.readInt32();
return ret;
}
// set preview/capture parameters - key/value pairs
status_t setParameters(const String8& params)
{
ALOGV("setParameters");
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
data.writeString8(params);
remote()->transact(SET_PARAMETERS, data, &reply);
return reply.readInt32();
}
// get preview/capture parameters - key/value pairs
String8 getParameters() const
{
ALOGV("getParameters");
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
remote()->transact(GET_PARAMETERS, data, &reply);
return reply.readString8();
}
virtual status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2)
{
ALOGV("sendCommand");
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
data.writeInt32(cmd);
data.writeInt32(arg1);
data.writeInt32(arg2);
remote()->transact(SEND_COMMAND, data, &reply);
return reply.readInt32();
}
virtual status_t connect(const sp<ICameraClient>& cameraClient)
{
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
data.writeStrongBinder(IInterface::asBinder(cameraClient));
remote()->transact(CONNECT, data, &reply);
return reply.readInt32();
}
virtual status_t lock()
{
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
remote()->transact(LOCK, data, &reply);
return reply.readInt32();
}
virtual status_t unlock()
{
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
remote()->transact(UNLOCK, data, &reply);
return reply.readInt32();
}
};
IMPLEMENT_META_INTERFACE(Camera, "android.hardware.ICamera");
// ----------------------------------------------------------------------
status_t BnCamera::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
switch(code) {
case DISCONNECT: {
ALOGV("DISCONNECT");
CHECK_INTERFACE(ICamera, data, reply);
disconnect();
reply->writeNoException();
return NO_ERROR;
} break;
case SET_PREVIEW_TARGET: {
ALOGV("SET_PREVIEW_TARGET");
CHECK_INTERFACE(ICamera, data, reply);
sp<IGraphicBufferProducer> st =
interface_cast<IGraphicBufferProducer>(data.readStrongBinder());
reply->writeInt32(setPreviewTarget(st));
return NO_ERROR;
} break;
case SET_PREVIEW_CALLBACK_FLAG: {
ALOGV("SET_PREVIEW_CALLBACK_TYPE");
CHECK_INTERFACE(ICamera, data, reply);
int callback_flag = data.readInt32();
setPreviewCallbackFlag(callback_flag);
return NO_ERROR;
} break;
case SET_PREVIEW_CALLBACK_TARGET: {
ALOGV("SET_PREVIEW_CALLBACK_TARGET");
CHECK_INTERFACE(ICamera, data, reply);
sp<IGraphicBufferProducer> cp =
interface_cast<IGraphicBufferProducer>(data.readStrongBinder());
reply->writeInt32(setPreviewCallbackTarget(cp));
return NO_ERROR;
}
case START_PREVIEW: {
ALOGV("START_PREVIEW");
CHECK_INTERFACE(ICamera, data, reply);
reply->writeInt32(startPreview());
return NO_ERROR;
} break;
case START_RECORDING: {
ALOGV("START_RECORDING");
CHECK_INTERFACE(ICamera, data, reply);
reply->writeInt32(startRecording());
return NO_ERROR;
} break;
case STOP_PREVIEW: {
ALOGV("STOP_PREVIEW");
CHECK_INTERFACE(ICamera, data, reply);
stopPreview();
return NO_ERROR;
} break;
case STOP_RECORDING: {
ALOGV("STOP_RECORDING");
CHECK_INTERFACE(ICamera, data, reply);
stopRecording();
return NO_ERROR;
} break;
case RELEASE_RECORDING_FRAME: {
ALOGV("RELEASE_RECORDING_FRAME");
CHECK_INTERFACE(ICamera, data, reply);
sp<IMemory> mem = interface_cast<IMemory>(data.readStrongBinder());
releaseRecordingFrame(mem);
return NO_ERROR;
} break;
case STORE_META_DATA_IN_BUFFERS: {
ALOGV("STORE_META_DATA_IN_BUFFERS");
CHECK_INTERFACE(ICamera, data, reply);
bool enabled = data.readInt32();
reply->writeInt32(storeMetaDataInBuffers(enabled));
return NO_ERROR;
} break;
case PREVIEW_ENABLED: {
ALOGV("PREVIEW_ENABLED");
CHECK_INTERFACE(ICamera, data, reply);
reply->writeInt32(previewEnabled());
return NO_ERROR;
} break;
case RECORDING_ENABLED: {
ALOGV("RECORDING_ENABLED");
CHECK_INTERFACE(ICamera, data, reply);
reply->writeInt32(recordingEnabled());
return NO_ERROR;
} break;
case AUTO_FOCUS: {
ALOGV("AUTO_FOCUS");
CHECK_INTERFACE(ICamera, data, reply);
reply->writeInt32(autoFocus());
return NO_ERROR;
} break;
case CANCEL_AUTO_FOCUS: {
ALOGV("CANCEL_AUTO_FOCUS");
CHECK_INTERFACE(ICamera, data, reply);
reply->writeInt32(cancelAutoFocus());
return NO_ERROR;
} break;
case TAKE_PICTURE: {
ALOGV("TAKE_PICTURE");
CHECK_INTERFACE(ICamera, data, reply);
int msgType = data.readInt32();
reply->writeInt32(takePicture(msgType));
return NO_ERROR;
} break;
case SET_PARAMETERS: {
ALOGV("SET_PARAMETERS");
CHECK_INTERFACE(ICamera, data, reply);
String8 params(data.readString8());
reply->writeInt32(setParameters(params));
return NO_ERROR;
} break;
case GET_PARAMETERS: {
ALOGV("GET_PARAMETERS");
CHECK_INTERFACE(ICamera, data, reply);
reply->writeString8(getParameters());
return NO_ERROR;
} break;
case SEND_COMMAND: {
ALOGV("SEND_COMMAND");
CHECK_INTERFACE(ICamera, data, reply);
int command = data.readInt32();
int arg1 = data.readInt32();
int arg2 = data.readInt32();
reply->writeInt32(sendCommand(command, arg1, arg2));
return NO_ERROR;
} break;
case CONNECT: {
CHECK_INTERFACE(ICamera, data, reply);
sp<ICameraClient> cameraClient = interface_cast<ICameraClient>(data.readStrongBinder());
reply->writeInt32(connect(cameraClient));
return NO_ERROR;
} break;
case LOCK: {
CHECK_INTERFACE(ICamera, data, reply);
reply->writeInt32(lock());
return NO_ERROR;
} break;
case UNLOCK: {
CHECK_INTERFACE(ICamera, data, reply);
reply->writeInt32(unlock());
return NO_ERROR;
} break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
}
// ----------------------------------------------------------------------------
}; // namespace android


@ -1,143 +0,0 @@
/*
**
** Copyright 2008, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
//#define LOG_NDEBUG 0
#define LOG_TAG "ICameraClient"
#include <utils/Log.h>
#include <stdint.h>
#include <sys/types.h>
#include <camera/ICameraClient.h>
namespace android {
enum {
NOTIFY_CALLBACK = IBinder::FIRST_CALL_TRANSACTION,
DATA_CALLBACK,
DATA_CALLBACK_TIMESTAMP,
};
class BpCameraClient: public BpInterface<ICameraClient>
{
public:
BpCameraClient(const sp<IBinder>& impl)
: BpInterface<ICameraClient>(impl)
{
}
// generic callback from camera service to app
void notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2)
{
ALOGV("notifyCallback");
Parcel data, reply;
data.writeInterfaceToken(ICameraClient::getInterfaceDescriptor());
data.writeInt32(msgType);
data.writeInt32(ext1);
if ((msgType == CAMERA_MSG_PREVIEW_FRAME) && (ext1 == CAMERA_FRAME_DATA_FD)) {
ALOGD("notifyCallback: CAMERA_MSG_PREVIEW_FRAME fd = %d", ext2);
data.writeFileDescriptor(ext2);
} else {
data.writeInt32(ext2);
}
remote()->transact(NOTIFY_CALLBACK, data, &reply, IBinder::FLAG_ONEWAY);
}
// generic data callback from camera service to app with image data
void dataCallback(int32_t msgType, const sp<IMemory>& imageData,
camera_frame_metadata_t *metadata)
{
ALOGV("dataCallback");
Parcel data, reply;
data.writeInterfaceToken(ICameraClient::getInterfaceDescriptor());
data.writeInt32(msgType);
data.writeStrongBinder(IInterface::asBinder(imageData));
if (metadata) {
data.writeInt32(metadata->number_of_faces);
data.write(metadata->faces, sizeof(camera_face_t) * metadata->number_of_faces);
}
remote()->transact(DATA_CALLBACK, data, &reply, IBinder::FLAG_ONEWAY);
}
// generic data callback from camera service to app with image data
void dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& imageData)
{
ALOGV("dataCallback");
Parcel data, reply;
data.writeInterfaceToken(ICameraClient::getInterfaceDescriptor());
data.writeInt64(timestamp);
data.writeInt32(msgType);
data.writeStrongBinder(IInterface::asBinder(imageData));
remote()->transact(DATA_CALLBACK_TIMESTAMP, data, &reply, IBinder::FLAG_ONEWAY);
}
};
IMPLEMENT_META_INTERFACE(CameraClient, "android.hardware.ICameraClient");
// ----------------------------------------------------------------------
status_t BnCameraClient::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
switch(code) {
case NOTIFY_CALLBACK: {
ALOGV("NOTIFY_CALLBACK");
CHECK_INTERFACE(ICameraClient, data, reply);
int32_t msgType = data.readInt32();
int32_t ext1 = data.readInt32();
int32_t ext2 = 0;
if ((msgType == CAMERA_MSG_PREVIEW_FRAME) && (ext1 == CAMERA_FRAME_DATA_FD)) {
ext2 = data.readFileDescriptor();
ALOGD("onTransact: CAMERA_MSG_PREVIEW_FRAME fd = %d", ext2);
} else {
ext2 = data.readInt32();
}
notifyCallback(msgType, ext1, ext2);
return NO_ERROR;
} break;
case DATA_CALLBACK: {
ALOGV("DATA_CALLBACK");
CHECK_INTERFACE(ICameraClient, data, reply);
int32_t msgType = data.readInt32();
sp<IMemory> imageData = interface_cast<IMemory>(data.readStrongBinder());
camera_frame_metadata_t *metadata = NULL;
if (data.dataAvail() > 0) {
metadata = new camera_frame_metadata_t;
metadata->number_of_faces = data.readInt32();
metadata->faces = (camera_face_t *) data.readInplace(
sizeof(camera_face_t) * metadata->number_of_faces);
}
dataCallback(msgType, imageData, metadata);
if (metadata) delete metadata;
return NO_ERROR;
} break;
case DATA_CALLBACK_TIMESTAMP: {
ALOGV("DATA_CALLBACK_TIMESTAMP");
CHECK_INTERFACE(ICameraClient, data, reply);
nsecs_t timestamp = data.readInt64();
int32_t msgType = data.readInt32();
sp<IMemory> imageData = interface_cast<IMemory>(data.readStrongBinder());
dataCallbackTimestamp(timestamp, msgType, imageData);
return NO_ERROR;
} break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
}
// ----------------------------------------------------------------------------
}; // namespace android


@ -1,113 +0,0 @@
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//#define LOG_NDEBUG 0
#define LOG_TAG "ICameraRecordingProxy"
#include <camera/ICameraRecordingProxy.h>
#include <camera/ICameraRecordingProxyListener.h>
#include <binder/IMemory.h>
#include <binder/Parcel.h>
#include <stdint.h>
#include <utils/Log.h>
namespace android {
enum {
START_RECORDING = IBinder::FIRST_CALL_TRANSACTION,
STOP_RECORDING,
RELEASE_RECORDING_FRAME,
};
uint8_t ICameraRecordingProxy::baseObject = 0;
size_t ICameraRecordingProxy::getCommonBaseAddress() {
return (size_t)&baseObject;
}
class BpCameraRecordingProxy: public BpInterface<ICameraRecordingProxy>
{
public:
BpCameraRecordingProxy(const sp<IBinder>& impl)
: BpInterface<ICameraRecordingProxy>(impl)
{
}
status_t startRecording(const sp<ICameraRecordingProxyListener>& listener)
{
ALOGV("startRecording");
Parcel data, reply;
data.writeInterfaceToken(ICameraRecordingProxy::getInterfaceDescriptor());
data.writeStrongBinder(IInterface::asBinder(listener));
remote()->transact(START_RECORDING, data, &reply);
return reply.readInt32();
}
void stopRecording()
{
ALOGV("stopRecording");
Parcel data, reply;
data.writeInterfaceToken(ICameraRecordingProxy::getInterfaceDescriptor());
remote()->transact(STOP_RECORDING, data, &reply);
}
void releaseRecordingFrame(const sp<IMemory>& mem)
{
ALOGV("releaseRecordingFrame");
Parcel data, reply;
data.writeInterfaceToken(ICameraRecordingProxy::getInterfaceDescriptor());
data.writeStrongBinder(IInterface::asBinder(mem));
remote()->transact(RELEASE_RECORDING_FRAME, data, &reply);
}
};
IMPLEMENT_META_INTERFACE(CameraRecordingProxy, "android.hardware.ICameraRecordingProxy");
// ----------------------------------------------------------------------
status_t BnCameraRecordingProxy::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
switch(code) {
case START_RECORDING: {
ALOGV("START_RECORDING");
CHECK_INTERFACE(ICameraRecordingProxy, data, reply);
sp<ICameraRecordingProxyListener> listener =
interface_cast<ICameraRecordingProxyListener>(data.readStrongBinder());
reply->writeInt32(startRecording(listener));
return NO_ERROR;
} break;
case STOP_RECORDING: {
ALOGV("STOP_RECORDING");
CHECK_INTERFACE(ICameraRecordingProxy, data, reply);
stopRecording();
return NO_ERROR;
} break;
case RELEASE_RECORDING_FRAME: {
ALOGV("RELEASE_RECORDING_FRAME");
CHECK_INTERFACE(ICameraRecordingProxy, data, reply);
sp<IMemory> mem = interface_cast<IMemory>(data.readStrongBinder());
releaseRecordingFrame(mem);
return NO_ERROR;
} break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
}
// ----------------------------------------------------------------------------
}; // namespace android


@ -1,75 +0,0 @@
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//#define LOG_NDEBUG 0
#define LOG_TAG "ICameraRecordingProxyListener"
#include <camera/ICameraRecordingProxyListener.h>
#include <binder/IMemory.h>
#include <binder/Parcel.h>
#include <utils/Log.h>
namespace android {
enum {
DATA_CALLBACK_TIMESTAMP = IBinder::FIRST_CALL_TRANSACTION,
};
class BpCameraRecordingProxyListener: public BpInterface<ICameraRecordingProxyListener>
{
public:
BpCameraRecordingProxyListener(const sp<IBinder>& impl)
: BpInterface<ICameraRecordingProxyListener>(impl)
{
}
void dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& imageData)
{
ALOGV("dataCallback");
Parcel data, reply;
data.writeInterfaceToken(ICameraRecordingProxyListener::getInterfaceDescriptor());
data.writeInt64(timestamp);
data.writeInt32(msgType);
data.writeStrongBinder(IInterface::asBinder(imageData));
remote()->transact(DATA_CALLBACK_TIMESTAMP, data, &reply, IBinder::FLAG_ONEWAY);
}
};
IMPLEMENT_META_INTERFACE(CameraRecordingProxyListener, "android.hardware.ICameraRecordingProxyListener");
// ----------------------------------------------------------------------
status_t BnCameraRecordingProxyListener::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
switch(code) {
case DATA_CALLBACK_TIMESTAMP: {
ALOGV("DATA_CALLBACK_TIMESTAMP");
CHECK_INTERFACE(ICameraRecordingProxyListener, data, reply);
nsecs_t timestamp = data.readInt64();
int32_t msgType = data.readInt32();
sp<IMemory> imageData = interface_cast<IMemory>(data.readStrongBinder());
dataCallbackTimestamp(timestamp, msgType, imageData);
return NO_ERROR;
} break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
}
// ----------------------------------------------------------------------------
}; // namespace android


@ -1,534 +0,0 @@
/*
**
** Copyright 2008, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
#define LOG_TAG "BpCameraService"
#include <utils/Log.h>
#include <utils/Errors.h>
#include <utils/String16.h>
#include <inttypes.h>
#include <stdint.h>
#include <sys/types.h>
#include <binder/Parcel.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <camera/ICameraService.h>
#include <camera/ICameraServiceListener.h>
#include <camera/ICamera.h>
#include <camera/ICameraClient.h>
#include <camera/camera2/ICameraDeviceUser.h>
#include <camera/camera2/ICameraDeviceCallbacks.h>
#include <camera/CameraMetadata.h>
#include <camera/VendorTagDescriptor.h>
namespace android {
namespace {
enum {
EX_SECURITY = -1,
EX_BAD_PARCELABLE = -2,
EX_ILLEGAL_ARGUMENT = -3,
EX_NULL_POINTER = -4,
EX_ILLEGAL_STATE = -5,
EX_HAS_REPLY_HEADER = -128, // special; see below
};
static bool readExceptionCode(Parcel& reply) {
int32_t exceptionCode = reply.readExceptionCode();
if (exceptionCode != 0) {
const char* errorMsg;
switch(exceptionCode) {
case EX_SECURITY:
errorMsg = "Security";
break;
case EX_BAD_PARCELABLE:
errorMsg = "BadParcelable";
break;
case EX_NULL_POINTER:
errorMsg = "NullPointer";
break;
case EX_ILLEGAL_STATE:
errorMsg = "IllegalState";
break;
// Binder should be handling this code inside Parcel::readException
// but let's have a to-string here anyway just in case.
case EX_HAS_REPLY_HEADER:
errorMsg = "HasReplyHeader";
break;
default:
errorMsg = "Unknown";
}
ALOGE("Binder transmission error %s (%d)", errorMsg, exceptionCode);
return true;
}
return false;
}
};
class BpCameraService: public BpInterface<ICameraService>
{
public:
BpCameraService(const sp<IBinder>& impl)
: BpInterface<ICameraService>(impl)
{
}
// get number of cameras available that support standard camera operations
virtual int32_t getNumberOfCameras()
{
return getNumberOfCameras(CAMERA_TYPE_BACKWARD_COMPATIBLE);
}
// get number of cameras available of a given type
virtual int32_t getNumberOfCameras(int type)
{
Parcel data, reply;
data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
data.writeInt32(type);
remote()->transact(BnCameraService::GET_NUMBER_OF_CAMERAS, data, &reply);
if (readExceptionCode(reply)) return 0;
return reply.readInt32();
}
// get information about a camera
virtual status_t getCameraInfo(int cameraId,
struct CameraInfo* cameraInfo) {
Parcel data, reply;
data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
data.writeInt32(cameraId);
remote()->transact(BnCameraService::GET_CAMERA_INFO, data, &reply);
if (readExceptionCode(reply)) return -EPROTO;
status_t result = reply.readInt32();
if (reply.readInt32() != 0) {
cameraInfo->facing = reply.readInt32();
cameraInfo->orientation = reply.readInt32();
}
return result;
}
// get camera characteristics (static metadata)
virtual status_t getCameraCharacteristics(int cameraId,
CameraMetadata* cameraInfo) {
Parcel data, reply;
data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
data.writeInt32(cameraId);
remote()->transact(BnCameraService::GET_CAMERA_CHARACTERISTICS, data, &reply);
if (readExceptionCode(reply)) return -EPROTO;
status_t result = reply.readInt32();
CameraMetadata out;
if (reply.readInt32() != 0) {
out.readFromParcel(&reply);
}
if (cameraInfo != NULL) {
cameraInfo->swap(out);
}
return result;
}
// Get enumeration and description of vendor tags for camera
virtual status_t getCameraVendorTagDescriptor(/*out*/sp<VendorTagDescriptor>& desc) {
Parcel data, reply;
data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
remote()->transact(BnCameraService::GET_CAMERA_VENDOR_TAG_DESCRIPTOR, data, &reply);
if (readExceptionCode(reply)) return -EPROTO;
status_t result = reply.readInt32();
if (reply.readInt32() != 0) {
sp<VendorTagDescriptor> d;
if (VendorTagDescriptor::createFromParcel(&reply, /*out*/d) == OK) {
desc = d;
}
}
return result;
}
// connect to camera service (android.hardware.Camera)
virtual status_t connect(const sp<ICameraClient>& cameraClient, int cameraId,
const String16 &clientPackageName, int clientUid,
/*out*/
sp<ICamera>& device)
{
Parcel data, reply;
data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
data.writeStrongBinder(IInterface::asBinder(cameraClient));
data.writeInt32(cameraId);
data.writeString16(clientPackageName);
data.writeInt32(clientUid);
status_t status;
status = remote()->transact(BnCameraService::CONNECT, data, &reply);
if (status != OK) return status;
if (readExceptionCode(reply)) return -EPROTO;
status = reply.readInt32();
if (reply.readInt32() != 0) {
device = interface_cast<ICamera>(reply.readStrongBinder());
}
return status;
}
// connect to camera service (android.hardware.Camera)
virtual status_t connectLegacy(const sp<ICameraClient>& cameraClient, int cameraId,
int halVersion,
const String16 &clientPackageName, int clientUid,
/*out*/sp<ICamera>& device)
{
Parcel data, reply;
data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
data.writeStrongBinder(IInterface::asBinder(cameraClient));
data.writeInt32(cameraId);
data.writeInt32(halVersion);
data.writeString16(clientPackageName);
data.writeInt32(clientUid);
status_t status;
status = remote()->transact(BnCameraService::CONNECT_LEGACY, data, &reply);
if (status != OK) return status;
if (readExceptionCode(reply)) return -EPROTO;
status = reply.readInt32();
if (reply.readInt32() != 0) {
device = interface_cast<ICamera>(reply.readStrongBinder());
}
return status;
}
virtual status_t setTorchMode(const String16& cameraId, bool enabled,
const sp<IBinder>& clientBinder)
{
Parcel data, reply;
data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
data.writeString16(cameraId);
data.writeInt32(enabled ? 1 : 0);
data.writeStrongBinder(clientBinder);
remote()->transact(BnCameraService::SET_TORCH_MODE, data, &reply);
if (readExceptionCode(reply)) return -EPROTO;
return reply.readInt32();
}
// connect to camera service (android.hardware.camera2.CameraDevice)
virtual status_t connectDevice(
const sp<ICameraDeviceCallbacks>& cameraCb,
int cameraId,
const String16& clientPackageName,
int clientUid,
/*out*/
sp<ICameraDeviceUser>& device)
{
Parcel data, reply;
data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
data.writeStrongBinder(IInterface::asBinder(cameraCb));
data.writeInt32(cameraId);
data.writeString16(clientPackageName);
data.writeInt32(clientUid);
status_t status;
status = remote()->transact(BnCameraService::CONNECT_DEVICE, data, &reply);
if (status != OK) return status;
if (readExceptionCode(reply)) return -EPROTO;
status = reply.readInt32();
if (reply.readInt32() != 0) {
device = interface_cast<ICameraDeviceUser>(reply.readStrongBinder());
}
return status;
}
virtual status_t addListener(const sp<ICameraServiceListener>& listener)
{
Parcel data, reply;
data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
data.writeStrongBinder(IInterface::asBinder(listener));
remote()->transact(BnCameraService::ADD_LISTENER, data, &reply);
if (readExceptionCode(reply)) return -EPROTO;
return reply.readInt32();
}
virtual status_t removeListener(const sp<ICameraServiceListener>& listener)
{
Parcel data, reply;
data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
data.writeStrongBinder(IInterface::asBinder(listener));
remote()->transact(BnCameraService::REMOVE_LISTENER, data, &reply);
if (readExceptionCode(reply)) return -EPROTO;
return reply.readInt32();
}
virtual status_t getLegacyParameters(int cameraId, String16* parameters) {
if (parameters == NULL) {
ALOGE("%s: parameters must not be null", __FUNCTION__);
return BAD_VALUE;
}
Parcel data, reply;
data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
data.writeInt32(cameraId);
remote()->transact(BnCameraService::GET_LEGACY_PARAMETERS, data, &reply);
if (readExceptionCode(reply)) return -EPROTO;
status_t res = reply.readInt32();
int32_t length = reply.readInt32(); // -1 means null
if (length > 0) {
*parameters = reply.readString16();
} else {
*parameters = String16();
}
return res;
}
virtual status_t supportsCameraApi(int cameraId, int apiVersion) {
Parcel data, reply;
data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
data.writeInt32(cameraId);
data.writeInt32(apiVersion);
remote()->transact(BnCameraService::SUPPORTS_CAMERA_API, data, &reply);
if (readExceptionCode(reply)) return -EPROTO;
status_t res = reply.readInt32();
return res;
}
virtual void notifySystemEvent(int32_t eventId, const int32_t* args, size_t len) {
Parcel data, reply;
data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
data.writeInt32(eventId);
data.writeInt32Array(len, args);
remote()->transact(BnCameraService::NOTIFY_SYSTEM_EVENT, data, &reply,
IBinder::FLAG_ONEWAY);
}
};
IMPLEMENT_META_INTERFACE(CameraService, "android.hardware.ICameraService");
// ----------------------------------------------------------------------
status_t BnCameraService::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
switch(code) {
case GET_NUMBER_OF_CAMERAS: {
CHECK_INTERFACE(ICameraService, data, reply);
reply->writeNoException();
reply->writeInt32(getNumberOfCameras(data.readInt32()));
return NO_ERROR;
} break;
case GET_CAMERA_INFO: {
CHECK_INTERFACE(ICameraService, data, reply);
CameraInfo cameraInfo = CameraInfo();
memset(&cameraInfo, 0, sizeof(cameraInfo));
status_t result = getCameraInfo(data.readInt32(), &cameraInfo);
reply->writeNoException();
reply->writeInt32(result);
// Fake a parcelable object here
reply->writeInt32(1); // means the parcelable is included
reply->writeInt32(cameraInfo.facing);
reply->writeInt32(cameraInfo.orientation);
return NO_ERROR;
} break;
case GET_CAMERA_CHARACTERISTICS: {
CHECK_INTERFACE(ICameraService, data, reply);
CameraMetadata info;
status_t result = getCameraCharacteristics(data.readInt32(), &info);
reply->writeNoException();
reply->writeInt32(result);
// out-variables are after exception and return value
reply->writeInt32(1); // means the parcelable is included
info.writeToParcel(reply);
return NO_ERROR;
} break;
case GET_CAMERA_VENDOR_TAG_DESCRIPTOR: {
CHECK_INTERFACE(ICameraService, data, reply);
sp<VendorTagDescriptor> d;
status_t result = getCameraVendorTagDescriptor(d);
reply->writeNoException();
reply->writeInt32(result);
// out-variables are after exception and return value
if (d == NULL) {
reply->writeInt32(0);
} else {
reply->writeInt32(1); // means the parcelable is included
d->writeToParcel(reply);
}
return NO_ERROR;
} break;
case CONNECT: {
CHECK_INTERFACE(ICameraService, data, reply);
sp<ICameraClient> cameraClient =
interface_cast<ICameraClient>(data.readStrongBinder());
int32_t cameraId = data.readInt32();
const String16 clientName = data.readString16();
int32_t clientUid = data.readInt32();
sp<ICamera> camera;
status_t status = connect(cameraClient, cameraId,
clientName, clientUid, /*out*/camera);
reply->writeNoException();
reply->writeInt32(status);
if (camera != NULL) {
reply->writeInt32(1);
reply->writeStrongBinder(IInterface::asBinder(camera));
} else {
reply->writeInt32(0);
}
return NO_ERROR;
} break;
case CONNECT_DEVICE: {
CHECK_INTERFACE(ICameraService, data, reply);
sp<ICameraDeviceCallbacks> cameraClient =
interface_cast<ICameraDeviceCallbacks>(data.readStrongBinder());
int32_t cameraId = data.readInt32();
const String16 clientName = data.readString16();
int32_t clientUid = data.readInt32();
sp<ICameraDeviceUser> camera;
status_t status = connectDevice(cameraClient, cameraId,
clientName, clientUid, /*out*/camera);
reply->writeNoException();
reply->writeInt32(status);
if (camera != NULL) {
reply->writeInt32(1);
reply->writeStrongBinder(IInterface::asBinder(camera));
} else {
reply->writeInt32(0);
}
return NO_ERROR;
} break;
case ADD_LISTENER: {
CHECK_INTERFACE(ICameraService, data, reply);
sp<ICameraServiceListener> listener =
interface_cast<ICameraServiceListener>(data.readStrongBinder());
reply->writeNoException();
reply->writeInt32(addListener(listener));
return NO_ERROR;
} break;
case REMOVE_LISTENER: {
CHECK_INTERFACE(ICameraService, data, reply);
sp<ICameraServiceListener> listener =
interface_cast<ICameraServiceListener>(data.readStrongBinder());
reply->writeNoException();
reply->writeInt32(removeListener(listener));
return NO_ERROR;
} break;
case GET_LEGACY_PARAMETERS: {
CHECK_INTERFACE(ICameraService, data, reply);
int cameraId = data.readInt32();
String16 parameters;
reply->writeNoException();
// return value
reply->writeInt32(getLegacyParameters(cameraId, &parameters));
// out parameters
reply->writeInt32(1); // parameters is always available
reply->writeString16(parameters);
return NO_ERROR;
} break;
case SUPPORTS_CAMERA_API: {
CHECK_INTERFACE(ICameraService, data, reply);
int cameraId = data.readInt32();
int apiVersion = data.readInt32();
reply->writeNoException();
// return value
reply->writeInt32(supportsCameraApi(cameraId, apiVersion));
return NO_ERROR;
} break;
case CONNECT_LEGACY: {
CHECK_INTERFACE(ICameraService, data, reply);
sp<ICameraClient> cameraClient =
interface_cast<ICameraClient>(data.readStrongBinder());
int32_t cameraId = data.readInt32();
int32_t halVersion = data.readInt32();
const String16 clientName = data.readString16();
int32_t clientUid = data.readInt32();
sp<ICamera> camera;
status_t status = connectLegacy(cameraClient, cameraId, halVersion,
clientName, clientUid, /*out*/camera);
reply->writeNoException();
reply->writeInt32(status);
if (camera != NULL) {
reply->writeInt32(1);
reply->writeStrongBinder(IInterface::asBinder(camera));
} else {
reply->writeInt32(0);
}
return NO_ERROR;
} break;
case SET_TORCH_MODE: {
CHECK_INTERFACE(ICameraService, data, reply);
String16 cameraId = data.readString16();
bool enabled = data.readInt32() != 0 ? true : false;
const sp<IBinder> clientBinder = data.readStrongBinder();
status_t status = setTorchMode(cameraId, enabled, clientBinder);
reply->writeNoException();
reply->writeInt32(status);
return NO_ERROR;
} break;
case NOTIFY_SYSTEM_EVENT: {
CHECK_INTERFACE(ICameraService, data, reply);
int32_t eventId = data.readInt32();
int32_t len = data.readInt32();
if (len < 0) {
ALOGE("%s: Received poorly formatted length in binder request: notifySystemEvent.",
__FUNCTION__);
return FAILED_TRANSACTION;
}
if (len > 512) {
ALOGE("%s: Length %" PRIi32 " too long in binder request: notifySystemEvent.",
__FUNCTION__, len);
return FAILED_TRANSACTION;
}
int32_t events[len];
memset(events, 0, sizeof(int32_t) * len);
status_t status = data.read(events, sizeof(int32_t) * len);
if (status != NO_ERROR) {
ALOGE("%s: Received poorly formatted binder request: notifySystemEvent.",
__FUNCTION__);
return FAILED_TRANSACTION;
}
notifySystemEvent(eventId, events, len);
return NO_ERROR;
} break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
}
// ----------------------------------------------------------------------------
}; // namespace android
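Across the service interface above, replies follow one layout: a Java-style exception header, then the status code, then a presence flag for any out-parcelable. A condensed sketch of unpacking such a reply (it mirrors what BpCameraService::getCameraCharacteristics already does; the helper name is made up for illustration):

#include <errno.h>
#include <binder/Parcel.h>
#include <camera/CameraMetadata.h>

// Reads a reply laid out as (exception header, status, presence flag, payload).
static android::status_t readCharacteristicsReply(android::Parcel& reply,
                                                  android::CameraMetadata* out) {
    if (reply.readExceptionCode() != 0) return -EPROTO; // exception header
    android::status_t status = reply.readInt32();       // service status
    if (reply.readInt32() != 0 && out != NULL) {        // presence flag
        out->readFromParcel(&reply);                    // optional payload
    }
    return status;
}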


@ -1,108 +0,0 @@
/*
**
** Copyright 2013, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
#include <stdint.h>
#include <sys/types.h>
#include <binder/Parcel.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <camera/ICameraServiceListener.h>
namespace android {
namespace {
enum {
STATUS_CHANGED = IBinder::FIRST_CALL_TRANSACTION,
TORCH_STATUS_CHANGED,
};
}; // namespace anonymous
class BpCameraServiceListener: public BpInterface<ICameraServiceListener>
{
public:
BpCameraServiceListener(const sp<IBinder>& impl)
: BpInterface<ICameraServiceListener>(impl)
{
}
virtual void onStatusChanged(Status status, int32_t cameraId)
{
Parcel data, reply;
data.writeInterfaceToken(ICameraServiceListener::getInterfaceDescriptor());
data.writeInt32(static_cast<int32_t>(status));
data.writeInt32(cameraId);
remote()->transact(STATUS_CHANGED,
data,
&reply,
IBinder::FLAG_ONEWAY);
}
virtual void onTorchStatusChanged(TorchStatus status, const String16 &cameraId)
{
Parcel data, reply;
data.writeInterfaceToken(ICameraServiceListener::getInterfaceDescriptor());
data.writeInt32(static_cast<int32_t>(status));
data.writeString16(cameraId);
remote()->transact(TORCH_STATUS_CHANGED,
data,
&reply,
IBinder::FLAG_ONEWAY);
}
};
IMPLEMENT_META_INTERFACE(CameraServiceListener, "android.hardware.ICameraServiceListener");
// ----------------------------------------------------------------------
status_t BnCameraServiceListener::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
uint32_t flags) {
switch(code) {
case STATUS_CHANGED: {
CHECK_INTERFACE(ICameraServiceListener, data, reply);
Status status = static_cast<Status>(data.readInt32());
int32_t cameraId = data.readInt32();
onStatusChanged(status, cameraId);
return NO_ERROR;
} break;
case TORCH_STATUS_CHANGED: {
CHECK_INTERFACE(ICameraServiceListener, data, reply);
TorchStatus status = static_cast<TorchStatus>(data.readInt32());
String16 cameraId = data.readString16();
onTorchStatusChanged(status, cameraId);
return NO_ERROR;
} break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
}
// ----------------------------------------------------------------------------
}; // namespace android


@ -1,72 +0,0 @@
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "BpCameraServiceProxy"
#include <stdint.h>
#include <binder/Parcel.h>
#include <camera/ICameraServiceProxy.h>
namespace android {
class BpCameraServiceProxy: public BpInterface<ICameraServiceProxy> {
public:
BpCameraServiceProxy(const sp<IBinder>& impl) : BpInterface<ICameraServiceProxy>(impl) {}
virtual void pingForUserUpdate() {
Parcel data;
data.writeInterfaceToken(ICameraServiceProxy::getInterfaceDescriptor());
remote()->transact(BnCameraServiceProxy::PING_FOR_USER_UPDATE, data, nullptr,
IBinder::FLAG_ONEWAY);
}
virtual void notifyCameraState(String16 cameraId, CameraState newCameraState) {
Parcel data;
data.writeInterfaceToken(ICameraServiceProxy::getInterfaceDescriptor());
data.writeString16(cameraId);
data.writeInt32(newCameraState);
remote()->transact(BnCameraServiceProxy::NOTIFY_CAMERA_STATE, data, nullptr,
IBinder::FLAG_ONEWAY);
}
};
IMPLEMENT_META_INTERFACE(CameraServiceProxy, "android.hardware.ICameraServiceProxy");
status_t BnCameraServiceProxy::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
uint32_t flags) {
switch(code) {
case PING_FOR_USER_UPDATE: {
CHECK_INTERFACE(ICameraServiceProxy, data, reply);
pingForUserUpdate();
return NO_ERROR;
} break;
case NOTIFY_CAMERA_STATE: {
CHECK_INTERFACE(ICameraServiceProxy, data, reply);
String16 cameraId = data.readString16();
CameraState newCameraState =
static_cast<CameraState>(data.readInt32());
notifyCameraState(cameraId, newCameraState);
return NO_ERROR;
} break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
}
}; // namespace android


@ -1,460 +0,0 @@
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "VendorTagDescriptor"
#include <binder/Parcel.h>
#include <utils/Errors.h>
#include <utils/Log.h>
#include <utils/Mutex.h>
#include <utils/Vector.h>
#include <utils/SortedVector.h>
#include <system/camera_metadata.h>
#include <camera_metadata_hidden.h>
#include "camera/VendorTagDescriptor.h"
#include <stdio.h>
#include <string.h>
namespace android {
extern "C" {
static int vendor_tag_descriptor_get_tag_count(const vendor_tag_ops_t* v);
static void vendor_tag_descriptor_get_all_tags(const vendor_tag_ops_t* v, uint32_t* tagArray);
static const char* vendor_tag_descriptor_get_section_name(const vendor_tag_ops_t* v, uint32_t tag);
static const char* vendor_tag_descriptor_get_tag_name(const vendor_tag_ops_t* v, uint32_t tag);
static int vendor_tag_descriptor_get_tag_type(const vendor_tag_ops_t* v, uint32_t tag);
} /* extern "C" */
static Mutex sLock;
static sp<VendorTagDescriptor> sGlobalVendorTagDescriptor;
VendorTagDescriptor::VendorTagDescriptor() {}
VendorTagDescriptor::~VendorTagDescriptor() {
size_t len = mReverseMapping.size();
for (size_t i = 0; i < len; ++i) {
delete mReverseMapping[i];
}
}
status_t VendorTagDescriptor::createDescriptorFromOps(const vendor_tag_ops_t* vOps,
/*out*/
sp<VendorTagDescriptor>& descriptor) {
if (vOps == NULL) {
ALOGE("%s: vendor_tag_ops argument was NULL.", __FUNCTION__);
return BAD_VALUE;
}
int tagCount = vOps->get_tag_count(vOps);
if (tagCount < 0 || tagCount > INT32_MAX) {
ALOGE("%s: tag count %d from vendor ops is invalid.", __FUNCTION__, tagCount);
return BAD_VALUE;
}
Vector<uint32_t> tagArray;
LOG_ALWAYS_FATAL_IF(tagArray.resize(tagCount) != tagCount,
"%s: too many (%u) vendor tags defined.", __FUNCTION__, tagCount);
vOps->get_all_tags(vOps, /*out*/tagArray.editArray());
sp<VendorTagDescriptor> desc = new VendorTagDescriptor();
desc->mTagCount = tagCount;
SortedVector<String8> sections;
KeyedVector<uint32_t, String8> tagToSectionMap;
for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
uint32_t tag = tagArray[i];
if (tag < CAMERA_METADATA_VENDOR_TAG_BOUNDARY) {
ALOGE("%s: vendor tag %d not in vendor tag section.", __FUNCTION__, tag);
return BAD_VALUE;
}
const char *tagName = vOps->get_tag_name(vOps, tag);
if (tagName == NULL) {
ALOGE("%s: no tag name defined for vendor tag %d.", __FUNCTION__, tag);
return BAD_VALUE;
}
desc->mTagToNameMap.add(tag, String8(tagName));
const char *sectionName = vOps->get_section_name(vOps, tag);
if (sectionName == NULL) {
ALOGE("%s: no section name defined for vendor tag %d.", __FUNCTION__, tag);
return BAD_VALUE;
}
String8 sectionString(sectionName);
sections.add(sectionString);
tagToSectionMap.add(tag, sectionString);
int tagType = vOps->get_tag_type(vOps, tag);
if (tagType < 0 || tagType >= NUM_TYPES) {
ALOGE("%s: tag type %d from vendor ops does not exist.", __FUNCTION__, tagType);
return BAD_VALUE;
}
desc->mTagToTypeMap.add(tag, tagType);
}
desc->mSections = sections;
for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
uint32_t tag = tagArray[i];
String8 sectionString = tagToSectionMap.valueFor(tag);
// Set up tag to section index map
ssize_t index = sections.indexOf(sectionString);
LOG_ALWAYS_FATAL_IF(index < 0, "index %zd must be non-negative", index);
desc->mTagToSectionMap.add(tag, static_cast<uint32_t>(index));
// Set up reverse mapping
ssize_t reverseIndex = -1;
if ((reverseIndex = desc->mReverseMapping.indexOfKey(sectionString)) < 0) {
KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
reverseIndex = desc->mReverseMapping.add(sectionString, nameMapper);
}
desc->mReverseMapping[reverseIndex]->add(desc->mTagToNameMap.valueFor(tag), tag);
}
descriptor = desc;
return OK;
}
status_t VendorTagDescriptor::createFromParcel(const Parcel* parcel,
/*out*/
sp<VendorTagDescriptor>& descriptor) {
status_t res = OK;
if (parcel == NULL) {
ALOGE("%s: parcel argument was NULL.", __FUNCTION__);
return BAD_VALUE;
}
int32_t tagCount = 0;
if ((res = parcel->readInt32(&tagCount)) != OK) {
ALOGE("%s: could not read tag count from parcel", __FUNCTION__);
return res;
}
if (tagCount < 0 || tagCount > INT32_MAX) {
ALOGE("%s: tag count %d from vendor ops is invalid.", __FUNCTION__, tagCount);
return BAD_VALUE;
}
sp<VendorTagDescriptor> desc = new VendorTagDescriptor();
desc->mTagCount = tagCount;
uint32_t tag, sectionIndex;
uint32_t maxSectionIndex = 0;
int32_t tagType;
Vector<uint32_t> allTags;
for (int32_t i = 0; i < tagCount; ++i) {
if ((res = parcel->readInt32(reinterpret_cast<int32_t*>(&tag))) != OK) {
ALOGE("%s: could not read tag id from parcel for index %d", __FUNCTION__, i);
break;
}
if (tag < CAMERA_METADATA_VENDOR_TAG_BOUNDARY) {
ALOGE("%s: vendor tag %d not in vendor tag section.", __FUNCTION__, tag);
res = BAD_VALUE;
break;
}
if ((res = parcel->readInt32(&tagType)) != OK) {
ALOGE("%s: could not read tag type from parcel for tag %d", __FUNCTION__, tag);
break;
}
if (tagType < 0 || tagType >= NUM_TYPES) {
ALOGE("%s: tag type %d from vendor ops does not exist.", __FUNCTION__, tagType);
res = BAD_VALUE;
break;
}
String8 tagName = parcel->readString8();
if (tagName.isEmpty()) {
ALOGE("%s: parcel tag name was NULL for tag %d.", __FUNCTION__, tag);
res = NOT_ENOUGH_DATA;
break;
}
if ((res = parcel->readInt32(reinterpret_cast<int32_t*>(&sectionIndex))) != OK) {
ALOGE("%s: could not read section index for tag %d.", __FUNCTION__, tag);
break;
}
maxSectionIndex = (maxSectionIndex >= sectionIndex) ? maxSectionIndex : sectionIndex;
allTags.add(tag);
desc->mTagToNameMap.add(tag, tagName);
desc->mTagToSectionMap.add(tag, sectionIndex);
desc->mTagToTypeMap.add(tag, tagType);
}
if (res != OK) {
return res;
}
size_t sectionCount = 0;
if (tagCount > 0) {
if ((res = parcel->readInt32(reinterpret_cast<int32_t*>(&sectionCount))) != OK) {
ALOGE("%s: could not read section count for.", __FUNCTION__);
return res;
}
if (sectionCount < (maxSectionIndex + 1)) {
ALOGE("%s: Incorrect number of sections defined, received %zu, needs %d.",
__FUNCTION__, sectionCount, (maxSectionIndex + 1));
return BAD_VALUE;
}
LOG_ALWAYS_FATAL_IF(desc->mSections.setCapacity(sectionCount) <= 0,
"Vector capacity must be positive");
for (size_t i = 0; i < sectionCount; ++i) {
String8 sectionName = parcel->readString8();
if (sectionName.isEmpty()) {
ALOGE("%s: parcel section name was NULL for section %zu.",
__FUNCTION__, i);
return NOT_ENOUGH_DATA;
}
desc->mSections.add(sectionName);
}
}
LOG_ALWAYS_FATAL_IF(static_cast<size_t>(tagCount) != allTags.size(),
"tagCount must be the same as allTags size");
// Set up reverse mapping
for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
uint32_t tag = allTags[i];
String8 sectionString = desc->mSections[desc->mTagToSectionMap.valueFor(tag)];
ssize_t reverseIndex = -1;
if ((reverseIndex = desc->mReverseMapping.indexOfKey(sectionString)) < 0) {
KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
reverseIndex = desc->mReverseMapping.add(sectionString, nameMapper);
}
desc->mReverseMapping[reverseIndex]->add(desc->mTagToNameMap.valueFor(tag), tag);
}
descriptor = desc;
return res;
}
int VendorTagDescriptor::getTagCount() const {
size_t size = mTagToNameMap.size();
if (size == 0) {
return VENDOR_TAG_COUNT_ERR;
}
return size;
}
void VendorTagDescriptor::getTagArray(uint32_t* tagArray) const {
size_t size = mTagToNameMap.size();
for (size_t i = 0; i < size; ++i) {
tagArray[i] = mTagToNameMap.keyAt(i);
}
}
const char* VendorTagDescriptor::getSectionName(uint32_t tag) const {
ssize_t index = mTagToSectionMap.indexOfKey(tag);
if (index < 0) {
return VENDOR_SECTION_NAME_ERR;
}
return mSections[mTagToSectionMap.valueAt(index)].string();
}
const char* VendorTagDescriptor::getTagName(uint32_t tag) const {
ssize_t index = mTagToNameMap.indexOfKey(tag);
if (index < 0) {
return VENDOR_TAG_NAME_ERR;
}
return mTagToNameMap.valueAt(index).string();
}
int VendorTagDescriptor::getTagType(uint32_t tag) const {
ssize_t index = mTagToNameMap.indexOfKey(tag);
if (index < 0) {
return VENDOR_TAG_TYPE_ERR;
}
return mTagToTypeMap.valueFor(tag);
}
status_t VendorTagDescriptor::writeToParcel(Parcel* parcel) const {
status_t res = OK;
if (parcel == NULL) {
ALOGE("%s: parcel argument was NULL.", __FUNCTION__);
return BAD_VALUE;
}
if ((res = parcel->writeInt32(mTagCount)) != OK) {
return res;
}
size_t size = mTagToNameMap.size();
uint32_t tag, sectionIndex;
int32_t tagType;
for (size_t i = 0; i < size; ++i) {
tag = mTagToNameMap.keyAt(i);
String8 tagName = mTagToNameMap[i];
sectionIndex = mTagToSectionMap.valueFor(tag);
tagType = mTagToTypeMap.valueFor(tag);
if ((res = parcel->writeInt32(tag)) != OK) break;
if ((res = parcel->writeInt32(tagType)) != OK) break;
if ((res = parcel->writeString8(tagName)) != OK) break;
if ((res = parcel->writeInt32(sectionIndex)) != OK) break;
}
size_t numSections = mSections.size();
if (numSections > 0) {
if ((res = parcel->writeInt32(numSections)) != OK) return res;
for (size_t i = 0; i < numSections; ++i) {
if ((res = parcel->writeString8(mSections[i])) != OK) return res;
}
}
return res;
}
SortedVector<String8> VendorTagDescriptor::getAllSectionNames() const {
return mSections;
}
status_t VendorTagDescriptor::lookupTag(String8 name, String8 section, /*out*/uint32_t* tag) const {
ssize_t index = mReverseMapping.indexOfKey(section);
if (index < 0) {
ALOGE("%s: Section '%s' does not exist.", __FUNCTION__, section.string());
return BAD_VALUE;
}
ssize_t nameIndex = mReverseMapping[index]->indexOfKey(name);
if (nameIndex < 0) {
ALOGE("%s: Tag name '%s' does not exist.", __FUNCTION__, name.string());
return BAD_VALUE;
}
if (tag != NULL) {
*tag = mReverseMapping[index]->valueAt(nameIndex);
}
return OK;
}
void VendorTagDescriptor::dump(int fd, int verbosity, int indentation) const {
size_t size = mTagToNameMap.size();
if (size == 0) {
dprintf(fd, "%*sDumping configured vendor tag descriptors: None set\n",
indentation, "");
return;
}
dprintf(fd, "%*sDumping configured vendor tag descriptors: %zu entries\n",
indentation, "", size);
for (size_t i = 0; i < size; ++i) {
uint32_t tag = mTagToNameMap.keyAt(i);
if (verbosity < 1) {
dprintf(fd, "%*s0x%x\n", indentation + 2, "", tag);
continue;
}
String8 name = mTagToNameMap.valueAt(i);
uint32_t sectionId = mTagToSectionMap.valueFor(tag);
String8 sectionName = mSections[sectionId];
int type = mTagToTypeMap.valueFor(tag);
const char* typeName = (type >= 0 && type < NUM_TYPES) ?
camera_metadata_type_names[type] : "UNKNOWN";
dprintf(fd, "%*s0x%x (%s) with type %d (%s) defined in section %s\n", indentation + 2,
"", tag, name.string(), type, typeName, sectionName.string());
}
}
status_t VendorTagDescriptor::setAsGlobalVendorTagDescriptor(const sp<VendorTagDescriptor>& desc) {
status_t res = OK;
Mutex::Autolock al(sLock);
sGlobalVendorTagDescriptor = desc;
vendor_tag_ops_t* opsPtr = NULL;
if (desc != NULL) {
opsPtr = &(desc->mVendorOps);
opsPtr->get_tag_count = vendor_tag_descriptor_get_tag_count;
opsPtr->get_all_tags = vendor_tag_descriptor_get_all_tags;
opsPtr->get_section_name = vendor_tag_descriptor_get_section_name;
opsPtr->get_tag_name = vendor_tag_descriptor_get_tag_name;
opsPtr->get_tag_type = vendor_tag_descriptor_get_tag_type;
}
if((res = set_camera_metadata_vendor_ops(opsPtr)) != OK) {
ALOGE("%s: Could not set vendor tag descriptor, received error %s (%d)."
, __FUNCTION__, strerror(-res), res);
}
return res;
}
void VendorTagDescriptor::clearGlobalVendorTagDescriptor() {
Mutex::Autolock al(sLock);
set_camera_metadata_vendor_ops(NULL);
sGlobalVendorTagDescriptor.clear();
}
sp<VendorTagDescriptor> VendorTagDescriptor::getGlobalVendorTagDescriptor() {
Mutex::Autolock al(sLock);
return sGlobalVendorTagDescriptor;
}
extern "C" {
int vendor_tag_descriptor_get_tag_count(const vendor_tag_ops_t* /*v*/) {
Mutex::Autolock al(sLock);
if (sGlobalVendorTagDescriptor == NULL) {
ALOGE("%s: Vendor tag descriptor not initialized.", __FUNCTION__);
return VENDOR_TAG_COUNT_ERR;
}
return sGlobalVendorTagDescriptor->getTagCount();
}
void vendor_tag_descriptor_get_all_tags(const vendor_tag_ops_t* /*v*/, uint32_t* tagArray) {
Mutex::Autolock al(sLock);
if (sGlobalVendorTagDescriptor == NULL) {
ALOGE("%s: Vendor tag descriptor not initialized.", __FUNCTION__);
return;
}
sGlobalVendorTagDescriptor->getTagArray(tagArray);
}
const char* vendor_tag_descriptor_get_section_name(const vendor_tag_ops_t* /*v*/, uint32_t tag) {
Mutex::Autolock al(sLock);
if (sGlobalVendorTagDescriptor == NULL) {
ALOGE("%s: Vendor tag descriptor not initialized.", __FUNCTION__);
return VENDOR_SECTION_NAME_ERR;
}
return sGlobalVendorTagDescriptor->getSectionName(tag);
}
const char* vendor_tag_descriptor_get_tag_name(const vendor_tag_ops_t* /*v*/, uint32_t tag) {
Mutex::Autolock al(sLock);
if (sGlobalVendorTagDescriptor == NULL) {
ALOGE("%s: Vendor tag descriptor not initialized.", __FUNCTION__);
return VENDOR_TAG_NAME_ERR;
}
return sGlobalVendorTagDescriptor->getTagName(tag);
}
int vendor_tag_descriptor_get_tag_type(const vendor_tag_ops_t* /*v*/, uint32_t tag) {
Mutex::Autolock al(sLock);
if (sGlobalVendorTagDescriptor == NULL) {
ALOGE("%s: Vendor tag descriptor not initialized.", __FUNCTION__);
return VENDOR_TAG_TYPE_ERR;
}
return sGlobalVendorTagDescriptor->getTagType(tag);
}
} /* extern "C" */
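// A minimal publication sketch (illustrative only; the example_* callbacks
// below are hypothetical stand-ins for a HAL-provided vendor tag table).
static int example_get_tag_count(const vendor_tag_ops_t*) { return 1; }
static void example_get_all_tags(const vendor_tag_ops_t*, uint32_t* tags) {
    tags[0] = CAMERA_METADATA_VENDOR_TAG_BOUNDARY; // first valid vendor tag id
}
static const char* example_get_section_name(const vendor_tag_ops_t*, uint32_t) {
    return "com.example.section";
}
static const char* example_get_tag_name(const vendor_tag_ops_t*, uint32_t) {
    return "exampleTag";
}
static int example_get_tag_type(const vendor_tag_ops_t*, uint32_t) {
    return TYPE_INT32;
}
static void example_publishVendorTags() {
    vendor_tag_ops_t ops = vendor_tag_ops_t();
    ops.get_tag_count = example_get_tag_count;
    ops.get_all_tags = example_get_all_tags;
    ops.get_section_name = example_get_section_name;
    ops.get_tag_name = example_get_tag_name;
    ops.get_tag_type = example_get_tag_type;
    sp<VendorTagDescriptor> desc;
    if (VendorTagDescriptor::createDescriptorFromOps(&ops, desc) != OK) {
        return;
    }
    // Route vendor tag queries in this process through the new descriptor.
    VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
    uint32_t tag = 0;
    desc->lookupTag(String8("exampleTag"), String8("com.example.section"), &tag);
}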
} /* namespace android */


@ -1,133 +0,0 @@
/*
**
** Copyright 2013, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
// #define LOG_NDEBUG 0
#define LOG_TAG "CameraRequest"
#include <utils/Log.h>
#include <camera/camera2/CaptureRequest.h>
#include <binder/Parcel.h>
#include <gui/Surface.h>
namespace android {
status_t CaptureRequest::readFromParcel(Parcel* parcel) {
if (parcel == NULL) {
ALOGE("%s: Null parcel", __FUNCTION__);
return BAD_VALUE;
}
mMetadata.clear();
mSurfaceList.clear();
status_t err;
if ((err = mMetadata.readFromParcel(parcel)) != OK) {
ALOGE("%s: Failed to read metadata from parcel", __FUNCTION__);
return err;
}
ALOGV("%s: Read metadata from parcel", __FUNCTION__);
int32_t size;
if ((err = parcel->readInt32(&size)) != OK) {
ALOGE("%s: Failed to read surface list size from parcel", __FUNCTION__);
return err;
}
ALOGV("%s: Read surface list size = %d", __FUNCTION__, size);
// Do not distinguish null arrays from 0-sized arrays.
for (int i = 0; i < size; ++i) {
// Parcel.writeParcelableArray
size_t len;
const char16_t* className = parcel->readString16Inplace(&len);
ALOGV("%s: Read surface class = %s", __FUNCTION__,
className != NULL ? String8(className).string() : "<null>");
if (className == NULL) {
continue;
}
// Surface.writeToParcel
const char16_t* name = parcel->readString16Inplace(&len);
ALOGV("%s: Read surface name = %s", __FUNCTION__,
name != NULL ? String8(name).string() : "<null>");
sp<IBinder> binder(parcel->readStrongBinder());
ALOGV("%s: Read surface binder = %p",
__FUNCTION__, binder.get());
sp<Surface> surface;
if (binder != NULL) {
sp<IGraphicBufferProducer> gbp =
interface_cast<IGraphicBufferProducer>(binder);
surface = new Surface(gbp);
}
mSurfaceList.push_back(surface);
}
int isReprocess = 0;
if ((err = parcel->readInt32(&isReprocess)) != OK) {
ALOGE("%s: Failed to read reprocessing from parcel", __FUNCTION__);
return err;
}
mIsReprocess = (isReprocess != 0);
return OK;
}
status_t CaptureRequest::writeToParcel(Parcel* parcel) const {
if (parcel == NULL) {
ALOGE("%s: Null parcel", __FUNCTION__);
return BAD_VALUE;
}
status_t err;
if ((err = mMetadata.writeToParcel(parcel)) != OK) {
return err;
}
int32_t size = static_cast<int32_t>(mSurfaceList.size());
// Send 0-sized arrays when it's empty. Do not send null arrays.
parcel->writeInt32(size);
for (int32_t i = 0; i < size; ++i) {
sp<Surface> surface = mSurfaceList[i];
sp<IBinder> binder;
if (surface != 0) {
binder = IInterface::asBinder(surface->getIGraphicBufferProducer());
}
// not sure if readParcelableArray does this, hard to tell from source
parcel->writeString16(String16("android.view.Surface"));
// Surface.writeToParcel
parcel->writeString16(String16("unknown_name"));
// Surface.nativeWriteToParcel
parcel->writeStrongBinder(binder);
}
parcel->writeInt32(mIsReprocess ? 1 : 0);
return OK;
}
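// A small round-trip sketch (illustrative only): serialize a request into a
// Parcel and read it back. The surface list is left empty for brevity.
static void example_captureRequestRoundTrip() {
    sp<CaptureRequest> request = new CaptureRequest();
    request->mIsReprocess = false;
    Parcel parcel;
    if (request->writeToParcel(&parcel) != OK) {
        return;
    }
    parcel.setDataPosition(0);              // rewind before reading back
    sp<CaptureRequest> restored = new CaptureRequest();
    restored->readFromParcel(&parcel);      // refills metadata, surfaces, flag
}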
}; // namespace android


@ -1,190 +0,0 @@
/*
**
** Copyright 2013, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
//#define LOG_NDEBUG 0
#define LOG_TAG "ICameraDeviceCallbacks"
#include <utils/Log.h>
#include <stdint.h>
#include <sys/types.h>
#include <binder/Parcel.h>
#include <gui/IGraphicBufferProducer.h>
#include <gui/Surface.h>
#include <utils/Mutex.h>
#include <camera/camera2/ICameraDeviceCallbacks.h>
#include "camera/CameraMetadata.h"
#include "camera/CaptureResult.h"
namespace android {
enum {
CAMERA_ERROR = IBinder::FIRST_CALL_TRANSACTION,
CAMERA_IDLE,
CAPTURE_STARTED,
RESULT_RECEIVED,
PREPARED
};
class BpCameraDeviceCallbacks: public BpInterface<ICameraDeviceCallbacks>
{
public:
BpCameraDeviceCallbacks(const sp<IBinder>& impl)
: BpInterface<ICameraDeviceCallbacks>(impl)
{
}
void onDeviceError(CameraErrorCode errorCode, const CaptureResultExtras& resultExtras)
{
ALOGV("onDeviceError");
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
data.writeInt32(static_cast<int32_t>(errorCode));
data.writeInt32(1); // to mark presence of CaptureResultExtras object
resultExtras.writeToParcel(&data);
remote()->transact(CAMERA_ERROR, data, &reply, IBinder::FLAG_ONEWAY);
data.writeNoException();
}
void onDeviceIdle()
{
ALOGV("onDeviceIdle");
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
remote()->transact(CAMERA_IDLE, data, &reply, IBinder::FLAG_ONEWAY);
data.writeNoException();
}
void onCaptureStarted(const CaptureResultExtras& result, int64_t timestamp)
{
ALOGV("onCaptureStarted");
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
data.writeInt32(1); // to mark presence of CaptureResultExtras object
result.writeToParcel(&data);
data.writeInt64(timestamp);
remote()->transact(CAPTURE_STARTED, data, &reply, IBinder::FLAG_ONEWAY);
data.writeNoException();
}
void onResultReceived(const CameraMetadata& metadata,
const CaptureResultExtras& resultExtras) {
ALOGV("onResultReceived");
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
data.writeInt32(1); // to mark presence of metadata object
metadata.writeToParcel(&data);
data.writeInt32(1); // to mark presence of CaptureResult object
resultExtras.writeToParcel(&data);
remote()->transact(RESULT_RECEIVED, data, &reply, IBinder::FLAG_ONEWAY);
data.writeNoException();
}
void onPrepared(int streamId)
{
ALOGV("onPrepared");
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
data.writeInt32(streamId);
remote()->transact(PREPARED, data, &reply, IBinder::FLAG_ONEWAY);
data.writeNoException();
}
};
IMPLEMENT_META_INTERFACE(CameraDeviceCallbacks,
"android.hardware.camera2.ICameraDeviceCallbacks");
// ----------------------------------------------------------------------
status_t BnCameraDeviceCallbacks::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
ALOGV("onTransact - code = %d", code);
switch(code) {
case CAMERA_ERROR: {
ALOGV("onDeviceError");
CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
CameraErrorCode errorCode =
static_cast<CameraErrorCode>(data.readInt32());
CaptureResultExtras resultExtras;
if (data.readInt32() != 0) {
resultExtras.readFromParcel(const_cast<Parcel*>(&data));
} else {
ALOGE("No CaptureResultExtras object is present!");
}
onDeviceError(errorCode, resultExtras);
data.readExceptionCode();
return NO_ERROR;
} break;
case CAMERA_IDLE: {
ALOGV("onDeviceIdle");
CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
onDeviceIdle();
data.readExceptionCode();
return NO_ERROR;
} break;
case CAPTURE_STARTED: {
ALOGV("onCaptureStarted");
CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
CaptureResultExtras result;
if (data.readInt32() != 0) {
result.readFromParcel(const_cast<Parcel*>(&data));
} else {
ALOGE("No CaptureResultExtras object is present in result!");
}
int64_t timestamp = data.readInt64();
onCaptureStarted(result, timestamp);
data.readExceptionCode();
return NO_ERROR;
} break;
case RESULT_RECEIVED: {
ALOGV("onResultReceived");
CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
CameraMetadata metadata;
if (data.readInt32() != 0) {
metadata.readFromParcel(const_cast<Parcel*>(&data));
} else {
ALOGW("No metadata object is present in result");
}
CaptureResultExtras resultExtras;
if (data.readInt32() != 0) {
resultExtras.readFromParcel(const_cast<Parcel*>(&data));
} else {
ALOGW("No capture result extras object is present in result");
}
onResultReceived(metadata, resultExtras);
data.readExceptionCode();
return NO_ERROR;
} break;
case PREPARED: {
ALOGV("onPrepared");
CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
CaptureResultExtras result;
int streamId = data.readInt32();
onPrepared(streamId);
data.readExceptionCode();
return NO_ERROR;
} break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
}
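// A minimal listener sketch (illustrative only): a local BnCameraDeviceCallbacks
// implementation that simply logs each callback. A real client registers an
// instance of something like this when it opens a camera device.
class ExampleDeviceCallbacks : public BnCameraDeviceCallbacks {
public:
    virtual void onDeviceError(CameraErrorCode errorCode,
                               const CaptureResultExtras& /*resultExtras*/) {
        ALOGE("example: device error %d", static_cast<int>(errorCode));
    }
    virtual void onDeviceIdle() {
        ALOGV("example: device idle");
    }
    virtual void onCaptureStarted(const CaptureResultExtras& /*extras*/,
                                  int64_t timestamp) {
        ALOGV("example: capture started at %lld", static_cast<long long>(timestamp));
    }
    virtual void onResultReceived(const CameraMetadata& /*metadata*/,
                                  const CaptureResultExtras& /*resultExtras*/) {
        ALOGV("example: capture result received");
    }
    virtual void onPrepared(int streamId) {
        ALOGV("example: stream %d prepared", streamId);
    }
};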
// ----------------------------------------------------------------------------
}; // namespace android


@ -1,626 +0,0 @@
/*
**
** Copyright 2013, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
// #define LOG_NDEBUG 0
#define LOG_TAG "ICameraDeviceUser"
#include <utils/Log.h>
#include <stdint.h>
#include <sys/types.h>
#include <binder/Parcel.h>
#include <camera/camera2/ICameraDeviceUser.h>
#include <gui/IGraphicBufferProducer.h>
#include <gui/Surface.h>
#include <camera/CameraMetadata.h>
#include <camera/camera2/CaptureRequest.h>
#include <camera/camera2/OutputConfiguration.h>
namespace android {
typedef Parcel::WritableBlob WritableBlob;
typedef Parcel::ReadableBlob ReadableBlob;
enum {
DISCONNECT = IBinder::FIRST_CALL_TRANSACTION,
SUBMIT_REQUEST,
SUBMIT_REQUEST_LIST,
CANCEL_REQUEST,
BEGIN_CONFIGURE,
END_CONFIGURE,
DELETE_STREAM,
CREATE_STREAM,
CREATE_INPUT_STREAM,
GET_INPUT_SURFACE,
CREATE_DEFAULT_REQUEST,
GET_CAMERA_INFO,
WAIT_UNTIL_IDLE,
FLUSH,
PREPARE,
TEAR_DOWN,
PREPARE2
};
namespace {
// Read empty strings without printing a false error message.
String16 readMaybeEmptyString16(const Parcel& parcel) {
size_t len;
const char16_t* str = parcel.readString16Inplace(&len);
if (str != NULL) {
return String16(str, len);
} else {
return String16();
}
}
};
class BpCameraDeviceUser : public BpInterface<ICameraDeviceUser>
{
public:
BpCameraDeviceUser(const sp<IBinder>& impl)
: BpInterface<ICameraDeviceUser>(impl)
{
}
// disconnect from camera service
void disconnect()
{
ALOGV("disconnect");
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
remote()->transact(DISCONNECT, data, &reply);
reply.readExceptionCode();
}
virtual int submitRequest(sp<CaptureRequest> request, bool repeating,
int64_t *lastFrameNumber)
{
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
// arg0 = CaptureRequest
if (request != 0) {
data.writeInt32(1);
request->writeToParcel(&data);
} else {
data.writeInt32(0);
}
// arg1 = streaming (bool)
data.writeInt32(repeating);
remote()->transact(SUBMIT_REQUEST, data, &reply);
reply.readExceptionCode();
status_t res = reply.readInt32();
status_t resFrameNumber = BAD_VALUE;
if (reply.readInt32() != 0) {
if (lastFrameNumber != NULL) {
resFrameNumber = reply.readInt64(lastFrameNumber);
}
}
if (res < 0 || (resFrameNumber != NO_ERROR)) {
res = FAILED_TRANSACTION;
}
return res;
}
virtual int submitRequestList(List<sp<CaptureRequest> > requestList, bool repeating,
int64_t *lastFrameNumber)
{
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
data.writeInt32(requestList.size());
for (List<sp<CaptureRequest> >::iterator it = requestList.begin();
it != requestList.end(); ++it) {
sp<CaptureRequest> request = *it;
if (request != 0) {
data.writeInt32(1);
if (request->writeToParcel(&data) != OK) {
return BAD_VALUE;
}
} else {
data.writeInt32(0);
}
}
data.writeInt32(repeating);
remote()->transact(SUBMIT_REQUEST_LIST, data, &reply);
reply.readExceptionCode();
status_t res = reply.readInt32();
status_t resFrameNumber = BAD_VALUE;
if (reply.readInt32() != 0) {
if (lastFrameNumber != NULL) {
resFrameNumber = reply.readInt64(lastFrameNumber);
}
}
if (res < 0 || (resFrameNumber != NO_ERROR)) {
res = FAILED_TRANSACTION;
}
return res;
}
virtual status_t cancelRequest(int requestId, int64_t *lastFrameNumber)
{
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
data.writeInt32(requestId);
remote()->transact(CANCEL_REQUEST, data, &reply);
reply.readExceptionCode();
status_t res = reply.readInt32();
status_t resFrameNumber = BAD_VALUE;
if (reply.readInt32() != 0) {
if (lastFrameNumber != NULL) {
resFrameNumber = reply.readInt64(lastFrameNumber);
}
}
if ((res != NO_ERROR) || (resFrameNumber != NO_ERROR)) {
res = FAILED_TRANSACTION;
}
return res;
}
virtual status_t beginConfigure()
{
ALOGV("beginConfigure");
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
remote()->transact(BEGIN_CONFIGURE, data, &reply);
reply.readExceptionCode();
return reply.readInt32();
}
virtual status_t endConfigure(bool isConstrainedHighSpeed)
{
ALOGV("endConfigure");
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
data.writeInt32(isConstrainedHighSpeed);
remote()->transact(END_CONFIGURE, data, &reply);
reply.readExceptionCode();
return reply.readInt32();
}
virtual status_t deleteStream(int streamId)
{
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
data.writeInt32(streamId);
remote()->transact(DELETE_STREAM, data, &reply);
reply.readExceptionCode();
return reply.readInt32();
}
virtual status_t createStream(const OutputConfiguration& outputConfiguration)
{
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
if (outputConfiguration.getGraphicBufferProducer() != NULL) {
data.writeInt32(1); // marker that OutputConfiguration is not null. Mimic aidl behavior
outputConfiguration.writeToParcel(data);
} else {
data.writeInt32(0);
}
remote()->transact(CREATE_STREAM, data, &reply);
reply.readExceptionCode();
return reply.readInt32();
}
virtual status_t createInputStream(int width, int height, int format)
{
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
data.writeInt32(width);
data.writeInt32(height);
data.writeInt32(format);
remote()->transact(CREATE_INPUT_STREAM, data, &reply);
reply.readExceptionCode();
return reply.readInt32();
}
// get the buffer producer of the input stream
virtual status_t getInputBufferProducer(
sp<IGraphicBufferProducer> *producer) {
if (producer == NULL) {
return BAD_VALUE;
}
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
remote()->transact(GET_INPUT_SURFACE, data, &reply);
reply.readExceptionCode();
status_t result = reply.readInt32();
if (result != OK) {
return result;
}
sp<IGraphicBufferProducer> bp = NULL;
if (reply.readInt32() != 0) {
String16 name = readMaybeEmptyString16(reply);
bp = interface_cast<IGraphicBufferProducer>(
reply.readStrongBinder());
}
*producer = bp;
return *producer == NULL ? INVALID_OPERATION : OK;
}
// Create a request object from a template.
virtual status_t createDefaultRequest(int templateId,
/*out*/
CameraMetadata* request)
{
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
data.writeInt32(templateId);
remote()->transact(CREATE_DEFAULT_REQUEST, data, &reply);
reply.readExceptionCode();
status_t result = reply.readInt32();
CameraMetadata out;
if (reply.readInt32() != 0) {
out.readFromParcel(&reply);
}
if (request != NULL) {
request->swap(out);
}
return result;
}
virtual status_t getCameraInfo(CameraMetadata* info)
{
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
remote()->transact(GET_CAMERA_INFO, data, &reply);
reply.readExceptionCode();
status_t result = reply.readInt32();
CameraMetadata out;
if (reply.readInt32() != 0) {
out.readFromParcel(&reply);
}
if (info != NULL) {
info->swap(out);
}
return result;
}
virtual status_t waitUntilIdle()
{
ALOGV("waitUntilIdle");
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
remote()->transact(WAIT_UNTIL_IDLE, data, &reply);
reply.readExceptionCode();
return reply.readInt32();
}
virtual status_t flush(int64_t *lastFrameNumber)
{
ALOGV("flush");
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
remote()->transact(FLUSH, data, &reply);
reply.readExceptionCode();
status_t res = reply.readInt32();
status_t resFrameNumber = BAD_VALUE;
if (reply.readInt32() != 0) {
if (lastFrameNumber != NULL) {
resFrameNumber = reply.readInt64(lastFrameNumber);
}
}
if ((res != NO_ERROR) || (resFrameNumber != NO_ERROR)) {
res = FAILED_TRANSACTION;
}
return res;
}
virtual status_t prepare(int streamId)
{
ALOGV("prepare");
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
data.writeInt32(streamId);
remote()->transact(PREPARE, data, &reply);
reply.readExceptionCode();
return reply.readInt32();
}
virtual status_t prepare2(int maxCount, int streamId)
{
ALOGV("prepare2");
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
data.writeInt32(maxCount);
data.writeInt32(streamId);
remote()->transact(PREPARE2, data, &reply);
reply.readExceptionCode();
return reply.readInt32();
}
virtual status_t tearDown(int streamId)
{
ALOGV("tearDown");
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
data.writeInt32(streamId);
remote()->transact(TEAR_DOWN, data, &reply);
reply.readExceptionCode();
return reply.readInt32();
}
private:
};
IMPLEMENT_META_INTERFACE(CameraDeviceUser,
"android.hardware.camera2.ICameraDeviceUser");
// ----------------------------------------------------------------------
status_t BnCameraDeviceUser::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
switch(code) {
case DISCONNECT: {
ALOGV("DISCONNECT");
CHECK_INTERFACE(ICameraDeviceUser, data, reply);
disconnect();
reply->writeNoException();
return NO_ERROR;
} break;
case SUBMIT_REQUEST: {
CHECK_INTERFACE(ICameraDeviceUser, data, reply);
// arg0 = request
sp<CaptureRequest> request;
if (data.readInt32() != 0) {
request = new CaptureRequest();
request->readFromParcel(const_cast<Parcel*>(&data));
}
// arg1 = streaming (bool)
bool repeating = data.readInt32();
// return code: requestId (int32)
reply->writeNoException();
int64_t lastFrameNumber = -1;
reply->writeInt32(submitRequest(request, repeating, &lastFrameNumber));
reply->writeInt32(1);
reply->writeInt64(lastFrameNumber);
return NO_ERROR;
} break;
case SUBMIT_REQUEST_LIST: {
CHECK_INTERFACE(ICameraDeviceUser, data, reply);
List<sp<CaptureRequest> > requestList;
int requestListSize = data.readInt32();
for (int i = 0; i < requestListSize; i++) {
if (data.readInt32() != 0) {
sp<CaptureRequest> request = new CaptureRequest();
if (request->readFromParcel(const_cast<Parcel*>(&data)) != OK) {
return BAD_VALUE;
}
requestList.push_back(request);
} else {
sp<CaptureRequest> request = 0;
requestList.push_back(request);
ALOGE("A request is missing. Sending in null request.");
}
}
bool repeating = data.readInt32();
reply->writeNoException();
int64_t lastFrameNumber = -1;
reply->writeInt32(submitRequestList(requestList, repeating, &lastFrameNumber));
reply->writeInt32(1);
reply->writeInt64(lastFrameNumber);
return NO_ERROR;
} break;
case CANCEL_REQUEST: {
CHECK_INTERFACE(ICameraDeviceUser, data, reply);
int requestId = data.readInt32();
reply->writeNoException();
int64_t lastFrameNumber = -1;
reply->writeInt32(cancelRequest(requestId, &lastFrameNumber));
reply->writeInt32(1);
reply->writeInt64(lastFrameNumber);
return NO_ERROR;
} break;
case DELETE_STREAM: {
CHECK_INTERFACE(ICameraDeviceUser, data, reply);
int streamId = data.readInt32();
reply->writeNoException();
reply->writeInt32(deleteStream(streamId));
return NO_ERROR;
} break;
case CREATE_STREAM: {
CHECK_INTERFACE(ICameraDeviceUser, data, reply);
status_t ret = BAD_VALUE;
if (data.readInt32() != 0) {
OutputConfiguration outputConfiguration(data);
ret = createStream(outputConfiguration);
} else {
ALOGE("%s: cannot take an empty OutputConfiguration", __FUNCTION__);
}
reply->writeNoException();
ALOGV("%s: CREATE_STREAM: write noException", __FUNCTION__);
reply->writeInt32(ret);
ALOGV("%s: CREATE_STREAM: write ret = %d", __FUNCTION__, ret);
return NO_ERROR;
} break;
case CREATE_INPUT_STREAM: {
CHECK_INTERFACE(ICameraDeviceUser, data, reply);
int width, height, format;
width = data.readInt32();
height = data.readInt32();
format = data.readInt32();
status_t ret = createInputStream(width, height, format);
reply->writeNoException();
reply->writeInt32(ret);
return NO_ERROR;
} break;
case GET_INPUT_SURFACE: {
CHECK_INTERFACE(ICameraDeviceUser, data, reply);
sp<IGraphicBufferProducer> bp;
status_t ret = getInputBufferProducer(&bp);
sp<IBinder> b(IInterface::asBinder(ret == OK ? bp : NULL));
reply->writeNoException();
reply->writeInt32(ret);
reply->writeInt32(1);
reply->writeString16(String16("camera input")); // name of surface
reply->writeStrongBinder(b);
return NO_ERROR;
} break;
case CREATE_DEFAULT_REQUEST: {
CHECK_INTERFACE(ICameraDeviceUser, data, reply);
int templateId = data.readInt32();
CameraMetadata request;
status_t ret;
ret = createDefaultRequest(templateId, &request);
reply->writeNoException();
reply->writeInt32(ret);
// out-variables are after exception and return value
reply->writeInt32(1); // to mark presence of metadata object
request.writeToParcel(const_cast<Parcel*>(reply));
return NO_ERROR;
} break;
case GET_CAMERA_INFO: {
CHECK_INTERFACE(ICameraDeviceUser, data, reply);
CameraMetadata info;
status_t ret;
ret = getCameraInfo(&info);
reply->writeNoException();
reply->writeInt32(ret);
// out-variables are after exception and return value
reply->writeInt32(1); // to mark presence of metadata object
info.writeToParcel(reply);
return NO_ERROR;
} break;
case WAIT_UNTIL_IDLE: {
CHECK_INTERFACE(ICameraDeviceUser, data, reply);
reply->writeNoException();
reply->writeInt32(waitUntilIdle());
return NO_ERROR;
} break;
case FLUSH: {
CHECK_INTERFACE(ICameraDeviceUser, data, reply);
reply->writeNoException();
int64_t lastFrameNumber = -1;
reply->writeInt32(flush(&lastFrameNumber));
reply->writeInt32(1);
reply->writeInt64(lastFrameNumber);
return NO_ERROR;
}
case BEGIN_CONFIGURE: {
CHECK_INTERFACE(ICameraDeviceUser, data, reply);
reply->writeNoException();
reply->writeInt32(beginConfigure());
return NO_ERROR;
} break;
case END_CONFIGURE: {
CHECK_INTERFACE(ICameraDeviceUser, data, reply);
bool isConstrainedHighSpeed = data.readInt32();
reply->writeNoException();
reply->writeInt32(endConfigure(isConstrainedHighSpeed));
return NO_ERROR;
} break;
case PREPARE: {
CHECK_INTERFACE(ICameraDeviceUser, data, reply);
int streamId = data.readInt32();
reply->writeNoException();
reply->writeInt32(prepare(streamId));
return NO_ERROR;
} break;
case TEAR_DOWN: {
CHECK_INTERFACE(ICameraDeviceUser, data, reply);
int streamId = data.readInt32();
reply->writeNoException();
reply->writeInt32(tearDown(streamId));
return NO_ERROR;
} break;
case PREPARE2: {
CHECK_INTERFACE(ICameraDeviceUser, data, reply);
int maxCount = data.readInt32();
int streamId = data.readInt32();
reply->writeNoException();
reply->writeInt32(prepare2(maxCount, streamId));
return NO_ERROR;
} break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
}
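// A minimal stream configuration sketch (illustrative only). The device proxy
// and the preview producer are assumed to come from an established camera
// service connection elsewhere; template id 1 is assumed to be the preview
// template. Error handling is reduced to early returns.
static status_t example_startPreviewStream(const sp<ICameraDeviceUser>& device,
        sp<IGraphicBufferProducer>& previewProducer) {
    status_t res = device->beginConfigure();
    if (res != OK) return res;
    OutputConfiguration output(previewProducer, /*rotation*/ 0);
    res = device->createStream(output);
    if (res < 0) return res;                     // negative values are errors
    res = device->endConfigure(/*isConstrainedHighSpeed*/ false);
    if (res != OK) return res;
    CameraMetadata previewTemplate;
    res = device->createDefaultRequest(/*templateId*/ 1, &previewTemplate);
    if (res != OK) return res;
    sp<CaptureRequest> request = new CaptureRequest();
    request->mMetadata = previewTemplate;        // assignment clones the buffer
    request->mSurfaceList.push_back(new Surface(previewProducer));
    request->mIsReprocess = false;
    int64_t lastFrameNumber = -1;
    int requestId = device->submitRequest(request, /*streaming*/ true, &lastFrameNumber);
    if (requestId < 0) return requestId;
    // Stop the repeating request and wait for in-flight captures to drain.
    device->cancelRequest(requestId, &lastFrameNumber);
    return device->waitUntilIdle();
}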
// ----------------------------------------------------------------------------
}; // namespace android


@ -1,84 +0,0 @@
/*
**
** Copyright 2015, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
#define LOG_TAG "OutputConfiguration"
#include <utils/Log.h>
#include <camera/camera2/OutputConfiguration.h>
#include <binder/Parcel.h>
namespace android {
const int OutputConfiguration::INVALID_ROTATION = -1;
// Read empty strings without printing a false error message.
String16 OutputConfiguration::readMaybeEmptyString16(const Parcel& parcel) {
size_t len;
const char16_t* str = parcel.readString16Inplace(&len);
if (str != NULL) {
return String16(str, len);
} else {
return String16();
}
}
sp<IGraphicBufferProducer> OutputConfiguration::getGraphicBufferProducer() const {
return mGbp;
}
int OutputConfiguration::getRotation() const {
return mRotation;
}
OutputConfiguration::OutputConfiguration(const Parcel& parcel) {
status_t err;
int rotation = 0;
if ((err = parcel.readInt32(&rotation)) != OK) {
ALOGE("%s: Failed to read rotation from parcel", __FUNCTION__);
mGbp = NULL;
mRotation = INVALID_ROTATION;
return;
}
String16 name = readMaybeEmptyString16(parcel);
const sp<IGraphicBufferProducer>& gbp =
interface_cast<IGraphicBufferProducer>(parcel.readStrongBinder());
mGbp = gbp;
mRotation = rotation;
ALOGV("%s: OutputConfiguration: bp = %p, name = %s", __FUNCTION__,
gbp.get(), String8(name).string());
}
OutputConfiguration::OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation) {
mGbp = gbp;
mRotation = rotation;
}
status_t OutputConfiguration::writeToParcel(Parcel& parcel) const {
parcel.writeInt32(mRotation);
parcel.writeString16(String16("unknown_name")); // name of surface
sp<IBinder> b(IInterface::asBinder(mGbp));
parcel.writeStrongBinder(b);
return OK;
}
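// A small round-trip sketch (illustrative only): flatten a configuration into
// a Parcel and rebuild it from the same data. The producer is assumed to come
// from a BufferQueue/Surface owned by the caller.
static void example_outputConfigurationRoundTrip(sp<IGraphicBufferProducer>& producer) {
    OutputConfiguration config(producer, /*rotation*/ 90);
    Parcel parcel;
    config.writeToParcel(parcel);
    parcel.setDataPosition(0);               // rewind before reading back
    OutputConfiguration restored(parcel);
    ALOGV("%s: restored rotation = %d", __FUNCTION__, restored.getRotation());
}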
}; // namespace android


@ -1,178 +0,0 @@
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_CAMERA_H
#define ANDROID_HARDWARE_CAMERA_H
#include <utils/Timers.h>
#include <gui/IGraphicBufferProducer.h>
#include <system/camera.h>
#include <camera/ICameraClient.h>
#include <camera/ICameraRecordingProxy.h>
#include <camera/ICameraRecordingProxyListener.h>
#include <camera/ICameraService.h>
#include <camera/ICamera.h>
#include <camera/CameraBase.h>
namespace android {
class Surface;
class String8;
class String16;
// ref-counted object for callbacks
class CameraListener: virtual public RefBase
{
public:
virtual void notify(int32_t msgType, int32_t ext1, int32_t ext2) = 0;
virtual void postData(int32_t msgType, const sp<IMemory>& dataPtr,
camera_frame_metadata_t *metadata) = 0;
virtual void postDataTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr) = 0;
};
class Camera;
template <>
struct CameraTraits<Camera>
{
typedef CameraListener TCamListener;
typedef ICamera TCamUser;
typedef ICameraClient TCamCallbacks;
typedef status_t (ICameraService::*TCamConnectService)(const sp<ICameraClient>&,
int, const String16&, int,
/*out*/
sp<ICamera>&);
static TCamConnectService fnConnectService;
};
class Camera :
public CameraBase<Camera>,
public BnCameraClient
{
public:
enum {
USE_CALLING_UID = ICameraService::USE_CALLING_UID
};
// construct a camera client from an existing remote
static sp<Camera> create(const sp<ICamera>& camera);
static sp<Camera> connect(int cameraId,
const String16& clientPackageName,
int clientUid);
static status_t connectLegacy(int cameraId, int halVersion,
const String16& clientPackageName,
int clientUid, sp<Camera>& camera);
virtual ~Camera();
status_t reconnect();
status_t lock();
status_t unlock();
// pass the buffered IGraphicBufferProducer to the camera service
status_t setPreviewTarget(const sp<IGraphicBufferProducer>& bufferProducer);
// start preview mode, must call setPreviewTarget first
status_t startPreview();
// stop preview mode
void stopPreview();
// get preview state
bool previewEnabled();
// start recording mode, must call setPreviewTarget first
status_t startRecording();
// stop recording mode
void stopRecording();
// get recording state
bool recordingEnabled();
// release a recording frame
void releaseRecordingFrame(const sp<IMemory>& mem);
// autoFocus - status returned from callback
status_t autoFocus();
// cancel auto focus
status_t cancelAutoFocus();
// take a picture - picture returned from callback
status_t takePicture(int msgType);
// set preview/capture parameters - key/value pairs
status_t setParameters(const String8& params);
// get preview/capture parameters - key/value pairs
String8 getParameters() const;
// send command to camera driver
status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
// tell camera hal to store meta data or real YUV in video buffers.
status_t storeMetaDataInBuffers(bool enabled);
void setListener(const sp<CameraListener>& listener);
void setRecordingProxyListener(const sp<ICameraRecordingProxyListener>& listener);
// Configure preview callbacks to app. Only one of the older
// callbacks or the callback surface can be active at the same time;
// enabling one will disable the other if active. Flags can be
// disabled by calling it with CAMERA_FRAME_CALLBACK_FLAG_NOOP, and
// Target by calling it with a NULL interface.
void setPreviewCallbackFlags(int preview_callback_flag);
status_t setPreviewCallbackTarget(
const sp<IGraphicBufferProducer>& callbackProducer);
sp<ICameraRecordingProxy> getRecordingProxy();
// ICameraClient interface
virtual void notifyCallback(int32_t msgType, int32_t ext, int32_t ext2);
virtual void dataCallback(int32_t msgType, const sp<IMemory>& dataPtr,
camera_frame_metadata_t *metadata);
virtual void dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr);
class RecordingProxy : public BnCameraRecordingProxy
{
public:
RecordingProxy(const sp<Camera>& camera);
// ICameraRecordingProxy interface
virtual status_t startRecording(const sp<ICameraRecordingProxyListener>& listener);
virtual void stopRecording();
virtual void releaseRecordingFrame(const sp<IMemory>& mem);
private:
sp<Camera> mCamera;
};
protected:
Camera(int cameraId);
Camera(const Camera&);
Camera& operator=(const Camera);
sp<ICameraRecordingProxyListener> mRecordingProxyListener;
friend class CameraBase;
};
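// A minimal usage sketch (illustrative; the package name, listener class and
// buffer producer are placeholders supplied by the caller):
//
//   sp<Camera> camera = Camera::connect(/*cameraId*/ 0,
//           String16("com.example.app"), Camera::USE_CALLING_UID);
//   if (camera != 0) {
//       camera->setListener(new ExampleListener());  // a CameraListener subclass
//       camera->setPreviewTarget(bufferProducer);    // IGraphicBufferProducer from a Surface
//       camera->startPreview();
//       camera->takePicture(CAMERA_MSG_COMPRESSED_IMAGE);
//       camera->stopPreview();
//       camera->disconnect();
//   }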
}; // namespace android
#endif


@ -1,118 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_CAMERA_BASE_H
#define ANDROID_HARDWARE_CAMERA_BASE_H
#include <utils/Mutex.h>
#include <camera/ICameraService.h>
struct camera_frame_metadata;
namespace android {
struct CameraInfo {
/**
* The direction that the camera faces. It should be CAMERA_FACING_BACK
* or CAMERA_FACING_FRONT.
*/
int facing;
/**
* The orientation of the camera image. The value is the angle that the
* camera image needs to be rotated clockwise so it shows correctly on the
* display in its natural orientation. It should be 0, 90, 180, or 270.
*
* For example, suppose a device has a naturally tall screen. The
* back-facing camera sensor is mounted in landscape. You are looking at
* the screen. If the top side of the camera sensor is aligned with the
* right edge of the screen in natural orientation, the value should be
* 90. If the top side of a front-facing camera sensor is aligned with the
* right of the screen, the value should be 270.
*/
int orientation;
};
template <typename TCam>
struct CameraTraits {
};
template <typename TCam, typename TCamTraits = CameraTraits<TCam> >
class CameraBase : public IBinder::DeathRecipient
{
public:
typedef typename TCamTraits::TCamListener TCamListener;
typedef typename TCamTraits::TCamUser TCamUser;
typedef typename TCamTraits::TCamCallbacks TCamCallbacks;
typedef typename TCamTraits::TCamConnectService TCamConnectService;
static sp<TCam> connect(int cameraId,
const String16& clientPackageName,
int clientUid);
virtual void disconnect();
void setListener(const sp<TCamListener>& listener);
static int getNumberOfCameras();
static status_t getCameraInfo(int cameraId,
/*out*/
struct CameraInfo* cameraInfo);
static status_t addServiceListener(
const sp<ICameraServiceListener>& listener);
static status_t removeServiceListener(
const sp<ICameraServiceListener>& listener);
sp<TCamUser> remote();
// Status is set to 'UNKNOWN_ERROR' after successful (re)connection
status_t getStatus();
protected:
CameraBase(int cameraId);
virtual ~CameraBase();
////////////////////////////////////////////////////////
// TCamCallbacks implementation
////////////////////////////////////////////////////////
virtual void notifyCallback(int32_t msgType, int32_t ext,
int32_t ext2);
////////////////////////////////////////////////////////
// Common instance variables
////////////////////////////////////////////////////////
Mutex mLock;
virtual void binderDied(const wp<IBinder>& who);
// helper function to obtain camera service handle
static const sp<ICameraService>& getCameraService();
sp<TCamUser> mCamera;
status_t mStatus;
sp<TCamListener> mListener;
const int mCameraId;
typedef CameraBase<TCam> CameraBaseT;
};
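// A brief enumeration sketch (illustrative; uses the Camera specialization
// declared in Camera.h):
//
//   int n = Camera::getNumberOfCameras();
//   for (int i = 0; i < n; i++) {
//       CameraInfo info;
//       Camera::getCameraInfo(i, &info);
//       // info.facing / info.orientation describe how the sensor is mounted
//   }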
}; // namespace android
#endif


@ -1,232 +0,0 @@
/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_CLIENT_CAMERA2_CAMERAMETADATA_CPP
#define ANDROID_CLIENT_CAMERA2_CAMERAMETADATA_CPP
#include "system/camera_metadata.h"
#include <utils/String8.h>
#include <utils/Vector.h>
namespace android {
class Parcel;
/**
* A convenience wrapper around the C-based camera_metadata_t library.
*/
class CameraMetadata {
public:
/** Creates an empty object; best used when expecting to acquire contents
* from elsewhere */
CameraMetadata();
/** Creates an object with space for entryCapacity entries, with
* dataCapacity extra storage */
CameraMetadata(size_t entryCapacity, size_t dataCapacity = 10);
~CameraMetadata();
/** Takes ownership of passed-in buffer */
CameraMetadata(camera_metadata_t *buffer);
/** Clones the metadata */
CameraMetadata(const CameraMetadata &other);
/**
* Assignment clones metadata buffer.
*/
CameraMetadata &operator=(const CameraMetadata &other);
CameraMetadata &operator=(const camera_metadata_t *buffer);
/**
* Get reference to the underlying metadata buffer. Ownership remains with
* the CameraMetadata object, but non-const CameraMetadata methods will not
* work until unlock() is called. Note that the lock has nothing to do with
* thread-safety, it simply prevents the camera_metadata_t pointer returned
* here from being accidentally invalidated by CameraMetadata operations.
*/
const camera_metadata_t* getAndLock() const;
/**
* Unlock the CameraMetadata for use again. After this unlock, the pointer
* given from getAndLock() may no longer be used. The pointer passed out
* from getAndLock must be provided to guarantee that the right object is
* being unlocked.
*/
status_t unlock(const camera_metadata_t *buffer);
/**
* Release a raw metadata buffer to the caller. After this call,
* CameraMetadata no longer references the buffer, and the caller takes
* responsibility for freeing the raw metadata buffer (using
* free_camera_metadata()), or for handing it to another CameraMetadata
* instance.
*/
camera_metadata_t* release();
/**
* Clear the metadata buffer and free all storage used by it
*/
void clear();
/**
* Acquire a raw metadata buffer from the caller. After this call,
* the caller no longer owns the raw buffer, and must not free or manipulate it.
* If CameraMetadata already contains metadata, it is freed.
*/
void acquire(camera_metadata_t* buffer);
/**
* Acquires raw buffer from other CameraMetadata object. After the call, the argument
* object no longer has any metadata.
*/
void acquire(CameraMetadata &other);
/**
* Append metadata from another CameraMetadata object.
*/
status_t append(const CameraMetadata &other);
/**
* Append metadata from a raw camera_metadata buffer
*/
status_t append(const camera_metadata* other);
/**
* Number of metadata entries.
*/
size_t entryCount() const;
/**
* Is the buffer empty (no entries)
*/
bool isEmpty() const;
/**
* Sort metadata buffer for faster find
*/
status_t sort();
/**
* Update metadata entry. Will create entry if it doesn't exist already, and
* will reallocate the buffer if insufficient space exists. Overloaded for
* the various types of valid data.
*/
status_t update(uint32_t tag,
const uint8_t *data, size_t data_count);
status_t update(uint32_t tag,
const int32_t *data, size_t data_count);
status_t update(uint32_t tag,
const float *data, size_t data_count);
status_t update(uint32_t tag,
const int64_t *data, size_t data_count);
status_t update(uint32_t tag,
const double *data, size_t data_count);
status_t update(uint32_t tag,
const camera_metadata_rational_t *data, size_t data_count);
status_t update(uint32_t tag,
const String8 &string);
template<typename T>
status_t update(uint32_t tag, Vector<T> data) {
return update(tag, data.array(), data.size());
}
/**
* Check if a metadata entry exists for a given tag id
*
*/
bool exists(uint32_t tag) const;
/**
* Get metadata entry by tag id
*/
camera_metadata_entry find(uint32_t tag);
/**
* Get metadata entry by tag id, with no editing
*/
camera_metadata_ro_entry find(uint32_t tag) const;
/**
* Delete metadata entry by tag
*/
status_t erase(uint32_t tag);
/**
* Swap the underlying camera metadata between this and the other
* metadata object.
*/
void swap(CameraMetadata &other);
/**
* Dump contents into FD for debugging. The verbosity levels are
* 0: Tag entry information only, no data values
* 1: Level 0 plus at most 16 data values per entry
* 2: All information
*
* The indentation parameter sets the number of spaces to add to the start
* of each line of output.
*/
void dump(int fd, int verbosity = 1, int indentation = 0) const;
/**
* Serialization over Binder
*/
// Metadata object is unchanged when reading from parcel fails.
status_t readFromParcel(Parcel *parcel);
status_t writeToParcel(Parcel *parcel) const;
/**
* Caller becomes the owner of the new metadata
* 'const Parcel' doesn't prevent us from calling the read functions,
* which is interesting since it changes the internal state.
*
* NULL can be returned when no metadata was sent, OR if there was an issue
* unpacking the serialized data (i.e. bad parcel or invalid structure).
*/
static status_t readFromParcel(const Parcel &parcel,
camera_metadata_t** out);
/**
* Caller retains ownership of metadata
* - Write 2 (int32 + blob) args in the current position
*/
static status_t writeToParcel(Parcel &parcel,
const camera_metadata_t* metadata);
private:
camera_metadata_t *mBuffer;
mutable bool mLocked;
/**
* Check if tag has a given type
*/
status_t checkType(uint32_t tag, uint8_t expectedType);
/**
* Base update entry method
*/
status_t updateImpl(uint32_t tag, const void *data, size_t data_count);
/**
* Resize metadata buffer if needed by reallocating it and copying it over.
*/
status_t resizeIfNeeded(size_t extraEntries, size_t extraData);
};
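// A small update/find sketch (illustrative; ANDROID_SENSOR_EXPOSURE_TIME is
// assumed to come from the generated camera_metadata_tags.h list):
//
//   CameraMetadata meta(/*entryCapacity*/ 10);
//   int64_t exposureNs = 30000000;                       // 30 ms
//   meta.update(ANDROID_SENSOR_EXPOSURE_TIME, &exposureNs, 1);
//   if (meta.exists(ANDROID_SENSOR_EXPOSURE_TIME)) {
//       camera_metadata_entry e = meta.find(ANDROID_SENSOR_EXPOSURE_TIME);
//       int64_t readBack = e.data.i64[0];                // == exposureNs
//   }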
}; // namespace android
#endif


@ -1,707 +0,0 @@
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_CAMERA_PARAMETERS_H
#define ANDROID_HARDWARE_CAMERA_PARAMETERS_H
#include <utils/KeyedVector.h>
#include <utils/String8.h>
#include <camera/CameraParametersExtra.h>
namespace android {
struct Size {
int width;
int height;
Size() {
width = 0;
height = 0;
}
Size(int w, int h) {
width = w;
height = h;
}
};
class CameraParameters
{
public:
CameraParameters();
CameraParameters(const String8 &params) { unflatten(params); }
~CameraParameters();
String8 flatten() const;
void unflatten(const String8 &params);
void set(const char *key, const char *value);
void set(const char *key, int value);
void setFloat(const char *key, float value);
const char *get(const char *key) const;
int getInt(const char *key) const;
float getFloat(const char *key) const;
void remove(const char *key);
void setPreviewSize(int width, int height);
void getPreviewSize(int *width, int *height) const;
void getSupportedPreviewSizes(Vector<Size> &sizes) const;
// Set the dimensions in pixels to the given width and height
// for video frames. The given width and height must be one
// of the supported dimensions returned from
// getSupportedVideoSizes(). Must not be called if
// getSupportedVideoSizes() returns an empty Vector of Size.
void setVideoSize(int width, int height);
// Retrieve the current dimensions (width and height)
// in pixels for video frames, which must be one of the
// supported dimensions returned from getSupportedVideoSizes().
// Must not be called if getSupportedVideoSizes() returns an
// empty Vector of Size.
void getVideoSize(int *width, int *height) const;
// Retrieve a Vector of supported dimensions (width and height)
// in pixels for video frames. If sizes returned from the method
// is empty, the camera does not support calls to setVideoSize()
// or getVideoSize(). In addition, it also indicates that
// the camera only has a single output, and does not have
// separate output for video frames and preview frame.
void getSupportedVideoSizes(Vector<Size> &sizes) const;
// Retrieve the preferred preview size (width and height) in pixels
// for video recording. The given width and height must be one of
// supported preview sizes returned from getSupportedPreviewSizes().
// Must not be called if getSupportedVideoSizes() returns an empty
// Vector of Size. If getSupportedVideoSizes() returns an empty
// Vector of Size, the width and height returned from this method
// are invalid ("-1x-1").
void getPreferredPreviewSizeForVideo(int *width, int *height) const;
void setPreviewFrameRate(int fps);
int getPreviewFrameRate() const;
void getPreviewFpsRange(int *min_fps, int *max_fps) const;
void setPreviewFormat(const char *format);
const char *getPreviewFormat() const;
void setPictureSize(int width, int height);
void getPictureSize(int *width, int *height) const;
void getSupportedPictureSizes(Vector<Size> &sizes) const;
void setPictureFormat(const char *format);
const char *getPictureFormat() const;
void dump() const;
status_t dump(int fd, const Vector<String16>& args) const;
/**
* Returns a Vector containing the supported preview formats
* as enums given in graphics.h.
*/
void getSupportedPreviewFormats(Vector<int>& formats) const;
// Returns true if no keys are present
bool isEmpty() const;
// Parameter keys to communicate between camera application and driver.
// The access (read/write, read only, or write only) is viewed from the
// perspective of applications, not driver.
// Preview frame size in pixels (width x height).
// Example value: "480x320". Read/Write.
static const char KEY_PREVIEW_SIZE[];
// Supported preview frame sizes in pixels.
// Example value: "800x600,480x320". Read only.
static const char KEY_SUPPORTED_PREVIEW_SIZES[];
// The current minimum and maximum preview fps. This controls the rate of
// preview frames received (CAMERA_MSG_PREVIEW_FRAME). The minimum and
// maximum fps must be one of the elements from
// the KEY_SUPPORTED_PREVIEW_FPS_RANGE parameter.
// Example value: "10500,26623"
static const char KEY_PREVIEW_FPS_RANGE[];
// The supported preview fps (frame-per-second) ranges. Each range contains
// a minimum fps and maximum fps. If minimum fps equals to maximum fps, the
// camera outputs frames in fixed frame rate. If not, the camera outputs
// frames in auto frame rate. The actual frame rate fluctuates between the
// minimum and the maximum. The list has at least one element. The list is
// sorted from small to large (first by maximum fps and then minimum fps).
// Example value: "(10500,26623),(15000,26623),(30000,30000)"
static const char KEY_SUPPORTED_PREVIEW_FPS_RANGE[];
// The image format for preview frames. See CAMERA_MSG_PREVIEW_FRAME in
// frameworks/av/include/camera/Camera.h. The default is
// PIXEL_FORMAT_YUV420SP. Example value: "yuv420sp" or PIXEL_FORMAT_XXX
// constants. Read/write.
static const char KEY_PREVIEW_FORMAT[];
// Supported image formats for preview frames.
// Example value: "yuv420sp,yuv422i-yuyv". Read only.
static const char KEY_SUPPORTED_PREVIEW_FORMATS[];
// Number of preview frames per second. This is the target frame rate. The
// actual frame rate depends on the driver.
// Example value: "15". Read/write.
static const char KEY_PREVIEW_FRAME_RATE[];
// Supported number of preview frames per second.
// Example value: "24,15,10". Read.
static const char KEY_SUPPORTED_PREVIEW_FRAME_RATES[];
// The dimensions for captured pictures in pixels (width x height).
// Example value: "1024x768". Read/write.
static const char KEY_PICTURE_SIZE[];
// Supported dimensions for captured pictures in pixels.
// Example value: "2048x1536,1024x768". Read only.
static const char KEY_SUPPORTED_PICTURE_SIZES[];
// The image format for captured pictures. See CAMERA_MSG_COMPRESSED_IMAGE
// in frameworks/base/include/camera/Camera.h.
// Example value: "jpeg" or PIXEL_FORMAT_XXX constants. Read/write.
static const char KEY_PICTURE_FORMAT[];
// Supported image formats for captured pictures.
// Example value: "jpeg,rgb565". Read only.
static const char KEY_SUPPORTED_PICTURE_FORMATS[];
// The width (in pixels) of EXIF thumbnail in Jpeg picture.
// Example value: "512". Read/write.
static const char KEY_JPEG_THUMBNAIL_WIDTH[];
// The height (in pixels) of EXIF thumbnail in Jpeg picture.
// Example value: "384". Read/write.
static const char KEY_JPEG_THUMBNAIL_HEIGHT[];
// Supported EXIF thumbnail sizes (width x height). 0x0 means no thumbnail
// in EXIF.
// Example value: "512x384,320x240,0x0". Read only.
static const char KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES[];
// The quality of the EXIF thumbnail in Jpeg picture. The range is 1 to 100,
// with 100 being the best.
// Example value: "90". Read/write.
static const char KEY_JPEG_THUMBNAIL_QUALITY[];
// Jpeg quality of captured picture. The range is 1 to 100, with 100 being
// the best.
// Example value: "90". Read/write.
static const char KEY_JPEG_QUALITY[];
// The rotation angle in degrees relative to the orientation of the camera.
// This affects the pictures returned from CAMERA_MSG_COMPRESSED_IMAGE. The
// camera driver may set orientation in the EXIF header without rotating the
// picture. Or the driver may rotate the picture and the EXIF thumbnail. If
// the Jpeg picture is rotated, the orientation in the EXIF header will be
// missing or 1 (row #0 is top and column #0 is left side).
//
// Note that the JPEG pictures of front-facing cameras are not mirrored
// as in preview display.
//
// For example, suppose the natural orientation of the device is portrait.
// The device is rotated 270 degrees clockwise, so the device orientation is
// 270. Suppose a back-facing camera sensor is mounted in landscape and the
// top side of the camera sensor is aligned with the right edge of the
// display in natural orientation. So the camera orientation is 90. The
// rotation should be set to 0 (270 + 90).
//
// Example value: "0" or "90" or "180" or "270". Write only.
static const char KEY_ROTATION[];
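// Illustrative sketch of the usual KEY_ROTATION computation (mirrors the
// platform Camera API documentation; "cameraOrientation", "deviceDegrees",
// "facingFront" and "params" are hypothetical locals):
//
//   int rotation;
//   if (facingFront) {
//       rotation = (cameraOrientation - deviceDegrees + 360) % 360;
//   } else {  // back-facing
//       rotation = (cameraOrientation + deviceDegrees) % 360;
//   }
//   params.set(CameraParameters::KEY_ROTATION, rotation);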
// GPS latitude coordinate. GPSLatitude and GPSLatitudeRef will be stored in
// JPEG EXIF header.
// Example value: "25.032146" or "-33.462809". Write only.
static const char KEY_GPS_LATITUDE[];
// GPS longitude coordinate. GPSLongitude and GPSLongitudeRef will be stored
// in JPEG EXIF header.
// Example value: "121.564448" or "-70.660286". Write only.
static const char KEY_GPS_LONGITUDE[];
// GPS altitude. GPSAltitude and GPSAltitudeRef will be stored in JPEG EXIF
// header.
// Example value: "21.0" or "-5". Write only.
static const char KEY_GPS_ALTITUDE[];
// GPS timestamp (UTC in seconds since January 1, 1970). This should be
// stored in JPEG EXIF header.
// Example value: "1251192757". Write only.
static const char KEY_GPS_TIMESTAMP[];
// GPS Processing Method
// Example value: "GPS" or "NETWORK". Write only.
static const char KEY_GPS_PROCESSING_METHOD[];
// Current white balance setting.
// Example value: "auto" or WHITE_BALANCE_XXX constants. Read/write.
static const char KEY_WHITE_BALANCE[];
// Supported white balance settings.
// Example value: "auto,incandescent,daylight". Read only.
static const char KEY_SUPPORTED_WHITE_BALANCE[];
// Current color effect setting.
// Example value: "none" or EFFECT_XXX constants. Read/write.
static const char KEY_EFFECT[];
// Supported color effect settings.
// Example value: "none,mono,sepia". Read only.
static const char KEY_SUPPORTED_EFFECTS[];
// Current antibanding setting.
// Example value: "auto" or ANTIBANDING_XXX constants. Read/write.
static const char KEY_ANTIBANDING[];
// Supported antibanding settings.
// Example value: "auto,50hz,60hz,off". Read only.
static const char KEY_SUPPORTED_ANTIBANDING[];
// Current scene mode.
// Example value: "auto" or SCENE_MODE_XXX constants. Read/write.
static const char KEY_SCENE_MODE[];
// Supported scene mode settings.
// Example value: "auto,night,fireworks". Read only.
static const char KEY_SUPPORTED_SCENE_MODES[];
// Current flash mode.
// Example value: "auto" or FLASH_MODE_XXX constants. Read/write.
static const char KEY_FLASH_MODE[];
// Supported flash modes.
// Example value: "auto,on,off". Read only.
static const char KEY_SUPPORTED_FLASH_MODES[];
// Current focus mode. This will not be empty. Applications should call
// CameraHardwareInterface.autoFocus to start the focus if focus mode is
// FOCUS_MODE_AUTO or FOCUS_MODE_MACRO.
// Example value: "auto" or FOCUS_MODE_XXX constants. Read/write.
static const char KEY_FOCUS_MODE[];
// Supported focus modes.
// Example value: "auto,macro,fixed". Read only.
static const char KEY_SUPPORTED_FOCUS_MODES[];
// The maximum number of focus areas supported. This is the maximum length
// of KEY_FOCUS_AREAS.
// Example value: "0" or "2". Read only.
static const char KEY_MAX_NUM_FOCUS_AREAS[];
// Current focus areas.
//
// Before accessing this parameter, apps should check
// KEY_MAX_NUM_FOCUS_AREAS first to know the maximum number of focus
// areas supported. If the value is 0, focus areas are not supported.
//
// Each focus area is a five-element int array. The first four elements are
// the rectangle of the area (left, top, right, bottom). The direction is
// relative to the sensor orientation, that is, what the sensor sees. The
// direction is not affected by the rotation or mirroring of
// CAMERA_CMD_SET_DISPLAY_ORIENTATION. Coordinates range from -1000 to 1000.
// (-1000,-1000) is the upper left point. (1000, 1000) is the lower right
// point. The width and height of focus areas cannot be 0 or negative.
//
// The fifth element is the weight. Values for weight must range from 1 to
// 1000. The weight should be interpreted as a per-pixel weight - all
// pixels in the area have the specified weight. This means a small area
// with the same weight as a larger area will have less influence on the
// focusing than the larger area. Focus areas can partially overlap and the
// driver will add the weights in the overlap region.
//
// A special case of a single focus area, (0,0,0,0,0), means the driver decides
// the focus area. For example, the driver may use more signals to decide
// focus areas and change them dynamically. Apps can set (0,0,0,0,0) if they
// want the driver to decide focus areas.
//
// Focus areas are relative to the current field of view (KEY_ZOOM). No
// matter what the zoom level is, (-1000,-1000) represents the top of the
// currently visible camera frame. The focus area cannot be set to be
// outside the current field of view, even when using zoom.
//
// Focus area only has effect if the current focus mode is FOCUS_MODE_AUTO,
// FOCUS_MODE_MACRO, FOCUS_MODE_CONTINUOUS_VIDEO, or
// FOCUS_MODE_CONTINUOUS_PICTURE.
// Example value: "(-10,-10,0,0,300),(0,0,10,10,700)". Read/write.
static const char KEY_FOCUS_AREAS[];
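// Illustrative sketch of building the area string (hypothetical caller code;
// KEY_METERING_AREAS uses the same "(left,top,right,bottom,weight)" encoding):
//
//   String8 area = String8::format("(%d,%d,%d,%d,%d)",
//           -100, -100, 100, 100, 800);
//   params.set(CameraParameters::KEY_FOCUS_AREAS, area.string());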
// Focal length in millimeter.
// Example value: "4.31". Read only.
static const char KEY_FOCAL_LENGTH[];
// Horizontal angle of view in degrees.
// Example value: "54.8". Read only.
static const char KEY_HORIZONTAL_VIEW_ANGLE[];
// Vertical angle of view in degrees.
// Example value: "42.5". Read only.
static const char KEY_VERTICAL_VIEW_ANGLE[];
// Exposure compensation index. 0 means exposure is not adjusted.
// Example value: "-5" or "5". Read/write.
static const char KEY_EXPOSURE_COMPENSATION[];
// The maximum exposure compensation index (>=0).
// Example value: "6". Read only.
static const char KEY_MAX_EXPOSURE_COMPENSATION[];
// The minimum exposure compensation index (<=0).
// Example value: "-6". Read only.
static const char KEY_MIN_EXPOSURE_COMPENSATION[];
// The exposure compensation step. The exposure compensation index multiplied
// by the step equals the EV. For example, if the exposure compensation index
// is -6 and the step is 0.3333, the EV is -2.
// Example value: "0.333333333" or "0.5". Read only.
static const char KEY_EXPOSURE_COMPENSATION_STEP[];
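// Illustrative arithmetic (a sketch; "params" is a hypothetical
// CameraParameters instance):
//
//   int   index = params.getInt(CameraParameters::KEY_EXPOSURE_COMPENSATION);
//   float step  = params.getFloat(CameraParameters::KEY_EXPOSURE_COMPENSATION_STEP);
//   float ev    = index * step;   // e.g. -6 * 0.3333f is roughly -2 EV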
// The state of the auto-exposure lock. "true" means that
// auto-exposure is locked to its current value and will not
// change. "false" means the auto-exposure routine is free to
// change exposure values. If auto-exposure is already locked,
// setting this to true again has no effect (the driver will not
// recalculate exposure values). Changing exposure compensation
// settings will still affect the exposure settings while
// auto-exposure is locked. Stopping preview or taking a still
// image will not change the lock. In conjunction with
// exposure compensation, this allows for capturing multi-exposure
// brackets with known relative exposure values. Locking
// auto-exposure after open but before the first call to
// startPreview may result in severely over- or under-exposed
// images. The driver will not change the AE lock after
// auto-focus completes.
static const char KEY_AUTO_EXPOSURE_LOCK[];
// Whether locking the auto-exposure is supported. "true" means it is, and
// "false" or this key not existing means it is not supported.
static const char KEY_AUTO_EXPOSURE_LOCK_SUPPORTED[];
// The state of the auto-white balance lock. "true" means that
// auto-white balance is locked to its current value and will not
// change. "false" means the auto-white balance routine is free to
// change white balance values. If auto-white balance is already
// locked, setting this to true again has no effect (the driver
// will not recalculate white balance values). Stopping preview or
// taking a still image will not change the lock. In conjunction
// with exposure compensation, this allows for capturing
// multi-exposure brackets with fixed white balance. Locking
// auto-white balance after open but before the first call to
// startPreview may result in severely incorrect color. The
// driver will not change the AWB lock after auto-focus
// completes.
static const char KEY_AUTO_WHITEBALANCE_LOCK[];
// Whether locking the auto-white balance is supported. "true"
// means it is, and "false" or this key not existing means it is
// not supported.
static const char KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED[];
// The maximum number of metering areas supported. This is the maximum
// length of KEY_METERING_AREAS.
// Example value: "0" or "2". Read only.
static const char KEY_MAX_NUM_METERING_AREAS[];
// Current metering areas. Camera driver uses these areas to decide
// exposure.
//
// Before accessing this parameter, apps should check
// KEY_MAX_NUM_METERING_AREAS first to know the maximum number of metering
// areas supported. If the value is 0, metering areas are not supported.
//
// Each metering area is a rectangle with specified weight. The direction is
// relative to the sensor orientation, that is, what the sensor sees. The
// direction is not affected by the rotation or mirroring of
// CAMERA_CMD_SET_DISPLAY_ORIENTATION. Coordinates of the rectangle range
// from -1000 to 1000. (-1000, -1000) is the upper left point. (1000, 1000)
// is the lower right point. The width and height of metering areas cannot
// be 0 or negative.
//
// The fifth element is the weight. Values for weight must range from 1 to
// 1000. The weight should be interpreted as a per-pixel weight - all
// pixels in the area have the specified weight. This means a small area
// with the same weight as a larger area will have less influence on the
// metering than the larger area. Metering areas can partially overlap and
// the driver will add the weights in the overlap region.
//
// A special case of an all-zero single metering area means the driver decides
// the metering area. For example, the driver may use more signals to decide
// metering areas and change them dynamically. Apps can set all-zero if they
// want the driver to decide metering areas.
//
// Metering areas are relative to the current field of view (KEY_ZOOM).
// No matter what the zoom level is, (-1000,-1000) represents the top of the
// currently visible camera frame. The metering area cannot be set to be
// outside the current field of view, even when using zoom.
//
// No matter what the metering areas are, the final exposure is compensated
// by KEY_EXPOSURE_COMPENSATION.
// Example value: "(-10,-10,0,0,300),(0,0,10,10,700)". Read/write.
static const char KEY_METERING_AREAS[];
// Current zoom value.
// Example value: "0" or "6". Read/write.
static const char KEY_ZOOM[];
// Maximum zoom value.
// Example value: "6". Read only.
static const char KEY_MAX_ZOOM[];
// The zoom ratios of all zoom values. The zoom ratio is in 1/100
// increments. Ex: a zoom of 3.2x is returned as 320. The number of list
// elements is KEY_MAX_ZOOM + 1. The first element is always 100. The last
// element is the zoom ratio of zoom value KEY_MAX_ZOOM.
// Example value: "100,150,200,250,300,350,400". Read only.
static const char KEY_ZOOM_RATIOS[];
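// Illustrative sketch (hypothetical caller code; the ratio list is assumed to
// have been parsed from KEY_ZOOM_RATIOS into a Vector<int> named "ratios"):
//
//   int zoom = params.getInt(CameraParameters::KEY_ZOOM);   // e.g. 3
//   float magnification = ratios[zoom] / 100.0f;            // e.g. 250 -> 2.5x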
// Whether zoom is supported. Zoom is supported if the value is "true". Zoom
// is not supported if the value is not "true" or the key does not exist.
// Example value: "true". Read only.
static const char KEY_ZOOM_SUPPORTED[];
// Whether smooth zoom is supported. Smooth zoom is supported if the
// value is "true". It is not supported if the value is not "true" or the
// key does not exist.
// See CAMERA_CMD_START_SMOOTH_ZOOM, CAMERA_CMD_STOP_SMOOTH_ZOOM, and
// CAMERA_MSG_ZOOM in frameworks/base/include/camera/Camera.h.
// Example value: "true". Read only.
static const char KEY_SMOOTH_ZOOM_SUPPORTED[];
// The distances (in meters) from the camera to where an object appears to
// be in focus. The object is sharpest at the optimal focus distance. The
// depth of field is the far focus distance minus near focus distance.
//
// Focus distances may change after starting auto focus, canceling auto
// focus, or starting the preview. Applications can read this anytime to get
// the latest focus distances. If the focus mode is FOCUS_MODE_CONTINUOUS,
// focus distances may change from time to time.
//
// This is intended to estimate the distance between the camera and the
// subject. After autofocus, the subject distance may be within near and far
// focus distance. However, the precision depends on the camera hardware,
// autofocus algorithm, the focus area, and the scene. The error can be
// large and it should be only used as a reference.
//
// Far focus distance > optimal focus distance > near focus distance. If
// the far focus distance is infinity, the value should be "Infinity" (case
// sensitive). The format is three float values separated by commas. The
// first is near focus distance. The second is optimal focus distance. The
// third is far focus distance.
// Example value: "0.95,1.9,Infinity" or "0.049,0.05,0.051". Read only.
static const char KEY_FOCUS_DISTANCES[];
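// Illustrative parsing sketch (hypothetical caller code; per the C standard
// the "%f" conversion also accepts the literal "Infinity"):
//
//   float nearM = 0, optimalM = 0, farM = 0;
//   const char* v = params.get(CameraParameters::KEY_FOCUS_DISTANCES);
//   if (v != NULL && sscanf(v, "%f,%f,%f", &nearM, &optimalM, &farM) == 3) {
//       // e.g. "0.95,1.9,Infinity" -> 0.95f, 1.9f, INFINITY
//   }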
// The current dimensions in pixels (width x height) for video frames.
// The width and height must be one of the supported sizes retrieved
// via KEY_SUPPORTED_VIDEO_SIZES.
// Example value: "1280x720". Read/write.
static const char KEY_VIDEO_SIZE[];
// A list of the supported dimensions in pixels (width x height)
// for video frames. See CAMERA_MSG_VIDEO_FRAME for details in
// frameworks/base/include/camera/Camera.h.
// Example: "176x144,1280x720". Read only.
static const char KEY_SUPPORTED_VIDEO_SIZES[];
// The maximum number of detected faces supported by hardware face
// detection. If the value is 0, hardware face detection is not supported.
// Example: "5". Read only
static const char KEY_MAX_NUM_DETECTED_FACES_HW[];
// The maximum number of detected faces supported by software face
// detection. If the value is 0, software face detection is not supported.
// Example: "5". Read only
static const char KEY_MAX_NUM_DETECTED_FACES_SW[];
// Preferred preview frame size in pixels for video recording.
// The width and height must be one of the supported sizes retrieved
// via KEY_SUPPORTED_PREVIEW_SIZES. This key can be used only when
// getSupportedVideoSizes() does not return an empty Vector of Size.
// Camcorder applications are recommended to set the preview size
// to a value that is not larger than the preferred preview size.
// In other words, the product of the width and height of the
// preview size should not be larger than that of the preferred
// preview size. In addition, we recommend choosing a preview size
// that has the same aspect ratio as the resolution of video to be
// recorded.
// Example value: "800x600". Read only.
static const char KEY_PREFERRED_PREVIEW_SIZE_FOR_VIDEO[];
// The image format for video frames. See CAMERA_MSG_VIDEO_FRAME in
// frameworks/base/include/camera/Camera.h.
// Example value: "yuv420sp" or PIXEL_FORMAT_XXX constants. Read only.
static const char KEY_VIDEO_FRAME_FORMAT[];
// Sets the hint of the recording mode. If this is true, MediaRecorder.start
// may be faster or have fewer glitches. This hint should be set before
// starting the preview for the best result, but it is allowed to change the
// hint while the preview is active. The default value is false.
//
// The apps can still call Camera.takePicture when the hint is true. The
// apps can call MediaRecorder.start when the hint is false. But the
// performance may be worse.
// Example value: "true" or "false". Read/write.
static const char KEY_RECORDING_HINT[];
// Returns true if video snapshot is supported. That is, applications
// can call Camera.takePicture during recording. Applications do not need to
// call Camera.startPreview after taking a picture. The preview will still
// be active. Other than that, taking a picture during recording is
// identical to taking a picture normally. All settings and methods related
// to takePicture work identically. Ex: KEY_PICTURE_SIZE,
// KEY_SUPPORTED_PICTURE_SIZES, KEY_JPEG_QUALITY, KEY_ROTATION, etc.
// The picture will have an EXIF header. FLASH_MODE_AUTO and FLASH_MODE_ON
// also still work, but the video will record the flash.
//
// Applications can set shutter callback as null to avoid the shutter
// sound. It is also recommended to set raw picture and post view callbacks
// to null to avoid interrupting the preview display.
//
// Field-of-view of the recorded video may be different from that of the
// captured pictures.
// Example value: "true" or "false". Read only.
static const char KEY_VIDEO_SNAPSHOT_SUPPORTED[];
// The state of the video stabilization. If set to true, both the
// preview stream and the recorded video stream are stabilized by
// the camera. Only valid to set if KEY_VIDEO_STABILIZATION_SUPPORTED is
// set to true.
//
// The value of this key can be changed any time the camera is
// open. If preview or recording is active, it is acceptable for
// there to be a slight video glitch when video stabilization is
// toggled on and off.
//
// This only stabilizes video streams (between-frames stabilization), and
// has no effect on still image capture.
static const char KEY_VIDEO_STABILIZATION[];
// Returns true if video stabilization is supported. That is, applications
// can set KEY_VIDEO_STABILIZATION to true and have a stabilized preview
// stream and record stabilized videos.
static const char KEY_VIDEO_STABILIZATION_SUPPORTED[];
// Supported modes for special effects with light.
// Example values: "lowlight,hdr".
static const char KEY_LIGHTFX[];
// Value for KEY_ZOOM_SUPPORTED or KEY_SMOOTH_ZOOM_SUPPORTED.
static const char TRUE[];
static const char FALSE[];
// Value for KEY_FOCUS_DISTANCES.
static const char FOCUS_DISTANCE_INFINITY[];
// Values for white balance settings.
static const char WHITE_BALANCE_AUTO[];
static const char WHITE_BALANCE_INCANDESCENT[];
static const char WHITE_BALANCE_FLUORESCENT[];
static const char WHITE_BALANCE_WARM_FLUORESCENT[];
static const char WHITE_BALANCE_DAYLIGHT[];
static const char WHITE_BALANCE_CLOUDY_DAYLIGHT[];
static const char WHITE_BALANCE_TWILIGHT[];
static const char WHITE_BALANCE_SHADE[];
static const char WHITE_BALANCE_MANUAL_CCT[];
// Values for effect settings.
static const char EFFECT_NONE[];
static const char EFFECT_MONO[];
static const char EFFECT_NEGATIVE[];
static const char EFFECT_SOLARIZE[];
static const char EFFECT_SEPIA[];
static const char EFFECT_POSTERIZE[];
static const char EFFECT_WHITEBOARD[];
static const char EFFECT_BLACKBOARD[];
static const char EFFECT_AQUA[];
// Values for antibanding settings.
static const char ANTIBANDING_AUTO[];
static const char ANTIBANDING_50HZ[];
static const char ANTIBANDING_60HZ[];
static const char ANTIBANDING_OFF[];
// Values for flash mode settings.
// Flash will not be fired.
static const char FLASH_MODE_OFF[];
// Flash will be fired automatically when required. The flash may be fired
// during preview, auto-focus, or snapshot depending on the driver.
static const char FLASH_MODE_AUTO[];
// Flash will always be fired during snapshot. The flash may also be
// fired during preview or auto-focus depending on the driver.
static const char FLASH_MODE_ON[];
// Flash will be fired in red-eye reduction mode.
static const char FLASH_MODE_RED_EYE[];
// Constant emission of light during preview, auto-focus and snapshot.
// This can also be used for video recording.
static const char FLASH_MODE_TORCH[];
// Values for scene mode settings.
static const char SCENE_MODE_AUTO[];
static const char SCENE_MODE_ACTION[];
static const char SCENE_MODE_PORTRAIT[];
static const char SCENE_MODE_LANDSCAPE[];
static const char SCENE_MODE_NIGHT[];
static const char SCENE_MODE_NIGHT_PORTRAIT[];
static const char SCENE_MODE_THEATRE[];
static const char SCENE_MODE_BEACH[];
static const char SCENE_MODE_SNOW[];
static const char SCENE_MODE_SUNSET[];
static const char SCENE_MODE_STEADYPHOTO[];
static const char SCENE_MODE_FIREWORKS[];
static const char SCENE_MODE_SPORTS[];
static const char SCENE_MODE_PARTY[];
static const char SCENE_MODE_CANDLELIGHT[];
// Applications are looking for a barcode. Camera driver will be optimized
// for barcode reading.
static const char SCENE_MODE_BARCODE[];
// A high-dynamic range mode. In this mode, the HAL module will use a
// capture strategy that extends the dynamic range of the captured
// image in some fashion. Only the final image is returned.
static const char SCENE_MODE_HDR[];
// Pixel color formats for KEY_PREVIEW_FORMAT, KEY_PICTURE_FORMAT,
// and KEY_VIDEO_FRAME_FORMAT
static const char PIXEL_FORMAT_YUV422SP[];
static const char PIXEL_FORMAT_YUV420SP[]; // NV21
static const char PIXEL_FORMAT_YUV422I[]; // YUY2
static const char PIXEL_FORMAT_YUV420P[]; // YV12
static const char PIXEL_FORMAT_RGB565[];
static const char PIXEL_FORMAT_RGBA8888[];
static const char PIXEL_FORMAT_JPEG[];
// Raw Bayer format used for images: 10-bit precision samples
// stored in 16-bit words. The filter pattern is RGGB.
static const char PIXEL_FORMAT_BAYER_RGGB[];
// Pixel format is not known to the framework
static const char PIXEL_FORMAT_ANDROID_OPAQUE[];
// Values for focus mode settings.
// Auto-focus mode. Applications should call
// CameraHardwareInterface.autoFocus to start the focus in this mode.
static const char FOCUS_MODE_AUTO[];
// Focus is set at infinity. Applications should not call
// CameraHardwareInterface.autoFocus in this mode.
static const char FOCUS_MODE_INFINITY[];
// Macro (close-up) focus mode. Applications should call
// CameraHardwareInterface.autoFocus to start the focus in this mode.
static const char FOCUS_MODE_MACRO[];
// Focus is fixed. The camera is always in this mode if the focus is not
// adjustable. If the camera has auto-focus, this mode can fix the
// focus, which is usually at hyperfocal distance. Applications should
// not call CameraHardwareInterface.autoFocus in this mode.
static const char FOCUS_MODE_FIXED[];
// Extended depth of field (EDOF). Focusing is done digitally and
// continuously. Applications should not call
// CameraHardwareInterface.autoFocus in this mode.
static const char FOCUS_MODE_EDOF[];
// Continuous auto focus mode intended for video recording. The camera
// continuously tries to focus. This is the best choice for video
// recording because the focus changes smoothly. Applications can still
// call CameraHardwareInterface.takePicture in this mode but the subject may
// not be in focus. Auto focus starts when the parameter is set.
//
// Applications can call CameraHardwareInterface.autoFocus in this mode. The
// focus callback will immediately return with a boolean that indicates
// whether the focus is sharp or not. The focus position is locked after
// autoFocus call. If applications want to resume the continuous focus,
// cancelAutoFocus must be called. Restarting the preview will not resume
// the continuous autofocus. To stop continuous focus, applications should
// change the focus mode to other modes.
static const char FOCUS_MODE_CONTINUOUS_VIDEO[];
// Continuous auto focus mode intended for taking pictures. The camera
// continuously tries to focus. The speed of focus change is more aggressive
// than FOCUS_MODE_CONTINUOUS_VIDEO. Auto focus starts when the parameter is
// set.
//
// Applications can call CameraHardwareInterface.autoFocus in this mode. If
// the autofocus is in the middle of scanning, the focus callback will
// return when it completes. If the autofocus is not scanning, focus
// callback will immediately return with a boolean that indicates whether
// the focus is sharp or not. The apps can then decide if they want to take
// a picture immediately or to change the focus mode to auto, and run a full
// autofocus cycle. The focus position is locked after autoFocus call. If
// applications want to resume the continuous focus, cancelAutoFocus must be
// called. Restarting the preview will not resume the continuous autofocus.
// To stop continuous focus, applications should change the focus mode to
// other modes.
static const char FOCUS_MODE_CONTINUOUS_PICTURE[];
static const char FOCUS_MODE_MANUAL_POSITION[];
// Values for light special effects
// Low-light enhancement mode
static const char LIGHTFX_LOWLIGHT[];
// High-dynamic range mode
static const char LIGHTFX_HDR[];
#ifdef CAMERA_PARAMETERS_EXTRA_H
CAMERA_PARAMETERS_EXTRA_H
#endif
/**
* Returns the supported preview format as an enum given in graphics.h
* corresponding to the format given in the input string, or -1 if no such
* conversion exists.
*/
static int previewFormatToEnum(const char* format);
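// Illustrative usage (a sketch; "params" is a hypothetical CameraParameters
// instance):
//
//   int fmt = CameraParameters::previewFormatToEnum(params.getPreviewFormat());
//   // e.g. "yuv420sp" maps to the corresponding graphics.h enum, or -1 if
//   // the string is not recognized.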
private:
DefaultKeyedVector<String8,String8> mMap;
};
}; // namespace android
#endif

View file

@@ -1,203 +0,0 @@
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_CAMERA_PARAMETERS2_H
#define ANDROID_HARDWARE_CAMERA_PARAMETERS2_H
#include <utils/Vector.h>
#include <utils/String8.h>
#include "CameraParameters.h"
namespace android {
/**
* A copy of CameraParameters plus ABI-breaking changes. Needed
* because some camera HALs directly link to CameraParameters and cannot
* tolerate an ABI change.
*/
class CameraParameters2
{
public:
CameraParameters2();
CameraParameters2(const String8 &params) { unflatten(params); }
~CameraParameters2();
String8 flatten() const;
void unflatten(const String8 &params);
void set(const char *key, const char *value);
void set(const char *key, int value);
void setFloat(const char *key, float value);
// Look up string value by key.
// -- The string remains valid until the next set/remove of the same key,
// or until the map gets cleared.
const char *get(const char *key) const;
int getInt(const char *key) const;
float getFloat(const char *key) const;
// Compare the order that key1 was set vs the order that key2 was set.
//
// Sets the order parameter to an integer less than, equal to, or greater
// than zero if key1's set order was respectively, to be less than, to
// match, or to be greater than key2's set order.
//
// Error codes:
// * NAME_NOT_FOUND - if either key has not been set previously
// * BAD_VALUE - if any of the parameters are NULL
status_t compareSetOrder(const char *key1, const char *key2,
/*out*/
int *order) const;
void remove(const char *key);
void setPreviewSize(int width, int height);
void getPreviewSize(int *width, int *height) const;
void getSupportedPreviewSizes(Vector<Size> &sizes) const;
// Set the dimensions in pixels to the given width and height
// for video frames. The given width and height must be one
// of the supported dimensions returned from
// getSupportedVideoSizes(). Must not be called if
// getSupportedVideoSizes() returns an empty Vector of Size.
void setVideoSize(int width, int height);
// Retrieve the current dimensions (width and height)
// in pixels for video frames, which must be one of the
// supported dimensions returned from getSupportedVideoSizes().
// Must not be called if getSupportedVideoSizes() returns an
// empty Vector of Size.
void getVideoSize(int *width, int *height) const;
// Retrieve a Vector of supported dimensions (width and height)
// in pixels for video frames. If the returned Vector is empty,
// the camera does not support calls to setVideoSize() or
// getVideoSize(). In addition, it indicates that the camera has
// only a single output and does not provide separate outputs for
// video frames and preview frames.
void getSupportedVideoSizes(Vector<Size> &sizes) const;
// Retrieve the preferred preview size (width and height) in pixels
// for video recording. The returned width and height will be one of
// the supported preview sizes returned from getSupportedPreviewSizes().
// Must not be called if getSupportedVideoSizes() returns an empty
// Vector of Size; in that case the width and height returned from this
// method are invalid ("-1x-1").
void getPreferredPreviewSizeForVideo(int *width, int *height) const;
void setPreviewFrameRate(int fps);
int getPreviewFrameRate() const;
void getPreviewFpsRange(int *min_fps, int *max_fps) const;
void setPreviewFpsRange(int min_fps, int max_fps);
void setPreviewFormat(const char *format);
const char *getPreviewFormat() const;
void setPictureSize(int width, int height);
void getPictureSize(int *width, int *height) const;
void getSupportedPictureSizes(Vector<Size> &sizes) const;
void setPictureFormat(const char *format);
const char *getPictureFormat() const;
void dump() const;
status_t dump(int fd, const Vector<String16>& args) const;
private:
// Quick and dirty map that maintains insertion order
template <typename KeyT, typename ValueT>
struct OrderedKeyedVector {
ssize_t add(const KeyT& key, const ValueT& value) {
return mList.add(Pair(key, value));
}
size_t size() const {
return mList.size();
}
const KeyT& keyAt(size_t idx) const {
return mList[idx].mKey;
}
const ValueT& valueAt(size_t idx) const {
return mList[idx].mValue;
}
const ValueT& valueFor(const KeyT& key) const {
ssize_t i = indexOfKey(key);
LOG_ALWAYS_FATAL_IF(i<0, "%s: key not found", __PRETTY_FUNCTION__);
return valueAt(i);
}
ssize_t indexOfKey(const KeyT& key) const {
size_t vectorIdx = 0;
for (; vectorIdx < mList.size(); ++vectorIdx) {
if (mList[vectorIdx].mKey == key) {
return (ssize_t) vectorIdx;
}
}
return NAME_NOT_FOUND;
}
ssize_t removeItem(const KeyT& key) {
// indexOfKey() returns NAME_NOT_FOUND (< 0) when the key is absent, so the
// index must stay signed for the comparison below to work.
ssize_t vectorIdx = indexOfKey(key);
if (vectorIdx < 0) {
return vectorIdx;
}
return mList.removeAt((size_t) vectorIdx);
}
void clear() {
mList.clear();
}
// Same as removing and re-adding: the key moves to the largest index.
ssize_t replaceValueFor(const KeyT& key, const ValueT& value) {
removeItem(key);
return add(key, value);
}
private:
struct Pair {
Pair() : mKey(), mValue() {}
Pair(const KeyT& key, const ValueT& value) :
mKey(key),
mValue(value) {}
KeyT mKey;
ValueT mValue;
};
Vector<Pair> mList;
};
/**
* Order matters: Keys that are set() later are stored later in the map.
*
* If two keys have meaning that conflict, then the later-set key
* wins.
*
* For example, preview FPS and preview FPS range conflict, since we only
* want to use the FPS range if that is the last thing that was set.
* So in that case, only use the preview FPS range if it was set later than
* the preview FPS.
*/
OrderedKeyedVector<String8,String8> mMap;
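// Illustrative sketch of how the set order is intended to be consumed
// (hypothetical caller code, not part of this class):
//
//   int order = 0;
//   if (params2.compareSetOrder(CameraParameters::KEY_PREVIEW_FRAME_RATE,
//                               CameraParameters::KEY_PREVIEW_FPS_RANGE,
//                               &order) == OK && order < 0) {
//       // The FPS range was set after the single frame rate, so prefer it.
//   }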
};
}; // namespace android
#endif

View file

@@ -1,35 +0,0 @@
// Overload this file in your device-specific config if you need
// to add extra camera parameters.
// A typical file would look like this:
/*
* Copyright (C) 2014 The CyanogenMod Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
#define CAMERA_PARAMETERS_EXTRA_C \
const char CameraParameters::KEY_SUPPORTED_BURST_NUM[] = "supported-burst-num"; \
const char CameraParameters::KEY_BURST_NUM[] = "burst-num"; \
const char CameraParameters::KEY_SUPPORTED_HDR_MODES[] = "supported-hdr-modes"; \
const char CameraParameters::KEY_HDR_MODE[] = "hdr-mode"; \
const char CameraParameters::HDR_MODE_OFF[] = "hdr-mode-off"; \
const char CameraParameters::HDR_MODE_HDR[] = "hdr-mode-hdr";
#define CAMERA_PARAMETERS_EXTRA_H \
static const char KEY_SUPPORTED_BURST_NUM[]; \
static const char KEY_BURST_NUM[]; \
static const char KEY_SUPPORTED_HDR_MODES[]; \
static const char KEY_HDR_MODE[]; \
static const char HDR_MODE_OFF[]; \
static const char HDR_MODE_HDR[];
*/

View file

@@ -1,49 +0,0 @@
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_CAMERA_CLIENT_CAMERAUTILS_H
#define ANDROID_CAMERA_CLIENT_CAMERAUTILS_H
#include <camera/CameraMetadata.h>
#include <utils/Errors.h>
#include <stdint.h>
namespace android {
/**
* CameraUtils contains utility methods that are shared between the native
* camera client, and the camera service.
*/
class CameraUtils {
public:
/**
* Calculate the ANativeWindow transform from the static camera
* metadata. This is based on the sensor orientation and lens facing
* attributes of the camera device.
*
* Returns OK on success, or a negative error code.
*/
static status_t getRotationTransform(const CameraMetadata& staticInfo,
/*out*/int32_t* transform);
private:
CameraUtils();
};
} /* namespace android */
#endif /* ANDROID_CAMERA_CLIENT_CAMERAUTILS_H */

View file

@@ -1,96 +0,0 @@
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_CAPTURERESULT_H
#define ANDROID_HARDWARE_CAPTURERESULT_H
#include <utils/RefBase.h>
#include <camera/CameraMetadata.h>
namespace android {
/**
* CaptureResultExtras is a structure to encapsulate various indices for a capture result.
* These indices are framework-internal and not sent to the HAL.
*/
struct CaptureResultExtras {
/**
* An integer to index the request sequence that this result belongs to.
*/
int32_t requestId;
/**
* An integer to index this result inside a request sequence, starting from 0.
*/
int32_t burstId;
/**
* TODO: Add documentation for this field.
*/
int32_t afTriggerId;
/**
* TODO: Add documentation for this field.
*/
int32_t precaptureTriggerId;
/**
* A 64-bit integer to index the frame number associated with this result.
*/
int64_t frameNumber;
/**
* The partial result count (index) for this capture result.
*/
int32_t partialResultCount;
/**
* Constructor initializes object as invalid by setting requestId to be -1.
*/
CaptureResultExtras()
: requestId(-1),
burstId(0),
afTriggerId(0),
precaptureTriggerId(0),
frameNumber(0),
partialResultCount(0) {
}
/**
* This function returns true if it's a valid CaptureResultExtras object.
* Otherwise, returns false. It is valid only when requestId is non-negative.
*/
bool isValid();
status_t readFromParcel(Parcel* parcel);
status_t writeToParcel(Parcel* parcel) const;
};
struct CaptureResult : public virtual LightRefBase<CaptureResult> {
CameraMetadata mMetadata;
CaptureResultExtras mResultExtras;
CaptureResult();
CaptureResult(const CaptureResult& otherResult);
status_t readFromParcel(Parcel* parcel);
status_t writeToParcel(Parcel* parcel) const;
};
}
#endif /* ANDROID_HARDWARE_CAPTURERESULT_H */

View file

@@ -1,129 +0,0 @@
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_ICAMERA_H
#define ANDROID_HARDWARE_ICAMERA_H
#include <utils/RefBase.h>
#include <binder/IInterface.h>
#include <binder/Parcel.h>
#include <binder/IMemory.h>
#include <utils/String8.h>
#include <camera/Camera.h>
namespace android {
class ICameraClient;
class IGraphicBufferProducer;
class Surface;
class ICamera: public IInterface
{
/**
* Keep up-to-date with ICamera.aidl in frameworks/base
*/
public:
DECLARE_META_INTERFACE(Camera);
virtual void disconnect() = 0;
// connect new client with existing camera remote
virtual status_t connect(const sp<ICameraClient>& client) = 0;
// prevent other processes from using this ICamera interface
virtual status_t lock() = 0;
// allow other processes to use this ICamera interface
virtual status_t unlock() = 0;
// pass the buffered IGraphicBufferProducer to the camera service
virtual status_t setPreviewTarget(
const sp<IGraphicBufferProducer>& bufferProducer) = 0;
// set the preview callback flag to affect how the received frames from
// preview are handled. Enabling preview callback flags disables any active
// preview callback surface set by setPreviewCallbackTarget().
virtual void setPreviewCallbackFlag(int flag) = 0;
// set a buffer interface to use for client-received preview frames instead
// of preview callback buffers. Passing a valid interface here disables any
// active preview callbacks set by setPreviewCallbackFlag(). Passing NULL
// disables the use of the callback target.
virtual status_t setPreviewCallbackTarget(
const sp<IGraphicBufferProducer>& callbackProducer) = 0;
// start preview mode, must call setPreviewTarget first
virtual status_t startPreview() = 0;
// stop preview mode
virtual void stopPreview() = 0;
// get preview state
virtual bool previewEnabled() = 0;
// start recording mode
virtual status_t startRecording() = 0;
// stop recording mode
virtual void stopRecording() = 0;
// get recording state
virtual bool recordingEnabled() = 0;
// release a recording frame
virtual void releaseRecordingFrame(const sp<IMemory>& mem) = 0;
// auto focus
virtual status_t autoFocus() = 0;
// cancel auto focus
virtual status_t cancelAutoFocus() = 0;
/*
* take a picture.
* @param msgType the message type an application selectively turn on/off
* on a photo-by-photo basis. The supported message types are:
* CAMERA_MSG_SHUTTER, CAMERA_MSG_RAW_IMAGE, CAMERA_MSG_COMPRESSED_IMAGE,
* and CAMERA_MSG_POSTVIEW_FRAME. Any other message types will be ignored.
*/
virtual status_t takePicture(int msgType) = 0;
// set preview/capture parameters - key/value pairs
virtual status_t setParameters(const String8& params) = 0;
// get preview/capture parameters - key/value pairs
virtual String8 getParameters() const = 0;
// send command to camera driver
virtual status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2) = 0;
// tell the camera HAL to store metadata or real YUV data in video buffers.
virtual status_t storeMetaDataInBuffers(bool enabled) = 0;
};
// ----------------------------------------------------------------------------
class BnCamera: public BnInterface<ICamera>
{
public:
virtual status_t onTransact( uint32_t code,
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
};
}; // namespace android
#endif

View file

@@ -1,56 +0,0 @@
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_ICAMERA_APP_H
#define ANDROID_HARDWARE_ICAMERA_APP_H
#include <utils/RefBase.h>
#include <binder/IInterface.h>
#include <binder/Parcel.h>
#include <binder/IMemory.h>
#include <utils/Timers.h>
#include <system/camera.h>
namespace android {
class ICameraClient: public IInterface
{
/**
* Keep up-to-date with ICameraClient.aidl in frameworks/base
*/
public:
DECLARE_META_INTERFACE(CameraClient);
virtual void notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2) = 0;
virtual void dataCallback(int32_t msgType, const sp<IMemory>& data,
camera_frame_metadata_t *metadata) = 0;
virtual void dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& data) = 0;
};
// ----------------------------------------------------------------------------
class BnCameraClient: public BnInterface<ICameraClient>
{
public:
virtual status_t onTransact( uint32_t code,
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
};
}; // namespace android
#endif

View file

@@ -1,107 +0,0 @@
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_H
#define ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_H
#include <binder/IInterface.h>
#include <utils/RefBase.h>
namespace android {
class ICameraRecordingProxyListener;
class IMemory;
class Parcel;
/*
* The purpose of ICameraRecordingProxy and ICameraRecordingProxyListener is to
* allow applications to use the camera during recording.
*
* Camera service allows only one client at a time. Since the camcorder application
* needs to own the camera to do things like zoom, the media recorder cannot
* access the camera directly during recording. So ICameraRecordingProxy is a
* proxy of ICamera, which allows the media recorder to start/stop the recording
* and release recording frames. ICameraRecordingProxyListener is an interface
* that allows the recorder to receive video frames during recording.
*
* ICameraRecordingProxy
* startRecording()
* stopRecording()
* releaseRecordingFrame()
*
* ICameraRecordingProxyListener
* dataCallbackTimestamp()
* The camcorder app opens the camera and starts the preview. The app passes
* ICamera and ICameraRecordingProxy to the media recorder by
* MediaRecorder::setCamera(). The recorder uses ICamera to setup the camera in
* MediaRecorder::start(). After setup, the recorder disconnects from camera
* service. The recorder calls ICameraRecordingProxy::startRecording() and
* passes an ICameraRecordingProxyListener to the app. The app connects back to
* camera service and starts the recording. The app owns the camera and can do
* things like zoom. The media recorder receives the video frames from the
* listener and releases them by ICameraRecordingProxy::releaseRecordingFrame.
* The recorder calls ICameraRecordingProxy::stopRecording() to stop the
* recording.
*
* The call sequences are as follows:
* 1. The app: Camera.unlock().
* 2. The app: MediaRecorder.setCamera().
* 3. Start recording
* (1) The app: MediaRecorder.start().
* (2) The recorder: ICamera.unlock() and ICamera.disconnect().
* (3) The recorder: ICameraRecordingProxy.startRecording().
* (4) The app: ICamera.reconnect().
* (5) The app: ICamera.startRecording().
* 4. During recording
* (1) The recorder: receive frames from ICameraRecordingProxyListener.dataCallbackTimestamp()
* (2) The recorder: release frames by ICameraRecordingProxy.releaseRecordingFrame().
* 5. Stop recording
* (1) The app: MediaRecorder.stop()
* (2) The recorder: ICameraRecordingProxy.stopRecording().
* (3) The app: ICamera.stopRecording().
*/
class ICameraRecordingProxy: public IInterface
{
public:
DECLARE_META_INTERFACE(CameraRecordingProxy);
virtual status_t startRecording(const sp<ICameraRecordingProxyListener>& listener) = 0;
virtual void stopRecording() = 0;
virtual void releaseRecordingFrame(const sp<IMemory>& mem) = 0;
// b/28466701
static size_t getCommonBaseAddress();
private:
static uint8_t baseObject;
};
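// Illustrative recorder-side sketch (hypothetical code that mirrors the call
// sequence documented above; "MyProxyListener" and "frame" are assumed names):
//
//   sp<ICameraRecordingProxy> proxy = ...;       // handed over via setCamera()
//   sp<ICameraRecordingProxyListener> listener = new MyProxyListener();
//   proxy->startRecording(listener);             // frames arrive through
//                                                // dataCallbackTimestamp()
//   proxy->releaseRecordingFrame(frame);         // return each IMemory buffer
//   proxy->stopRecording();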
// ----------------------------------------------------------------------------
class BnCameraRecordingProxy: public BnInterface<ICameraRecordingProxy>
{
public:
virtual status_t onTransact( uint32_t code,
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
};
}; // namespace android
#endif

View file

@@ -1,52 +0,0 @@
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_LISTENER_H
#define ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_LISTENER_H
#include <binder/IInterface.h>
#include <stdint.h>
#include <utils/RefBase.h>
#include <utils/Timers.h>
namespace android {
class Parcel;
class IMemory;
class ICameraRecordingProxyListener: public IInterface
{
public:
DECLARE_META_INTERFACE(CameraRecordingProxyListener);
virtual void dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType,
const sp<IMemory>& data) = 0;
};
// ----------------------------------------------------------------------------
class BnCameraRecordingProxyListener: public BnInterface<ICameraRecordingProxyListener>
{
public:
virtual status_t onTransact( uint32_t code,
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
};
}; // namespace android
#endif

View file

@@ -1,193 +0,0 @@
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_ICAMERASERVICE_H
#define ANDROID_HARDWARE_ICAMERASERVICE_H
#include <utils/RefBase.h>
#include <binder/IInterface.h>
#include <binder/Parcel.h>
namespace android {
class ICamera;
class ICameraClient;
class ICameraServiceListener;
class ICameraDeviceUser;
class ICameraDeviceCallbacks;
class CameraMetadata;
class VendorTagDescriptor;
class String16;
class ICameraService : public IInterface
{
public:
/**
* Keep up-to-date with ICameraService.aidl in frameworks/base
*/
enum {
GET_NUMBER_OF_CAMERAS = IBinder::FIRST_CALL_TRANSACTION,
GET_CAMERA_INFO,
CONNECT,
CONNECT_DEVICE,
ADD_LISTENER,
REMOVE_LISTENER,
GET_CAMERA_CHARACTERISTICS,
GET_CAMERA_VENDOR_TAG_DESCRIPTOR,
GET_LEGACY_PARAMETERS,
SUPPORTS_CAMERA_API,
CONNECT_LEGACY,
SET_TORCH_MODE,
NOTIFY_SYSTEM_EVENT,
};
enum {
USE_CALLING_UID = -1
};
enum {
API_VERSION_1 = 1,
API_VERSION_2 = 2,
};
enum {
CAMERA_TYPE_BACKWARD_COMPATIBLE = 0,
CAMERA_TYPE_ALL = 1,
};
enum {
CAMERA_HAL_API_VERSION_UNSPECIFIED = -1
};
/**
* Keep up-to-date with declarations in
* frameworks/base/services/core/java/com/android/server/camera/CameraService.java
*
* These event codes are intended to be used with the notifySystemEvent call.
*/
enum {
NO_EVENT = 0,
USER_SWITCHED,
};
public:
DECLARE_META_INTERFACE(CameraService);
// Get the number of cameras that support basic color camera operation
// (type CAMERA_TYPE_BACKWARD_COMPATIBLE)
virtual int32_t getNumberOfCameras() = 0;
// Get the number of cameras of the specified type, one of CAMERA_TYPE_*
// enums
virtual int32_t getNumberOfCameras(int cameraType) = 0;
virtual status_t getCameraInfo(int cameraId,
/*out*/
struct CameraInfo* cameraInfo) = 0;
virtual status_t getCameraCharacteristics(int cameraId,
/*out*/
CameraMetadata* cameraInfo) = 0;
virtual status_t getCameraVendorTagDescriptor(
/*out*/
sp<VendorTagDescriptor>& desc) = 0;
// Returns 'OK' if operation succeeded
// - Errors: ALREADY_EXISTS if the listener was already added
virtual status_t addListener(const sp<ICameraServiceListener>& listener)
= 0;
// Returns 'OK' if operation succeeded
// - Errors: BAD_VALUE if specified listener was not in the listener list
virtual status_t removeListener(const sp<ICameraServiceListener>& listener)
= 0;
/**
* clientPackageName and clientUid are used for permissions checking. If
* clientUid == USE_CALLING_UID, then the calling UID is used instead. Only
* trusted callers can set a clientUid other than USE_CALLING_UID.
*/
virtual status_t connect(const sp<ICameraClient>& cameraClient,
int cameraId,
const String16& clientPackageName,
int clientUid,
/*out*/
sp<ICamera>& device) = 0;
virtual status_t connectDevice(
const sp<ICameraDeviceCallbacks>& cameraCb,
int cameraId,
const String16& clientPackageName,
int clientUid,
/*out*/
sp<ICameraDeviceUser>& device) = 0;
virtual status_t getLegacyParameters(
int cameraId,
/*out*/
String16* parameters) = 0;
/**
* Returns OK if device supports camera2 api,
* returns -EOPNOTSUPP if it doesn't.
*/
virtual status_t supportsCameraApi(
int cameraId, int apiVersion) = 0;
/**
* Connect the device as a legacy device for a given HAL version.
* For halVersion, use CAMERA_DEVICE_API_VERSION_* for a particular
* version, or CAMERA_HAL_API_VERSION_UNSPECIFIED for a service-selected version.
*/
virtual status_t connectLegacy(const sp<ICameraClient>& cameraClient,
int cameraId, int halVersion,
const String16& clientPackageName,
int clientUid,
/*out*/
sp<ICamera>& device) = 0;
/**
* Turn on or off a camera's torch mode. Torch mode will be turned off by
* camera service if the latest client binder that turns it on dies.
*
* return values:
* 0: on a successful operation.
* -ENOSYS: the camera device doesn't support this operation. It is returned
* if and only if android.flash.info.available is false.
* -EBUSY: the camera device is opened.
* -EINVAL: camera_id is invalid or clientBinder is NULL when enabling a
* torch mode.
*/
virtual status_t setTorchMode(const String16& cameraId, bool enabled,
const sp<IBinder>& clientBinder) = 0;
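// Illustrative caller-side sketch (hypothetical; "cs" is an ICameraService
// binder proxy and "binder" a client-side IBinder token):
//
//   status_t err = cs->setTorchMode(String16("0"), /*enabled*/ true, binder);
//   if (err == -EBUSY) {
//       // The camera device is currently opened; the torch cannot be toggled.
//   }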
/**
* Notify the camera service of a system event. Should only be called from system_server.
*/
virtual void notifySystemEvent(int32_t eventId, const int32_t* args, size_t length) = 0;
};
// ----------------------------------------------------------------------------
class BnCameraService: public BnInterface<ICameraService>
{
public:
virtual status_t onTransact( uint32_t code,
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
};
}; // namespace android
#endif

View file

@@ -1,113 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_ICAMERASERVICE_LISTENER_H
#define ANDROID_HARDWARE_ICAMERASERVICE_LISTENER_H
#include <utils/RefBase.h>
#include <binder/IInterface.h>
#include <binder/Parcel.h>
#include <hardware/camera_common.h>
namespace android {
class ICameraServiceListener : public IInterface
{
/**
* Keep up-to-date with ICameraServiceListener.aidl in frameworks/base
*/
public:
/**
* Initial status will be transmitted with onStatusChanged() immediately
* after this listener is added to the service listener list.
*
* Allowed transitions:
*
* (Any) -> NOT_PRESENT
* NOT_PRESENT -> PRESENT
* NOT_PRESENT -> ENUMERATING
* ENUMERATING -> PRESENT
* PRESENT -> NOT_AVAILABLE
* NOT_AVAILABLE -> PRESENT
*
* A state will never immediately transition back to itself.
*/
enum Status {
// Device physically unplugged
STATUS_NOT_PRESENT = CAMERA_DEVICE_STATUS_NOT_PRESENT,
// Device physically has been plugged in
// and the camera can be used exclusively
STATUS_PRESENT = CAMERA_DEVICE_STATUS_PRESENT,
// Device physically has been plugged in
// but it will not be connectable until enumeration is complete
STATUS_ENUMERATING = CAMERA_DEVICE_STATUS_ENUMERATING,
// Camera can be used exclusively
STATUS_AVAILABLE = STATUS_PRESENT, // deprecated, will be removed
// Camera is in use by another app and cannot be used exclusively
STATUS_NOT_AVAILABLE = 0x80000000,
// Use to initialize variables only
STATUS_UNKNOWN = 0xFFFFFFFF,
};
/**
* The torch mode status of a camera.
*
* Initial status will be transmitted with onTorchStatusChanged immediately
* after this listener is added to the service listener list.
*
* The enums should be set to values matching
* include/hardware/camera_common.h
*/
enum TorchStatus {
// The camera's torch mode has become not available to use via
// setTorchMode().
TORCH_STATUS_NOT_AVAILABLE = TORCH_MODE_STATUS_NOT_AVAILABLE,
// The camera's torch mode is off and available to be turned on via
// setTorchMode().
TORCH_STATUS_AVAILABLE_OFF = TORCH_MODE_STATUS_AVAILABLE_OFF,
// The camera's torch mode is on and available to be turned off via
// setTorchMode().
TORCH_STATUS_AVAILABLE_ON = TORCH_MODE_STATUS_AVAILABLE_ON,
// Use to initialize variables only
TORCH_STATUS_UNKNOWN = 0xFFFFFFFF,
};
DECLARE_META_INTERFACE(CameraServiceListener);
virtual void onStatusChanged(Status status, int32_t cameraId) = 0;
virtual void onTorchStatusChanged(TorchStatus status, const String16& cameraId) = 0;
};
// ----------------------------------------------------------------------------
class BnCameraServiceListener : public BnInterface<ICameraServiceListener>
{
public:
virtual status_t onTransact( uint32_t code,
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
};
}; // namespace android
#endif
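To illustrate how these callbacks are consumed, a minimal listener sketch follows; the class name and log tag are made up, and only the overridden signatures come from the header above. Registering the listener with the camera service happens through an addListener-style call that falls outside this excerpt.

// Illustrative only: log availability and torch transitions.
#define LOG_TAG "ListenerSketch"
#include <utils/Log.h>
#include <utils/String8.h>
#include <camera/ICameraServiceListener.h>

namespace android {

class StatusLoggingListener : public BnCameraServiceListener {
public:
    virtual void onStatusChanged(Status status, int32_t cameraId) {
        // PRESENT means the device can be opened exclusively again;
        // NOT_AVAILABLE means another client currently holds it.
        ALOGI("camera %d status -> 0x%x", cameraId, static_cast<unsigned>(status));
    }
    virtual void onTorchStatusChanged(TorchStatus status, const String16& cameraId) {
        ALOGI("torch on camera %s -> 0x%x", String8(cameraId).string(),
              static_cast<unsigned>(status));
    }
};

} // namespace android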

View file

@ -1,65 +0,0 @@
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_ICAMERASERVICEPROXY_H
#define ANDROID_HARDWARE_ICAMERASERVICEPROXY_H
#include <utils/RefBase.h>
#include <binder/IInterface.h>
#include <binder/Parcel.h>
namespace android {
/**
* Interface from native camera service to managed-side camera service proxy.
*
* Keep in sync with frameworks/base/core/java/android/hardware/ICameraServiceProxy.aidl
*
*/
class ICameraServiceProxy : public IInterface {
public:
enum {
PING_FOR_USER_UPDATE = IBinder::FIRST_CALL_TRANSACTION,
NOTIFY_CAMERA_STATE
};
enum CameraState {
CAMERA_STATE_OPEN,
CAMERA_STATE_ACTIVE,
CAMERA_STATE_IDLE,
CAMERA_STATE_CLOSED
};
DECLARE_META_INTERFACE(CameraServiceProxy);
virtual void pingForUserUpdate() = 0;
virtual void notifyCameraState(String16 cameraId, CameraState newCameraState) = 0;
};
class BnCameraServiceProxy: public BnInterface<ICameraServiceProxy>
{
public:
virtual status_t onTransact( uint32_t code,
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
};
}; // namespace android
#endif // ANDROID_HARDWARE_ICAMERASERVICEPROXY_H
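A short sketch of how the native side would drive this proxy. The service name "media.camera.proxy" is what system_server registers in stock AOSP and is assumed here; the camera id is illustrative.

// Illustrative only: report to the managed-side proxy that camera "0" opened.
#include <utils/String16.h>
#include <binder/IServiceManager.h>
#include <camera/ICameraServiceProxy.h>

using namespace android;

static void reportCameraOpened() {
    sp<IBinder> binder = defaultServiceManager()->getService(String16("media.camera.proxy"));
    if (binder == NULL) return;  // proxy not published (e.g. early in boot)
    sp<ICameraServiceProxy> proxy = interface_cast<ICameraServiceProxy>(binder);
    proxy->pingForUserUpdate();
    proxy->notifyCameraState(String16("0"), ICameraServiceProxy::CAMERA_STATE_OPEN);
}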

View file

@ -1,145 +0,0 @@
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef VENDOR_TAG_DESCRIPTOR_H
#include <utils/Vector.h>
#include <utils/KeyedVector.h>
#include <utils/String8.h>
#include <utils/RefBase.h>
#include <system/camera_vendor_tags.h>
#include <stdint.h>
namespace android {
class Parcel;
/**
* VendorTagDescriptor objects are parcelable containers for the vendor tag
* definitions provided, and are typically used to pass the vendor tag
* information enumerated by the HAL to clients of the camera service.
*/
class VendorTagDescriptor
: public LightRefBase<VendorTagDescriptor> {
public:
virtual ~VendorTagDescriptor();
/**
* The following 'get*' methods implement the corresponding
* functions defined in
* system/media/camera/include/system/camera_vendor_tags.h
*/
// Returns the number of vendor tags defined.
int getTagCount() const;
// Returns an array containing the id's of vendor tags defined.
void getTagArray(uint32_t* tagArray) const;
// Returns the section name string for a given vendor tag id.
const char* getSectionName(uint32_t tag) const;
// Returns the tag name string for a given vendor tag id.
const char* getTagName(uint32_t tag) const;
// Returns the tag type for a given vendor tag id.
int getTagType(uint32_t tag) const;
/**
* Write the VendorTagDescriptor object into the given parcel.
*
* Returns OK on success, or a negative error code.
*/
status_t writeToParcel(
/*out*/
Parcel* parcel) const;
/**
* Convenience method to get a vector containing all vendor tag
* sections, or an empty vector if none are defined.
*/
SortedVector<String8> getAllSectionNames() const;
/**
* Lookup the tag id for a given tag name and section.
*
* Returns OK on success, or a negative error code.
*/
status_t lookupTag(String8 name, String8 section, /*out*/uint32_t* tag) const;
/**
* Dump the currently configured vendor tags to a file descriptor.
*/
void dump(int fd, int verbosity, int indentation) const;
// Static methods:
/**
* Create a VendorTagDescriptor object from the given parcel.
*
* Returns OK on success, or a negative error code.
*/
static status_t createFromParcel(const Parcel* parcel,
/*out*/
sp<VendorTagDescriptor>& descriptor);
/**
* Create a VendorTagDescriptor object from the given vendor_tag_ops_t
* struct.
*
* Returns OK on success, or a negative error code.
*/
static status_t createDescriptorFromOps(const vendor_tag_ops_t* vOps,
/*out*/
sp<VendorTagDescriptor>& descriptor);
/**
* Sets the global vendor tag descriptor to use for this process.
* Camera metadata operations that access vendor tags will use the
* vendor tag definitions set this way.
*
* Returns OK on success, or a negative error code.
*/
static status_t setAsGlobalVendorTagDescriptor(const sp<VendorTagDescriptor>& desc);
/**
* Clears the global vendor tag descriptor used by this process.
*/
static void clearGlobalVendorTagDescriptor();
/**
* Returns the global vendor tag descriptor used by this process.
* This will contain NULL if no vendor tags are defined.
*/
static sp<VendorTagDescriptor> getGlobalVendorTagDescriptor();
protected:
VendorTagDescriptor();
KeyedVector<String8, KeyedVector<String8, uint32_t>*> mReverseMapping;
KeyedVector<uint32_t, String8> mTagToNameMap;
KeyedVector<uint32_t, uint32_t> mTagToSectionMap; // Value is offset in mSections
KeyedVector<uint32_t, int32_t> mTagToTypeMap;
SortedVector<String8> mSections;
// must be int32_t to be compatible with Parcel::writeInt32
int32_t mTagCount;
private:
vendor_tag_ops mVendorOps;
};
} /* namespace android */
#define VENDOR_TAG_DESCRIPTOR_H
#endif /* VENDOR_TAG_DESCRIPTOR_H */
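The usual flow, from the HAL's vendor_tag_ops_t to a process-wide descriptor, looks roughly like the sketch below; the section and tag names queried at the end are purely hypothetical.

// Illustrative only: install vendor tags for this process and resolve one by name.
#include <utils/Errors.h>
#include <utils/String8.h>
#include <system/camera_vendor_tags.h>
#include <camera/VendorTagDescriptor.h>

using namespace android;

static status_t installVendorTags(const vendor_tag_ops_t* vOps) {
    sp<VendorTagDescriptor> desc;
    status_t res = VendorTagDescriptor::createDescriptorFromOps(vOps, /*out*/ desc);
    if (res != OK) return res;

    // Camera metadata operations in this process will now see these tags.
    res = VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
    if (res != OK) return res;

    // Hypothetical tag/section names, to show lookupTag() only.
    uint32_t tag;
    return desc->lookupTag(String8("exampleTag"), String8("com.example.section"), /*out*/ &tag);
}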

View file

@ -1,43 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_PHOTOGRAPHY_CAPTUREREQUEST_H
#define ANDROID_HARDWARE_PHOTOGRAPHY_CAPTUREREQUEST_H
#include <utils/RefBase.h>
#include <utils/Vector.h>
#include <camera/CameraMetadata.h>
namespace android {
class Surface;
struct CaptureRequest : public virtual RefBase {
public:
CameraMetadata mMetadata;
Vector<sp<Surface> > mSurfaceList;
bool mIsReprocess;
/**
* Keep impl up-to-date with CaptureRequest.java in frameworks/base
*/
status_t readFromParcel(Parcel* parcel);
status_t writeToParcel(Parcel* parcel) const;
};
}; // namespace android
#endif
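A sketch of how a client fills in this struct and flattens it before handing it across Binder; the helper name and the single preview surface are assumptions.

// Illustrative only: bundle settings plus one output surface into a Parcel.
#include <binder/Parcel.h>
#include <gui/Surface.h>
#include <camera/CameraMetadata.h>
#include <camera/camera2/CaptureRequest.h>

using namespace android;

static status_t parcelRequest(const CameraMetadata& settings,
                              const sp<Surface>& previewSurface,
                              Parcel* out) {
    sp<CaptureRequest> request = new CaptureRequest();
    request->mMetadata = settings;            // copies the metadata buffer
    request->mSurfaceList.push(previewSurface);
    request->mIsReprocess = false;
    return request->writeToParcel(out);       // mirror of readFromParcel() on the far side
}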

View file

@ -1,86 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_PHOTOGRAPHY_CALLBACKS_H
#define ANDROID_HARDWARE_PHOTOGRAPHY_CALLBACKS_H
#include <utils/RefBase.h>
#include <binder/IInterface.h>
#include <binder/Parcel.h>
#include <binder/IMemory.h>
#include <utils/Timers.h>
#include <system/camera.h>
#include <camera/CaptureResult.h>
namespace android {
class CameraMetadata;
class ICameraDeviceCallbacks : public IInterface
{
/**
* Keep up-to-date with ICameraDeviceCallbacks.aidl in frameworks/base
*/
public:
DECLARE_META_INTERFACE(CameraDeviceCallbacks);
/**
* Error codes for CAMERA_MSG_ERROR
*/
enum CameraErrorCode {
ERROR_CAMERA_INVALID_ERROR = -1, // To indicate all invalid error codes
ERROR_CAMERA_DISCONNECTED = 0,
ERROR_CAMERA_DEVICE = 1,
ERROR_CAMERA_SERVICE = 2,
ERROR_CAMERA_REQUEST = 3,
ERROR_CAMERA_RESULT = 4,
ERROR_CAMERA_BUFFER = 5,
};
// One way
virtual void onDeviceError(CameraErrorCode errorCode,
const CaptureResultExtras& resultExtras) = 0;
// One way
virtual void onDeviceIdle() = 0;
// One way
virtual void onCaptureStarted(const CaptureResultExtras& resultExtras,
int64_t timestamp) = 0;
// One way
virtual void onResultReceived(const CameraMetadata& metadata,
const CaptureResultExtras& resultExtras) = 0;
// One way
virtual void onPrepared(int streamId) = 0;
};
// ----------------------------------------------------------------------------
class BnCameraDeviceCallbacks : public BnInterface<ICameraDeviceCallbacks>
{
public:
virtual status_t onTransact( uint32_t code,
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
};
}; // namespace android
#endif
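A minimal callbacks implementation, to show how the one-way notifications above are typically received; the class name and log tag are made up, and only the overridden signatures come from the header.

// Illustrative only: log the per-capture lifecycle events.
#define LOG_TAG "DeviceCallbacksSketch"
#include <utils/Log.h>
#include <camera/CameraMetadata.h>
#include <camera/camera2/ICameraDeviceCallbacks.h>

namespace android {

class LoggingDeviceCallbacks : public BnCameraDeviceCallbacks {
public:
    virtual void onDeviceError(CameraErrorCode errorCode,
                               const CaptureResultExtras& /*resultExtras*/) {
        ALOGE("device error %d", errorCode);
    }
    virtual void onDeviceIdle() { ALOGI("device idle"); }
    virtual void onCaptureStarted(const CaptureResultExtras& /*resultExtras*/,
                                  int64_t timestamp) {
        ALOGI("capture started at %lld", (long long) timestamp);
    }
    virtual void onResultReceived(const CameraMetadata& /*metadata*/,
                                  const CaptureResultExtras& /*resultExtras*/) {
        ALOGI("result received");
    }
    virtual void onPrepared(int streamId) { ALOGI("stream %d prepared", streamId); }
};

} // namespace android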

View file

@ -1,167 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_PHOTOGRAPHY_ICAMERADEVICEUSER_H
#define ANDROID_HARDWARE_PHOTOGRAPHY_ICAMERADEVICEUSER_H
#include <binder/IInterface.h>
#include <binder/Parcel.h>
#include <utils/List.h>
struct camera_metadata;
namespace android {
class ICameraDeviceUserClient;
class IGraphicBufferProducer;
class CaptureRequest;
class CameraMetadata;
class OutputConfiguration;
enum {
NO_IN_FLIGHT_REPEATING_FRAMES = -1,
};
class ICameraDeviceUser : public IInterface
{
/**
* Keep up-to-date with ICameraDeviceUser.aidl in frameworks/base
*/
public:
DECLARE_META_INTERFACE(CameraDeviceUser);
virtual void disconnect() = 0;
/**
* Request Handling
**/
/**
* For streaming requests, output lastFrameNumber is the last frame number
* of the previous repeating request.
* For non-streaming requests, output lastFrameNumber is the expected last
* frame number of the current request.
*/
virtual int submitRequest(sp<CaptureRequest> request,
bool streaming = false,
/*out*/
int64_t* lastFrameNumber = NULL) = 0;
/**
* For streaming requests, output lastFrameNumber is the last frame number
* of the previous repeating request.
* For non-streaming requests, output lastFrameNumber is the expected last
* frame number of the current request.
*/
virtual int submitRequestList(List<sp<CaptureRequest> > requestList,
bool streaming = false,
/*out*/
int64_t* lastFrameNumber = NULL) = 0;
/**
* Output lastFrameNumber is the last frame number of the previous repeating request.
*/
virtual status_t cancelRequest(int requestId,
/*out*/
int64_t* lastFrameNumber = NULL) = 0;
/**
* Begin the device configuration.
*
* <p>
* beginConfigure must be called before any call to deleteStream, createStream,
* or endConfigure. It is not valid to call this when the device is not idle.
* <p>
*/
virtual status_t beginConfigure() = 0;
/**
* End the device configuration.
*
* <p>
* endConfigure must be called after stream configuration is complete (i.e. after
* a call to beginConfigure and subsequent createStream/deleteStream calls). This
* must be called before any requests can be submitted.
* <p>
*/
virtual status_t endConfigure(bool isConstrainedHighSpeed = false) = 0;
virtual status_t deleteStream(int streamId) = 0;
virtual status_t createStream(const OutputConfiguration& outputConfiguration) = 0;
/**
* Create an input stream of width, height, and format (one of
* HAL_PIXEL_FORMAT_*)
*
* Returns the new stream ID when the return value is non-negative, or a
* negative status_t error code on failure.
*/
virtual status_t createInputStream(int width, int height, int format) = 0;
// get the buffer producer of the input stream
virtual status_t getInputBufferProducer(
sp<IGraphicBufferProducer> *producer) = 0;
// Create a request object from a template.
virtual status_t createDefaultRequest(int templateId,
/*out*/
CameraMetadata* request) = 0;
// Get static camera metadata
virtual status_t getCameraInfo(/*out*/
CameraMetadata* info) = 0;
// Wait until all the submitted requests have finished processing
virtual status_t waitUntilIdle() = 0;
/**
* Flush all pending and in-progress work as quickly as possible.
* Output lastFrameNumber is the last frame number of the previous repeating request.
*/
virtual status_t flush(/*out*/
int64_t* lastFrameNumber = NULL) = 0;
/**
* Preallocate buffers for a given output stream asynchronously.
*/
virtual status_t prepare(int streamId) = 0;
/**
* Preallocate up to maxCount buffers for a given output stream asynchronously.
*/
virtual status_t prepare2(int maxCount, int streamId) = 0;
/**
* Free all unused buffers for a given output stream.
*/
virtual status_t tearDown(int streamId) = 0;
};
// ----------------------------------------------------------------------------
class BnCameraDeviceUser: public BnInterface<ICameraDeviceUser>
{
public:
virtual status_t onTransact( uint32_t code,
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
};
}; // namespace android
#endif
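Putting the calls above together, a preview-style flow looks roughly like the sketch below. The ICameraDeviceUser handle is assumed to come from the camera service's connectDevice path (not part of this excerpt), template id 1 is assumed to be the preview template as in camera3.h, and createStream()/submitRequest() are treated as returning a non-negative id on success.

// Illustrative only: configure one preview stream and start a repeating request.
#include <utils/Errors.h>
#include <gui/IGraphicBufferProducer.h>
#include <gui/Surface.h>
#include <camera/CameraMetadata.h>
#include <camera/camera2/CaptureRequest.h>
#include <camera/camera2/OutputConfiguration.h>
#include <camera/camera2/ICameraDeviceUser.h>

using namespace android;

static status_t startPreview(const sp<ICameraDeviceUser>& device,
                             const sp<Surface>& previewSurface) {
    sp<IGraphicBufferProducer> gbp = previewSurface->getIGraphicBufferProducer();
    OutputConfiguration output(gbp, /*rotation*/ 0);

    status_t res = device->beginConfigure();
    if (res != OK) return res;
    res = device->createStream(output);        // non-negative stream id on success
    if (res < 0) return res;
    res = device->endConfigure();
    if (res != OK) return res;

    CameraMetadata previewSettings;
    res = device->createDefaultRequest(/*templateId (preview, assumed)*/ 1,
                                       /*out*/ &previewSettings);
    if (res != OK) return res;

    sp<CaptureRequest> request = new CaptureRequest();
    request->mMetadata = previewSettings;
    request->mSurfaceList.push(previewSurface);
    request->mIsReprocess = false;

    int64_t lastFrameNumber = -1;
    int requestId = device->submitRequest(request, /*streaming*/ true,
                                          /*out*/ &lastFrameNumber);
    return (requestId >= 0) ? OK : requestId;
}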

View file

@ -1,53 +0,0 @@
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_CAMERA2_OUTPUTCONFIGURATION_H
#define ANDROID_HARDWARE_CAMERA2_OUTPUTCONFIGURATION_H
#include <utils/RefBase.h>
#include <gui/IGraphicBufferProducer.h>
namespace android {
class Surface;
class OutputConfiguration : public virtual RefBase {
public:
static const int INVALID_ROTATION;
sp<IGraphicBufferProducer> getGraphicBufferProducer() const;
int getRotation() const;
/**
* Keep impl up-to-date with OutputConfiguration.java in frameworks/base
*/
status_t writeToParcel(Parcel& parcel) const;
// getGraphicBufferProducer will be NULL if error occurred
// getRotation will be INVALID_ROTATION if error occurred
OutputConfiguration(const Parcel& parcel);
OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation);
private:
sp<IGraphicBufferProducer> mGbp;
int mRotation;
// helper function
static String16 readMaybeEmptyString16(const Parcel& parcel);
};
}; // namespace android
#endif
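A small sketch of the parcel round trip this class exists for; the function name is illustrative, and rotation 0 is assumed to be acceptable to the service.

// Illustrative only: flatten an OutputConfiguration and rebuild it from the Parcel.
#include <binder/Parcel.h>
#include <gui/IGraphicBufferProducer.h>
#include <gui/Surface.h>
#include <camera/camera2/OutputConfiguration.h>

using namespace android;

static bool roundTripOutput(const sp<Surface>& surface) {
    sp<IGraphicBufferProducer> gbp = surface->getIGraphicBufferProducer();
    OutputConfiguration sent(gbp, /*rotation*/ 0);

    Parcel parcel;
    sent.writeToParcel(parcel);       // note: takes a Parcel&, not a Parcel*

    parcel.setDataPosition(0);        // rewind before unparceling
    OutputConfiguration received(parcel);
    // On unparceling failure the rotation is INVALID_ROTATION and the
    // producer is NULL, so check both before using the result.
    return received.getRotation() != OutputConfiguration::INVALID_ROTATION
            && received.getGraphicBufferProducer() != NULL;
}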

View file

@ -2,7 +2,6 @@
PRODUCT_PACKAGES += \
camera.msm8937 \
libshims_camera \
libshim_ims_camera \
Snap
# Properties

View file

@ -38,7 +38,7 @@ on early-init
chmod 0755 /sys/kernel/debug
on init
export LD_SHIM_LIBS "/system/bin/mm-qcamera-daemon|libshims_camera.so:/system/vendor/lib64/lib-imscamera.so|libshim_ims_camera.so"
export LD_SHIM_LIBS "/system/bin/mm-qcamera-daemon|libshims_camera.so"
# Set permissions for persist partition
mkdir /persist 0771 root system