-rw-r--r--  libs/binder/Android.bp                            |   4
-rw-r--r--  libs/binder/RpcServer.cpp                         |  21
-rw-r--r--  libs/binder/RpcSession.cpp                        | 143
-rw-r--r--  libs/binder/RpcState.cpp                          |  16
-rw-r--r--  libs/binder/RpcWireFormat.h                       |   4
-rw-r--r--  libs/binder/include/binder/RpcSession.h           |  10
-rw-r--r--  libs/binder/tests/binderRpcTest.cpp               | 128
-rw-r--r--  libs/nativewindow/AHardwareBuffer.cpp             |   4
-rw-r--r--  libs/nativewindow/include/vndk/hardware_buffer.h  |  14
9 files changed, 210 insertions, 134 deletions
diff --git a/libs/binder/Android.bp b/libs/binder/Android.bp
index 928f772e1d..76249384a0 100644
--- a/libs/binder/Android.bp
+++ b/libs/binder/Android.bp
@@ -192,7 +192,7 @@ cc_library {
header_libs: [
"libbinder_headers",
- "libandroid_runtime_threads_headers",
+ "libandroid_runtime_vm_headers",
],
export_header_lib_headers: [
@@ -288,7 +288,7 @@ cc_library {
// Do not expand the visibility.
visibility: [
"//packages/modules/Virtualization/authfs:__subpackages__",
- "//packages/modules/Virtualization/compos",
+ "//packages/modules/Virtualization/compos:__subpackages__",
"//packages/modules/Virtualization/microdroid",
],
}
diff --git a/libs/binder/RpcServer.cpp b/libs/binder/RpcServer.cpp
index 2a87ae4298..c6cf2c5ee8 100644
--- a/libs/binder/RpcServer.cpp
+++ b/libs/binder/RpcServer.cpp
@@ -16,6 +16,7 @@
#define LOG_TAG "RpcServer"
+#include <poll.h>
#include <sys/socket.h>
#include <sys/un.h>
@@ -152,7 +153,7 @@ void RpcServer::join() {
}
status_t status;
- while ((status = mShutdownTrigger->triggerablePollRead(mServer)) == OK) {
+ while ((status = mShutdownTrigger->triggerablePoll(mServer, POLLIN)) == OK) {
unique_fd clientFd(TEMP_FAILURE_RETRY(
accept4(mServer.get(), nullptr, nullptr /*length*/, SOCK_CLOEXEC)));
@@ -182,7 +183,7 @@ void RpcServer::join() {
bool RpcServer::shutdown() {
std::unique_lock<std::mutex> _l(mLock);
if (mShutdownTrigger == nullptr) {
- LOG_RPC_DETAIL("Cannot shutdown. No shutdown trigger installed.");
+ LOG_RPC_DETAIL("Cannot shutdown. No shutdown trigger installed (already shutdown?)");
return false;
}
@@ -212,6 +213,8 @@ bool RpcServer::shutdown() {
mJoinThread.reset();
}
+ LOG_RPC_DETAIL("Finished waiting on shutdown.");
+
mShutdownTrigger = nullptr;
return true;
}
@@ -248,7 +251,7 @@ void RpcServer::establishConnection(sp<RpcServer>&& server, base::unique_fd clie
statusToString(status).c_str());
// still need to cleanup before we can return
}
- bool reverse = header.options & RPC_CONNECTION_OPTION_REVERSE;
+ bool incoming = header.options & RPC_CONNECTION_OPTION_INCOMING;
std::thread thisThread;
sp<RpcSession> session;
@@ -273,8 +276,8 @@ void RpcServer::establishConnection(sp<RpcServer>&& server, base::unique_fd clie
RpcAddress sessionId = RpcAddress::fromRawEmbedded(&header.sessionId);
if (sessionId.isZero()) {
- if (reverse) {
- ALOGE("Cannot create a new session with a reverse connection, would leak");
+ if (incoming) {
+ ALOGE("Cannot create a new session with an incoming connection, would leak");
return;
}
@@ -312,7 +315,7 @@ void RpcServer::establishConnection(sp<RpcServer>&& server, base::unique_fd clie
session = it->second;
}
- if (reverse) {
+ if (incoming) {
LOG_ALWAYS_FATAL_IF(!session->addOutgoingConnection(std::move(clientFd), true),
"server state must already be initialized");
return;
@@ -347,7 +350,11 @@ bool RpcServer::setupSocketServer(const RpcSocketAddress& addr) {
return false;
}
- if (0 != TEMP_FAILURE_RETRY(listen(serverFd.get(), 1 /*backlog*/))) {
+ // Right now, we create all threads at once, making accept4 slow. To avoid hanging the client,
+ // the backlog is increased to a large number.
+ // TODO(b/189955605): Once we create threads dynamically & lazily, the backlog can be reduced
+ // to 1.
+ if (0 != TEMP_FAILURE_RETRY(listen(serverFd.get(), 50 /*backlog*/))) {
int savedErrno = errno;
ALOGE("Could not listen socket at %s: %s", addr.toString().c_str(), strerror(savedErrno));
return false;
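
Note: the join() loop above now blocks in mShutdownTrigger->triggerablePoll(mServer, POLLIN) instead of going straight into accept4(), so shutdown() can wake it without signals. Below is a minimal standalone sketch of that trigger-plus-poll pattern; the function name and the raw pipe fds are assumptions for illustration, not the libbinder FdTrigger implementation:

#include <poll.h>
#include <cerrno>

// Poll 'fd' for 'event' while also watching the read end of a pipe. Closing the
// pipe's write end raises POLLHUP on the read end, which wakes the poll and lets
// a blocked accept/read/write loop bail out cleanly.
// Returns 0 when 'event' is ready, -ECANCELED when the trigger fired, and a
// negative errno (or -EPIPE for a peer hang-up) otherwise.
int triggerablePollSketch(int fd, int triggerReadFd, short event) {
    pollfd pfd[2];
    pfd[0] = {fd, static_cast<short>(event | POLLHUP), 0};
    pfd[1] = {triggerReadFd, POLLHUP, 0};

    int ret;
    do {
        ret = poll(pfd, 2, -1 /*no timeout*/);
    } while (ret < 0 && errno == EINTR);
    if (ret < 0) return -errno;

    if (pfd[1].revents & POLLHUP) return -ECANCELED; // shutdown trigger fired
    return (pfd[0].revents & event) ? 0 : -EPIPE;    // event ready vs. peer hung up
}

A pipe() pair provides the trigger: hand the read end to the poller and close the write end to interrupt it. In the RpcSession changes below, the same mechanism is reused for both reads (POLLIN) and the new interruptible writes (POLLOUT).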
diff --git a/libs/binder/RpcSession.cpp b/libs/binder/RpcSession.cpp
index bdf1bbef02..c01a03d4c6 100644
--- a/libs/binder/RpcSession.cpp
+++ b/libs/binder/RpcSession.cpp
@@ -27,10 +27,11 @@
#include <string_view>
#include <android-base/macros.h>
-#include <android_runtime/threads.h>
+#include <android_runtime/vm.h>
#include <binder/Parcel.h>
#include <binder/RpcServer.h>
#include <binder/Stability.h>
+#include <jni.h>
#include <utils/String8.h>
#include "RpcSocketAddress.h"
@@ -178,9 +179,11 @@ bool RpcSession::FdTrigger::isTriggered() {
return mWrite == -1;
}
-status_t RpcSession::FdTrigger::triggerablePollRead(base::borrowed_fd fd) {
+status_t RpcSession::FdTrigger::triggerablePoll(base::borrowed_fd fd, int16_t event) {
while (true) {
- pollfd pfd[]{{.fd = fd.get(), .events = POLLIN | POLLHUP, .revents = 0},
+ pollfd pfd[]{{.fd = fd.get(),
+ .events = static_cast<int16_t>(event | POLLHUP),
+ .revents = 0},
{.fd = mRead.get(), .events = POLLHUP, .revents = 0}};
int ret = TEMP_FAILURE_RETRY(poll(pfd, arraysize(pfd), -1));
if (ret < 0) {
@@ -192,10 +195,31 @@ status_t RpcSession::FdTrigger::triggerablePollRead(base::borrowed_fd fd) {
if (pfd[1].revents & POLLHUP) {
return -ECANCELED;
}
- return pfd[0].revents & POLLIN ? OK : DEAD_OBJECT;
+ return pfd[0].revents & event ? OK : DEAD_OBJECT;
}
}
+status_t RpcSession::FdTrigger::interruptableWriteFully(base::borrowed_fd fd, const void* data,
+ size_t size) {
+ const uint8_t* buffer = reinterpret_cast<const uint8_t*>(data);
+ const uint8_t* end = buffer + size;
+
+ MAYBE_WAIT_IN_FLAKE_MODE;
+
+ status_t status;
+ while ((status = triggerablePoll(fd, POLLOUT)) == OK) {
+ ssize_t writeSize = TEMP_FAILURE_RETRY(send(fd.get(), buffer, end - buffer, MSG_NOSIGNAL));
+ if (writeSize == 0) return DEAD_OBJECT;
+
+ if (writeSize < 0) {
+ return -errno;
+ }
+ buffer += writeSize;
+ if (buffer == end) return OK;
+ }
+ return status;
+}
+
status_t RpcSession::FdTrigger::interruptableReadFully(base::borrowed_fd fd, void* data,
size_t size) {
uint8_t* buffer = reinterpret_cast<uint8_t*>(data);
@@ -204,7 +228,7 @@ status_t RpcSession::FdTrigger::interruptableReadFully(base::borrowed_fd fd, voi
MAYBE_WAIT_IN_FLAKE_MODE;
status_t status;
- while ((status = triggerablePollRead(fd)) == OK) {
+ while ((status = triggerablePoll(fd, POLLIN)) == OK) {
ssize_t readSize = TEMP_FAILURE_RETRY(recv(fd.get(), buffer, end - buffer, MSG_NOSIGNAL));
if (readSize == 0) return DEAD_OBJECT; // EOF
@@ -285,28 +309,34 @@ public:
JavaThreadAttacher() {
// Use dlsym to find androidJavaAttachThread because libandroid_runtime is loaded after
// libbinder.
- static auto attachFn = reinterpret_cast<decltype(&androidJavaAttachThread)>(
- dlsym(RTLD_DEFAULT, "androidJavaAttachThread"));
- if (attachFn == nullptr) return;
-
- char buf[16];
- const char* threadName = "UnknownRpcSessionThread"; // default thread name
- if (0 == pthread_getname_np(pthread_self(), buf, sizeof(buf))) {
- threadName = buf;
+ auto vm = getJavaVM();
+ if (vm == nullptr) return;
+
+ char threadName[16];
+ if (0 != pthread_getname_np(pthread_self(), threadName, sizeof(threadName))) {
+ constexpr const char* defaultThreadName = "UnknownRpcSessionThread";
+ memcpy(threadName, defaultThreadName,
+ std::min<size_t>(sizeof(threadName), strlen(defaultThreadName) + 1));
}
LOG_RPC_DETAIL("Attaching current thread %s to JVM", threadName);
- LOG_ALWAYS_FATAL_IF(!attachFn(threadName), "Cannot attach thread %s to JVM", threadName);
+ JavaVMAttachArgs args;
+ args.version = JNI_VERSION_1_2;
+ args.name = threadName;
+ args.group = nullptr;
+ JNIEnv* env;
+
+ LOG_ALWAYS_FATAL_IF(vm->AttachCurrentThread(&env, &args) != JNI_OK,
+ "Cannot attach thread %s to JVM", threadName);
mAttached = true;
}
~JavaThreadAttacher() {
if (!mAttached) return;
- static auto detachFn = reinterpret_cast<decltype(&androidJavaDetachThread)>(
- dlsym(RTLD_DEFAULT, "androidJavaDetachThread"));
- LOG_ALWAYS_FATAL_IF(detachFn == nullptr,
- "androidJavaAttachThread exists but androidJavaDetachThread doesn't");
+ auto vm = getJavaVM();
+ LOG_ALWAYS_FATAL_IF(vm == nullptr,
+ "Unable to detach thread. No JavaVM, but it was present before!");
LOG_RPC_DETAIL("Detaching current thread from JVM");
- if (detachFn()) {
+ if (vm->DetachCurrentThread() != JNI_OK) {
mAttached = false;
} else {
ALOGW("Unable to detach current thread from JVM");
@@ -316,6 +346,13 @@ public:
private:
DISALLOW_COPY_AND_ASSIGN(JavaThreadAttacher);
bool mAttached = false;
+
+ static JavaVM* getJavaVM() {
+ static auto fn = reinterpret_cast<decltype(&AndroidRuntimeGetJavaVM)>(
+ dlsym(RTLD_DEFAULT, "AndroidRuntimeGetJavaVM"));
+ if (fn == nullptr) return nullptr;
+ return fn();
+ }
};
} // namespace
@@ -376,7 +413,7 @@ bool RpcSession::setupSocketClient(const RpcSocketAddress& addr) {
mOutgoingConnections.size());
}
- if (!setupOneSocketConnection(addr, RpcAddress::zero(), false /*reverse*/)) return false;
+ if (!setupOneSocketConnection(addr, RpcAddress::zero(), false /*incoming*/)) return false;
// TODO(b/189955605): we should add additional sessions dynamically
// instead of all at once.
@@ -397,7 +434,7 @@ bool RpcSession::setupSocketClient(const RpcSocketAddress& addr) {
// we've already setup one client
for (size_t i = 0; i + 1 < numThreadsAvailable; i++) {
// TODO(b/189955605): shutdown existing connections?
- if (!setupOneSocketConnection(addr, mId.value(), false /*reverse*/)) return false;
+ if (!setupOneSocketConnection(addr, mId.value(), false /*incoming*/)) return false;
}
// TODO(b/189955605): we should add additional sessions dynamically
@@ -407,14 +444,14 @@ bool RpcSession::setupSocketClient(const RpcSocketAddress& addr) {
// any requests at all.
for (size_t i = 0; i < mMaxThreads; i++) {
- if (!setupOneSocketConnection(addr, mId.value(), true /*reverse*/)) return false;
+ if (!setupOneSocketConnection(addr, mId.value(), true /*incoming*/)) return false;
}
return true;
}
bool RpcSession::setupOneSocketConnection(const RpcSocketAddress& addr, const RpcAddress& id,
- bool reverse) {
+ bool incoming) {
for (size_t tries = 0; tries < 5; tries++) {
if (tries > 0) usleep(10000);
@@ -441,7 +478,7 @@ bool RpcSession::setupOneSocketConnection(const RpcSocketAddress& addr, const Rp
RpcConnectionHeader header{.options = 0};
memcpy(&header.sessionId, &id.viewRawEmbedded(), sizeof(RpcWireAddress));
- if (reverse) header.options |= RPC_CONNECTION_OPTION_REVERSE;
+ if (incoming) header.options |= RPC_CONNECTION_OPTION_INCOMING;
if (sizeof(header) != TEMP_FAILURE_RETRY(write(serverFd.get(), &header, sizeof(header)))) {
int savedErrno = errno;
@@ -452,33 +489,8 @@ bool RpcSession::setupOneSocketConnection(const RpcSocketAddress& addr, const Rp
LOG_RPC_DETAIL("Socket at %s client with fd %d", addr.toString().c_str(), serverFd.get());
- if (reverse) {
- std::mutex mutex;
- std::condition_variable joinCv;
- std::unique_lock<std::mutex> lock(mutex);
- std::thread thread;
- sp<RpcSession> thiz = sp<RpcSession>::fromExisting(this);
- bool ownershipTransferred = false;
- thread = std::thread([&]() {
- std::unique_lock<std::mutex> threadLock(mutex);
- unique_fd fd = std::move(serverFd);
- // NOLINTNEXTLINE(performance-unnecessary-copy-initialization)
- sp<RpcSession> session = thiz;
- session->preJoinThreadOwnership(std::move(thread));
-
- // only continue once we have a response or the connection fails
- auto setupResult = session->preJoinSetup(std::move(fd));
-
- ownershipTransferred = true;
- threadLock.unlock();
- joinCv.notify_one();
- // do not use & vars below
-
- RpcSession::join(std::move(session), std::move(setupResult));
- });
- joinCv.wait(lock, [&] { return ownershipTransferred; });
- LOG_ALWAYS_FATAL_IF(!ownershipTransferred);
- return true;
+ if (incoming) {
+ return addIncomingConnection(std::move(serverFd));
} else {
return addOutgoingConnection(std::move(serverFd), true);
}
@@ -488,6 +500,35 @@ bool RpcSession::setupOneSocketConnection(const RpcSocketAddress& addr, const Rp
return false;
}
+bool RpcSession::addIncomingConnection(unique_fd fd) {
+ std::mutex mutex;
+ std::condition_variable joinCv;
+ std::unique_lock<std::mutex> lock(mutex);
+ std::thread thread;
+ sp<RpcSession> thiz = sp<RpcSession>::fromExisting(this);
+ bool ownershipTransferred = false;
+ thread = std::thread([&]() {
+ std::unique_lock<std::mutex> threadLock(mutex);
+ unique_fd movedFd = std::move(fd);
+ // NOLINTNEXTLINE(performance-unnecessary-copy-initialization)
+ sp<RpcSession> session = thiz;
+ session->preJoinThreadOwnership(std::move(thread));
+
+ // only continue once we have a response or the connection fails
+ auto setupResult = session->preJoinSetup(std::move(movedFd));
+
+ ownershipTransferred = true;
+ threadLock.unlock();
+ joinCv.notify_one();
+ // do not use & vars below
+
+ RpcSession::join(std::move(session), std::move(setupResult));
+ });
+ joinCv.wait(lock, [&] { return ownershipTransferred; });
+ LOG_ALWAYS_FATAL_IF(!ownershipTransferred);
+ return true;
+}
+
bool RpcSession::addOutgoingConnection(unique_fd fd, bool init) {
sp<RpcConnection> connection = sp<RpcConnection>::make();
{
diff --git a/libs/binder/RpcState.cpp b/libs/binder/RpcState.cpp
index 5881703752..332c75f9e7 100644
--- a/libs/binder/RpcState.cpp
+++ b/libs/binder/RpcState.cpp
@@ -275,23 +275,19 @@ status_t RpcState::rpcSend(const sp<RpcSession::RpcConnection>& connection,
LOG_RPC_DETAIL("Sending %s on fd %d: %s", what, connection->fd.get(),
hexString(data, size).c_str());
- MAYBE_WAIT_IN_FLAKE_MODE;
-
if (size > std::numeric_limits<ssize_t>::max()) {
ALOGE("Cannot send %s at size %zu (too big)", what, size);
(void)session->shutdownAndWait(false);
return BAD_VALUE;
}
- ssize_t sent = TEMP_FAILURE_RETRY(send(connection->fd.get(), data, size, MSG_NOSIGNAL));
-
- if (sent < 0 || sent != static_cast<ssize_t>(size)) {
- int savedErrno = errno;
- LOG_RPC_DETAIL("Failed to send %s (sent %zd of %zu bytes) on fd %d, error: %s", what, sent,
- size, connection->fd.get(), strerror(savedErrno));
-
+ if (status_t status = session->mShutdownTrigger->interruptableWriteFully(connection->fd.get(),
+ data, size);
+ status != OK) {
+ LOG_RPC_DETAIL("Failed to write %s (%zu bytes) on fd %d, error: %s", what, size,
+ connection->fd.get(), statusToString(status).c_str());
(void)session->shutdownAndWait(false);
- return -savedErrno;
+ return status;
}
return OK;
diff --git a/libs/binder/RpcWireFormat.h b/libs/binder/RpcWireFormat.h
index 2016483138..2a44c7af04 100644
--- a/libs/binder/RpcWireFormat.h
+++ b/libs/binder/RpcWireFormat.h
@@ -21,7 +21,7 @@ namespace android {
#pragma clang diagnostic error "-Wpadded"
enum : uint8_t {
- RPC_CONNECTION_OPTION_REVERSE = 0x1,
+ RPC_CONNECTION_OPTION_INCOMING = 0x1, // default is outgoing
};
constexpr uint64_t RPC_WIRE_ADDRESS_OPTION_CREATED = 1 << 0; // distinguish from '0' address
@@ -47,7 +47,7 @@ struct RpcConnectionHeader {
/**
* Whenever a client connection is setup, this is sent as the initial
* transaction. The main use of this is in order to control the timing for when
- * a reverse connection is setup.
+ * an incoming connection is setup.
*/
struct RpcOutgoingConnectionInit {
char msg[4];
diff --git a/libs/binder/include/binder/RpcSession.h b/libs/binder/include/binder/RpcSession.h
index 69c2a1a956..fdca2a987c 100644
--- a/libs/binder/include/binder/RpcSession.h
+++ b/libs/binder/include/binder/RpcSession.h
@@ -152,20 +152,23 @@ private:
/**
* Poll for a read event.
*
+ * event - for pollfd
+ *
* Return:
* true - time to read!
* false - trigger happened
*/
- status_t triggerablePollRead(base::borrowed_fd fd);
+ status_t triggerablePoll(base::borrowed_fd fd, int16_t event);
/**
- * Read, but allow the read to be interrupted by this trigger.
+ * Read (or write), but allow to be interrupted by this trigger.
*
* Return:
- * true - read succeeded at 'size'
+ * true - succeeded in completely processing 'size'
* false - interrupted (failure or trigger)
*/
status_t interruptableReadFully(base::borrowed_fd fd, void* data, size_t size);
+ status_t interruptableWriteFully(base::borrowed_fd fd, const void* data, size_t size);
private:
base::unique_fd mWrite;
@@ -223,6 +226,7 @@ private:
[[nodiscard]] bool setupSocketClient(const RpcSocketAddress& address);
[[nodiscard]] bool setupOneSocketConnection(const RpcSocketAddress& address,
const RpcAddress& sessionId, bool server);
+ [[nodiscard]] bool addIncomingConnection(base::unique_fd fd);
[[nodiscard]] bool addOutgoingConnection(base::unique_fd fd, bool init);
[[nodiscard]] bool setForServer(const wp<RpcServer>& server,
const wp<RpcSession::EventListener>& eventListener,
diff --git a/libs/binder/tests/binderRpcTest.cpp b/libs/binder/tests/binderRpcTest.cpp
index 29bde340a1..40ebd9c8bc 100644
--- a/libs/binder/tests/binderRpcTest.cpp
+++ b/libs/binder/tests/binderRpcTest.cpp
@@ -127,11 +127,6 @@ public:
out->clear();
for (auto session : spServer->listSessions()) {
size_t count = session->state()->countBinders();
- if (count != 1) {
- // this is called when there is only one binder held remaining,
- // so to aid debugging
- session->state()->dump();
- }
out->push_back(count);
}
return Status::ok();
@@ -360,7 +355,11 @@ struct BinderRpcTestProcessSession {
EXPECT_EQ(remoteCount, 1);
}
- EXPECT_OK(rootIface->scheduleShutdown());
+ // even though it is on another thread, shutdown races with
+ // the transaction reply being written
+ if (auto status = rootIface->scheduleShutdown(); !status.isOk()) {
+ EXPECT_EQ(DEAD_OBJECT, status.transactionError()) << status;
+ }
}
rootIface = nullptr;
@@ -389,12 +388,17 @@ static inline std::string PrintSocketType(const testing::TestParamInfo<SocketTyp
class BinderRpc : public ::testing::TestWithParam<SocketType> {
public:
+ struct Options {
+ size_t numThreads = 1;
+ size_t numSessions = 1;
+ size_t numIncomingConnections = 0;
+ };
+
// This creates a new process serving an interface on a certain number of
// threads.
ProcessSession createRpcTestSocketServerProcess(
- size_t numThreads, size_t numSessions, size_t numReverseConnections,
- const std::function<void(const sp<RpcServer>&)>& configure) {
- CHECK_GE(numSessions, 1) << "Must have at least one session to a server";
+ const Options& options, const std::function<void(const sp<RpcServer>&)>& configure) {
+ CHECK_GE(options.numSessions, 1) << "Must have at least one session to a server";
SocketType socketType = GetParam();
@@ -407,7 +411,7 @@ public:
sp<RpcServer> server = RpcServer::make();
server->iUnderstandThisCodeIsExperimentalAndIWillNotUseItInProduction();
- server->setMaxThreads(numThreads);
+ server->setMaxThreads(options.numThreads);
unsigned int outPort = 0;
@@ -445,9 +449,9 @@ public:
CHECK_NE(0, outPort);
}
- for (size_t i = 0; i < numSessions; i++) {
+ for (size_t i = 0; i < options.numSessions; i++) {
sp<RpcSession> session = RpcSession::make();
- session->setMaxThreads(numReverseConnections);
+ session->setMaxThreads(options.numIncomingConnections);
switch (socketType) {
case SocketType::UNIX:
@@ -469,12 +473,9 @@ public:
return ret;
}
- BinderRpcTestProcessSession createRpcTestSocketServerProcess(size_t numThreads,
- size_t numSessions = 1,
- size_t numReverseConnections = 0) {
+ BinderRpcTestProcessSession createRpcTestSocketServerProcess(const Options& options) {
BinderRpcTestProcessSession ret{
- .proc = createRpcTestSocketServerProcess(numThreads, numSessions,
- numReverseConnections,
+ .proc = createRpcTestSocketServerProcess(options,
[&](const sp<RpcServer>& server) {
sp<MyBinderRpcTest> service =
new MyBinderRpcTest;
@@ -491,19 +492,19 @@ public:
};
TEST_P(BinderRpc, Ping) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
ASSERT_NE(proc.rootBinder, nullptr);
EXPECT_EQ(OK, proc.rootBinder->pingBinder());
}
TEST_P(BinderRpc, GetInterfaceDescriptor) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
ASSERT_NE(proc.rootBinder, nullptr);
EXPECT_EQ(IBinderRpcTest::descriptor, proc.rootBinder->getInterfaceDescriptor());
}
TEST_P(BinderRpc, MultipleSessions) {
- auto proc = createRpcTestSocketServerProcess(1 /*threads*/, 5 /*sessions*/);
+ auto proc = createRpcTestSocketServerProcess({.numThreads = 1, .numSessions = 5});
for (auto session : proc.proc.sessions) {
ASSERT_NE(nullptr, session.root);
EXPECT_EQ(OK, session.root->pingBinder());
@@ -511,14 +512,14 @@ TEST_P(BinderRpc, MultipleSessions) {
}
TEST_P(BinderRpc, TransactionsMustBeMarkedRpc) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
Parcel data;
Parcel reply;
EXPECT_EQ(BAD_TYPE, proc.rootBinder->transact(IBinder::PING_TRANSACTION, data, &reply, 0));
}
TEST_P(BinderRpc, AppendSeparateFormats) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
Parcel p1;
p1.markForBinder(proc.rootBinder);
@@ -531,7 +532,7 @@ TEST_P(BinderRpc, AppendSeparateFormats) {
}
TEST_P(BinderRpc, UnknownTransaction) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
Parcel data;
data.markForBinder(proc.rootBinder);
Parcel reply;
@@ -539,19 +540,19 @@ TEST_P(BinderRpc, UnknownTransaction) {
}
TEST_P(BinderRpc, SendSomethingOneway) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
EXPECT_OK(proc.rootIface->sendString("asdf"));
}
TEST_P(BinderRpc, SendAndGetResultBack) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
std::string doubled;
EXPECT_OK(proc.rootIface->doubleString("cool ", &doubled));
EXPECT_EQ("cool cool ", doubled);
}
TEST_P(BinderRpc, SendAndGetResultBackBig) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
std::string single = std::string(1024, 'a');
std::string doubled;
EXPECT_OK(proc.rootIface->doubleString(single, &doubled));
@@ -559,7 +560,7 @@ TEST_P(BinderRpc, SendAndGetResultBackBig) {
}
TEST_P(BinderRpc, CallMeBack) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
int32_t pingResult;
EXPECT_OK(proc.rootIface->pingMe(new MyBinderRpcSession("foo"), &pingResult));
@@ -569,7 +570,7 @@ TEST_P(BinderRpc, CallMeBack) {
}
TEST_P(BinderRpc, RepeatBinder) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
sp<IBinder> inBinder = new MyBinderRpcSession("foo");
sp<IBinder> outBinder;
@@ -591,7 +592,7 @@ TEST_P(BinderRpc, RepeatBinder) {
}
TEST_P(BinderRpc, RepeatTheirBinder) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
sp<IBinderRpcSession> session;
EXPECT_OK(proc.rootIface->openSession("aoeu", &session));
@@ -615,7 +616,7 @@ TEST_P(BinderRpc, RepeatTheirBinder) {
}
TEST_P(BinderRpc, RepeatBinderNull) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
sp<IBinder> outBinder;
EXPECT_OK(proc.rootIface->repeatBinder(nullptr, &outBinder));
@@ -623,7 +624,7 @@ TEST_P(BinderRpc, RepeatBinderNull) {
}
TEST_P(BinderRpc, HoldBinder) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
IBinder* ptr = nullptr;
{
@@ -649,8 +650,8 @@ TEST_P(BinderRpc, HoldBinder) {
// aren't supported.
TEST_P(BinderRpc, CannotMixBindersBetweenUnrelatedSocketSessions) {
- auto proc1 = createRpcTestSocketServerProcess(1);
- auto proc2 = createRpcTestSocketServerProcess(1);
+ auto proc1 = createRpcTestSocketServerProcess({});
+ auto proc2 = createRpcTestSocketServerProcess({});
sp<IBinder> outBinder;
EXPECT_EQ(INVALID_OPERATION,
@@ -658,7 +659,7 @@ TEST_P(BinderRpc, CannotMixBindersBetweenUnrelatedSocketSessions) {
}
TEST_P(BinderRpc, CannotMixBindersBetweenTwoSessionsToTheSameServer) {
- auto proc = createRpcTestSocketServerProcess(1 /*threads*/, 2 /*sessions*/);
+ auto proc = createRpcTestSocketServerProcess({.numThreads = 1, .numSessions = 2});
sp<IBinder> outBinder;
EXPECT_EQ(INVALID_OPERATION,
@@ -667,7 +668,7 @@ TEST_P(BinderRpc, CannotMixBindersBetweenTwoSessionsToTheSameServer) {
}
TEST_P(BinderRpc, CannotSendRegularBinderOverSocketBinder) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
sp<IBinder> someRealBinder = IInterface::asBinder(defaultServiceManager());
sp<IBinder> outBinder;
@@ -676,7 +677,7 @@ TEST_P(BinderRpc, CannotSendRegularBinderOverSocketBinder) {
}
TEST_P(BinderRpc, CannotSendSocketBinderOverRegularBinder) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
// for historical reasons, IServiceManager interface only returns the
// exception code
@@ -687,7 +688,7 @@ TEST_P(BinderRpc, CannotSendSocketBinderOverRegularBinder) {
// END TESTS FOR LIMITATIONS OF SOCKET BINDER
TEST_P(BinderRpc, RepeatRootObject) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
sp<IBinder> outBinder;
EXPECT_OK(proc.rootIface->repeatBinder(proc.rootBinder, &outBinder));
@@ -695,7 +696,7 @@ TEST_P(BinderRpc, RepeatRootObject) {
}
TEST_P(BinderRpc, NestedTransactions) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
auto nastyNester = sp<MyBinderRpcTest>::make();
EXPECT_OK(proc.rootIface->nestMe(nastyNester, 10));
@@ -706,7 +707,7 @@ TEST_P(BinderRpc, NestedTransactions) {
}
TEST_P(BinderRpc, SameBinderEquality) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
sp<IBinder> a;
EXPECT_OK(proc.rootIface->alwaysGiveMeTheSameBinder(&a));
@@ -718,7 +719,7 @@ TEST_P(BinderRpc, SameBinderEquality) {
}
TEST_P(BinderRpc, SameBinderEqualityWeak) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
sp<IBinder> a;
EXPECT_OK(proc.rootIface->alwaysGiveMeTheSameBinder(&a));
@@ -750,7 +751,7 @@ TEST_P(BinderRpc, SameBinderEqualityWeak) {
} while (false)
TEST_P(BinderRpc, SingleSession) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
sp<IBinderRpcSession> session;
EXPECT_OK(proc.rootIface->openSession("aoeu", &session));
@@ -764,7 +765,7 @@ TEST_P(BinderRpc, SingleSession) {
}
TEST_P(BinderRpc, ManySessions) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
std::vector<sp<IBinderRpcSession>> sessions;
@@ -800,7 +801,7 @@ size_t epochMillis() {
TEST_P(BinderRpc, ThreadPoolGreaterThanEqualRequested) {
constexpr size_t kNumThreads = 10;
- auto proc = createRpcTestSocketServerProcess(kNumThreads);
+ auto proc = createRpcTestSocketServerProcess({.numThreads = kNumThreads});
EXPECT_OK(proc.rootIface->lock());
@@ -834,7 +835,7 @@ TEST_P(BinderRpc, ThreadPoolOverSaturated) {
constexpr size_t kNumCalls = kNumThreads + 3;
constexpr size_t kSleepMs = 500;
- auto proc = createRpcTestSocketServerProcess(kNumThreads);
+ auto proc = createRpcTestSocketServerProcess({.numThreads = kNumThreads});
size_t epochMsBefore = epochMillis();
@@ -858,7 +859,7 @@ TEST_P(BinderRpc, ThreadingStressTest) {
constexpr size_t kNumServerThreads = 10;
constexpr size_t kNumCalls = 100;
- auto proc = createRpcTestSocketServerProcess(kNumServerThreads);
+ auto proc = createRpcTestSocketServerProcess({.numThreads = kNumServerThreads});
std::vector<std::thread> threads;
for (size_t i = 0; i < kNumClientThreads; i++) {
@@ -879,7 +880,7 @@ TEST_P(BinderRpc, OnewayStressTest) {
constexpr size_t kNumServerThreads = 10;
constexpr size_t kNumCalls = 500;
- auto proc = createRpcTestSocketServerProcess(kNumServerThreads);
+ auto proc = createRpcTestSocketServerProcess({.numThreads = kNumServerThreads});
std::vector<std::thread> threads;
for (size_t i = 0; i < kNumClientThreads; i++) {
@@ -900,7 +901,7 @@ TEST_P(BinderRpc, OnewayCallDoesNotWait) {
constexpr size_t kReallyLongTimeMs = 100;
constexpr size_t kSleepMs = kReallyLongTimeMs * 5;
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
size_t epochMsBefore = epochMillis();
@@ -916,7 +917,7 @@ TEST_P(BinderRpc, OnewayCallQueueing) {
constexpr size_t kSleepMs = 50;
// make sure calls to the same object happen on the same thread
- auto proc = createRpcTestSocketServerProcess(1 + kNumExtraServerThreads);
+ auto proc = createRpcTestSocketServerProcess({.numThreads = 1 + kNumExtraServerThreads});
EXPECT_OK(proc.rootIface->lock());
@@ -946,7 +947,7 @@ TEST_P(BinderRpc, OnewayCallExhaustion) {
constexpr size_t kNumClients = 2;
constexpr size_t kTooLongMs = 1000;
- auto proc = createRpcTestSocketServerProcess(kNumClients /*threads*/, 2 /*sessions*/);
+ auto proc = createRpcTestSocketServerProcess({.numThreads = kNumClients, .numSessions = 2});
// Build up oneway calls on the second session to make sure it terminates
// and shuts down. The first session should be unaffected (proc destructor
@@ -968,6 +969,12 @@ TEST_P(BinderRpc, OnewayCallExhaustion) {
Status status = iface->sleepMsAsync(kTooLongMs);
EXPECT_EQ(DEAD_OBJECT, status.transactionError()) << status;
+ // now that it has died, wait for the remote session to shutdown
+ std::vector<int32_t> remoteCounts;
+ do {
+ EXPECT_OK(proc.rootIface->countBinders(&remoteCounts));
+ } while (remoteCounts.size() == kNumClients);
+
// the second session should be shutdown in the other process by the time we
// are able to join above (it'll only be hung up once it finishes processing
// any pending commands). We need to erase this session from the record
@@ -982,7 +989,8 @@ TEST_P(BinderRpc, Callbacks) {
for (bool callIsOneway : {true, false}) {
for (bool callbackIsOneway : {true, false}) {
for (bool delayed : {true, false}) {
- auto proc = createRpcTestSocketServerProcess(1, 1, 1);
+ auto proc = createRpcTestSocketServerProcess(
+ {.numThreads = 1, .numSessions = 1, .numIncomingConnections = 1});
auto cb = sp<MyBinderRpcCallback>::make();
if (callIsOneway) {
@@ -1007,9 +1015,11 @@ TEST_P(BinderRpc, Callbacks) {
// since we are severing the connection, we need to go ahead and
// tell the server to shutdown and exit so that waitpid won't hang
- EXPECT_OK(proc.rootIface->scheduleShutdown());
+ if (auto status = proc.rootIface->scheduleShutdown(); !status.isOk()) {
+ EXPECT_EQ(DEAD_OBJECT, status.transactionError()) << status;
+ }
- // since this session has a reverse connection w/ a threadpool, we
+ // since this session has an incoming connection w/ a threadpool, we
// need to manually shut it down
EXPECT_TRUE(proc.proc.sessions.at(0).session->shutdownAndWait(true));
@@ -1020,7 +1030,7 @@ TEST_P(BinderRpc, Callbacks) {
}
TEST_P(BinderRpc, OnewayCallbackWithNoThread) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
auto cb = sp<MyBinderRpcCallback>::make();
Status status = proc.rootIface->doCallback(cb, true /*oneway*/, false /*delayed*/, "anything");
@@ -1029,7 +1039,7 @@ TEST_P(BinderRpc, OnewayCallbackWithNoThread) {
TEST_P(BinderRpc, Die) {
for (bool doDeathCleanup : {true, false}) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
// make sure there is some state during crash
// 1. we hold their binder
@@ -1047,7 +1057,7 @@ TEST_P(BinderRpc, Die) {
}
TEST_P(BinderRpc, UseKernelBinderCallingId) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
// we can't allocate IPCThreadState so actually the first time should
// succeed :(
@@ -1060,7 +1070,7 @@ TEST_P(BinderRpc, UseKernelBinderCallingId) {
}
TEST_P(BinderRpc, WorksWithLibbinderNdkPing) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
ndk::SpAIBinder binder = ndk::SpAIBinder(AIBinder_fromPlatformBinder(proc.rootBinder));
ASSERT_NE(binder, nullptr);
@@ -1069,7 +1079,7 @@ TEST_P(BinderRpc, WorksWithLibbinderNdkPing) {
}
TEST_P(BinderRpc, WorksWithLibbinderNdkUserTransaction) {
- auto proc = createRpcTestSocketServerProcess(1);
+ auto proc = createRpcTestSocketServerProcess({});
ndk::SpAIBinder binder = ndk::SpAIBinder(AIBinder_fromPlatformBinder(proc.rootBinder));
ASSERT_NE(binder, nullptr);
@@ -1097,7 +1107,7 @@ TEST_P(BinderRpc, Fds) {
ssize_t beforeFds = countFds();
ASSERT_GE(beforeFds, 0);
{
- auto proc = createRpcTestSocketServerProcess(10);
+ auto proc = createRpcTestSocketServerProcess({.numThreads = 10});
ASSERT_EQ(OK, proc.rootBinder->pingBinder());
}
ASSERT_EQ(beforeFds, countFds()) << (system("ls -l /proc/self/fd/"), "fd leak?");
@@ -1112,7 +1122,7 @@ static bool testSupportVsockLoopback() {
sp<RpcSession> session = RpcSession::make();
bool okay = session->setupVsockClient(VMADDR_CID_LOCAL, vsockPort);
- CHECK(server->shutdown());
+ while (!server->shutdown()) usleep(10000);
ALOGE("Detected vsock loopback supported: %d", okay);
return okay;
}
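
The test refactor above replaces the positional (numThreads, numSessions, numReverseConnections) parameters with an aggregate Options struct, so call sites only name the fields they override and plain {} means "all defaults". A small standalone illustration of that pattern (names local to this sketch, not the binderRpcTest harness):

#include <cstddef>
#include <iostream>

struct Options {
    size_t numThreads = 1;
    size_t numSessions = 1;
    size_t numIncomingConnections = 0;
};

static void describe(const Options& options) {
    std::cout << options.numThreads << " thread(s), " << options.numSessions
              << " session(s), " << options.numIncomingConnections
              << " incoming connection(s)\n";
}

int main() {
    describe({});                                   // all defaults: 1, 1, 0
    describe({.numThreads = 10});                   // override a single field
    describe({.numThreads = 1, .numSessions = 5});  // C++20 designated initializers,
                                                    // fields named in declaration order
    return 0;
}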
diff --git a/libs/nativewindow/AHardwareBuffer.cpp b/libs/nativewindow/AHardwareBuffer.cpp
index ccc47e937c..de5f1ed134 100644
--- a/libs/nativewindow/AHardwareBuffer.cpp
+++ b/libs/nativewindow/AHardwareBuffer.cpp
@@ -688,6 +688,10 @@ uint64_t AHardwareBuffer_convertToGrallocUsageBits(uint64_t usage) {
"gralloc and AHardwareBuffer flags don't match");
static_assert(AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE == (uint64_t)BufferUsage::GPU_MIPMAP_COMPLETE,
"gralloc and AHardwareBuffer flags don't match");
+ static_assert(AHARDWAREBUFFER_USAGE_CAMERA_WRITE == (uint64_t)BufferUsage::CAMERA_OUTPUT,
+ "gralloc and AHardwareBuffer flags don't match");
+ static_assert(AHARDWAREBUFFER_USAGE_CAMERA_READ == (uint64_t)BufferUsage::CAMERA_INPUT,
+ "gralloc and AHardwareBuffer flags don't match");
return usage;
}
diff --git a/libs/nativewindow/include/vndk/hardware_buffer.h b/libs/nativewindow/include/vndk/hardware_buffer.h
index 3392d7f094..12f8691684 100644
--- a/libs/nativewindow/include/vndk/hardware_buffer.h
+++ b/libs/nativewindow/include/vndk/hardware_buffer.h
@@ -81,6 +81,20 @@ enum {
AHARDWAREBUFFER_FORMAT_YCbCr_422_I = 0x14,
};
+/**
+ * Buffer usage flags.
+ */
+enum {
+ /* for future proofing, keep these in sync with hardware/gralloc.h */
+
+ /* The buffer will be written by the HW camera pipeline. */
+ AHARDWAREBUFFER_USAGE_CAMERA_WRITE = 2UL << 16,
+ /* The buffer will be read by the HW camera pipeline. */
+ AHARDWAREBUFFER_USAGE_CAMERA_READ = 4UL << 16,
+ /* Mask for the camera access values. */
+ AHARDWAREBUFFER_USAGE_CAMERA_MASK = 6UL << 16,
+};
+
__END_DECLS
#endif /* ANDROID_VNDK_NATIVEWINDOW_AHARDWAREBUFFER_H */
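
For reference, the new camera usage bits follow the gralloc-style layout (write = 2UL << 16, read = 4UL << 16), and the mask is exactly their union. A small compile-time sketch makes the bit relationship explicit; it assumes it is built in the Android tree against the vndk/hardware_buffer.h shown above:

#include <vndk/hardware_buffer.h>

// Sanity-check the bit layout of the camera usage flags introduced above.
static_assert((AHARDWAREBUFFER_USAGE_CAMERA_WRITE | AHARDWAREBUFFER_USAGE_CAMERA_READ) ==
                      AHARDWAREBUFFER_USAGE_CAMERA_MASK,
              "camera mask must cover exactly the read and write bits");
static_assert((AHARDWAREBUFFER_USAGE_CAMERA_WRITE & AHARDWAREBUFFER_USAGE_CAMERA_READ) == 0,
              "camera read and write bits must not overlap");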