From c4b7338b1a0995222d229a6f794db02c974ba0d8 Mon Sep 17 00:00:00 2001
From: Michael Wachenschwanz
Date: Fri, 17 Nov 2017 18:25:05 -0800
Subject: Disallow reading object data from Parcels with non-object reads

The check added to each non-object read adds overhead. If the objects
(binders and file descriptors) were written to the Parcel in sequential
order, the check adds a small O(1) overhead to each read, plus an O(N)
overhead to the first read (to verify the N objects were added in
order). If the objects were written out of order (for example by
jumping around the Parcel with setDataPosition and writing a Binder;
DON'T DO THIS!!), the first read is forced to sort the objects in the
internal bookkeeping; writing non-objects out of order is fine. Based
on the assumption that non-sequential writes are infrequent and that
Parcels are mostly sorted overall, insertion sort was used. A
worst-case sort adds an O(N^2) overhead to the first non-object read
from the Parcel.

Test: run cts -m CtsOsTestCases -t android.os.cts.ParcelTest
Bug: 29833520
Change-Id: I82de8eb5f5eb56f869542d5358e96884c24301b2
(cherry picked from commit c517681c66a1a387be657e0cf06da8d19659dd14)
---
 libs/binder/Parcel.cpp              | 73 +++++++++++++++++++++++++++++++++++++
 libs/binder/include/binder/Parcel.h |  2 +
 2 files changed, 75 insertions(+)

diff --git a/libs/binder/Parcel.cpp b/libs/binder/Parcel.cpp
index e22179b15d..44357c3653 100644
--- a/libs/binder/Parcel.cpp
+++ b/libs/binder/Parcel.cpp
@@ -433,6 +433,7 @@ void Parcel::setDataPosition(size_t pos) const
 
     mDataPos = pos;
     mNextObjectHint = 0;
+    mObjectsSorted = false;
 }
 
 status_t Parcel::setDataCapacity(size_t size)
@@ -1469,6 +1470,59 @@ void Parcel::remove(size_t /*start*/, size_t /*amt*/)
     LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
 }
 
+status_t Parcel::validateReadData(size_t upperBound) const
+{
+    // Don't allow non-object reads on object data
+    if (mObjectsSorted || mObjectsSize <= 1) {
+data_sorted:
+        // Expect to check only against the next object
+        if (mNextObjectHint < mObjectsSize && upperBound > mObjects[mNextObjectHint]) {
+            // For some reason the current read position is greater than the next object
+            // hint. Iterate until we find the right object
+            size_t nextObject = mNextObjectHint;
+            do {
+                if (mDataPos < mObjects[nextObject] + sizeof(flat_binder_object)) {
+                    // Requested info overlaps with an object
+                    ALOGE("Attempt to read from protected data in Parcel %p", this);
+                    return PERMISSION_DENIED;
+                }
+                nextObject++;
+            } while (nextObject < mObjectsSize && upperBound > mObjects[nextObject]);
+            mNextObjectHint = nextObject;
+        }
+        return NO_ERROR;
+    }
+    // Quickly determine if mObjects is sorted.
+    binder_size_t* currObj = mObjects + mObjectsSize - 1;
+    binder_size_t* prevObj = currObj;
+    while (currObj > mObjects) {
+        prevObj--;
+        if(*prevObj > *currObj) {
+            goto data_unsorted;
+        }
+        currObj--;
+    }
+    mObjectsSorted = true;
+    goto data_sorted;
+
+data_unsorted:
+    // Insertion Sort mObjects
+    // Great for mostly sorted lists. If randomly sorted or reverse ordered mObjects become common,
+    // switch to std::sort(mObjects, mObjects + mObjectsSize);
+    for (binder_size_t* iter0 = mObjects + 1; iter0 < mObjects + mObjectsSize; iter0++) {
+        binder_size_t temp = *iter0;
+        binder_size_t* iter1 = iter0 - 1;
+        while (iter1 >= mObjects && *iter1 > temp) {
+            *(iter1 + 1) = *iter1;
+            iter1--;
+        }
+        *(iter1 + 1) = temp;
+    }
+    mNextObjectHint = 0;
+    mObjectsSorted = true;
+    goto data_sorted;
+}
+
 status_t Parcel::read(void* outData, size_t len) const
 {
     if (len > INT32_MAX) {
@@ -1479,6 +1533,10 @@ status_t Parcel::read(void* outData, size_t len) const
 
     if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
             && len <= pad_size(len)) {
+        if (mObjectsSize > 0) {
+            status_t err = validateReadData(mDataPos + pad_size(len));
+            if(err != NO_ERROR) return err;
+        }
         memcpy(outData, mData+mDataPos, len);
         mDataPos += pad_size(len);
         ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
@@ -1497,6 +1555,11 @@ const void* Parcel::readInplace(size_t len) const
 
     if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
             && len <= pad_size(len)) {
+        if (mObjectsSize > 0) {
+            status_t err = validateReadData(mDataPos + pad_size(len));
+            if(err != NO_ERROR) return NULL;
+        }
+
         const void* data = mData+mDataPos;
         mDataPos += pad_size(len);
         ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
@@ -1510,6 +1573,11 @@ status_t Parcel::readAligned(T *pArg) const {
     COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
 
     if ((mDataPos+sizeof(T)) <= mDataSize) {
+        if (mObjectsSize > 0) {
+            status_t err = validateReadData(mDataPos + sizeof(T));
+            if(err != NO_ERROR) return err;
+        }
+
         const void* data = mData+mDataPos;
         mDataPos += sizeof(T);
         *pArg = *reinterpret_cast<const T*>(data);
@@ -2366,6 +2434,7 @@ void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
     mObjects = const_cast<binder_size_t*>(objects);
     mObjectsSize = mObjectsCapacity = objectsCount;
     mNextObjectHint = 0;
+    mObjectsSorted = false;
     mOwner = relFunc;
     mOwnerCookie = relCookie;
     for (size_t i = 0; i < mObjectsSize; i++) {
@@ -2524,6 +2593,7 @@ status_t Parcel::restartWrite(size_t desired)
     mObjects = NULL;
     mObjectsSize = mObjectsCapacity = 0;
     mNextObjectHint = 0;
+    mObjectsSorted = false;
     mHasFds = false;
     mFdsKnown = true;
     mAllowFds = true;
@@ -2610,6 +2680,7 @@ status_t Parcel::continueWrite(size_t desired)
         mDataCapacity = desired;
         mObjectsSize = mObjectsCapacity = objectsSize;
         mNextObjectHint = 0;
+        mObjectsSorted = false;
 
     } else if (mData) {
         if (objectsSize < mObjectsSize) {
@@ -2631,6 +2702,7 @@ status_t Parcel::continueWrite(size_t desired)
             }
             mObjectsSize = objectsSize;
            mNextObjectHint = 0;
+            mObjectsSorted = false;
        }

        // We own the data, so we can just do a realloc().
@@ -2703,6 +2775,7 @@ void Parcel::initState()
     mObjectsSize = 0;
     mObjectsCapacity = 0;
     mNextObjectHint = 0;
+    mObjectsSorted = false;
     mHasFds = false;
     mFdsKnown = true;
     mAllowFds = true;
diff --git a/libs/binder/include/binder/Parcel.h b/libs/binder/include/binder/Parcel.h
index 5d36526cb3..dede78f61e 100644
--- a/libs/binder/include/binder/Parcel.h
+++ b/libs/binder/include/binder/Parcel.h
@@ -417,6 +417,7 @@ private:
     void                freeDataNoInit();
     void                initState();
     void                scanForFds() const;
+    status_t            validateReadData(size_t len) const;
 
     template<class T>
     status_t            readAligned(T *pArg) const;
@@ -463,6 +464,7 @@ private:
     size_t              mObjectsSize;
     size_t              mObjectsCapacity;
     mutable size_t      mNextObjectHint;
+    mutable bool        mObjectsSorted;
 
     mutable bool        mFdsKnown;
     mutable bool        mHasFds;
-- 
cgit v1.2.3

From ab1fb955acc8bbe6b9086a4ab54beab003a887a9 Mon Sep 17 00:00:00 2001
From: Michael Wachenschwanz
Date: Tue, 17 Apr 2018 16:52:40 -0700
Subject: Increment when attempting to read protected Parcel Data

Make sure to increment the parcel data position even when trying to
improperly read from protected data.

Bug: 29833520
Test (M): cts-tradefed run cts -c android.os.cts.ParcelTest -m testBinderDataProtection
Test (M): cts-tradefed run cts -c android.os.cts.ParcelTest -m testBinderDataProtectionIncrements
Test: cts-tradefed run cts -m CtsOsTestCases -t android.os.cts.ParcelTest#testBinderDataProtection
Test: cts-tradefed run cts -m CtsOsTestCases -t android.os.cts.ParcelTest#testBinderDataProtectionIncrements

Change-Id: Ie4aae6277fc5f5c924f603d9828c3a608998b986
Merged-In: Ie4aae6277fc5f5c924f603d9828c3a608998b986
(cherry picked from commit 6a825e8ad1a3928dd872bb7c3fbcd94784d77267)
---
 libs/binder/Parcel.cpp | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/libs/binder/Parcel.cpp b/libs/binder/Parcel.cpp
index 44357c3653..3fafbb8d1b 100644
--- a/libs/binder/Parcel.cpp
+++ b/libs/binder/Parcel.cpp
@@ -1535,7 +1535,12 @@ status_t Parcel::read(void* outData, size_t len) const
             && len <= pad_size(len)) {
         if (mObjectsSize > 0) {
             status_t err = validateReadData(mDataPos + pad_size(len));
-            if(err != NO_ERROR) return err;
+            if(err != NO_ERROR) {
+                // Still increment the data position by the expected length
+                mDataPos += pad_size(len);
+                ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
+                return err;
+            }
         }
         memcpy(outData, mData+mDataPos, len);
         mDataPos += pad_size(len);
@@ -1557,7 +1562,12 @@ const void* Parcel::readInplace(size_t len) const
             && len <= pad_size(len)) {
         if (mObjectsSize > 0) {
             status_t err = validateReadData(mDataPos + pad_size(len));
-            if(err != NO_ERROR) return NULL;
+            if(err != NO_ERROR) {
+                // Still increment the data position by the expected length
+                mDataPos += pad_size(len);
+                ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
+                return NULL;
+            }
         }
 
         const void* data = mData+mDataPos;
@@ -1575,7 +1585,11 @@ status_t Parcel::readAligned(T *pArg) const {
     if ((mDataPos+sizeof(T)) <= mDataSize) {
         if (mObjectsSize > 0) {
             status_t err = validateReadData(mDataPos + sizeof(T));
-            if(err != NO_ERROR) return err;
+            if(err != NO_ERROR) {
+                // Still increment the data position by the expected length
+                mDataPos += sizeof(T);
+                return err;
+            }
         }
 
         const void* data = mData+mDataPos;
-- 
cgit v1.2.3

From ff2171f2460e3a6d3443ab957732b8b7d4831d40 Mon Sep 17 00:00:00 2001
From: Martijn Coenen
Date: Wed, 4 Apr 2018 11:46:56 +0200
Subject: Don't pad before calling writeInPlace().

writeInplace() itself already pads securely, by masking off the
padded bytes.
If the padding is done before calling writeInplace(), no mask is
applied, and heap data can leak.

Bug: 77237570
Test: builds
Change-Id: Ide27a0002d4ed4196530430760245b971f6a3f44
Merged-In: Ide27a0002d4ed4196530430760245b971f6a3f44
(cherry picked from commit f8542381b72a7bb2452a5278a00ca8c34edbf8a0)
(cherry picked from commit 732132b765cd7b667f16cf32f0fe4c852d7d44dd)
Change-Id: Id65e4573e18ab68b804f1cf63a6977a71da01e5d
---
 libs/binder/Parcel.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libs/binder/Parcel.cpp b/libs/binder/Parcel.cpp
index 3fafbb8d1b..460bbe2fc5 100644
--- a/libs/binder/Parcel.cpp
+++ b/libs/binder/Parcel.cpp
@@ -1277,7 +1277,7 @@ status_t Parcel::write(const FlattenableHelperInterface& val)
     if (err) return err;
 
     // payload
-    void* const buf = this->writeInplace(pad_size(len));
+    void* const buf = this->writeInplace(len);
     if (buf == NULL)
         return BAD_VALUE;
 
-- 
cgit v1.2.3
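
The core of validateReadData() in the first patch is an overlap check between a
requested plain-data read range and the sorted object offsets, plus an insertion
sort that repairs the ordering when objects were written out of sequence. The
standalone C++ sketch below restates that idea outside the Parcel class; the
names readAllowed, sortObjectOffsets, and kObjectSize are illustrative and not
part of the patch, and kObjectSize merely stands in for sizeof(flat_binder_object).
The sketch also uses a binary search for clarity, whereas the patched code walks
forward from a cached cursor (mNextObjectHint) so that sequential reads stay O(1).

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-in for sizeof(flat_binder_object); the exact value depends on the binder ABI.
constexpr size_t kObjectSize = 24;

// Insertion sort over the object offsets, as in the patch: cheap when the
// offsets are already mostly sorted, O(N^2) in the worst case.
void sortObjectOffsets(std::vector<uint64_t>& offsets) {
    for (size_t i = 1; i < offsets.size(); ++i) {
        uint64_t value = offsets[i];
        size_t j = i;
        while (j > 0 && offsets[j - 1] > value) {
            offsets[j] = offsets[j - 1];
            --j;
        }
        offsets[j] = value;
    }
}

// Returns true if a plain-data read of [readPos, upperBound) touches no object.
// Only the last object starting below upperBound needs to be checked: every
// earlier object starts lower and therefore ends sooner.
bool readAllowed(const std::vector<uint64_t>& sortedOffsets,
                 size_t readPos, size_t upperBound) {
    auto it = std::lower_bound(sortedOffsets.begin(), sortedOffsets.end(),
                               static_cast<uint64_t>(upperBound));
    if (it != sortedOffsets.begin()) {
        uint64_t lastBelow = *(it - 1);
        if (readPos < lastBelow + kObjectSize) {
            return false;  // the read range overlaps that object
        }
    }
    return true;
}

For example, with objects at offsets 0 and 64, readAllowed(offsets, 60, 68)
returns false, which corresponds to the PERMISSION_DENIED path taken by the
patched read(), readInplace(), and readAligned() routines.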