2008-10-21 14:00:00 +00:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2005 The Android Open Source Project
|
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#define LOG_TAG "Parcel"
|
|
|
|
//#define LOG_NDEBUG 0
|
|
|
|
|
2009-05-20 02:08:10 +00:00
|
|
|
#include <binder/Parcel.h>
|
2016-02-02 18:27:03 +00:00
|
|
|
#include <fcntl.h>
|
|
|
|
#include <pthread.h>
|
2008-10-21 14:00:00 +00:00
|
|
|
|
2010-06-18 20:07:53 +00:00
|
|
|
#include <binder/IPCThreadState.h>
|
2009-05-20 02:08:10 +00:00
|
|
|
#include <binder/Binder.h>
|
|
|
|
#include <binder/BpBinder.h>
|
|
|
|
#include <binder/ProcessState.h>
|
2013-05-07 03:20:50 +00:00
|
|
|
#include <binder/TextOutput.h>
|
|
|
|
|
2014-04-29 06:22:10 +00:00
|
|
|
#include <errno.h>
|
2013-05-07 03:20:50 +00:00
|
|
|
#include <utils/Debug.h>
|
2008-10-21 14:00:00 +00:00
|
|
|
#include <utils/Log.h>
|
|
|
|
#include <utils/String8.h>
|
|
|
|
#include <utils/String16.h>
|
|
|
|
#include <utils/misc.h>
|
2010-02-12 01:30:52 +00:00
|
|
|
#include <utils/Flattenable.h>
|
2011-09-24 04:17:56 +00:00
|
|
|
#include <cutils/ashmem.h>
|
2008-10-21 14:00:00 +00:00
|
|
|
|
2009-05-18 22:08:03 +00:00
|
|
|
#include <private/binder/binder_module.h>
|
2014-11-11 20:22:53 +00:00
|
|
|
#include <private/binder/Static.h>
|
2008-10-21 14:00:00 +00:00
|
|
|
|
2014-02-14 03:22:08 +00:00
|
|
|
#include <inttypes.h>
|
2008-10-21 14:00:00 +00:00
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <stdint.h>
|
2011-09-24 04:17:56 +00:00
|
|
|
#include <sys/mman.h>
|
2016-01-27 16:02:48 +00:00
|
|
|
#include <sys/stat.h>
|
|
|
|
#include <sys/types.h>
|
|
|
|
#include <unistd.h>
|
2008-10-21 14:00:00 +00:00
|
|
|
|
|
|
|
#ifndef INT32_MAX
|
|
|
|
#define INT32_MAX ((int32_t)(2147483647))
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#define LOG_REFS(...)
|
2011-10-12 16:27:03 +00:00
|
|
|
//#define LOG_REFS(...) ALOG(LOG_DEBUG, "Parcel", __VA_ARGS__)
|
2014-11-11 20:22:53 +00:00
|
|
|
#define LOG_ALLOC(...)
|
|
|
|
//#define LOG_ALLOC(...) ALOG(LOG_DEBUG, "Parcel", __VA_ARGS__)
|
2008-10-21 14:00:00 +00:00
|
|
|
|
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
|
2015-04-02 16:36:02 +00:00
|
|
|
// This macro should never be used at runtime, as a too large value
|
|
|
|
// of s could cause an integer overflow. Instead, you should always
|
|
|
|
// use the wrapper function pad_size()
|
|
|
|
#define PAD_SIZE_UNSAFE(s) (((s)+3)&~3)
|
|
|
|
|
|
|
|
static size_t pad_size(size_t s) {
|
|
|
|
if (s > (SIZE_T_MAX - 3)) {
|
|
|
|
abort();
|
|
|
|
}
|
|
|
|
return PAD_SIZE_UNSAFE(s);
|
|
|
|
}
|
2008-10-21 14:00:00 +00:00
|
|
|
|
2010-07-07 23:06:39 +00:00
|
|
|
// Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
|
2014-12-18 18:26:57 +00:00
|
|
|
#define STRICT_MODE_PENALTY_GATHER (0x40 << 16)
|
2010-07-07 23:06:39 +00:00
|
|
|
|
2010-07-12 18:05:38 +00:00
|
|
|
// Note: must be kept in sync with android/os/Parcel.java's EX_HAS_REPLY_HEADER
|
|
|
|
#define EX_HAS_REPLY_HEADER -128
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
// XXX This can be made public if we want to provide
|
|
|
|
// support for typed data.
|
|
|
|
struct small_flat_data
|
|
|
|
{
|
|
|
|
uint32_t type;
|
|
|
|
uint32_t data;
|
|
|
|
};
|
|
|
|
|
|
|
|
namespace android {
|
|
|
|
|
2014-11-14 01:07:40 +00:00
|
|
|
static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER;
|
|
|
|
static size_t gParcelGlobalAllocSize = 0;
|
|
|
|
static size_t gParcelGlobalAllocCount = 0;
|
|
|
|
|
2014-11-12 00:44:25 +00:00
|
|
|
// Maximum size of a blob to transfer in-place.
|
|
|
|
static const size_t BLOB_INPLACE_LIMIT = 16 * 1024;
|
|
|
|
|
|
|
|
enum {
|
|
|
|
BLOB_INPLACE = 0,
|
|
|
|
BLOB_ASHMEM_IMMUTABLE = 1,
|
|
|
|
BLOB_ASHMEM_MUTABLE = 2,
|
|
|
|
};
|
|
|
|
|
2016-02-02 18:27:03 +00:00
|
|
|
static dev_t ashmem_rdev()
|
|
|
|
{
|
|
|
|
static dev_t __ashmem_rdev;
|
|
|
|
static pthread_mutex_t __ashmem_rdev_lock = PTHREAD_MUTEX_INITIALIZER;
|
|
|
|
|
|
|
|
pthread_mutex_lock(&__ashmem_rdev_lock);
|
|
|
|
|
|
|
|
dev_t rdev = __ashmem_rdev;
|
|
|
|
if (!rdev) {
|
|
|
|
int fd = TEMP_FAILURE_RETRY(open("/dev/ashmem", O_RDONLY));
|
|
|
|
if (fd >= 0) {
|
|
|
|
struct stat st;
|
|
|
|
|
|
|
|
int ret = TEMP_FAILURE_RETRY(fstat(fd, &st));
|
|
|
|
close(fd);
|
|
|
|
if ((ret >= 0) && S_ISCHR(st.st_mode)) {
|
|
|
|
rdev = __ashmem_rdev = st.st_rdev;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pthread_mutex_unlock(&__ashmem_rdev_lock);
|
|
|
|
|
|
|
|
return rdev;
|
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
void acquire_object(const sp<ProcessState>& proc,
|
2015-10-22 23:12:53 +00:00
|
|
|
const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
|
2008-10-21 14:00:00 +00:00
|
|
|
{
|
|
|
|
switch (obj.type) {
|
|
|
|
case BINDER_TYPE_BINDER:
|
|
|
|
if (obj.binder) {
|
|
|
|
LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie);
|
2014-01-29 04:12:59 +00:00
|
|
|
reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
return;
|
|
|
|
case BINDER_TYPE_WEAK_BINDER:
|
|
|
|
if (obj.binder)
|
2014-01-29 04:12:59 +00:00
|
|
|
reinterpret_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who);
|
2008-10-21 14:00:00 +00:00
|
|
|
return;
|
|
|
|
case BINDER_TYPE_HANDLE: {
|
|
|
|
const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
|
|
|
|
if (b != NULL) {
|
|
|
|
LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
|
|
|
|
b->incStrong(who);
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
case BINDER_TYPE_WEAK_HANDLE: {
|
|
|
|
const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
|
|
|
|
if (b != NULL) b.get_refs()->incWeak(who);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
case BINDER_TYPE_FD: {
|
2016-01-27 16:02:48 +00:00
|
|
|
if ((obj.cookie != 0) && (outAshmemSize != NULL)) {
|
|
|
|
struct stat st;
|
|
|
|
int ret = fstat(obj.handle, &st);
|
2016-02-02 18:27:03 +00:00
|
|
|
if (!ret && S_ISCHR(st.st_mode) && (st.st_rdev == ashmem_rdev())) {
|
2015-10-22 23:46:12 +00:00
|
|
|
// If we own an ashmem fd, keep track of how much memory it refers to.
|
|
|
|
int size = ashmem_get_size_region(obj.handle);
|
|
|
|
if (size > 0) {
|
|
|
|
*outAshmemSize += size;
|
|
|
|
}
|
2015-10-22 23:12:53 +00:00
|
|
|
}
|
|
|
|
}
|
2008-10-21 14:00:00 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-02-06 01:42:44 +00:00
|
|
|
ALOGD("Invalid object type 0x%08x", obj.type);
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
2015-10-22 23:46:12 +00:00
|
|
|
void acquire_object(const sp<ProcessState>& proc,
|
|
|
|
const flat_binder_object& obj, const void* who)
|
|
|
|
{
|
|
|
|
acquire_object(proc, obj, who, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void release_object(const sp<ProcessState>& proc,
|
2015-10-22 23:12:53 +00:00
|
|
|
const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
|
2008-10-21 14:00:00 +00:00
|
|
|
{
|
|
|
|
switch (obj.type) {
|
|
|
|
case BINDER_TYPE_BINDER:
|
|
|
|
if (obj.binder) {
|
|
|
|
LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie);
|
2014-01-29 04:12:59 +00:00
|
|
|
reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
return;
|
|
|
|
case BINDER_TYPE_WEAK_BINDER:
|
|
|
|
if (obj.binder)
|
2014-01-29 04:12:59 +00:00
|
|
|
reinterpret_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who);
|
2008-10-21 14:00:00 +00:00
|
|
|
return;
|
|
|
|
case BINDER_TYPE_HANDLE: {
|
|
|
|
const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
|
|
|
|
if (b != NULL) {
|
|
|
|
LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
|
|
|
|
b->decStrong(who);
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
case BINDER_TYPE_WEAK_HANDLE: {
|
|
|
|
const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
|
|
|
|
if (b != NULL) b.get_refs()->decWeak(who);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
case BINDER_TYPE_FD: {
|
2016-01-27 16:02:48 +00:00
|
|
|
if (obj.cookie != 0) { // owned
|
|
|
|
if (outAshmemSize != NULL) {
|
2016-01-27 16:02:48 +00:00
|
|
|
struct stat st;
|
|
|
|
int ret = fstat(obj.handle, &st);
|
2016-02-02 18:27:03 +00:00
|
|
|
if (!ret && S_ISCHR(st.st_mode) && (st.st_rdev == ashmem_rdev())) {
|
2016-01-27 16:02:48 +00:00
|
|
|
int size = ashmem_get_size_region(obj.handle);
|
|
|
|
if (size > 0) {
|
|
|
|
*outAshmemSize -= size;
|
|
|
|
}
|
2015-10-22 23:46:12 +00:00
|
|
|
}
|
|
|
|
}
|
2016-01-27 16:02:48 +00:00
|
|
|
close(obj.handle);
|
2015-12-03 09:34:15 +00:00
|
|
|
#ifdef DISABLE_ASHMEM_TRACKING
|
|
|
|
} else if (obj.cookie != 0) {
|
|
|
|
close(obj.handle);
|
|
|
|
#endif
|
2015-10-22 23:12:53 +00:00
|
|
|
}
|
2008-10-21 14:00:00 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-02-06 01:42:44 +00:00
|
|
|
ALOGE("Invalid object type 0x%08x", obj.type);
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
2015-10-22 23:46:12 +00:00
|
|
|
void release_object(const sp<ProcessState>& proc,
|
|
|
|
const flat_binder_object& obj, const void* who)
|
|
|
|
{
|
|
|
|
release_object(proc, obj, who, NULL);
|
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
inline static status_t finish_flatten_binder(
|
2014-02-06 01:42:44 +00:00
|
|
|
const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
|
2008-10-21 14:00:00 +00:00
|
|
|
{
|
|
|
|
return out->writeObject(flat, false);
|
|
|
|
}
|
|
|
|
|
2014-02-06 01:42:44 +00:00
|
|
|
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
|
2008-10-21 14:00:00 +00:00
|
|
|
const sp<IBinder>& binder, Parcel* out)
|
|
|
|
{
|
|
|
|
flat_binder_object obj;
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
|
|
|
|
if (binder != NULL) {
|
|
|
|
IBinder *local = binder->localBinder();
|
|
|
|
if (!local) {
|
|
|
|
BpBinder *proxy = binder->remoteBinder();
|
|
|
|
if (proxy == NULL) {
|
2012-01-06 19:20:56 +00:00
|
|
|
ALOGE("null proxy");
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
const int32_t handle = proxy ? proxy->handle() : 0;
|
|
|
|
obj.type = BINDER_TYPE_HANDLE;
|
2014-02-19 05:10:29 +00:00
|
|
|
obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
|
2008-10-21 14:00:00 +00:00
|
|
|
obj.handle = handle;
|
2014-01-29 04:12:59 +00:00
|
|
|
obj.cookie = 0;
|
2008-10-21 14:00:00 +00:00
|
|
|
} else {
|
|
|
|
obj.type = BINDER_TYPE_BINDER;
|
2014-01-29 04:12:59 +00:00
|
|
|
obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
|
|
|
|
obj.cookie = reinterpret_cast<uintptr_t>(local);
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
obj.type = BINDER_TYPE_BINDER;
|
2014-01-29 04:12:59 +00:00
|
|
|
obj.binder = 0;
|
|
|
|
obj.cookie = 0;
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
return finish_flatten_binder(binder, obj, out);
|
|
|
|
}
|
|
|
|
|
2014-02-06 01:42:44 +00:00
|
|
|
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
|
2008-10-21 14:00:00 +00:00
|
|
|
const wp<IBinder>& binder, Parcel* out)
|
|
|
|
{
|
|
|
|
flat_binder_object obj;
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
|
|
|
|
if (binder != NULL) {
|
|
|
|
sp<IBinder> real = binder.promote();
|
|
|
|
if (real != NULL) {
|
|
|
|
IBinder *local = real->localBinder();
|
|
|
|
if (!local) {
|
|
|
|
BpBinder *proxy = real->remoteBinder();
|
|
|
|
if (proxy == NULL) {
|
2012-01-06 19:20:56 +00:00
|
|
|
ALOGE("null proxy");
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
const int32_t handle = proxy ? proxy->handle() : 0;
|
|
|
|
obj.type = BINDER_TYPE_WEAK_HANDLE;
|
2014-02-19 05:10:29 +00:00
|
|
|
obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
|
2008-10-21 14:00:00 +00:00
|
|
|
obj.handle = handle;
|
2014-01-29 04:12:59 +00:00
|
|
|
obj.cookie = 0;
|
2008-10-21 14:00:00 +00:00
|
|
|
} else {
|
|
|
|
obj.type = BINDER_TYPE_WEAK_BINDER;
|
2014-01-29 04:12:59 +00:00
|
|
|
obj.binder = reinterpret_cast<uintptr_t>(binder.get_refs());
|
|
|
|
obj.cookie = reinterpret_cast<uintptr_t>(binder.unsafe_get());
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
return finish_flatten_binder(real, obj, out);
|
|
|
|
}
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
// XXX How to deal? In order to flatten the given binder,
|
|
|
|
// we need to probe it for information, which requires a primary
|
|
|
|
// reference... but we don't have one.
|
|
|
|
//
|
|
|
|
// The OpenBinder implementation uses a dynamic_cast<> here,
|
|
|
|
// but we can't do that with the different reference counting
|
|
|
|
// implementation we are using.
|
2012-01-06 19:20:56 +00:00
|
|
|
ALOGE("Unable to unflatten Binder weak reference!");
|
2008-10-21 14:00:00 +00:00
|
|
|
obj.type = BINDER_TYPE_BINDER;
|
2014-01-29 04:12:59 +00:00
|
|
|
obj.binder = 0;
|
|
|
|
obj.cookie = 0;
|
2008-10-21 14:00:00 +00:00
|
|
|
return finish_flatten_binder(NULL, obj, out);
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
} else {
|
|
|
|
obj.type = BINDER_TYPE_BINDER;
|
2014-01-29 04:12:59 +00:00
|
|
|
obj.binder = 0;
|
|
|
|
obj.cookie = 0;
|
2008-10-21 14:00:00 +00:00
|
|
|
return finish_flatten_binder(NULL, obj, out);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
inline static status_t finish_unflatten_binder(
|
2014-02-06 01:42:44 +00:00
|
|
|
BpBinder* /*proxy*/, const flat_binder_object& /*flat*/,
|
|
|
|
const Parcel& /*in*/)
|
2008-10-21 14:00:00 +00:00
|
|
|
{
|
|
|
|
return NO_ERROR;
|
|
|
|
}
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
status_t unflatten_binder(const sp<ProcessState>& proc,
|
|
|
|
const Parcel& in, sp<IBinder>* out)
|
|
|
|
{
|
|
|
|
const flat_binder_object* flat = in.readObject(false);
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
if (flat) {
|
|
|
|
switch (flat->type) {
|
|
|
|
case BINDER_TYPE_BINDER:
|
2014-01-29 04:12:59 +00:00
|
|
|
*out = reinterpret_cast<IBinder*>(flat->cookie);
|
2008-10-21 14:00:00 +00:00
|
|
|
return finish_unflatten_binder(NULL, *flat, in);
|
|
|
|
case BINDER_TYPE_HANDLE:
|
|
|
|
*out = proc->getStrongProxyForHandle(flat->handle);
|
|
|
|
return finish_unflatten_binder(
|
|
|
|
static_cast<BpBinder*>(out->get()), *flat, in);
|
2014-05-30 23:35:57 +00:00
|
|
|
}
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
return BAD_TYPE;
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t unflatten_binder(const sp<ProcessState>& proc,
|
|
|
|
const Parcel& in, wp<IBinder>* out)
|
|
|
|
{
|
|
|
|
const flat_binder_object* flat = in.readObject(false);
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
if (flat) {
|
|
|
|
switch (flat->type) {
|
|
|
|
case BINDER_TYPE_BINDER:
|
2014-01-29 04:12:59 +00:00
|
|
|
*out = reinterpret_cast<IBinder*>(flat->cookie);
|
2008-10-21 14:00:00 +00:00
|
|
|
return finish_unflatten_binder(NULL, *flat, in);
|
|
|
|
case BINDER_TYPE_WEAK_BINDER:
|
2014-01-29 04:12:59 +00:00
|
|
|
if (flat->binder != 0) {
|
2008-10-21 14:00:00 +00:00
|
|
|
out->set_object_and_refs(
|
2014-01-29 04:12:59 +00:00
|
|
|
reinterpret_cast<IBinder*>(flat->cookie),
|
|
|
|
reinterpret_cast<RefBase::weakref_type*>(flat->binder));
|
2008-10-21 14:00:00 +00:00
|
|
|
} else {
|
|
|
|
*out = NULL;
|
|
|
|
}
|
|
|
|
return finish_unflatten_binder(NULL, *flat, in);
|
|
|
|
case BINDER_TYPE_HANDLE:
|
|
|
|
case BINDER_TYPE_WEAK_HANDLE:
|
|
|
|
*out = proc->getWeakProxyForHandle(flat->handle);
|
|
|
|
return finish_unflatten_binder(
|
|
|
|
static_cast<BpBinder*>(out->unsafe_get()), *flat, in);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return BAD_TYPE;
|
|
|
|
}
|
|
|
|
|
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
Parcel::Parcel()
|
|
|
|
{
|
2014-11-11 20:22:53 +00:00
|
|
|
LOG_ALLOC("Parcel %p: constructing", this);
|
2008-10-21 14:00:00 +00:00
|
|
|
initState();
|
|
|
|
}
|
|
|
|
|
|
|
|
Parcel::~Parcel()
|
|
|
|
{
|
|
|
|
freeDataNoInit();
|
2014-11-11 20:22:53 +00:00
|
|
|
LOG_ALLOC("Parcel %p: destroyed", this);
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t Parcel::getGlobalAllocSize() {
|
2014-11-14 01:07:40 +00:00
|
|
|
pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
|
|
|
|
size_t size = gParcelGlobalAllocSize;
|
|
|
|
pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
|
|
|
|
return size;
|
2014-11-11 20:22:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
size_t Parcel::getGlobalAllocCount() {
|
2014-11-14 01:07:40 +00:00
|
|
|
pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
|
|
|
|
size_t count = gParcelGlobalAllocCount;
|
|
|
|
pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
|
|
|
|
return count;
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
const uint8_t* Parcel::data() const
|
|
|
|
{
|
|
|
|
return mData;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t Parcel::dataSize() const
|
|
|
|
{
|
|
|
|
return (mDataSize > mDataPos ? mDataSize : mDataPos);
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t Parcel::dataAvail() const
|
|
|
|
{
|
|
|
|
// TODO: decide what to do about the possibility that this can
|
|
|
|
// report an available-data size that exceeds a Java int's max
|
|
|
|
// positive value, causing havoc. Fortunately this will only
|
|
|
|
// happen if someone constructs a Parcel containing more than two
|
|
|
|
// gigabytes of data, which on typical phone hardware is simply
|
|
|
|
// not possible.
|
|
|
|
return dataSize() - dataPosition();
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t Parcel::dataPosition() const
|
|
|
|
{
|
|
|
|
return mDataPos;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t Parcel::dataCapacity() const
|
|
|
|
{
|
|
|
|
return mDataCapacity;
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t Parcel::setDataSize(size_t size)
|
|
|
|
{
|
2015-04-02 16:36:02 +00:00
|
|
|
if (size > INT32_MAX) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
return BAD_VALUE;
|
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
status_t err;
|
|
|
|
err = continueWrite(size);
|
|
|
|
if (err == NO_ERROR) {
|
|
|
|
mDataSize = size;
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
void Parcel::setDataPosition(size_t pos) const
|
|
|
|
{
|
2015-04-02 16:36:02 +00:00
|
|
|
if (pos > INT32_MAX) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
abort();
|
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
mDataPos = pos;
|
|
|
|
mNextObjectHint = 0;
|
2017-11-18 02:25:05 +00:00
|
|
|
mObjectsSorted = false;
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
status_t Parcel::setDataCapacity(size_t size)
|
|
|
|
{
|
2015-04-02 16:36:02 +00:00
|
|
|
if (size > INT32_MAX) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
return BAD_VALUE;
|
|
|
|
}
|
|
|
|
|
2011-04-14 01:15:56 +00:00
|
|
|
if (size > mDataCapacity) return continueWrite(size);
|
2008-10-21 14:00:00 +00:00
|
|
|
return NO_ERROR;
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t Parcel::setData(const uint8_t* buffer, size_t len)
|
|
|
|
{
|
2015-04-02 16:36:02 +00:00
|
|
|
if (len > INT32_MAX) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
return BAD_VALUE;
|
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
status_t err = restartWrite(len);
|
|
|
|
if (err == NO_ERROR) {
|
|
|
|
memcpy(const_cast<uint8_t*>(data()), buffer, len);
|
|
|
|
mDataSize = len;
|
|
|
|
mFdsKnown = false;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2011-04-13 17:21:56 +00:00
|
|
|
status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len)
|
2008-10-21 14:00:00 +00:00
|
|
|
{
|
|
|
|
const sp<ProcessState> proc(ProcessState::self());
|
|
|
|
status_t err;
|
2011-04-13 17:21:56 +00:00
|
|
|
const uint8_t *data = parcel->mData;
|
2014-01-29 04:12:59 +00:00
|
|
|
const binder_size_t *objects = parcel->mObjects;
|
2008-10-21 14:00:00 +00:00
|
|
|
size_t size = parcel->mObjectsSize;
|
|
|
|
int startPos = mDataPos;
|
|
|
|
int firstIndex = -1, lastIndex = -2;
|
|
|
|
|
|
|
|
if (len == 0) {
|
|
|
|
return NO_ERROR;
|
|
|
|
}
|
|
|
|
|
2015-04-02 16:36:02 +00:00
|
|
|
if (len > INT32_MAX) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
return BAD_VALUE;
|
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
// range checks against the source parcel size
|
|
|
|
if ((offset > parcel->mDataSize)
|
|
|
|
|| (len > parcel->mDataSize)
|
|
|
|
|| (offset + len > parcel->mDataSize)) {
|
|
|
|
return BAD_VALUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Count objects in range
|
|
|
|
for (int i = 0; i < (int) size; i++) {
|
|
|
|
size_t off = objects[i];
|
2015-05-28 00:53:02 +00:00
|
|
|
if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) {
|
2008-10-21 14:00:00 +00:00
|
|
|
if (firstIndex == -1) {
|
|
|
|
firstIndex = i;
|
|
|
|
}
|
|
|
|
lastIndex = i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
int numObjects = lastIndex - firstIndex + 1;
|
|
|
|
|
2011-04-14 01:15:56 +00:00
|
|
|
if ((mDataSize+len) > mDataCapacity) {
|
|
|
|
// grow data
|
|
|
|
err = growData(len);
|
|
|
|
if (err != NO_ERROR) {
|
|
|
|
return err;
|
|
|
|
}
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// append data
|
|
|
|
memcpy(mData + mDataPos, data + offset, len);
|
|
|
|
mDataPos += len;
|
|
|
|
mDataSize += len;
|
|
|
|
|
2011-09-29 03:19:47 +00:00
|
|
|
err = NO_ERROR;
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
if (numObjects > 0) {
|
|
|
|
// grow objects
|
|
|
|
if (mObjectsCapacity < mObjectsSize + numObjects) {
|
2015-06-08 21:45:14 +00:00
|
|
|
size_t newSize = ((mObjectsSize + numObjects)*3)/2;
|
2016-11-03 20:32:41 +00:00
|
|
|
if (newSize*sizeof(binder_size_t) < mObjectsSize) return NO_MEMORY; // overflow
|
2014-01-29 04:12:59 +00:00
|
|
|
binder_size_t *objects =
|
|
|
|
(binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
|
|
|
|
if (objects == (binder_size_t*)0) {
|
2008-10-21 14:00:00 +00:00
|
|
|
return NO_MEMORY;
|
|
|
|
}
|
|
|
|
mObjects = objects;
|
|
|
|
mObjectsCapacity = newSize;
|
|
|
|
}
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
// append and acquire objects
|
|
|
|
int idx = mObjectsSize;
|
|
|
|
for (int i = firstIndex; i <= lastIndex; i++) {
|
|
|
|
size_t off = objects[i] - offset + startPos;
|
|
|
|
mObjects[idx++] = off;
|
|
|
|
mObjectsSize++;
|
|
|
|
|
2009-05-22 20:20:23 +00:00
|
|
|
flat_binder_object* flat
|
2008-10-21 14:00:00 +00:00
|
|
|
= reinterpret_cast<flat_binder_object*>(mData + off);
|
2015-12-03 09:34:15 +00:00
|
|
|
#ifndef DISABLE_ASHMEM_TRACKING
|
2015-10-22 23:12:53 +00:00
|
|
|
acquire_object(proc, *flat, this, &mOpenAshmemSize);
|
2015-12-03 09:34:15 +00:00
|
|
|
#else
|
|
|
|
acquire_object(proc, *flat, this);
|
|
|
|
#endif
|
2008-10-21 14:00:00 +00:00
|
|
|
|
|
|
|
if (flat->type == BINDER_TYPE_FD) {
|
2009-05-22 20:20:23 +00:00
|
|
|
// If this is a file descriptor, we need to dup it so the
|
|
|
|
// new Parcel now owns its own fd, and can declare that we
|
|
|
|
// officially know we have fds.
|
|
|
|
flat->handle = dup(flat->handle);
|
2014-01-29 04:12:59 +00:00
|
|
|
flat->cookie = 1;
|
2008-10-21 14:00:00 +00:00
|
|
|
mHasFds = mFdsKnown = true;
|
2011-09-29 03:19:47 +00:00
|
|
|
if (!mAllowFds) {
|
|
|
|
err = FDS_NOT_ALLOWED;
|
|
|
|
}
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-09-29 03:19:47 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2014-11-12 00:44:25 +00:00
|
|
|
bool Parcel::allowFds() const
|
|
|
|
{
|
|
|
|
return mAllowFds;
|
|
|
|
}
|
|
|
|
|
2011-10-04 04:09:35 +00:00
|
|
|
bool Parcel::pushAllowFds(bool allowFds)
|
2011-09-29 03:19:47 +00:00
|
|
|
{
|
|
|
|
const bool origValue = mAllowFds;
|
2011-10-04 04:09:35 +00:00
|
|
|
if (!allowFds) {
|
|
|
|
mAllowFds = false;
|
|
|
|
}
|
2011-09-29 03:19:47 +00:00
|
|
|
return origValue;
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
2011-10-04 04:09:35 +00:00
|
|
|
void Parcel::restoreAllowFds(bool lastValue)
|
|
|
|
{
|
|
|
|
mAllowFds = lastValue;
|
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
bool Parcel::hasFileDescriptors() const
|
|
|
|
{
|
|
|
|
if (!mFdsKnown) {
|
|
|
|
scanForFds();
|
|
|
|
}
|
|
|
|
return mHasFds;
|
|
|
|
}
|
|
|
|
|
2010-06-18 20:07:53 +00:00
|
|
|
// Write RPC headers. (previously just the interface token)
|
2008-10-21 14:00:00 +00:00
|
|
|
status_t Parcel::writeInterfaceToken(const String16& interface)
|
|
|
|
{
|
2010-07-07 23:06:39 +00:00
|
|
|
writeInt32(IPCThreadState::self()->getStrictModePolicy() |
|
|
|
|
STRICT_MODE_PENALTY_GATHER);
|
2008-10-21 14:00:00 +00:00
|
|
|
// currently the interface identification token is just its name as a string
|
|
|
|
return writeString16(interface);
|
|
|
|
}
|
|
|
|
|
2009-05-23 02:00:22 +00:00
|
|
|
bool Parcel::checkInterface(IBinder* binder) const
|
|
|
|
{
|
2010-06-18 20:07:53 +00:00
|
|
|
return enforceInterface(binder->getInterfaceDescriptor());
|
2009-05-23 02:00:22 +00:00
|
|
|
}
|
|
|
|
|
2010-07-07 23:06:39 +00:00
|
|
|
bool Parcel::enforceInterface(const String16& interface,
|
2010-07-27 16:49:11 +00:00
|
|
|
IPCThreadState* threadState) const
|
2008-10-21 14:00:00 +00:00
|
|
|
{
|
2010-07-27 16:49:11 +00:00
|
|
|
int32_t strictPolicy = readInt32();
|
|
|
|
if (threadState == NULL) {
|
|
|
|
threadState = IPCThreadState::self();
|
2010-07-07 23:06:39 +00:00
|
|
|
}
|
2010-08-30 23:01:16 +00:00
|
|
|
if ((threadState->getLastTransactionBinderFlags() &
|
|
|
|
IBinder::FLAG_ONEWAY) != 0) {
|
|
|
|
// For one-way calls, the callee is running entirely
|
|
|
|
// disconnected from the caller, so disable StrictMode entirely.
|
|
|
|
// Not only does disk/network usage not impact the caller, but
|
|
|
|
// there's no way to commuicate back any violations anyway.
|
|
|
|
threadState->setStrictModePolicy(0);
|
|
|
|
} else {
|
|
|
|
threadState->setStrictModePolicy(strictPolicy);
|
|
|
|
}
|
2009-05-23 02:00:22 +00:00
|
|
|
const String16 str(readString16());
|
2008-10-21 14:00:00 +00:00
|
|
|
if (str == interface) {
|
|
|
|
return true;
|
|
|
|
} else {
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGW("**** enforceInterface() expected '%s' but read '%s'",
|
2008-10-21 14:00:00 +00:00
|
|
|
String8(interface).string(), String8(str).string());
|
|
|
|
return false;
|
|
|
|
}
|
2010-06-18 20:07:53 +00:00
|
|
|
}
|
2008-10-21 14:00:00 +00:00
|
|
|
|
2014-01-29 04:12:59 +00:00
|
|
|
const binder_size_t* Parcel::objects() const
|
2008-10-21 14:00:00 +00:00
|
|
|
{
|
|
|
|
return mObjects;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t Parcel::objectsCount() const
|
|
|
|
{
|
|
|
|
return mObjectsSize;
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t Parcel::errorCheck() const
|
|
|
|
{
|
|
|
|
return mError;
|
|
|
|
}
|
|
|
|
|
|
|
|
void Parcel::setError(status_t err)
|
|
|
|
{
|
|
|
|
mError = err;
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t Parcel::finishWrite(size_t len)
|
|
|
|
{
|
2015-04-02 16:36:02 +00:00
|
|
|
if (len > INT32_MAX) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
return BAD_VALUE;
|
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
//printf("Finish write of %d\n", len);
|
|
|
|
mDataPos += len;
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
|
2008-10-21 14:00:00 +00:00
|
|
|
if (mDataPos > mDataSize) {
|
|
|
|
mDataSize = mDataPos;
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
//printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
|
|
|
|
return NO_ERROR;
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t Parcel::writeUnpadded(const void* data, size_t len)
|
|
|
|
{
|
2015-04-02 16:36:02 +00:00
|
|
|
if (len > INT32_MAX) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
return BAD_VALUE;
|
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
size_t end = mDataPos + len;
|
|
|
|
if (end < mDataPos) {
|
|
|
|
// integer overflow
|
|
|
|
return BAD_VALUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (end <= mDataCapacity) {
|
|
|
|
restart_write:
|
|
|
|
memcpy(mData+mDataPos, data, len);
|
|
|
|
return finishWrite(len);
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t err = growData(len);
|
|
|
|
if (err == NO_ERROR) goto restart_write;
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t Parcel::write(const void* data, size_t len)
|
|
|
|
{
|
2015-04-02 16:36:02 +00:00
|
|
|
if (len > INT32_MAX) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
return BAD_VALUE;
|
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
void* const d = writeInplace(len);
|
|
|
|
if (d) {
|
|
|
|
memcpy(d, data, len);
|
|
|
|
return NO_ERROR;
|
|
|
|
}
|
|
|
|
return mError;
|
|
|
|
}
|
|
|
|
|
|
|
|
void* Parcel::writeInplace(size_t len)
|
|
|
|
{
|
2015-04-02 16:36:02 +00:00
|
|
|
if (len > INT32_MAX) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
const size_t padded = pad_size(len);
|
2008-10-21 14:00:00 +00:00
|
|
|
|
|
|
|
// sanity check for integer overflow
|
|
|
|
if (mDataPos+padded < mDataPos) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((mDataPos+padded) <= mDataCapacity) {
|
|
|
|
restart_write:
|
|
|
|
//printf("Writing %ld bytes, padded to %ld\n", len, padded);
|
|
|
|
uint8_t* const data = mData+mDataPos;
|
|
|
|
|
|
|
|
// Need to pad at end?
|
|
|
|
if (padded != len) {
|
|
|
|
#if BYTE_ORDER == BIG_ENDIAN
|
|
|
|
static const uint32_t mask[4] = {
|
|
|
|
0x00000000, 0xffffff00, 0xffff0000, 0xff000000
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
#if BYTE_ORDER == LITTLE_ENDIAN
|
|
|
|
static const uint32_t mask[4] = {
|
|
|
|
0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
//printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len],
|
|
|
|
// *reinterpret_cast<void**>(data+padded-4));
|
|
|
|
*reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
|
|
|
|
}
|
|
|
|
|
|
|
|
finishWrite(padded);
|
|
|
|
return data;
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t err = growData(padded);
|
|
|
|
if (err == NO_ERROR) goto restart_write;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t Parcel::writeInt32(int32_t val)
|
|
|
|
{
|
2009-08-17 20:33:27 +00:00
|
|
|
return writeAligned(val);
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
2014-12-01 18:01:10 +00:00
|
|
|
|
|
|
|
status_t Parcel::writeUint32(uint32_t val)
|
|
|
|
{
|
|
|
|
return writeAligned(val);
|
|
|
|
}
|
|
|
|
|
2013-10-16 17:57:51 +00:00
|
|
|
status_t Parcel::writeInt32Array(size_t len, const int32_t *val) {
|
2015-04-02 16:36:02 +00:00
|
|
|
if (len > INT32_MAX) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
return BAD_VALUE;
|
|
|
|
}
|
|
|
|
|
2013-10-16 17:57:51 +00:00
|
|
|
if (!val) {
|
2015-06-30 21:03:55 +00:00
|
|
|
return writeInt32(-1);
|
2013-10-16 17:57:51 +00:00
|
|
|
}
|
2015-06-30 21:03:55 +00:00
|
|
|
status_t ret = writeInt32(static_cast<uint32_t>(len));
|
2013-10-16 17:57:51 +00:00
|
|
|
if (ret == NO_ERROR) {
|
|
|
|
ret = write(val, len * sizeof(*val));
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
2014-03-13 21:17:40 +00:00
|
|
|
status_t Parcel::writeByteArray(size_t len, const uint8_t *val) {
|
2015-04-02 16:36:02 +00:00
|
|
|
if (len > INT32_MAX) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
return BAD_VALUE;
|
|
|
|
}
|
|
|
|
|
2014-03-13 21:17:40 +00:00
|
|
|
if (!val) {
|
2015-06-30 21:03:55 +00:00
|
|
|
return writeInt32(-1);
|
2014-03-13 21:17:40 +00:00
|
|
|
}
|
2015-06-30 21:03:55 +00:00
|
|
|
status_t ret = writeInt32(static_cast<uint32_t>(len));
|
2014-03-13 21:17:40 +00:00
|
|
|
if (ret == NO_ERROR) {
|
|
|
|
ret = write(val, len * sizeof(*val));
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
2008-10-21 14:00:00 +00:00
|
|
|
|
|
|
|
status_t Parcel::writeInt64(int64_t val)
|
|
|
|
{
|
2009-08-17 20:33:27 +00:00
|
|
|
return writeAligned(val);
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
2015-03-16 18:11:07 +00:00
|
|
|
status_t Parcel::writeUint64(uint64_t val)
|
|
|
|
{
|
|
|
|
return writeAligned(val);
|
|
|
|
}
|
|
|
|
|
2013-11-05 16:53:55 +00:00
|
|
|
status_t Parcel::writePointer(uintptr_t val)
|
|
|
|
{
|
2014-01-29 04:12:59 +00:00
|
|
|
return writeAligned<binder_uintptr_t>(val);
|
2013-11-05 16:53:55 +00:00
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
status_t Parcel::writeFloat(float val)
|
|
|
|
{
|
2009-08-17 20:33:27 +00:00
|
|
|
return writeAligned(val);
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
2013-01-11 23:00:55 +00:00
|
|
|
#if defined(__mips__) && defined(__mips_hard_float)
|
|
|
|
|
|
|
|
status_t Parcel::writeDouble(double val)
|
|
|
|
{
|
|
|
|
union {
|
|
|
|
double d;
|
|
|
|
unsigned long long ll;
|
|
|
|
} u;
|
|
|
|
u.d = val;
|
|
|
|
return writeAligned(u.ll);
|
|
|
|
}
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
status_t Parcel::writeDouble(double val)
|
|
|
|
{
|
2009-08-17 20:33:27 +00:00
|
|
|
return writeAligned(val);
|
|
|
|
}
|
2008-10-21 14:00:00 +00:00
|
|
|
|
2013-01-11 23:00:55 +00:00
|
|
|
#endif
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
status_t Parcel::writeCString(const char* str)
|
|
|
|
{
|
|
|
|
return write(str, strlen(str)+1);
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t Parcel::writeString8(const String8& str)
|
|
|
|
{
|
|
|
|
status_t err = writeInt32(str.bytes());
|
2010-12-15 07:40:00 +00:00
|
|
|
// only write string if its length is more than zero characters,
|
|
|
|
// as readString8 will only read if the length field is non-zero.
|
|
|
|
// this is slightly different from how writeString16 works.
|
|
|
|
if (str.bytes() > 0 && err == NO_ERROR) {
|
2008-10-21 14:00:00 +00:00
|
|
|
err = write(str.string(), str.bytes()+1);
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t Parcel::writeString16(const String16& str)
|
|
|
|
{
|
|
|
|
return writeString16(str.string(), str.size());
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t Parcel::writeString16(const char16_t* str, size_t len)
|
|
|
|
{
|
|
|
|
if (str == NULL) return writeInt32(-1);
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
status_t err = writeInt32(len);
|
|
|
|
if (err == NO_ERROR) {
|
|
|
|
len *= sizeof(char16_t);
|
|
|
|
uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
|
|
|
|
if (data) {
|
|
|
|
memcpy(data, str, len);
|
|
|
|
*reinterpret_cast<char16_t*>(data+len) = 0;
|
|
|
|
return NO_ERROR;
|
|
|
|
}
|
|
|
|
err = mError;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
|
|
|
|
{
|
|
|
|
return flatten_binder(ProcessState::self(), val, this);
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t Parcel::writeWeakBinder(const wp<IBinder>& val)
|
|
|
|
{
|
|
|
|
return flatten_binder(ProcessState::self(), val, this);
|
|
|
|
}
|
|
|
|
|
2009-05-21 23:29:38 +00:00
|
|
|
status_t Parcel::writeNativeHandle(const native_handle* handle)
|
2009-01-20 22:03:58 +00:00
|
|
|
{
|
2009-07-31 23:12:13 +00:00
|
|
|
if (!handle || handle->version != sizeof(native_handle))
|
2009-01-20 22:03:58 +00:00
|
|
|
return BAD_TYPE;
|
|
|
|
|
|
|
|
status_t err;
|
2009-05-21 23:29:38 +00:00
|
|
|
err = writeInt32(handle->numFds);
|
2009-01-20 22:03:58 +00:00
|
|
|
if (err != NO_ERROR) return err;
|
2009-03-04 03:31:44 +00:00
|
|
|
|
2009-05-21 23:29:38 +00:00
|
|
|
err = writeInt32(handle->numInts);
|
2009-01-20 22:03:58 +00:00
|
|
|
if (err != NO_ERROR) return err;
|
2009-03-04 03:31:44 +00:00
|
|
|
|
2009-05-21 23:29:38 +00:00
|
|
|
for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
|
|
|
|
err = writeDupFileDescriptor(handle->data[i]);
|
2009-03-04 03:31:44 +00:00
|
|
|
|
|
|
|
if (err != NO_ERROR) {
|
2011-12-20 16:23:08 +00:00
|
|
|
ALOGD("write native handle, write dup fd failed");
|
2009-03-04 03:31:44 +00:00
|
|
|
return err;
|
|
|
|
}
|
2009-05-21 23:29:38 +00:00
|
|
|
err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
|
2009-01-20 22:03:58 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2011-11-05 02:01:44 +00:00
|
|
|
status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership)
|
2008-10-21 14:00:00 +00:00
|
|
|
{
|
|
|
|
flat_binder_object obj;
|
|
|
|
obj.type = BINDER_TYPE_FD;
|
|
|
|
obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
|
2014-02-19 05:10:29 +00:00
|
|
|
obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
|
2008-10-21 14:00:00 +00:00
|
|
|
obj.handle = fd;
|
2014-01-29 04:12:59 +00:00
|
|
|
obj.cookie = takeOwnership ? 1 : 0;
|
2008-10-21 14:00:00 +00:00
|
|
|
return writeObject(obj, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t Parcel::writeDupFileDescriptor(int fd)
|
|
|
|
{
|
2011-11-05 03:19:33 +00:00
|
|
|
int dupFd = dup(fd);
|
|
|
|
if (dupFd < 0) {
|
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/);
|
|
|
|
if (err) {
|
|
|
|
close(dupFd);
|
|
|
|
}
|
|
|
|
return err;
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
2014-11-12 00:44:25 +00:00
|
|
|
status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob)
|
2011-09-24 04:17:56 +00:00
|
|
|
{
|
2015-04-02 16:36:02 +00:00
|
|
|
if (len > INT32_MAX) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
return BAD_VALUE;
|
|
|
|
}
|
|
|
|
|
2014-11-12 00:44:25 +00:00
|
|
|
status_t status;
|
|
|
|
if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) {
|
2011-10-20 10:56:00 +00:00
|
|
|
ALOGV("writeBlob: write in place");
|
2014-11-12 00:44:25 +00:00
|
|
|
status = writeInt32(BLOB_INPLACE);
|
2011-09-24 04:17:56 +00:00
|
|
|
if (status) return status;
|
|
|
|
|
|
|
|
void* ptr = writeInplace(len);
|
|
|
|
if (!ptr) return NO_MEMORY;
|
|
|
|
|
2014-11-12 00:44:25 +00:00
|
|
|
outBlob->init(-1, ptr, len, false);
|
2011-09-24 04:17:56 +00:00
|
|
|
return NO_ERROR;
|
|
|
|
}
|
|
|
|
|
2011-10-20 10:56:00 +00:00
|
|
|
ALOGV("writeBlob: write to ashmem");
|
2011-09-24 04:17:56 +00:00
|
|
|
int fd = ashmem_create_region("Parcel Blob", len);
|
|
|
|
if (fd < 0) return NO_MEMORY;
|
|
|
|
|
|
|
|
int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
|
|
|
|
if (result < 0) {
|
2011-10-10 21:50:10 +00:00
|
|
|
status = result;
|
2011-09-24 04:17:56 +00:00
|
|
|
} else {
|
|
|
|
void* ptr = ::mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
|
|
|
|
if (ptr == MAP_FAILED) {
|
|
|
|
status = -errno;
|
|
|
|
} else {
|
2014-11-12 00:44:25 +00:00
|
|
|
if (!mutableCopy) {
|
|
|
|
result = ashmem_set_prot_region(fd, PROT_READ);
|
|
|
|
}
|
2011-09-24 04:17:56 +00:00
|
|
|
if (result < 0) {
|
2011-10-10 21:50:10 +00:00
|
|
|
status = result;
|
2011-09-24 04:17:56 +00:00
|
|
|
} else {
|
2014-11-12 00:44:25 +00:00
|
|
|
status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE);
|
2011-09-24 04:17:56 +00:00
|
|
|
if (!status) {
|
2011-11-05 02:01:44 +00:00
|
|
|
status = writeFileDescriptor(fd, true /*takeOwnership*/);
|
2011-09-24 04:17:56 +00:00
|
|
|
if (!status) {
|
2014-11-12 00:44:25 +00:00
|
|
|
outBlob->init(fd, ptr, len, mutableCopy);
|
2011-09-24 04:17:56 +00:00
|
|
|
return NO_ERROR;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
::munmap(ptr, len);
|
|
|
|
}
|
|
|
|
::close(fd);
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2014-11-12 00:44:25 +00:00
|
|
|
status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd)
|
|
|
|
{
|
|
|
|
// Must match up with what's done in writeBlob.
|
|
|
|
if (!mAllowFds) return FDS_NOT_ALLOWED;
|
|
|
|
status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE);
|
|
|
|
if (status) return status;
|
|
|
|
return writeDupFileDescriptor(fd);
|
|
|
|
}
|
|
|
|
|
2013-07-30 04:24:40 +00:00
|
|
|
status_t Parcel::write(const FlattenableHelperInterface& val)
|
2010-02-12 01:30:52 +00:00
|
|
|
{
|
|
|
|
status_t err;
|
|
|
|
|
|
|
|
// size if needed
|
2013-07-30 04:24:40 +00:00
|
|
|
const size_t len = val.getFlattenedSize();
|
|
|
|
const size_t fd_count = val.getFdCount();
|
2010-02-12 01:30:52 +00:00
|
|
|
|
2015-04-02 16:36:02 +00:00
|
|
|
if ((len > INT32_MAX) || (fd_count > INT32_MAX)) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
return BAD_VALUE;
|
|
|
|
}
|
|
|
|
|
2010-02-12 01:30:52 +00:00
|
|
|
err = this->writeInt32(len);
|
|
|
|
if (err) return err;
|
|
|
|
|
|
|
|
err = this->writeInt32(fd_count);
|
|
|
|
if (err) return err;
|
|
|
|
|
|
|
|
// payload
|
2018-04-04 09:46:56 +00:00
|
|
|
void* const buf = this->writeInplace(len);
|
2010-02-12 01:30:52 +00:00
|
|
|
if (buf == NULL)
|
|
|
|
return BAD_VALUE;
|
|
|
|
|
|
|
|
int* fds = NULL;
|
|
|
|
if (fd_count) {
|
|
|
|
fds = new int[fd_count];
|
|
|
|
}
|
|
|
|
|
|
|
|
err = val.flatten(buf, len, fds, fd_count);
|
|
|
|
for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
|
|
|
|
err = this->writeDupFileDescriptor( fds[i] );
|
|
|
|
}
|
|
|
|
|
|
|
|
if (fd_count) {
|
|
|
|
delete [] fds;
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
|
|
|
|
{
|
|
|
|
const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
|
|
|
|
const bool enoughObjects = mObjectsSize < mObjectsCapacity;
|
|
|
|
if (enoughData && enoughObjects) {
|
|
|
|
restart_write:
|
|
|
|
*reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
// remember if it's a file descriptor
|
|
|
|
if (val.type == BINDER_TYPE_FD) {
|
2011-09-29 03:19:47 +00:00
|
|
|
if (!mAllowFds) {
|
2015-06-04 01:44:15 +00:00
|
|
|
// fail before modifying our object index
|
2011-09-29 03:19:47 +00:00
|
|
|
return FDS_NOT_ALLOWED;
|
|
|
|
}
|
2008-10-21 14:00:00 +00:00
|
|
|
mHasFds = mFdsKnown = true;
|
|
|
|
}
|
|
|
|
|
2015-06-04 01:44:15 +00:00
|
|
|
// Need to write meta-data?
|
|
|
|
if (nullMetaData || val.binder != 0) {
|
|
|
|
mObjects[mObjectsSize] = mDataPos;
|
2015-12-03 09:34:15 +00:00
|
|
|
#ifndef DISABLE_ASHMEM_TRACKING
|
2015-10-22 23:12:53 +00:00
|
|
|
acquire_object(ProcessState::self(), val, this, &mOpenAshmemSize);
|
2015-12-03 09:34:15 +00:00
|
|
|
#else
|
|
|
|
acquire_object(ProcessState::self(), val, this);
|
|
|
|
#endif
|
2015-06-04 01:44:15 +00:00
|
|
|
mObjectsSize++;
|
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
return finishWrite(sizeof(flat_binder_object));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!enoughData) {
|
|
|
|
const status_t err = growData(sizeof(val));
|
|
|
|
if (err != NO_ERROR) return err;
|
|
|
|
}
|
|
|
|
if (!enoughObjects) {
|
|
|
|
size_t newSize = ((mObjectsSize+2)*3)/2;
|
2016-11-03 20:32:41 +00:00
|
|
|
if (newSize*sizeof(binder_size_t) < mObjectsSize) return NO_MEMORY; // overflow
|
2014-01-29 04:12:59 +00:00
|
|
|
binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
|
2008-10-21 14:00:00 +00:00
|
|
|
if (objects == NULL) return NO_MEMORY;
|
|
|
|
mObjects = objects;
|
|
|
|
mObjectsCapacity = newSize;
|
|
|
|
}
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
goto restart_write;
|
|
|
|
}
|
|
|
|
|
Add Parcel::readExceptionCode() and Parcel::writeNoException()
Add native Parcel methods analogous to the Java versions.
Currently, these don't do much, but upcoming StrictMode work changes
the RPC calling conventions in some cases, so it's important that
everybody uses these consistently, rather than having a lot of code
trying to parse RPC responses out of Parcels themselves.
As a summary, the current convention that Java Binder services use is
to prepend the reply Parcel with an int32 signaling the exception
status:
0: no exception
-1: Security exception
-2: Bad Parcelable
-3: ...
-4: ...
-5: ...
... followed by Parceled String if the exception code is non-zero.
With an upcoming change, it'll be the case that a response Parcel can,
non-exceptionally return rich data in the header, and also return data
to the caller. The important thing to note in this new case is that
the first int32 in the reply parcel *will not be zero*, so anybody
manually checking for it with reply.readInt32() will get false
negative failures.
Short summary: If you're calling into a Java service and manually
checking the exception status with reply.readInt32(), change it to
reply.readExceptionCode().
Change-Id: I23f9a0e53a8cfbbd9759242cfde16723641afe04
2010-07-13 22:33:35 +00:00
|
|
|
status_t Parcel::writeNoException()
|
|
|
|
{
|
|
|
|
return writeInt32(0);
|
|
|
|
}
|
|
|
|
|
2014-02-06 01:42:44 +00:00
|
|
|
void Parcel::remove(size_t /*start*/, size_t /*amt*/)
|
2008-10-21 14:00:00 +00:00
|
|
|
{
|
|
|
|
LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
|
|
|
|
}
|
|
|
|
|
2017-11-18 02:25:05 +00:00
|
|
|
status_t Parcel::validateReadData(size_t upperBound) const
|
|
|
|
{
|
|
|
|
// Don't allow non-object reads on object data
|
|
|
|
if (mObjectsSorted || mObjectsSize <= 1) {
|
|
|
|
data_sorted:
|
|
|
|
// Expect to check only against the next object
|
|
|
|
if (mNextObjectHint < mObjectsSize && upperBound > mObjects[mNextObjectHint]) {
|
|
|
|
// For some reason the current read position is greater than the next object
|
|
|
|
// hint. Iterate until we find the right object
|
|
|
|
size_t nextObject = mNextObjectHint;
|
|
|
|
do {
|
|
|
|
if (mDataPos < mObjects[nextObject] + sizeof(flat_binder_object)) {
|
|
|
|
// Requested info overlaps with an object
|
|
|
|
ALOGE("Attempt to read from protected data in Parcel %p", this);
|
|
|
|
return PERMISSION_DENIED;
|
|
|
|
}
|
|
|
|
nextObject++;
|
|
|
|
} while (nextObject < mObjectsSize && upperBound > mObjects[nextObject]);
|
|
|
|
mNextObjectHint = nextObject;
|
|
|
|
}
|
|
|
|
return NO_ERROR;
|
|
|
|
}
|
|
|
|
// Quickly determine if mObjects is sorted.
|
|
|
|
binder_size_t* currObj = mObjects + mObjectsSize - 1;
|
|
|
|
binder_size_t* prevObj = currObj;
|
|
|
|
while (currObj > mObjects) {
|
|
|
|
prevObj--;
|
|
|
|
if(*prevObj > *currObj) {
|
|
|
|
goto data_unsorted;
|
|
|
|
}
|
|
|
|
currObj--;
|
|
|
|
}
|
|
|
|
mObjectsSorted = true;
|
|
|
|
goto data_sorted;
|
|
|
|
|
|
|
|
data_unsorted:
|
|
|
|
// Insertion Sort mObjects
|
|
|
|
// Great for mostly sorted lists. If randomly sorted or reverse ordered mObjects become common,
|
|
|
|
// switch to std::sort(mObjects, mObjects + mObjectsSize);
|
|
|
|
for (binder_size_t* iter0 = mObjects + 1; iter0 < mObjects + mObjectsSize; iter0++) {
|
|
|
|
binder_size_t temp = *iter0;
|
|
|
|
binder_size_t* iter1 = iter0 - 1;
|
|
|
|
while (iter1 >= mObjects && *iter1 > temp) {
|
|
|
|
*(iter1 + 1) = *iter1;
|
|
|
|
iter1--;
|
|
|
|
}
|
|
|
|
*(iter1 + 1) = temp;
|
|
|
|
}
|
|
|
|
mNextObjectHint = 0;
|
|
|
|
mObjectsSorted = true;
|
|
|
|
goto data_sorted;
|
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
status_t Parcel::read(void* outData, size_t len) const
|
|
|
|
{
|
2015-04-02 16:36:02 +00:00
|
|
|
if (len > INT32_MAX) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
return BAD_VALUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
|
|
|
|
&& len <= pad_size(len)) {
|
2017-11-18 02:25:05 +00:00
|
|
|
if (mObjectsSize > 0) {
|
|
|
|
status_t err = validateReadData(mDataPos + pad_size(len));
|
2018-04-17 23:52:40 +00:00
|
|
|
if(err != NO_ERROR) {
|
|
|
|
// Still increment the data position by the expected length
|
|
|
|
mDataPos += pad_size(len);
|
|
|
|
ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
|
|
|
|
return err;
|
|
|
|
}
|
2017-11-18 02:25:05 +00:00
|
|
|
}
|
2008-10-21 14:00:00 +00:00
|
|
|
memcpy(outData, mData+mDataPos, len);
|
2015-04-02 16:36:02 +00:00
|
|
|
mDataPos += pad_size(len);
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
|
2008-10-21 14:00:00 +00:00
|
|
|
return NO_ERROR;
|
|
|
|
}
|
|
|
|
return NOT_ENOUGH_DATA;
|
|
|
|
}
|
|
|
|
|
|
|
|
const void* Parcel::readInplace(size_t len) const
|
|
|
|
{
|
2015-04-02 16:36:02 +00:00
|
|
|
if (len > INT32_MAX) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
|
|
|
|
&& len <= pad_size(len)) {
|
2017-11-18 02:25:05 +00:00
|
|
|
if (mObjectsSize > 0) {
|
|
|
|
status_t err = validateReadData(mDataPos + pad_size(len));
|
2018-04-17 23:52:40 +00:00
|
|
|
if(err != NO_ERROR) {
|
|
|
|
// Still increment the data position by the expected length
|
|
|
|
mDataPos += pad_size(len);
|
|
|
|
ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
|
|
|
|
return NULL;
|
|
|
|
}
|
2017-11-18 02:25:05 +00:00
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
const void* data = mData+mDataPos;
|
2015-04-02 16:36:02 +00:00
|
|
|
mDataPos += pad_size(len);
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
|
2008-10-21 14:00:00 +00:00
|
|
|
return data;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2009-08-17 20:33:27 +00:00
|
|
|
template<class T>
|
|
|
|
status_t Parcel::readAligned(T *pArg) const {
|
2015-04-02 16:36:02 +00:00
|
|
|
COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
|
2009-08-17 20:33:27 +00:00
|
|
|
|
|
|
|
if ((mDataPos+sizeof(T)) <= mDataSize) {
|
2017-11-18 02:25:05 +00:00
|
|
|
if (mObjectsSize > 0) {
|
|
|
|
status_t err = validateReadData(mDataPos + sizeof(T));
|
2018-04-17 23:52:40 +00:00
|
|
|
if(err != NO_ERROR) {
|
|
|
|
// Still increment the data position by the expected length
|
|
|
|
mDataPos += sizeof(T);
|
|
|
|
return err;
|
|
|
|
}
|
2017-11-18 02:25:05 +00:00
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
const void* data = mData+mDataPos;
|
2009-08-17 20:33:27 +00:00
|
|
|
mDataPos += sizeof(T);
|
|
|
|
*pArg = *reinterpret_cast<const T*>(data);
|
2008-10-21 14:00:00 +00:00
|
|
|
return NO_ERROR;
|
|
|
|
} else {
|
|
|
|
return NOT_ENOUGH_DATA;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-08-17 20:33:27 +00:00
|
|
|
template<class T>
|
|
|
|
T Parcel::readAligned() const {
|
|
|
|
T result;
|
|
|
|
if (readAligned(&result) != NO_ERROR) {
|
|
|
|
result = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
template<class T>
|
|
|
|
status_t Parcel::writeAligned(T val) {
|
2015-04-02 16:36:02 +00:00
|
|
|
COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
|
2009-08-17 20:33:27 +00:00
|
|
|
|
|
|
|
if ((mDataPos+sizeof(val)) <= mDataCapacity) {
|
|
|
|
restart_write:
|
|
|
|
*reinterpret_cast<T*>(mData+mDataPos) = val;
|
|
|
|
return finishWrite(sizeof(val));
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t err = growData(sizeof(val));
|
|
|
|
if (err == NO_ERROR) goto restart_write;
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t Parcel::readInt32(int32_t *pArg) const
|
|
|
|
{
|
|
|
|
return readAligned(pArg);
|
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
int32_t Parcel::readInt32() const
|
|
|
|
{
|
2009-08-17 20:33:27 +00:00
|
|
|
return readAligned<int32_t>();
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
2014-12-01 18:01:10 +00:00
|
|
|
status_t Parcel::readUint32(uint32_t *pArg) const
|
|
|
|
{
|
|
|
|
return readAligned(pArg);
|
|
|
|
}
|
|
|
|
|
|
|
|
uint32_t Parcel::readUint32() const
|
|
|
|
{
|
|
|
|
return readAligned<uint32_t>();
|
|
|
|
}
|
2008-10-21 14:00:00 +00:00
|
|
|
|
|
|
|
status_t Parcel::readInt64(int64_t *pArg) const
|
|
|
|
{
|
2009-08-17 20:33:27 +00:00
|
|
|
return readAligned(pArg);
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int64_t Parcel::readInt64() const
|
|
|
|
{
|
2009-08-17 20:33:27 +00:00
|
|
|
return readAligned<int64_t>();
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
2015-03-16 18:11:07 +00:00
|
|
|
status_t Parcel::readUint64(uint64_t *pArg) const
|
|
|
|
{
|
|
|
|
return readAligned(pArg);
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t Parcel::readUint64() const
|
|
|
|
{
|
|
|
|
return readAligned<uint64_t>();
|
|
|
|
}
|
|
|
|
|
2013-11-05 16:53:55 +00:00
|
|
|
status_t Parcel::readPointer(uintptr_t *pArg) const
|
|
|
|
{
|
2014-01-29 04:12:59 +00:00
|
|
|
status_t ret;
|
|
|
|
binder_uintptr_t ptr;
|
|
|
|
ret = readAligned(&ptr);
|
|
|
|
if (!ret)
|
|
|
|
*pArg = ptr;
|
|
|
|
return ret;
|
2013-11-05 16:53:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
uintptr_t Parcel::readPointer() const
|
|
|
|
{
|
2014-01-29 04:12:59 +00:00
|
|
|
return readAligned<binder_uintptr_t>();
|
2013-11-05 16:53:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
status_t Parcel::readFloat(float *pArg) const
|
|
|
|
{
|
2009-08-17 20:33:27 +00:00
|
|
|
return readAligned(pArg);
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
float Parcel::readFloat() const
|
|
|
|
{
|
2009-08-17 20:33:27 +00:00
|
|
|
return readAligned<float>();
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
2013-01-11 23:00:55 +00:00
|
|
|
#if defined(__mips__) && defined(__mips_hard_float)
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
status_t Parcel::readDouble(double *pArg) const
|
|
|
|
{
|
2013-01-11 23:00:55 +00:00
|
|
|
union {
|
|
|
|
double d;
|
|
|
|
unsigned long long ll;
|
|
|
|
} u;
|
2014-06-04 14:04:29 +00:00
|
|
|
u.d = 0;
|
2013-01-11 23:00:55 +00:00
|
|
|
status_t status;
|
|
|
|
status = readAligned(&u.ll);
|
|
|
|
*pArg = u.d;
|
|
|
|
return status;
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
2013-01-11 23:00:55 +00:00
|
|
|
double Parcel::readDouble() const
|
|
|
|
{
|
|
|
|
union {
|
|
|
|
double d;
|
|
|
|
unsigned long long ll;
|
|
|
|
} u;
|
|
|
|
u.ll = readAligned<unsigned long long>();
|
|
|
|
return u.d;
|
|
|
|
}
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
|
|
|
status_t Parcel::readDouble(double *pArg) const
|
|
|
|
{
|
|
|
|
return readAligned(pArg);
|
|
|
|
}
|
2008-10-21 14:00:00 +00:00
|
|
|
|
|
|
|
double Parcel::readDouble() const
|
|
|
|
{
|
2009-08-17 20:33:27 +00:00
|
|
|
return readAligned<double>();
|
|
|
|
}
|
|
|
|
|
2013-01-11 23:00:55 +00:00
|
|
|
#endif
|
|
|
|
|
2009-08-17 20:33:27 +00:00
|
|
|
status_t Parcel::readIntPtr(intptr_t *pArg) const
|
|
|
|
{
|
|
|
|
return readAligned(pArg);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
intptr_t Parcel::readIntPtr() const
|
|
|
|
{
|
|
|
|
return readAligned<intptr_t>();
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
const char* Parcel::readCString() const
|
|
|
|
{
|
|
|
|
const size_t avail = mDataSize-mDataPos;
|
|
|
|
if (avail > 0) {
|
|
|
|
const char* str = reinterpret_cast<const char*>(mData+mDataPos);
|
|
|
|
// is the string's trailing NUL within the parcel's valid bounds?
|
|
|
|
const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
|
|
|
|
if (eos) {
|
|
|
|
const size_t len = eos - str;
|
2015-04-02 16:36:02 +00:00
|
|
|
mDataPos += pad_size(len+1);
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
|
2008-10-21 14:00:00 +00:00
|
|
|
return str;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
String8 Parcel::readString8() const
|
|
|
|
{
|
|
|
|
int32_t size = readInt32();
|
|
|
|
// watch for potential int overflow adding 1 for trailing NUL
|
|
|
|
if (size > 0 && size < INT32_MAX) {
|
|
|
|
const char* str = (const char*)readInplace(size+1);
|
|
|
|
if (str) return String8(str, size);
|
|
|
|
}
|
|
|
|
return String8();
|
|
|
|
}
|
|
|
|
|
|
|
|
String16 Parcel::readString16() const
|
|
|
|
{
|
|
|
|
size_t len;
|
|
|
|
const char16_t* str = readString16Inplace(&len);
|
|
|
|
if (str) return String16(str, len);
|
2012-01-06 19:20:56 +00:00
|
|
|
ALOGE("Reading a NULL string not supported here.");
|
2008-10-21 14:00:00 +00:00
|
|
|
return String16();
|
|
|
|
}
|
|
|
|
|
|
|
|
const char16_t* Parcel::readString16Inplace(size_t* outLen) const
|
|
|
|
{
|
|
|
|
int32_t size = readInt32();
|
|
|
|
// watch for potential int overflow from size+1
|
|
|
|
if (size >= 0 && size < INT32_MAX) {
|
|
|
|
*outLen = size;
|
|
|
|
const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
|
|
|
|
if (str != NULL) {
|
|
|
|
return str;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
*outLen = 0;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
sp<IBinder> Parcel::readStrongBinder() const
|
|
|
|
{
|
|
|
|
sp<IBinder> val;
|
|
|
|
unflatten_binder(ProcessState::self(), *this, &val);
|
|
|
|
return val;
|
|
|
|
}
|
|
|
|
|
|
|
|
wp<IBinder> Parcel::readWeakBinder() const
|
|
|
|
{
|
|
|
|
wp<IBinder> val;
|
|
|
|
unflatten_binder(ProcessState::self(), *this, &val);
|
|
|
|
return val;
|
|
|
|
}
|
|
|
|
|
Add Parcel::readExceptionCode() and Parcel::writeNoException()

Add native Parcel methods analogous to the Java versions.
Currently, these don't do much, but upcoming StrictMode work changes
the RPC calling conventions in some cases, so it's important that
everybody uses these consistently, rather than having a lot of code
trying to parse RPC responses out of Parcels themselves.

As a summary, the current convention that Java Binder services use is
to prepend the reply Parcel with an int32 signaling the exception
status:

0: no exception
-1: Security exception
-2: Bad Parcelable
-3: ...
-4: ...
-5: ...

... followed by a Parceled String if the exception code is non-zero.

With an upcoming change, it'll be the case that a response Parcel can,
non-exceptionally, return rich data in the header and also return data
to the caller. The important thing to note in this new case is that
the first int32 in the reply parcel *will not be zero*, so anybody
manually checking for it with reply.readInt32() will get false
negative failures.

Short summary: If you're calling into a Java service and manually
checking the exception status with reply.readInt32(), change it to
reply.readExceptionCode().

Change-Id: I23f9a0e53a8cfbbd9759242cfde16723641afe04
2010-07-13 22:33:35 +00:00
|
|
|
int32_t Parcel::readExceptionCode() const
|
|
|
|
{
|
|
|
|
int32_t exception_code = readAligned<int32_t>();
|
2010-07-12 18:05:38 +00:00
|
|
|
if (exception_code == EX_HAS_REPLY_HEADER) {
|
2011-05-03 13:44:00 +00:00
|
|
|
int32_t header_start = dataPosition();
|
2010-07-12 18:05:38 +00:00
|
|
|
int32_t header_size = readAligned<int32_t>();
|
|
|
|
        // Skip over fat response headers. Not used (or propagated) in
|
|
|
|
// native code
|
2011-05-03 13:44:00 +00:00
|
|
|
setDataPosition(header_start + header_size);
|
2010-07-12 18:05:38 +00:00
|
|
|
// And fat response headers are currently only used when there are no
|
|
|
|
// exceptions, so return no error:
|
|
|
|
return 0;
|
|
|
|
}
|
2010-07-13 22:33:35 +00:00
|
|
|
return exception_code;
|
|
|
|
}
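// A minimal caller-side sketch of the convention described in the commit
// message above; illustrative only, not part of the Parcel API. The interface
// token and transaction code are hypothetical. The key point: check the reply
// with readExceptionCode() rather than a raw readInt32(), so that fat reply
// headers (EX_HAS_REPLY_HEADER) are skipped transparently.
static status_t exampleCallIntoJavaService(const sp<IBinder>& binder, int32_t value,
        int32_t* outResult)
{
    Parcel data, reply;
    data.writeInterfaceToken(String16("com.example.IExampleService"));  // hypothetical name
    data.writeInt32(value);
    status_t err = binder->transact(IBinder::FIRST_CALL_TRANSACTION, data, &reply);
    if (err != NO_ERROR) return err;
    const int32_t exception = reply.readExceptionCode();  // 0 means no exception was thrown
    if (exception != 0) return UNKNOWN_ERROR;             // e.g. -1 == SecurityException
    return reply.readInt32(outResult);                    // result payload follows the header
}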
|
2009-01-20 22:03:58 +00:00
|
|
|
|
2009-05-21 23:29:38 +00:00
|
|
|
native_handle* Parcel::readNativeHandle() const
|
2009-01-20 22:03:58 +00:00
|
|
|
{
|
|
|
|
int numFds, numInts;
|
|
|
|
status_t err;
|
|
|
|
err = readInt32(&numFds);
|
|
|
|
if (err != NO_ERROR) return 0;
|
|
|
|
err = readInt32(&numInts);
|
|
|
|
if (err != NO_ERROR) return 0;
|
|
|
|
|
2009-05-21 23:29:38 +00:00
|
|
|
native_handle* h = native_handle_create(numFds, numInts);
|
2015-05-13 00:35:48 +00:00
|
|
|
if (!h) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-01-20 22:03:58 +00:00
|
|
|
for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
|
2009-02-14 00:34:38 +00:00
|
|
|
h->data[i] = dup(readFileDescriptor());
|
2016-04-26 15:44:09 +00:00
|
|
|
if (h->data[i] < 0) {
|
|
|
|
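            // dup() failed or the parcel contained a bad fd: close the fds
            // already duplicated before giving up on the whole handle.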
for (int j = 0; j < i; j++) {
|
|
|
|
close(h->data[j]);
|
|
|
|
}
|
|
|
|
native_handle_delete(h);
|
|
|
|
return 0;
|
|
|
|
}
|
2009-01-20 22:03:58 +00:00
|
|
|
}
|
|
|
|
err = read(h->data + numFds, sizeof(int)*numInts);
|
|
|
|
if (err != NO_ERROR) {
|
2009-05-21 23:29:38 +00:00
|
|
|
native_handle_close(h);
|
|
|
|
native_handle_delete(h);
|
2009-01-20 22:03:58 +00:00
|
|
|
h = 0;
|
|
|
|
}
|
|
|
|
return h;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
int Parcel::readFileDescriptor() const
|
|
|
|
{
|
|
|
|
const flat_binder_object* flat = readObject(true);
|
|
|
|
if (flat) {
|
|
|
|
switch (flat->type) {
|
|
|
|
case BINDER_TYPE_FD:
|
2014-05-30 23:35:57 +00:00
|
|
|
//ALOGI("Returning file descriptor %ld from parcel %p", flat->handle, this);
|
2008-10-21 14:00:00 +00:00
|
|
|
return flat->handle;
|
2014-05-30 23:35:57 +00:00
|
|
|
}
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
return BAD_TYPE;
|
|
|
|
}
|
|
|
|
|
2011-09-24 04:17:56 +00:00
|
|
|
status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
|
|
|
|
{
|
2014-11-12 00:44:25 +00:00
|
|
|
int32_t blobType;
|
|
|
|
status_t status = readInt32(&blobType);
|
2011-09-24 04:17:56 +00:00
|
|
|
if (status) return status;
|
|
|
|
|
2014-11-12 00:44:25 +00:00
|
|
|
if (blobType == BLOB_INPLACE) {
|
2011-10-20 10:56:00 +00:00
|
|
|
ALOGV("readBlob: read in place");
|
2011-09-24 04:17:56 +00:00
|
|
|
const void* ptr = readInplace(len);
|
|
|
|
if (!ptr) return BAD_VALUE;
|
|
|
|
|
2014-11-12 00:44:25 +00:00
|
|
|
outBlob->init(-1, const_cast<void*>(ptr), len, false);
|
2011-09-24 04:17:56 +00:00
|
|
|
return NO_ERROR;
|
|
|
|
}
|
|
|
|
|
2011-10-20 10:56:00 +00:00
|
|
|
ALOGV("readBlob: read from ashmem");
|
2014-11-12 00:44:25 +00:00
|
|
|
bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE);
|
2011-09-24 04:17:56 +00:00
|
|
|
int fd = readFileDescriptor();
|
|
|
|
if (fd == int(BAD_TYPE)) return BAD_VALUE;
|
|
|
|
|
2014-11-12 00:44:25 +00:00
|
|
|
void* ptr = ::mmap(NULL, len, isMutable ? PROT_READ | PROT_WRITE : PROT_READ,
|
|
|
|
MAP_SHARED, fd, 0);
|
2014-10-08 16:35:45 +00:00
|
|
|
if (ptr == MAP_FAILED) return NO_MEMORY;
|
2011-09-24 04:17:56 +00:00
|
|
|
|
2014-11-12 00:44:25 +00:00
|
|
|
outBlob->init(fd, ptr, len, isMutable);
|
2011-09-24 04:17:56 +00:00
|
|
|
return NO_ERROR;
|
|
|
|
}
|
|
|
|
|
2013-07-30 04:24:40 +00:00
|
|
|
status_t Parcel::read(FlattenableHelperInterface& val) const
|
2010-02-12 01:30:52 +00:00
|
|
|
{
|
|
|
|
// size
|
|
|
|
const size_t len = this->readInt32();
|
|
|
|
const size_t fd_count = this->readInt32();
|
|
|
|
|
2015-04-02 16:36:02 +00:00
|
|
|
if (len > INT32_MAX) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
return BAD_VALUE;
|
|
|
|
}
|
|
|
|
|
2010-02-12 01:30:52 +00:00
|
|
|
// payload
|
2015-04-02 16:36:02 +00:00
|
|
|
void const* const buf = this->readInplace(pad_size(len));
|
2010-02-12 01:30:52 +00:00
|
|
|
if (buf == NULL)
|
|
|
|
return BAD_VALUE;
|
|
|
|
|
|
|
|
int* fds = NULL;
|
|
|
|
if (fd_count) {
|
|
|
|
fds = new int[fd_count];
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t err = NO_ERROR;
|
|
|
|
for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
|
2014-11-04 16:36:31 +00:00
|
|
|
fds[i] = dup(this->readFileDescriptor());
|
2014-04-29 06:22:10 +00:00
|
|
|
if (fds[i] < 0) {
|
|
|
|
err = BAD_VALUE;
|
2014-11-04 16:36:31 +00:00
|
|
|
ALOGE("dup() failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s",
|
|
|
|
i, fds[i], fd_count, strerror(errno));
|
2014-04-29 06:22:10 +00:00
|
|
|
}
|
2010-02-12 01:30:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (err == NO_ERROR) {
|
|
|
|
err = val.unflatten(buf, len, fds, fd_count);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (fd_count) {
|
|
|
|
delete [] fds;
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
2008-10-21 14:00:00 +00:00
|
|
|
const flat_binder_object* Parcel::readObject(bool nullMetaData) const
|
|
|
|
{
|
|
|
|
const size_t DPOS = mDataPos;
|
|
|
|
if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
|
|
|
|
const flat_binder_object* obj
|
|
|
|
= reinterpret_cast<const flat_binder_object*>(mData+DPOS);
|
|
|
|
mDataPos = DPOS + sizeof(flat_binder_object);
|
2014-01-29 04:12:59 +00:00
|
|
|
if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) {
|
2009-01-20 22:03:58 +00:00
|
|
|
// When transferring a NULL object, we don't write it into
|
2008-10-21 14:00:00 +00:00
|
|
|
// the object list, so we don't want to check for it when
|
|
|
|
// reading.
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
|
2008-10-21 14:00:00 +00:00
|
|
|
return obj;
|
|
|
|
}
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
// Ensure that this object is valid...
|
2014-01-29 04:12:59 +00:00
|
|
|
binder_size_t* const OBJS = mObjects;
|
2008-10-21 14:00:00 +00:00
|
|
|
const size_t N = mObjectsSize;
|
|
|
|
size_t opos = mNextObjectHint;
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
if (N > 0) {
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
|
2008-10-21 14:00:00 +00:00
|
|
|
this, DPOS, opos);
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
// Start at the current hint position, looking for an object at
|
|
|
|
// the current data position.
|
|
|
|
if (opos < N) {
|
|
|
|
while (opos < (N-1) && OBJS[opos] < DPOS) {
|
|
|
|
opos++;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
opos = N-1;
|
|
|
|
}
|
|
|
|
if (OBJS[opos] == DPOS) {
|
|
|
|
// Found it!
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("Parcel %p found obj %zu at index %zu with forward search",
|
2008-10-21 14:00:00 +00:00
|
|
|
this, DPOS, opos);
|
|
|
|
mNextObjectHint = opos+1;
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
|
2008-10-21 14:00:00 +00:00
|
|
|
return obj;
|
|
|
|
}
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
// Look backwards for it...
|
|
|
|
while (opos > 0 && OBJS[opos] > DPOS) {
|
|
|
|
opos--;
|
|
|
|
}
|
|
|
|
if (OBJS[opos] == DPOS) {
|
|
|
|
// Found it!
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("Parcel %p found obj %zu at index %zu with backward search",
|
2008-10-21 14:00:00 +00:00
|
|
|
this, DPOS, opos);
|
|
|
|
mNextObjectHint = opos+1;
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
|
2008-10-21 14:00:00 +00:00
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
}
|
2014-02-06 01:42:44 +00:00
|
|
|
ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
|
2008-10-21 14:00:00 +00:00
|
|
|
this, DPOS);
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
void Parcel::closeFileDescriptors()
|
|
|
|
{
|
|
|
|
size_t i = mObjectsSize;
|
|
|
|
if (i > 0) {
|
2014-05-30 23:35:57 +00:00
|
|
|
//ALOGI("Closing file descriptors for %zu objects...", i);
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
while (i > 0) {
|
|
|
|
i--;
|
|
|
|
const flat_binder_object* flat
|
|
|
|
= reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
|
|
|
|
if (flat->type == BINDER_TYPE_FD) {
|
2014-05-30 23:35:57 +00:00
|
|
|
//ALOGI("Closing fd: %ld", flat->handle);
|
2008-10-21 14:00:00 +00:00
|
|
|
close(flat->handle);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-29 04:12:59 +00:00
|
|
|
uintptr_t Parcel::ipcData() const
|
2008-10-21 14:00:00 +00:00
|
|
|
{
|
2014-01-29 04:12:59 +00:00
|
|
|
return reinterpret_cast<uintptr_t>(mData);
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
size_t Parcel::ipcDataSize() const
|
|
|
|
{
|
|
|
|
return (mDataSize > mDataPos ? mDataSize : mDataPos);
|
|
|
|
}
|
|
|
|
|
2014-01-29 04:12:59 +00:00
|
|
|
uintptr_t Parcel::ipcObjects() const
|
2008-10-21 14:00:00 +00:00
|
|
|
{
|
2014-01-29 04:12:59 +00:00
|
|
|
return reinterpret_cast<uintptr_t>(mObjects);
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
size_t Parcel::ipcObjectsCount() const
|
|
|
|
{
|
|
|
|
return mObjectsSize;
|
|
|
|
}
|
|
|
|
|
|
|
|
void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
|
2014-01-29 04:12:59 +00:00
|
|
|
const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
|
2008-10-21 14:00:00 +00:00
|
|
|
{
|
2014-02-20 04:42:13 +00:00
|
|
|
binder_size_t minOffset = 0;
|
2008-10-21 14:00:00 +00:00
|
|
|
freeDataNoInit();
|
|
|
|
mError = NO_ERROR;
|
|
|
|
mData = const_cast<uint8_t*>(data);
|
|
|
|
mDataSize = mDataCapacity = dataSize;
|
2014-05-30 23:35:57 +00:00
|
|
|
//ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
|
2008-10-21 14:00:00 +00:00
|
|
|
mDataPos = 0;
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
|
2014-01-29 04:12:59 +00:00
|
|
|
mObjects = const_cast<binder_size_t*>(objects);
|
2008-10-21 14:00:00 +00:00
|
|
|
mObjectsSize = mObjectsCapacity = objectsCount;
|
|
|
|
mNextObjectHint = 0;
|
2017-11-18 02:25:05 +00:00
|
|
|
mObjectsSorted = false;
|
2008-10-21 14:00:00 +00:00
|
|
|
mOwner = relFunc;
|
|
|
|
mOwnerCookie = relCookie;
|
2014-02-14 03:22:08 +00:00
|
|
|
for (size_t i = 0; i < mObjectsSize; i++) {
|
2014-02-20 04:42:13 +00:00
|
|
|
binder_size_t offset = mObjects[i];
|
2014-02-14 03:22:08 +00:00
|
|
|
if (offset < minOffset) {
|
2014-11-20 19:50:23 +00:00
|
|
|
ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
|
2014-02-20 04:42:13 +00:00
|
|
|
__func__, (uint64_t)offset, (uint64_t)minOffset);
|
2014-02-14 03:22:08 +00:00
|
|
|
mObjectsSize = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
minOffset = offset + sizeof(flat_binder_object);
|
|
|
|
}
|
2008-10-21 14:00:00 +00:00
|
|
|
scanForFds();
|
|
|
|
}
|
|
|
|
|
2014-02-06 01:42:44 +00:00
|
|
|
void Parcel::print(TextOutput& to, uint32_t /*flags*/) const
|
2008-10-21 14:00:00 +00:00
|
|
|
{
|
|
|
|
to << "Parcel(";
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
if (errorCheck() != NO_ERROR) {
|
|
|
|
const status_t err = errorCheck();
|
2014-02-06 01:42:44 +00:00
|
|
|
to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
|
2008-10-21 14:00:00 +00:00
|
|
|
} else if (dataSize() > 0) {
|
|
|
|
const uint8_t* DATA = data();
|
|
|
|
to << indent << HexDump(DATA, dataSize()) << dedent;
|
2014-01-29 04:12:59 +00:00
|
|
|
const binder_size_t* OBJS = objects();
|
2008-10-21 14:00:00 +00:00
|
|
|
const size_t N = objectsCount();
|
|
|
|
for (size_t i=0; i<N; i++) {
|
|
|
|
const flat_binder_object* flat
|
|
|
|
= reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
|
|
|
|
to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
|
|
|
|
<< TypeCode(flat->type & 0x7f7f7f00)
|
|
|
|
<< " = " << flat->binder;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
to << "NULL";
|
|
|
|
}
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
to << ")";
|
|
|
|
}
|
|
|
|
|
|
|
|
void Parcel::releaseObjects()
|
|
|
|
{
|
|
|
|
const sp<ProcessState> proc(ProcessState::self());
|
|
|
|
size_t i = mObjectsSize;
|
|
|
|
uint8_t* const data = mData;
|
2014-01-29 04:12:59 +00:00
|
|
|
binder_size_t* const objects = mObjects;
|
2008-10-21 14:00:00 +00:00
|
|
|
while (i > 0) {
|
|
|
|
i--;
|
|
|
|
const flat_binder_object* flat
|
|
|
|
= reinterpret_cast<flat_binder_object*>(data+objects[i]);
|
2015-12-03 09:34:15 +00:00
|
|
|
#ifndef DISABLE_ASHMEM_TRACKING
|
2015-10-22 23:12:53 +00:00
|
|
|
release_object(proc, *flat, this, &mOpenAshmemSize);
|
2015-12-03 09:34:15 +00:00
|
|
|
#else
|
|
|
|
release_object(proc, *flat, this);
|
|
|
|
#endif
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void Parcel::acquireObjects()
|
|
|
|
{
|
|
|
|
const sp<ProcessState> proc(ProcessState::self());
|
|
|
|
size_t i = mObjectsSize;
|
|
|
|
uint8_t* const data = mData;
|
2014-01-29 04:12:59 +00:00
|
|
|
binder_size_t* const objects = mObjects;
|
2008-10-21 14:00:00 +00:00
|
|
|
while (i > 0) {
|
|
|
|
i--;
|
|
|
|
const flat_binder_object* flat
|
|
|
|
= reinterpret_cast<flat_binder_object*>(data+objects[i]);
|
2015-12-03 09:34:15 +00:00
|
|
|
#ifndef DISABLE_ASHMEM_TRACKING
|
2015-10-22 23:12:53 +00:00
|
|
|
acquire_object(proc, *flat, this, &mOpenAshmemSize);
|
2015-12-03 09:34:15 +00:00
|
|
|
#else
|
|
|
|
acquire_object(proc, *flat, this);
|
|
|
|
#endif
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void Parcel::freeData()
|
|
|
|
{
|
|
|
|
freeDataNoInit();
|
|
|
|
initState();
|
|
|
|
}
|
|
|
|
|
|
|
|
void Parcel::freeDataNoInit()
|
|
|
|
{
|
|
|
|
if (mOwner) {
|
2014-11-11 20:22:53 +00:00
|
|
|
LOG_ALLOC("Parcel %p: freeing other owner data", this);
|
2014-05-30 23:35:57 +00:00
|
|
|
//ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
|
2008-10-21 14:00:00 +00:00
|
|
|
mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
|
|
|
|
} else {
|
2014-11-11 20:22:53 +00:00
|
|
|
LOG_ALLOC("Parcel %p: freeing allocated data", this);
|
2008-10-21 14:00:00 +00:00
|
|
|
releaseObjects();
|
2014-11-11 20:22:53 +00:00
|
|
|
if (mData) {
|
|
|
|
LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
|
2014-11-14 01:07:40 +00:00
|
|
|
pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
|
2014-11-11 20:22:53 +00:00
|
|
|
gParcelGlobalAllocSize -= mDataCapacity;
|
|
|
|
gParcelGlobalAllocCount--;
|
2014-11-14 01:07:40 +00:00
|
|
|
pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
|
2014-11-11 20:22:53 +00:00
|
|
|
free(mData);
|
|
|
|
}
|
2008-10-21 14:00:00 +00:00
|
|
|
if (mObjects) free(mObjects);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t Parcel::growData(size_t len)
|
|
|
|
{
|
2015-04-02 16:36:02 +00:00
|
|
|
if (len > INT32_MAX) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
return BAD_VALUE;
|
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
size_t newSize = ((mDataSize+len)*3)/2;
|
|
|
|
return (newSize <= mDataSize)
|
|
|
|
? (status_t) NO_MEMORY
|
|
|
|
: continueWrite(newSize);
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t Parcel::restartWrite(size_t desired)
|
|
|
|
{
|
2015-04-02 16:36:02 +00:00
|
|
|
if (desired > INT32_MAX) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
return BAD_VALUE;
|
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
if (mOwner) {
|
|
|
|
freeData();
|
|
|
|
return continueWrite(desired);
|
|
|
|
}
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
uint8_t* data = (uint8_t*)realloc(mData, desired);
|
|
|
|
if (!data && desired > mDataCapacity) {
|
|
|
|
mError = NO_MEMORY;
|
|
|
|
return NO_MEMORY;
|
|
|
|
}
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
releaseObjects();
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
if (data) {
|
2014-11-11 20:22:53 +00:00
|
|
|
LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
|
2014-11-14 01:07:40 +00:00
|
|
|
pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
|
2014-11-11 20:22:53 +00:00
|
|
|
gParcelGlobalAllocSize += desired;
|
|
|
|
gParcelGlobalAllocSize -= mDataCapacity;
|
2014-11-14 01:07:40 +00:00
|
|
|
pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
|
2008-10-21 14:00:00 +00:00
|
|
|
mData = data;
|
|
|
|
mDataCapacity = desired;
|
|
|
|
}
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
mDataSize = mDataPos = 0;
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
|
|
|
|
ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
free(mObjects);
|
|
|
|
mObjects = NULL;
|
|
|
|
mObjectsSize = mObjectsCapacity = 0;
|
|
|
|
mNextObjectHint = 0;
|
2017-11-18 02:25:05 +00:00
|
|
|
mObjectsSorted = false;
|
2008-10-21 14:00:00 +00:00
|
|
|
mHasFds = false;
|
|
|
|
mFdsKnown = true;
|
2011-09-29 03:19:47 +00:00
|
|
|
mAllowFds = true;
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
return NO_ERROR;
|
|
|
|
}
|
|
|
|
|
|
|
|
status_t Parcel::continueWrite(size_t desired)
|
|
|
|
{
|
2015-04-02 16:36:02 +00:00
|
|
|
if (desired > INT32_MAX) {
|
|
|
|
// don't accept size_t values which may have come from an
|
|
|
|
// inadvertent conversion from a negative int.
|
|
|
|
return BAD_VALUE;
|
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
// If shrinking, first adjust for any objects that appear
|
|
|
|
// after the new data size.
|
|
|
|
size_t objectsSize = mObjectsSize;
|
|
|
|
if (desired < mDataSize) {
|
|
|
|
if (desired == 0) {
|
|
|
|
objectsSize = 0;
|
|
|
|
} else {
|
|
|
|
while (objectsSize > 0) {
|
|
|
|
if (mObjects[objectsSize-1] < desired)
|
|
|
|
break;
|
|
|
|
objectsSize--;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
if (mOwner) {
|
|
|
|
// If the size is going to zero, just release the owner's data.
|
|
|
|
if (desired == 0) {
|
|
|
|
freeData();
|
|
|
|
return NO_ERROR;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If there is a different owner, we need to take
|
|
|
|
        // possession.
|
|
|
|
uint8_t* data = (uint8_t*)malloc(desired);
|
|
|
|
if (!data) {
|
|
|
|
mError = NO_MEMORY;
|
|
|
|
return NO_MEMORY;
|
|
|
|
}
|
2014-01-29 04:12:59 +00:00
|
|
|
binder_size_t* objects = NULL;
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
if (objectsSize) {
|
2015-04-28 23:21:30 +00:00
|
|
|
objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
|
2008-10-21 14:00:00 +00:00
|
|
|
if (!objects) {
|
2013-03-09 02:28:54 +00:00
|
|
|
free(data);
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
mError = NO_MEMORY;
|
|
|
|
return NO_MEMORY;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Little hack to only acquire references on objects
|
|
|
|
// we will be keeping.
|
|
|
|
size_t oldObjectsSize = mObjectsSize;
|
|
|
|
mObjectsSize = objectsSize;
|
|
|
|
acquireObjects();
|
|
|
|
mObjectsSize = oldObjectsSize;
|
|
|
|
}
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
if (mData) {
|
|
|
|
memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
|
|
|
|
}
|
|
|
|
if (objects && mObjects) {
|
2014-01-29 04:12:59 +00:00
|
|
|
memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
2014-05-30 23:35:57 +00:00
|
|
|
//ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
|
2008-10-21 14:00:00 +00:00
|
|
|
mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
|
|
|
|
mOwner = NULL;
|
|
|
|
|
2014-11-11 20:22:53 +00:00
|
|
|
LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
|
2014-11-14 01:07:40 +00:00
|
|
|
pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
|
2014-11-11 20:22:53 +00:00
|
|
|
gParcelGlobalAllocSize += desired;
|
|
|
|
gParcelGlobalAllocCount++;
|
2014-11-14 01:07:40 +00:00
|
|
|
pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
|
2014-11-11 20:22:53 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
mData = data;
|
|
|
|
mObjects = objects;
|
|
|
|
mDataSize = (mDataSize < desired) ? mDataSize : desired;
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
|
2008-10-21 14:00:00 +00:00
|
|
|
mDataCapacity = desired;
|
|
|
|
mObjectsSize = mObjectsCapacity = objectsSize;
|
|
|
|
mNextObjectHint = 0;
|
2017-11-18 02:25:05 +00:00
|
|
|
mObjectsSorted = false;
|
2008-10-21 14:00:00 +00:00
|
|
|
|
|
|
|
} else if (mData) {
|
|
|
|
if (objectsSize < mObjectsSize) {
|
|
|
|
// Need to release refs on any objects we are dropping.
|
|
|
|
const sp<ProcessState> proc(ProcessState::self());
|
|
|
|
for (size_t i=objectsSize; i<mObjectsSize; i++) {
|
|
|
|
const flat_binder_object* flat
|
|
|
|
= reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
|
|
|
|
if (flat->type == BINDER_TYPE_FD) {
|
|
|
|
// will need to rescan because we may have lopped off the only FDs
|
|
|
|
mFdsKnown = false;
|
|
|
|
}
|
2015-12-03 09:34:15 +00:00
|
|
|
#ifndef DISABLE_ASHMEM_TRACKING
|
2015-10-22 23:12:53 +00:00
|
|
|
release_object(proc, *flat, this, &mOpenAshmemSize);
|
2015-12-03 09:34:15 +00:00
|
|
|
#else
|
|
|
|
release_object(proc, *flat, this);
|
|
|
|
#endif
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
2014-01-29 04:12:59 +00:00
|
|
|
binder_size_t* objects =
|
|
|
|
(binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
|
2008-10-21 14:00:00 +00:00
|
|
|
if (objects) {
|
|
|
|
mObjects = objects;
|
|
|
|
}
|
|
|
|
mObjectsSize = objectsSize;
|
|
|
|
mNextObjectHint = 0;
|
2017-11-18 02:25:05 +00:00
|
|
|
mObjectsSorted = false;
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// We own the data, so we can just do a realloc().
|
|
|
|
if (desired > mDataCapacity) {
|
|
|
|
uint8_t* data = (uint8_t*)realloc(mData, desired);
|
|
|
|
if (data) {
|
2014-11-11 20:22:53 +00:00
|
|
|
LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
|
|
|
|
desired);
|
2014-11-14 01:07:40 +00:00
|
|
|
pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
|
2014-11-11 20:22:53 +00:00
|
|
|
gParcelGlobalAllocSize += desired;
|
|
|
|
gParcelGlobalAllocSize -= mDataCapacity;
|
2014-11-14 01:07:40 +00:00
|
|
|
pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
|
2008-10-21 14:00:00 +00:00
|
|
|
mData = data;
|
|
|
|
mDataCapacity = desired;
|
|
|
|
} else if (desired > mDataCapacity) {
|
|
|
|
mError = NO_MEMORY;
|
|
|
|
return NO_MEMORY;
|
|
|
|
}
|
|
|
|
} else {
|
2011-04-14 01:15:56 +00:00
|
|
|
if (mDataSize > desired) {
|
|
|
|
mDataSize = desired;
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
|
2011-04-14 01:15:56 +00:00
|
|
|
}
|
2008-10-21 14:00:00 +00:00
|
|
|
if (mDataPos > desired) {
|
|
|
|
mDataPos = desired;
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
}
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
} else {
|
|
|
|
// This is the first data. Easy!
|
|
|
|
uint8_t* data = (uint8_t*)malloc(desired);
|
|
|
|
if (!data) {
|
|
|
|
mError = NO_MEMORY;
|
|
|
|
return NO_MEMORY;
|
|
|
|
}
|
2013-03-09 02:28:54 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
        if (!(mDataCapacity == 0 && mObjects == NULL
|
|
|
|
&& mObjectsCapacity == 0)) {
|
2014-02-06 01:42:44 +00:00
|
|
|
ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
2014-05-30 23:35:57 +00:00
|
|
|
|
2014-11-11 20:22:53 +00:00
|
|
|
LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
|
2014-11-14 01:07:40 +00:00
|
|
|
pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
|
2014-11-11 20:22:53 +00:00
|
|
|
gParcelGlobalAllocSize += desired;
|
|
|
|
gParcelGlobalAllocCount++;
|
2014-11-14 01:07:40 +00:00
|
|
|
pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
|
2014-11-11 20:22:53 +00:00
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
mData = data;
|
|
|
|
mDataSize = mDataPos = 0;
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
|
|
|
|
ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
|
2008-10-21 14:00:00 +00:00
|
|
|
mDataCapacity = desired;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NO_ERROR;
|
|
|
|
}
|
|
|
|
|
|
|
|
void Parcel::initState()
|
|
|
|
{
|
2014-11-11 20:22:53 +00:00
|
|
|
LOG_ALLOC("Parcel %p: initState", this);
|
2008-10-21 14:00:00 +00:00
|
|
|
mError = NO_ERROR;
|
|
|
|
mData = 0;
|
|
|
|
mDataSize = 0;
|
|
|
|
mDataCapacity = 0;
|
|
|
|
mDataPos = 0;
|
2014-05-30 23:35:57 +00:00
|
|
|
ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
|
|
|
|
ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
|
2008-10-21 14:00:00 +00:00
|
|
|
mObjects = NULL;
|
|
|
|
mObjectsSize = 0;
|
|
|
|
mObjectsCapacity = 0;
|
|
|
|
mNextObjectHint = 0;
|
2017-11-18 02:25:05 +00:00
|
|
|
mObjectsSorted = false;
|
2008-10-21 14:00:00 +00:00
|
|
|
mHasFds = false;
|
|
|
|
mFdsKnown = true;
|
2011-09-29 03:19:47 +00:00
|
|
|
mAllowFds = true;
|
2008-10-21 14:00:00 +00:00
|
|
|
mOwner = NULL;
|
2015-12-03 09:34:15 +00:00
|
|
|
#ifndef DISABLE_ASHMEM_TRACKING
|
2015-10-22 23:12:53 +00:00
|
|
|
mOpenAshmemSize = 0;
|
2015-12-03 09:34:15 +00:00
|
|
|
#endif
|
2008-10-21 14:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void Parcel::scanForFds() const
|
|
|
|
{
|
|
|
|
bool hasFds = false;
|
|
|
|
for (size_t i=0; i<mObjectsSize; i++) {
|
|
|
|
const flat_binder_object* flat
|
|
|
|
= reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
|
|
|
|
if (flat->type == BINDER_TYPE_FD) {
|
|
|
|
hasFds = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
mHasFds = hasFds;
|
|
|
|
mFdsKnown = true;
|
|
|
|
}
|
|
|
|
|
2015-04-10 14:08:45 +00:00
|
|
|
size_t Parcel::getBlobAshmemSize() const
|
|
|
|
{
|
2015-10-22 23:46:12 +00:00
|
|
|
    // This used to return the size of all blobs that were written to ashmem; now we're returning
|
|
|
|
// the ashmem currently referenced by this Parcel, which should be equivalent.
|
|
|
|
// TODO: Remove method once ABI can be changed.
|
2015-12-03 09:34:15 +00:00
|
|
|
#ifndef DISABLE_ASHMEM_TRACKING
|
2015-10-22 23:46:12 +00:00
|
|
|
return mOpenAshmemSize;
|
2015-12-03 09:34:15 +00:00
|
|
|
#else
|
|
|
|
return 0;
|
|
|
|
#endif
|
2015-04-10 14:08:45 +00:00
|
|
|
}
|
|
|
|
|
2015-10-22 23:12:53 +00:00
|
|
|
size_t Parcel::getOpenAshmemSize() const
|
|
|
|
{
|
2015-12-03 09:34:15 +00:00
|
|
|
#ifndef DISABLE_ASHMEM_TRACKING
|
2015-10-22 23:12:53 +00:00
|
|
|
return mOpenAshmemSize;
|
2015-12-03 09:34:15 +00:00
|
|
|
#else
|
|
|
|
return 0;
|
|
|
|
#endif
|
2015-10-22 23:12:53 +00:00
|
|
|
}
|
|
|
|
|
2011-09-24 04:17:56 +00:00
|
|
|
// --- Parcel::Blob ---
|
|
|
|
|
|
|
|
Parcel::Blob::Blob() :
|
2014-11-12 00:44:25 +00:00
|
|
|
mFd(-1), mData(NULL), mSize(0), mMutable(false) {
|
2011-09-24 04:17:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Parcel::Blob::~Blob() {
|
|
|
|
release();
|
|
|
|
}
|
|
|
|
|
|
|
|
void Parcel::Blob::release() {
|
2014-11-12 00:44:25 +00:00
|
|
|
if (mFd != -1 && mData) {
|
2011-09-24 04:17:56 +00:00
|
|
|
::munmap(mData, mSize);
|
|
|
|
}
|
|
|
|
clear();
|
|
|
|
}
|
|
|
|
|
2014-11-12 00:44:25 +00:00
|
|
|
void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) {
|
|
|
|
mFd = fd;
|
2011-09-24 04:17:56 +00:00
|
|
|
mData = data;
|
|
|
|
mSize = size;
|
2014-11-12 00:44:25 +00:00
|
|
|
mMutable = isMutable;
|
2011-09-24 04:17:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void Parcel::Blob::clear() {
|
2014-11-12 00:44:25 +00:00
|
|
|
mFd = -1;
|
2011-09-24 04:17:56 +00:00
|
|
|
mData = NULL;
|
|
|
|
mSize = 0;
|
2014-11-12 00:44:25 +00:00
|
|
|
mMutable = false;
|
2011-09-24 04:17:56 +00:00
|
|
|
}
|
|
|
|
|
2008-10-21 14:00:00 +00:00
|
|
|
}; // namespace android
|