fix the threading issue for setBufferCount()

this change introduces R/W locks in the right places.
on the server side, it guarantees that setBufferCount()
is synchronized with "retire" and "resize".
on the client side, it guarantees that setBufferCount()
is synchronized with "dequeue", "lockBuffer" and "queue".
Mathias Agopian 2010-05-18 17:06:55 -07:00
parent dcf01e6644
commit bb641244d7
15 changed files with 273 additions and 186 deletions
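
In practice the discipline is: the client's dequeue/queue/lock/undoDequeue paths (and the server's retire/reallocate paths) take the new RWLock for read, while setBufferCount()/resize() take it for write and perform the server round-trip while still holding it. Below is a minimal, self-contained sketch of that client-side pattern; it uses a plain pthread_rwlock_t in place of Android's RWLock, and the BufferState class, bufferIndexFor(), and the Ipc functor are illustrative stand-ins, not the SharedBufferClient API shown in the diff.

#include <pthread.h>

class BufferState {
public:
    BufferState() : mNumBuffers(2) { pthread_rwlock_init(&mLock, 0); }
    ~BufferState() { pthread_rwlock_destroy(&mLock); }

    // dequeue/queue/lockBuffer-style paths: shared (read) lock, so they
    // may run concurrently with each other but never with setBufferCount().
    int bufferIndexFor(int sequence) const {
        pthread_rwlock_rdlock(&mLock);
        int index = sequence % mNumBuffers;   // mNumBuffers cannot change here
        pthread_rwlock_unlock(&mLock);
        return index;
    }

    // setBufferCount-style path: exclusive (write) lock held across the
    // server round-trip, so the remote resize and the local count update
    // are atomic with respect to the readers above.
    template <typename Ipc>
    int setBufferCount(int count, const Ipc& ipc) {
        pthread_rwlock_wrlock(&mLock);
        int err = ipc(count);                 // e.g. the ISurface round-trip
        if (err == 0)
            mNumBuffers = count;
        pthread_rwlock_unlock(&mLock);
        return err;
    }

private:
    mutable pthread_rwlock_t mLock;
    int mNumBuffers;
};

This mirrors the new SharedBufferClient::setBufferCount(bufferCount, ipc) signature in the diff: the IPC is passed in as a callback precisely so it can be invoked under the write lock.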

View File

@ -55,12 +55,6 @@ namespace android {
*
*/
// When changing these values, the COMPILE_TIME_ASSERT at the end of this
// file need to be updated.
const unsigned int NUM_LAYERS_MAX = 31;
const unsigned int NUM_BUFFER_MAX = 16;
const unsigned int NUM_DISPLAY_MAX = 4;
// ----------------------------------------------------------------------------
class Region;
@ -82,6 +76,12 @@ class SharedBufferStack
friend class SharedBufferServer;
public:
// When changing these values, the COMPILE_TIME_ASSERT at the end of this
// file need to be updated.
static const unsigned int NUM_LAYERS_MAX = 31;
static const unsigned int NUM_BUFFER_MAX = 16;
static const unsigned int NUM_DISPLAY_MAX = 4;
struct Statistics { // 4 longs
typedef int32_t usecs_t;
usecs_t totalTime;
@ -147,7 +147,7 @@ private:
// FIXME: this should be replaced by a lock-less primitive
Mutex lock;
Condition cv;
SharedBufferStack surfaces[ NUM_LAYERS_MAX ];
SharedBufferStack surfaces[ SharedBufferStack::NUM_LAYERS_MAX ];
};
// ============================================================================
@ -155,7 +155,7 @@ private:
class SharedBufferBase
{
public:
SharedBufferBase(SharedClient* sharedClient, int surface, int num,
SharedBufferBase(SharedClient* sharedClient, int surface,
int32_t identity);
~SharedBufferBase();
uint32_t getIdentity();
@ -166,9 +166,7 @@ public:
protected:
SharedClient* const mSharedClient;
SharedBufferStack* const mSharedStack;
int mNumBuffers;
const int mIdentity;
int32_t computeTail() const;
friend struct Update;
friend struct QueueUpdate;
@ -217,8 +215,16 @@ public:
bool needNewBuffer(int buffer) const;
status_t setDirtyRegion(int buffer, const Region& reg);
status_t setCrop(int buffer, const Rect& reg);
status_t setBufferCount(int bufferCount);
class SetBufferCountCallback {
friend class SharedBufferClient;
virtual status_t operator()(int bufferCount) const = 0;
protected:
virtual ~SetBufferCountCallback() { }
};
status_t setBufferCount(int bufferCount, const SetBufferCountCallback& ipc);
private:
friend struct Condition;
friend struct DequeueCondition;
@ -249,11 +255,16 @@ private:
inline const char* name() const { return "LockCondition"; }
};
int32_t computeTail() const;
mutable RWLock mLock;
int mNumBuffers;
int32_t tail;
int32_t undoDequeueTail;
int32_t queued_head;
// statistics...
nsecs_t mDequeueTime[NUM_BUFFER_MAX];
nsecs_t mDequeueTime[SharedBufferStack::NUM_BUFFER_MAX];
};
// ----------------------------------------------------------------------------
@ -287,7 +298,8 @@ private:
size_t mCapacity;
uint32_t mList;
public:
BufferList(size_t c = NUM_BUFFER_MAX) : mCapacity(c), mList(0) { }
BufferList(size_t c = SharedBufferStack::NUM_BUFFER_MAX)
: mCapacity(c), mList(0) { }
status_t add(int value);
status_t remove(int value);
@ -324,6 +336,9 @@ private:
}
};
// this protects mNumBuffers and mBufferList
mutable RWLock mLock;
int mNumBuffers;
BufferList mBufferList;
struct UnlockUpdate : public UpdateBase {
@ -373,7 +388,7 @@ struct surface_flinger_cblk_t // 4KB max
uint8_t connected;
uint8_t reserved[3];
uint32_t pad[7];
display_cblk_t displays[NUM_DISPLAY_MAX];
display_cblk_t displays[SharedBufferStack::NUM_DISPLAY_MAX];
};
// ---------------------------------------------------------------------------

View File

@ -29,10 +29,6 @@ public:
inline Barrier() : state(CLOSED) { }
inline ~Barrier() { }
void open() {
// gcc memory barrier, this makes sure all memory writes
// have been issued by gcc. On an SMP system we'd need a real
// h/w barrier.
asm volatile ("":::"memory");
Mutex::Autolock _l(lock);
state = OPENED;
cv.broadcast();

View File

@ -60,7 +60,7 @@ Layer::Layer(SurfaceFlinger* flinger, DisplayID display,
// no OpenGL operation is possible here, since we might not be
// in the OpenGL thread.
lcblk = new SharedBufferServer(
client->ctrlblk, i, mBufferManager.getBufferCount(),
client->ctrlblk, i, mBufferManager.getDefaultBufferCount(),
getIdentity());
mBufferManager.setActiveBufferIndex( lcblk->getFrontBuffer() );
@ -68,7 +68,10 @@ Layer::Layer(SurfaceFlinger* flinger, DisplayID display,
Layer::~Layer()
{
destroy();
// FIXME: must be called from the main UI thread
EGLDisplay dpy(mFlinger->graphicPlane(0).getEGLDisplay());
mBufferManager.destroy(dpy);
// the actual buffers will be destroyed here
delete lcblk;
}
@ -81,17 +84,6 @@ void Layer::onRemoved()
lcblk->setStatus(NO_INIT);
}
void Layer::destroy()
{
EGLDisplay dpy(mFlinger->graphicPlane(0).getEGLDisplay());
mBufferManager.destroy(dpy);
mSurface.clear();
Mutex::Autolock _l(mLock);
mWidth = mHeight = 0;
}
sp<LayerBaseClient::Surface> Layer::createSurface() const
{
return mSurface;
@ -99,9 +91,17 @@ sp<LayerBaseClient::Surface> Layer::createSurface() const
status_t Layer::ditch()
{
// NOTE: Called from the main UI thread
// the layer is not on screen anymore; free as many resources as possible
mFreezeLock.clear();
destroy();
EGLDisplay dpy(mFlinger->graphicPlane(0).getEGLDisplay());
mBufferManager.destroy(dpy);
mSurface.clear();
Mutex::Autolock _l(mLock);
mWidth = mHeight = 0;
return NO_ERROR;
}
@ -211,7 +211,7 @@ void Layer::onDraw(const Region& clip) const
status_t Layer::setBufferCount(int bufferCount)
{
// this ensures our client doesn't go away while we're accessing
// Ensures our client doesn't go away while we're accessing
// the shared area.
sp<Client> ourClient(client.promote());
if (ourClient == 0) {
@ -219,15 +219,10 @@ status_t Layer::setBufferCount(int bufferCount)
return DEAD_OBJECT;
}
status_t err;
// FIXME: resize() below is NOT thread-safe, we need to synchronize
// the users of lcblk in our process (ie: retire), and we assume the
// client is not mucking with the SharedStack, which is only enforced
// by construction, therefore we need to protect ourselves against
// buggy and malicious client (as always)
err = lcblk->resize(bufferCount);
// NOTE: lcblk->resize() is protected by an internal lock
status_t err = lcblk->resize(bufferCount);
if (err == NO_ERROR)
mBufferManager.resize(bufferCount);
return err;
}
@ -248,7 +243,7 @@ sp<GraphicBuffer> Layer::requestBuffer(int index, int usage)
* This is called from the client's Surface::dequeue(). This can happen
* at any time, especially while we're in the middle of using the
* buffer 'index' as our front buffer.
*
*
* Make sure the buffer we're resizing is not the front buffer and has been
* dequeued. Once this condition is asserted, we are guaranteed that this
* buffer cannot become the front buffer under our feet, since we're called
@ -544,12 +539,20 @@ void Layer::dump(String8& result, char* buffer, size_t SIZE) const
// ---------------------------------------------------------------------------
Layer::BufferManager::BufferManager(TextureManager& tm)
: mTextureManager(tm), mActiveBuffer(0), mFailover(false)
: mNumBuffers(NUM_BUFFERS), mTextureManager(tm),
mActiveBuffer(0), mFailover(false)
{
}
size_t Layer::BufferManager::getBufferCount() const {
return NUM_BUFFERS;
Layer::BufferManager::~BufferManager()
{
}
status_t Layer::BufferManager::resize(size_t size)
{
Mutex::Autolock _l(mLock);
mNumBuffers = size;
return NO_ERROR;
}
// only for debugging
@ -568,51 +571,55 @@ size_t Layer::BufferManager::getActiveBufferIndex() const {
}
Texture Layer::BufferManager::getActiveTexture() const {
return mFailover ? mFailoverTexture : mBufferData[mActiveBuffer].texture;
Texture res;
if (mFailover) {
res = mFailoverTexture;
} else {
static_cast<Image&>(res) = mBufferData[mActiveBuffer].texture;
}
return res;
}
sp<GraphicBuffer> Layer::BufferManager::getActiveBuffer() const {
const size_t activeBuffer = mActiveBuffer;
BufferData const * const buffers = mBufferData;
Mutex::Autolock _l(mLock);
return mBufferData[mActiveBuffer].buffer;
return buffers[activeBuffer].buffer;
}
sp<GraphicBuffer> Layer::BufferManager::detachBuffer(size_t index)
{
BufferData* const buffers = mBufferData;
sp<GraphicBuffer> buffer;
Mutex::Autolock _l(mLock);
buffer = mBufferData[index].buffer;
mBufferData[index].buffer = 0;
buffer = buffers[index].buffer;
buffers[index].buffer = 0;
return buffer;
}
status_t Layer::BufferManager::attachBuffer(size_t index,
const sp<GraphicBuffer>& buffer)
{
BufferData* const buffers = mBufferData;
Mutex::Autolock _l(mLock);
mBufferData[index].buffer = buffer;
mBufferData[index].texture.dirty = true;
return NO_ERROR;
}
status_t Layer::BufferManager::destroyTexture(Texture* tex, EGLDisplay dpy)
{
if (tex->name != -1U) {
glDeleteTextures(1, &tex->name);
tex->name = -1U;
}
if (tex->image != EGL_NO_IMAGE_KHR) {
eglDestroyImageKHR(dpy, tex->image);
tex->image = EGL_NO_IMAGE_KHR;
}
buffers[index].buffer = buffer;
buffers[index].texture.dirty = true;
return NO_ERROR;
}
status_t Layer::BufferManager::destroy(EGLDisplay dpy)
{
Mutex::Autolock _l(mLock);
for (size_t i=0 ; i<NUM_BUFFERS ; i++) {
destroyTexture(&mBufferData[i].texture, dpy);
mBufferData[i].buffer = 0;
BufferData* const buffers = mBufferData;
size_t num;
{ // scope for the lock
Mutex::Autolock _l(mLock);
num = mNumBuffers;
for (size_t i=0 ; i<num ; i++) {
buffers[i].buffer = 0;
}
}
for (size_t i=0 ; i<num ; i++) {
destroyTexture(&buffers[i].texture, dpy);
}
destroyTexture(&mFailoverTexture, dpy);
return NO_ERROR;
@ -622,7 +629,7 @@ status_t Layer::BufferManager::initEglImage(EGLDisplay dpy,
const sp<GraphicBuffer>& buffer)
{
size_t index = mActiveBuffer;
Texture& texture(mBufferData[index].texture);
Image& texture(mBufferData[index].texture);
status_t err = mTextureManager.initEglImage(&texture, dpy, buffer);
// if EGLImage fails, we switch to regular texture mode, and we
// free all resources associated with using EGLImages.
@ -631,7 +638,8 @@ status_t Layer::BufferManager::initEglImage(EGLDisplay dpy,
destroyTexture(&mFailoverTexture, dpy);
} else {
mFailover = true;
for (size_t i=0 ; i<NUM_BUFFERS ; i++) {
const size_t num = mNumBuffers;
for (size_t i=0 ; i<num ; i++) {
destroyTexture(&mBufferData[i].texture, dpy);
}
}
@ -644,6 +652,19 @@ status_t Layer::BufferManager::loadTexture(
return mTextureManager.loadTexture(&mFailoverTexture, dirty, t);
}
status_t Layer::BufferManager::destroyTexture(Image* tex, EGLDisplay dpy)
{
if (tex->name != -1U) {
glDeleteTextures(1, &tex->name);
tex->name = -1U;
}
if (tex->image != EGL_NO_IMAGE_KHR) {
eglDestroyImageKHR(dpy, tex->image);
tex->image = EGL_NO_IMAGE_KHR;
}
return NO_ERROR;
}
// ---------------------------------------------------------------------------
Layer::SurfaceLayer::SurfaceLayer(const sp<SurfaceFlinger>& flinger,
@ -661,6 +682,11 @@ sp<GraphicBuffer> Layer::SurfaceLayer::requestBuffer(int index, int usage)
sp<GraphicBuffer> buffer;
sp<Layer> owner(getOwner());
if (owner != 0) {
/*
* requestBuffer() cannot be called from the main thread
* as it could cause a dead-lock, since it may have to wait
* on conditions updated by the main thread.
*/
buffer = owner->requestBuffer(index, usage);
}
return buffer;
@ -671,6 +697,11 @@ status_t Layer::SurfaceLayer::setBufferCount(int bufferCount)
status_t err = DEAD_OBJECT;
sp<Layer> owner(getOwner());
if (owner != 0) {
/*
* setBufferCount() cannot be called from the main thread
* as it could cause a dead-lock, since it may have to wait
* on conditions updated by the main thread.
*/
err = owner->setBufferCount(bufferCount);
}
return err;

View File

@ -90,7 +90,6 @@ private:
sp<GraphicBuffer> requestBuffer(int index, int usage);
status_t setBufferCount(int bufferCount);
void destroy();
class SurfaceLayer : public LayerBaseClient::Surface {
public:
@ -120,25 +119,32 @@ private:
static const size_t NUM_BUFFERS = 2;
struct BufferData {
sp<GraphicBuffer> buffer;
Texture texture;
Image texture;
};
// this lock protects mBufferData[].buffer, but since there
// is very little contention, we use only one lock for
// the whole array; we also use it to protect mNumBuffers.
mutable Mutex mLock;
BufferData mBufferData[NUM_BUFFERS];
BufferData mBufferData[SharedBufferStack::NUM_BUFFER_MAX];
size_t mNumBuffers;
Texture mFailoverTexture;
TextureManager& mTextureManager;
ssize_t mActiveBuffer;
bool mFailover;
static status_t destroyTexture(Texture* tex, EGLDisplay dpy);
static status_t destroyTexture(Image* tex, EGLDisplay dpy);
public:
static size_t getDefaultBufferCount() { return NUM_BUFFERS; }
BufferManager(TextureManager& tm);
size_t getBufferCount() const;
~BufferManager();
// detach/attach buffer from/to given index
sp<GraphicBuffer> detachBuffer(size_t index);
status_t attachBuffer(size_t index, const sp<GraphicBuffer>& buffer);
// resize the number of active buffers
status_t resize(size_t size);
// ----------------------------------------------
// must be called from GL thread
@ -170,6 +176,8 @@ private:
TextureManager mTextureManager;
BufferManager mBufferManager;
// this lock protects mWidth and mHeight which are accessed from
// the main thread and requestBuffer's binder transaction thread.
mutable Mutex mLock;
uint32_t mWidth;
uint32_t mHeight;

View File

@ -95,7 +95,9 @@ void LayerBlur::unlockPageFlip(const Transform& planeTransform, Region& outDirty
mCacheDirty = false;
} else {
if (!mAutoRefreshPending) {
mFlinger->signalDelayedEvent(ms2ns(500));
mFlinger->postMessageAsync(
new MessageBase(MessageQueue::INVALIDATE),
ms2ns(500));
mAutoRefreshPending = true;
}
}

View File

@ -60,9 +60,9 @@ MessageQueue::~MessageQueue()
{
}
MessageList::value_type MessageQueue::waitMessage(nsecs_t timeout)
sp<MessageBase> MessageQueue::waitMessage(nsecs_t timeout)
{
MessageList::value_type result;
sp<MessageBase> result;
bool again;
do {
@ -132,6 +132,7 @@ MessageList::value_type MessageQueue::waitMessage(nsecs_t timeout)
if (again) {
// the message has been processed. release our reference to it
// without holding the lock.
result->notify();
result = 0;
}
@ -141,7 +142,7 @@ MessageList::value_type MessageQueue::waitMessage(nsecs_t timeout)
}
status_t MessageQueue::postMessage(
const MessageList::value_type& message, nsecs_t relTime, uint32_t flags)
const sp<MessageBase>& message, nsecs_t relTime, uint32_t flags)
{
return queueMessage(message, relTime, flags);
}
@ -154,7 +155,7 @@ status_t MessageQueue::invalidate() {
}
status_t MessageQueue::queueMessage(
const MessageList::value_type& message, nsecs_t relTime, uint32_t flags)
const sp<MessageBase>& message, nsecs_t relTime, uint32_t flags)
{
Mutex::Autolock _l(mLock);
message->when = systemTime() + relTime;
@ -167,13 +168,13 @@ status_t MessageQueue::queueMessage(
return NO_ERROR;
}
void MessageQueue::dump(const MessageList::value_type& message)
void MessageQueue::dump(const sp<MessageBase>& message)
{
Mutex::Autolock _l(mLock);
dumpLocked(message);
}
void MessageQueue::dumpLocked(const MessageList::value_type& message)
void MessageQueue::dumpLocked(const sp<MessageBase>& message)
{
LIST::const_iterator cur(mMessages.begin());
LIST::const_iterator end(mMessages.end());

View File

@ -25,6 +25,7 @@
#include <utils/Timers.h>
#include <utils/List.h>
#include "Barrier.h"
namespace android {
@ -37,7 +38,6 @@ class MessageList
List< sp<MessageBase> > mList;
typedef List< sp<MessageBase> > LIST;
public:
typedef sp<MessageBase> value_type;
inline LIST::iterator begin() { return mList.begin(); }
inline LIST::const_iterator begin() const { return mList.begin(); }
inline LIST::iterator end() { return mList.end(); }
@ -63,11 +63,19 @@ public:
// return true if message has a handler
virtual bool handler() { return false; }
// waits for the handler to be processed
void wait() const { barrier.wait(); }
// releases all waiters. this is done automatically if
// handler returns true
void notify() const { barrier.open(); }
protected:
virtual ~MessageBase() { }
private:
mutable Barrier barrier;
friend class LightRefBase<MessageBase>;
};
@ -82,42 +90,33 @@ class MessageQueue
typedef List< sp<MessageBase> > LIST;
public:
// this is a work-around the multichar constant warning. A macro would
// work too, but would pollute the namespace.
template <int a, int b, int c, int d>
struct WHAT {
static const uint32_t Value =
(uint32_t(a&0xff)<<24)|(uint32_t(b&0xff)<<16)|
(uint32_t(c&0xff)<<8)|uint32_t(d&0xff);
};
MessageQueue();
~MessageQueue();
// pre-defined messages
enum {
INVALIDATE = WHAT<'_','p','d','t'>::Value
INVALIDATE = '_upd'
};
MessageList::value_type waitMessage(nsecs_t timeout = -1);
sp<MessageBase> waitMessage(nsecs_t timeout = -1);
status_t postMessage(const MessageList::value_type& message,
status_t postMessage(const sp<MessageBase>& message,
nsecs_t reltime=0, uint32_t flags = 0);
status_t invalidate();
void dump(const MessageList::value_type& message);
void dump(const sp<MessageBase>& message);
private:
status_t queueMessage(const MessageList::value_type& message,
status_t queueMessage(const sp<MessageBase>& message,
nsecs_t reltime, uint32_t flags);
void dumpLocked(const MessageList::value_type& message);
void dumpLocked(const sp<MessageBase>& message);
Mutex mLock;
Condition mCondition;
MessageList mMessages;
bool mInvalidate;
MessageList::value_type mInvalidateMessage;
sp<MessageBase> mInvalidateMessage;
};
// ---------------------------------------------------------------------------
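
The other half of the change is the synchronous message path: MessageBase now carries a Barrier, postMessageSync() blocks on it with wait(), and waitMessage() calls notify() once the handler has run. A minimal stand-in for that handshake, assuming plain pthread primitives (the Barrier and Message types below are illustrative, not the frameworks classes):

#include <pthread.h>

class Barrier {
public:
    Barrier() : mOpened(false) {
        pthread_mutex_init(&mMutex, 0);
        pthread_cond_init(&mCond, 0);
    }
    ~Barrier() {
        pthread_cond_destroy(&mCond);
        pthread_mutex_destroy(&mMutex);
    }
    void open() {                          // MessageBase::notify()
        pthread_mutex_lock(&mMutex);
        mOpened = true;
        pthread_cond_broadcast(&mCond);
        pthread_mutex_unlock(&mMutex);
    }
    void wait() {                          // MessageBase::wait()
        pthread_mutex_lock(&mMutex);
        while (!mOpened)
            pthread_cond_wait(&mCond, &mMutex);
        pthread_mutex_unlock(&mMutex);
    }
private:
    pthread_mutex_t mMutex;
    pthread_cond_t mCond;
    bool mOpened;
};

// postMessageSync() boils down to: enqueue the message for the main
// thread, then block on its barrier; the consumer opens the barrier
// after handling it (result->notify() in waitMessage()).
struct Message {
    Barrier barrier;
    void handled()        { barrier.open(); }   // consumer (main) thread
    void waitForHandler() { barrier.wait(); }   // posting thread
};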

View File

@ -426,7 +426,7 @@ void SurfaceFlinger::waitForEvent()
timeout = waitTime>0 ? waitTime : 0;
}
MessageList::value_type msg = mEventQueue.waitMessage(timeout);
sp<MessageBase> msg = mEventQueue.waitMessage(timeout);
// see if we timed out
if (isFrozen()) {
@ -461,9 +461,20 @@ void SurfaceFlinger::signal() const {
const_cast<SurfaceFlinger*>(this)->signalEvent();
}
void SurfaceFlinger::signalDelayedEvent(nsecs_t delay)
status_t SurfaceFlinger::postMessageAsync(const sp<MessageBase>& msg,
nsecs_t reltime, uint32_t flags)
{
mEventQueue.postMessage( new MessageBase(MessageQueue::INVALIDATE), delay);
return mEventQueue.postMessage(msg, reltime, flags);
}
status_t SurfaceFlinger::postMessageSync(const sp<MessageBase>& msg,
nsecs_t reltime, uint32_t flags)
{
status_t res = mEventQueue.postMessage(msg, reltime, flags);
if (res == NO_ERROR) {
msg->wait();
}
return res;
}
// ----------------------------------------------------------------------------
@ -1135,15 +1146,11 @@ uint32_t SurfaceFlinger::getTransactionFlags(uint32_t flags)
return android_atomic_and(~flags, &mTransactionFlags) & flags;
}
uint32_t SurfaceFlinger::setTransactionFlags(uint32_t flags, nsecs_t delay)
uint32_t SurfaceFlinger::setTransactionFlags(uint32_t flags)
{
uint32_t old = android_atomic_or(flags, &mTransactionFlags);
if ((old & flags)==0) { // wake the server up
if (delay > 0) {
signalDelayedEvent(delay);
} else {
signalEvent();
}
signalEvent();
}
return old;
}
@ -1245,7 +1252,7 @@ sp<ISurface> SurfaceFlinger::createSurface(ClientID clientId, int pid,
//LOGD("createSurface for pid %d (%d x %d)", pid, w, h);
int32_t id = client->generateId(pid);
if (uint32_t(id) >= NUM_LAYERS_MAX) {
if (uint32_t(id) >= SharedBufferStack::NUM_LAYERS_MAX) {
LOGE("createSurface() failed, generateId = %d", id);
return surfaceHandle;
}
@ -1399,7 +1406,7 @@ status_t SurfaceFlinger::destroySurface(const sp<LayerBaseClient>& layer)
}
};
mEventQueue.postMessage( new MessageDestroySurface(this, layer) );
postMessageAsync( new MessageDestroySurface(this, layer) );
return NO_ERROR;
}
@ -1672,7 +1679,7 @@ Client::~Client() {
int32_t Client::generateId(int pid)
{
const uint32_t i = clz( ~mBitmap );
if (i >= NUM_LAYERS_MAX) {
if (i >= SharedBufferStack::NUM_LAYERS_MAX) {
return NO_MEMORY;
}
mPid = pid;
@ -1699,7 +1706,8 @@ void Client::free(int32_t id)
}
bool Client::isValid(int32_t i) const {
return (uint32_t(i)<NUM_LAYERS_MAX) && (mBitmap & (1<<(31-i)));
return (uint32_t(i)<SharedBufferStack::NUM_LAYERS_MAX) &&
(mBitmap & (1<<(31-i)));
}
sp<LayerBaseClient> Client::getLayerUser(int32_t i) const {

View File

@ -255,8 +255,6 @@ private:
public: // hack to work around gcc 4.0.3 bug
void signalEvent();
private:
void signalDelayedEvent(nsecs_t delay);
void handleConsoleEvents();
void handleTransaction(uint32_t transactionFlags);
void handleTransactionLocked(
@ -286,7 +284,7 @@ private:
void free_resources_l();
uint32_t getTransactionFlags(uint32_t flags);
uint32_t setTransactionFlags(uint32_t flags, nsecs_t delay = 0);
uint32_t setTransactionFlags(uint32_t flags);
void commitTransaction();
@ -310,7 +308,12 @@ private:
mutable MessageQueue mEventQueue;
status_t postMessageAsync(const sp<MessageBase>& msg,
nsecs_t reltime=0, uint32_t flags = 0);
status_t postMessageSync(const sp<MessageBase>& msg,
nsecs_t reltime=0, uint32_t flags = 0);
// access must be protected by mStateLock

View File

@ -68,7 +68,7 @@ bool TextureManager::isSupportedYuvFormat(int format)
return false;
}
status_t TextureManager::initEglImage(Texture* texture,
status_t TextureManager::initEglImage(Image* texture,
EGLDisplay dpy, const sp<GraphicBuffer>& buffer)
{
status_t err = NO_ERROR;
@ -108,7 +108,6 @@ status_t TextureManager::initEglImage(Texture* texture,
err = INVALID_OPERATION;
} else {
// Everything went okay!
texture->NPOTAdjust = false;
texture->dirty = false;
texture->width = clientBuf->width;
texture->height = clientBuf->height;

View File

@ -36,21 +36,24 @@ class GraphicBuffer;
// ---------------------------------------------------------------------------
struct Texture {
Texture() : name(-1U), width(0), height(0),
image(EGL_NO_IMAGE_KHR), transform(0),
NPOTAdjust(false), dirty(true) { }
struct Image {
Image() : name(-1U), image(EGL_NO_IMAGE_KHR), width(0), height(0),
transform(0), dirty(true) { }
GLuint name;
EGLImageKHR image;
GLuint width;
GLuint height;
uint32_t transform;
bool dirty;
};
struct Texture : public Image {
Texture() : Image(), NPOTAdjust(false) { }
GLuint potWidth;
GLuint potHeight;
GLfloat wScale;
GLfloat hScale;
EGLImageKHR image;
uint32_t transform;
bool NPOTAdjust;
bool dirty;
};
// ---------------------------------------------------------------------------
@ -68,7 +71,7 @@ public:
const Region& dirty, const GGLSurface& t);
// make active buffer an EGLImage if needed
status_t initEglImage(Texture* texture,
status_t initEglImage(Image* texture,
EGLDisplay dpy, const sp<GraphicBuffer>& buffer);
};

View File

@ -44,7 +44,7 @@ SharedClient::~SharedClient() {
// these functions are used by the clients
status_t SharedClient::validate(size_t i) const {
if (uint32_t(i) >= uint32_t(NUM_LAYERS_MAX))
if (uint32_t(i) >= uint32_t(SharedBufferStack::NUM_LAYERS_MAX))
return BAD_INDEX;
return surfaces[i].status;
}
@ -144,10 +144,10 @@ Region SharedBufferStack::getDirtyRegion(int buffer) const
// ----------------------------------------------------------------------------
SharedBufferBase::SharedBufferBase(SharedClient* sharedClient,
int surface, int num, int32_t identity)
int surface, int32_t identity)
: mSharedClient(sharedClient),
mSharedStack(sharedClient->surfaces + surface),
mNumBuffers(num), mIdentity(identity)
mIdentity(identity)
{
}
@ -179,34 +179,16 @@ String8 SharedBufferBase::dump(char const* prefix) const
char buffer[SIZE];
String8 result;
SharedBufferStack& stack( *mSharedStack );
int tail = computeTail();
snprintf(buffer, SIZE,
"%s[ head=%2d, available=%2d, queued=%2d, tail=%2d ] "
"%s[ head=%2d, available=%2d, queued=%2d ] "
"reallocMask=%08x, inUse=%2d, identity=%d, status=%d",
prefix, stack.head, stack.available, stack.queued, tail,
prefix, stack.head, stack.available, stack.queued,
stack.reallocMask, stack.inUse, stack.identity, stack.status);
result.append(buffer);
snprintf(buffer, SIZE, " { ");
result.append(buffer);
for (int i=0 ; i<mNumBuffers ; i++) {
snprintf(buffer, SIZE, "%d ", stack.index[i]);
result.append(buffer);
}
snprintf(buffer, SIZE, " }\n");
result.append(buffer);
result.append("\n");
return result;
}
int32_t SharedBufferBase::computeTail() const
{
SharedBufferStack& stack( *mSharedStack );
return (mNumBuffers + stack.head - stack.available + 1) % mNumBuffers;
}
status_t SharedBufferBase::waitForCondition(const ConditionBase& condition)
{
const SharedBufferStack& stack( *mSharedStack );
@ -270,7 +252,7 @@ SharedBufferServer::ReallocateCondition::ReallocateCondition(
}
bool SharedBufferServer::ReallocateCondition::operator()() const {
int32_t head = stack.head;
if (uint32_t(head) >= NUM_BUFFER_MAX) {
if (uint32_t(head) >= SharedBufferStack::NUM_BUFFER_MAX) {
// if stack.head is messed up, we cannot allow the server to
// crash (since stack.head is mapped on the client side)
stack.status = BAD_VALUE;
@ -318,7 +300,7 @@ SharedBufferServer::RetireUpdate::RetireUpdate(
}
ssize_t SharedBufferServer::RetireUpdate::operator()() {
int32_t head = stack.head;
if (uint32_t(head) >= NUM_BUFFER_MAX)
if (uint32_t(head) >= SharedBufferStack::NUM_BUFFER_MAX)
return BAD_VALUE;
// Preventively lock the current buffer before updating queued.
@ -361,14 +343,20 @@ ssize_t SharedBufferServer::StatusUpdate::operator()() {
SharedBufferClient::SharedBufferClient(SharedClient* sharedClient,
int surface, int num, int32_t identity)
: SharedBufferBase(sharedClient, surface, num, identity),
tail(0), undoDequeueTail(0)
: SharedBufferBase(sharedClient, surface, identity),
mNumBuffers(num), tail(0), undoDequeueTail(0)
{
SharedBufferStack& stack( *mSharedStack );
tail = computeTail();
queued_head = stack.head;
}
int32_t SharedBufferClient::computeTail() const
{
SharedBufferStack& stack( *mSharedStack );
return (mNumBuffers + stack.head - stack.available + 1) % mNumBuffers;
}
ssize_t SharedBufferClient::dequeue()
{
SharedBufferStack& stack( *mSharedStack );
@ -377,7 +365,9 @@ ssize_t SharedBufferClient::dequeue()
LOGW("dequeue: tail=%d, head=%d, avail=%d, queued=%d",
tail, stack.head, stack.available, stack.queued);
}
RWLock::AutoRLock _rd(mLock);
const nsecs_t dequeueTime = systemTime(SYSTEM_TIME_THREAD);
//LOGD("[%d] about to dequeue a buffer",
@ -407,6 +397,8 @@ ssize_t SharedBufferClient::dequeue()
status_t SharedBufferClient::undoDequeue(int buf)
{
RWLock::AutoRLock _rd(mLock);
// TODO: we can only undo the previous dequeue, we should
// enforce that in the api
UndoDequeueUpdate update(this);
@ -419,6 +411,8 @@ status_t SharedBufferClient::undoDequeue(int buf)
status_t SharedBufferClient::lock(int buf)
{
RWLock::AutoRLock _rd(mLock);
SharedBufferStack& stack( *mSharedStack );
LockCondition condition(this, buf);
status_t err = waitForCondition(condition);
@ -427,6 +421,8 @@ status_t SharedBufferClient::lock(int buf)
status_t SharedBufferClient::queue(int buf)
{
RWLock::AutoRLock _rd(mLock);
SharedBufferStack& stack( *mSharedStack );
queued_head = (queued_head + 1) % mNumBuffers;
@ -460,21 +456,29 @@ status_t SharedBufferClient::setDirtyRegion(int buf, const Region& reg)
return stack.setDirtyRegion(buf, reg);
}
status_t SharedBufferClient::setBufferCount(int bufferCount)
status_t SharedBufferClient::setBufferCount(
int bufferCount, const SetBufferCountCallback& ipc)
{
SharedBufferStack& stack( *mSharedStack );
if (uint32_t(bufferCount) >= NUM_BUFFER_MAX)
if (uint32_t(bufferCount) >= SharedBufferStack::NUM_BUFFER_MAX)
return BAD_VALUE;
mNumBuffers = bufferCount;
queued_head = (stack.head + stack.queued) % mNumBuffers;
return NO_ERROR;
RWLock::AutoWLock _wr(mLock);
status_t err = ipc(bufferCount);
if (err == NO_ERROR) {
mNumBuffers = bufferCount;
queued_head = (stack.head + stack.queued) % mNumBuffers;
}
return err;
}
// ----------------------------------------------------------------------------
SharedBufferServer::SharedBufferServer(SharedClient* sharedClient,
int surface, int num, int32_t identity)
: SharedBufferBase(sharedClient, surface, num, identity)
: SharedBufferBase(sharedClient, surface, identity),
mNumBuffers(num)
{
mSharedStack->init(identity);
mSharedStack->head = num-1;
@ -490,10 +494,12 @@ SharedBufferServer::SharedBufferServer(SharedClient* sharedClient,
ssize_t SharedBufferServer::retireAndLock()
{
RWLock::AutoRLock _l(mLock);
RetireUpdate update(this, mNumBuffers);
ssize_t buf = updateCondition( update );
if (buf >= 0) {
if (uint32_t(buf) >= NUM_BUFFER_MAX)
if (uint32_t(buf) >= SharedBufferStack::NUM_BUFFER_MAX)
return BAD_VALUE;
SharedBufferStack& stack( *mSharedStack );
buf = stack.index[buf];
@ -520,6 +526,8 @@ void SharedBufferServer::setStatus(status_t status)
status_t SharedBufferServer::reallocate()
{
RWLock::AutoRLock _l(mLock);
SharedBufferStack& stack( *mSharedStack );
uint32_t mask = (1<<mNumBuffers)-1;
android_atomic_or(mask, &stack.reallocMask);
@ -534,6 +542,13 @@ int32_t SharedBufferServer::getQueuedCount() const
status_t SharedBufferServer::assertReallocate(int buf)
{
/*
* NOTE: it's safe to hold mLock for read while waiting for
* the ReallocateCondition because that condition is not updated
* by the thread that holds mLock for write.
*/
RWLock::AutoRLock _l(mLock);
// TODO: need to validate "buf"
ReallocateCondition condition(this, buf);
status_t err = waitForCondition(condition);
@ -546,9 +561,7 @@ Region SharedBufferServer::getDirtyRegion(int buf) const
return stack.getDirtyRegion(buf);
}
/*
*
* NOTE: this is not thread-safe on the server-side, meaning
* 'head' cannot move during this operation. The client-side
can safely operate as usual.
@ -556,9 +569,11 @@ Region SharedBufferServer::getDirtyRegion(int buf) const
*/
status_t SharedBufferServer::resize(int newNumBuffers)
{
if (uint32_t(newNumBuffers) >= NUM_BUFFER_MAX)
if (uint32_t(newNumBuffers) >= SharedBufferStack::NUM_BUFFER_MAX)
return BAD_VALUE;
RWLock::AutoWLock _l(mLock);
// for now we're not supporting shrinking
const int numBuffers = mNumBuffers;
if (newNumBuffers < numBuffers)
@ -569,7 +584,7 @@ status_t SharedBufferServer::resize(int newNumBuffers)
// read the head, make sure it's valid
int32_t head = stack.head;
if (uint32_t(head) >= NUM_BUFFER_MAX)
if (uint32_t(head) >= SharedBufferStack::NUM_BUFFER_MAX)
return BAD_VALUE;
int base = numBuffers;

View File

@ -678,19 +678,18 @@ int Surface::setBufferCount(int bufferCount)
sp<ISurface> s(mSurface);
if (s == 0) return NO_INIT;
// FIXME: this needs to be synchronized dequeue/queue
class SetBufferCountIPC : public SharedBufferClient::SetBufferCountCallback {
sp<ISurface> surface;
virtual status_t operator()(int bufferCount) const {
return surface->setBufferCount(bufferCount);
}
public:
SetBufferCountIPC(const sp<ISurface>& surface) : surface(surface) { }
} ipc(s);
status_t err = s->setBufferCount(bufferCount);
status_t err = mSharedBufferClient->setBufferCount(bufferCount, ipc);
LOGE_IF(err, "ISurface::setBufferCount(%d) returned %s",
bufferCount, strerror(-err));
if (err == NO_ERROR) {
err = mSharedBufferClient->getStatus();
LOGE_IF(err, "Surface (identity=%d) state = %d", mIdentity, err);
if (!err) {
// update our local copy of the buffer count
mSharedBufferClient->setBufferCount(bufferCount);
}
}
return err;
}

View File

@ -250,7 +250,7 @@ void SurfaceComposerClient::dispose()
status_t SurfaceComposerClient::getDisplayInfo(
DisplayID dpy, DisplayInfo* info)
{
if (uint32_t(dpy)>=NUM_DISPLAY_MAX)
if (uint32_t(dpy)>=SharedBufferStack::NUM_DISPLAY_MAX)
return BAD_VALUE;
volatile surface_flinger_cblk_t const * cblk = get_cblk();
@ -268,7 +268,7 @@ status_t SurfaceComposerClient::getDisplayInfo(
ssize_t SurfaceComposerClient::getDisplayWidth(DisplayID dpy)
{
if (uint32_t(dpy)>=NUM_DISPLAY_MAX)
if (uint32_t(dpy)>=SharedBufferStack::NUM_DISPLAY_MAX)
return BAD_VALUE;
volatile surface_flinger_cblk_t const * cblk = get_cblk();
volatile display_cblk_t const * dcblk = cblk->displays + dpy;
@ -277,7 +277,7 @@ ssize_t SurfaceComposerClient::getDisplayWidth(DisplayID dpy)
ssize_t SurfaceComposerClient::getDisplayHeight(DisplayID dpy)
{
if (uint32_t(dpy)>=NUM_DISPLAY_MAX)
if (uint32_t(dpy)>=SharedBufferStack::NUM_DISPLAY_MAX)
return BAD_VALUE;
volatile surface_flinger_cblk_t const * cblk = get_cblk();
volatile display_cblk_t const * dcblk = cblk->displays + dpy;
@ -286,7 +286,7 @@ ssize_t SurfaceComposerClient::getDisplayHeight(DisplayID dpy)
ssize_t SurfaceComposerClient::getDisplayOrientation(DisplayID dpy)
{
if (uint32_t(dpy)>=NUM_DISPLAY_MAX)
if (uint32_t(dpy)>=SharedBufferStack::NUM_DISPLAY_MAX)
return BAD_VALUE;
volatile surface_flinger_cblk_t const * cblk = get_cblk();
volatile display_cblk_t const * dcblk = cblk->displays + dpy;
@ -345,7 +345,7 @@ sp<SurfaceControl> SurfaceComposerClient::createSurface(
sp<ISurface> surface = mClient->createSurface(&data, pid, name,
display, w, h, format, flags);
if (surface != 0) {
if (uint32_t(data.token) < NUM_LAYERS_MAX) {
if (uint32_t(data.token) < SharedBufferStack::NUM_LAYERS_MAX) {
result = new SurfaceControl(this, surface, data, w, h, format, flags);
}
}

View File

@ -54,8 +54,16 @@ int main(int argc, char** argv)
printf("resize test\n");
s.resize(6);
c.setBufferCount(6);
class SetBufferCountIPC : public SharedBufferClient::SetBufferCountCallback {
SharedBufferServer& s;
virtual status_t operator()(int bufferCount) const {
return s.resize(bufferCount);
}
public:
SetBufferCountIPC(SharedBufferServer& s) : s(s) { }
} resize(s);
c.setBufferCount(6, resize);
int list3[6] = {3, 2, 1, 4, 5, 0};
test0(s, c, 6, list3);