  • Android Camera Preview: handling the ANativeWindow

    Looking at the JNI calls, Java displays the preview through a SurfaceTexture. CameraService obtains an ANativeWindow from that SurfaceTexture and uses it to pass preview data on to OpenGL.
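    Roughly speaking, CameraService wraps the ISurfaceTexture it receives over binder in a SurfaceTextureClient (which is an ANativeWindow) and hands it to the hardware interface. A simplified sketch of that wiring (not the exact AOSP code; the details vary between releases):

        // Simplified sketch of the CameraService side: the ISurfaceTexture that
        // arrives over binder is wrapped in a SurfaceTextureClient, which
        // implements ANativeWindow, and then handed to
        // CameraHardwareInterface::setPreviewWindow() shown further below.
        status_t CameraService::Client::setPreviewTexture(
                const sp<ISurfaceTexture>& surfaceTexture)
        {
            sp<ANativeWindow> window;
            if (surfaceTexture != 0) {
                window = new SurfaceTextureClient(surfaceTexture);
            }
            // mHardware is the CameraHardwareInterface for this client
            return mHardware->setPreviewWindow(window);
        }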

    The ANativeWindow-facing function implementations live in CameraHardwareInterface.h:

        struct camera_preview_window {
            struct preview_stream_ops nw;
            void *user;
        };
    
        struct camera_preview_window mHalPreviewWindow;

    preview_stream_ops is defined in hardware/libhardware/include/hardware/camera.h:

    typedef struct preview_stream_ops {
        int (*dequeue_buffer)(struct preview_stream_ops* w,
                              buffer_handle_t** buffer, int *stride);
        int (*enqueue_buffer)(struct preview_stream_ops* w,
                    buffer_handle_t* buffer);
        int (*cancel_buffer)(struct preview_stream_ops* w,
                    buffer_handle_t* buffer);
        int (*set_buffer_count)(struct preview_stream_ops* w, int count);
        int (*set_buffers_geometry)(struct preview_stream_ops* pw,
                    int w, int h, int format);
        int (*set_crop)(struct preview_stream_ops *w,
                    int left, int top, int right, int bottom);
        int (*set_usage)(struct preview_stream_ops* w, int usage);
        int (*set_swap_interval)(struct preview_stream_ops *w, int interval);
        int (*get_min_undequeued_buffer_count)(const struct preview_stream_ops *w,
                    int *count);
        int (*lock_buffer)(struct preview_stream_ops* w,
                    buffer_handle_t* buffer);
        // Timestamps are measured in nanoseconds, and must be comparable
        // and monotonically increasing between two frames in the same
        // preview stream. They do not need to be comparable between
        // consecutive or parallel preview streams, cameras, or app runs.
        int (*set_timestamp)(struct preview_stream_ops *w, int64_t timestamp);
    } preview_stream_ops_t;

    The initialization function in CameraHardwareInterface.h calls initHalPreviewWindow():

        status_t initialize(hw_module_t *module)
        {
            ALOGI("Opening camera %s", mName.string());
            int rc = module->methods->open(module, mName.string(),
                                           (hw_device_t **)&mDevice);
            if (rc != OK) {
                ALOGE("Could not open camera %s: %d", mName.string(), rc);
                return rc;
            }
            initHalPreviewWindow();                                                                                                            
            return rc;              
        }   
        void initHalPreviewWindow()
        {
            mHalPreviewWindow.nw.cancel_buffer = __cancel_buffer;
            mHalPreviewWindow.nw.lock_buffer = __lock_buffer;
            mHalPreviewWindow.nw.dequeue_buffer = __dequeue_buffer;
            mHalPreviewWindow.nw.enqueue_buffer = __enqueue_buffer;
            mHalPreviewWindow.nw.set_buffer_count = __set_buffer_count;
            mHalPreviewWindow.nw.set_buffers_geometry = __set_buffers_geometry;
            mHalPreviewWindow.nw.set_crop = __set_crop;
            mHalPreviewWindow.nw.set_timestamp = __set_timestamp;
            mHalPreviewWindow.nw.set_usage = __set_usage;
            mHalPreviewWindow.nw.set_swap_interval = __set_swap_interval;
    
            mHalPreviewWindow.nw.get_min_undequeued_buffer_count =
                    __get_min_undequeued_buffer_count;
        }

    OK, so the __xxxx functions are installed as the function pointers in mHalPreviewWindow.nw.

    Before preview starts, the ANativeWindow has to be passed down to the HAL:

        /** Set the ANativeWindow to which preview frames are sent */
        status_t setPreviewWindow(const sp<ANativeWindow>& buf)
        {
            ALOGV("%s(%s) buf %p", __FUNCTION__, mName.string(), buf.get());
    
            if (mDevice->ops->set_preview_window) {
                mPreviewWindow = buf;
                mHalPreviewWindow.user = this;
                ALOGV("%s &mHalPreviewWindow %p mHalPreviewWindow.user %p", __FUNCTION__,
                        &mHalPreviewWindow, mHalPreviewWindow.user);
                return mDevice->ops->set_preview_window(mDevice,
                        buf.get() ? &mHalPreviewWindow.nw : 0);
            }
            return INVALID_OPERATION;
        }

    The HAL can then operate on the ANativeWindow through these functions. One example is the implementation of __dequeue_buffer.

    This function takes a free buffer off the ANativeWindow's queue:

    @CameraHardwareInterface.h

    #define anw(n) __to_anw(((struct camera_preview_window *)n)->user)
    
        static int __dequeue_buffer(struct preview_stream_ops* w,
                                    buffer_handle_t** buffer, int *stride)
        {
            int rc;
            ANativeWindow *a = anw(w);
            ANativeWindowBuffer* anb;
            rc = a->dequeueBuffer(a, &anb);
            if (!rc) {
                *buffer = &anb->handle;
                *stride = anb->stride;
            }
            return rc;
        }
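    The anw(w) macro recovers the ANativeWindow from the user pointer that setPreviewWindow() stored earlier; in the same header, __to_anw is roughly:

        static ANativeWindow *__to_anw(void *user)
        {
            CameraHardwareInterface *__this =
                    reinterpret_cast<CameraHardwareInterface *>(user);
            return __this->mPreviewWindow.get();
        }

    On the HAL side, one preview frame is typically pushed through this ops table with a dequeue / fill / enqueue sequence. A minimal sketch of such a loop body (not from the original post; fillNV21Frame() is a hypothetical helper standing in for the camera driver copy):

        #include <hardware/camera.h>
        #include <hardware/gralloc.h>
        #include <ui/GraphicBufferMapper.h>
        #include <ui/Rect.h>

        // Hypothetical helper that copies/converts one camera frame into the
        // mapped gralloc buffer (stride is in pixels).
        void fillNV21Frame(void* dst, int width, int height, int stride);

        static int push_one_preview_frame(preview_stream_ops_t* w, int width, int height)
        {
            buffer_handle_t* buf = NULL;
            int stride = 0;

            // Ask the window for a free buffer slot.
            if (w->dequeue_buffer(w, &buf, &stride) != 0)
                return -1;
            if (w->lock_buffer(w, buf) != 0) {
                w->cancel_buffer(w, buf);       // give the slot back on failure
                return -1;
            }

            // Map the gralloc handle to a CPU pointer and copy the NV21 frame in.
            android::GraphicBufferMapper& mapper = android::GraphicBufferMapper::get();
            android::Rect bounds(width, height);
            void* vaddr = NULL;
            if (mapper.lock(*buf, GRALLOC_USAGE_SW_WRITE_OFTEN, bounds, &vaddr) == 0) {
                fillNV21Frame(vaddr, width, height, stride);   // hypothetical helper
                mapper.unlock(*buf);
                return w->enqueue_buffer(w, buf);              // hand the frame to the consumer
            }

            w->cancel_buffer(w, buf);
            return -1;
        }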

    In the HAL we can see the display side being initialized; the initial preview format is set to HAL_PIXEL_FORMAT_YCrCb_420_SP:

    ANativeWindowDisplayAdapter::ANativeWindowDisplayAdapter():mDisplayThread(NULL),
                                            mDisplayState(ANativeWindowDisplayAdapter::DISPLAY_INIT),
                                            mDisplayEnabled(false),
                                            mBufferCount(0)
    {
        LOG_FUNCTION_NAME;
    
    ...
    
        mPixelFormat = NULL;
        mNativeWindowPixelFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP;
     }

    and the call that changes the pixel format:

        err = mANativeWindow->set_buffers_geometry(
                mANativeWindow,
                width,
                height,
                mNativeWindowPixelFormat); //NV21

    This call goes straight back to the ANativeWindow wrapper functions in CameraHardwareInterface.h:

        static int __set_buffers_geometry(struct preview_stream_ops* w,
                          int width, int height, int format)
        {
            ANativeWindow *a = anw(w);
            return native_window_set_buffers_geometry(a,
                              width, height, format);
        }

    native_window_set_buffers_geometry() is defined in system/core/include/system/window.h. It is just an inline wrapper around the window's perform() hook; the actual cross-process work only happens further down, when buffers are dequeued over binder:

    /*
     * native_window_set_buffers_geometry(..., int w, int h, int format)                                                                       
     * All buffers dequeued after this call will have the dimensions and format
     * specified.  A successful call to this function has the same effect as calling
     * native_window_set_buffers_size and native_window_set_buffers_format.
     *
     * XXX: This function is deprecated.  The native_window_set_buffers_dimensions
     * and native_window_set_buffers_format functions should be used instead.
     */
    static inline int native_window_set_buffers_geometry(
            struct ANativeWindow* window,
            int w, int h, int format)
    {
        return window->perform(window, NATIVE_WINDOW_SET_BUFFERS_GEOMETRY,
                w, h, format);
    }
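    As the comment says, this helper is deprecated in favour of native_window_set_buffers_dimensions() and native_window_set_buffers_format(). Assuming a window.h revision that already ships those helpers, the equivalent call sequence would look like this sketch:

    // Equivalent effect using the non-deprecated helpers from the same header;
    // both are likewise thin wrappers around the perform() hook discussed next.
    static inline int set_buffers_geometry_compat(struct ANativeWindow* window,
            int w, int h, int format)
    {
        int err = native_window_set_buffers_dimensions(window, w, h);
        if (err != 0)
            return err;
        return native_window_set_buffers_format(window, format);
    }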

    struct ANativeWindow is also defined in this file. ANativeWindow is the interface OpenGL draws into, and the Surface class inherits from it. ANativeWindow is the bridge between OpenGL and the Android window system: OpenGL operates on Android windows indirectly through this interface, and the bridge is set up by the EGL library (every function prefixed with egl is an EGL entry point). That is as much as we need to know here. The helper above does its work through window->perform(...):

        /*
         * hook used to perform various operations on the surface.
         * (*perform)() is a generic mechanism to add functionality to
         * ANativeWindow while keeping backward binary compatibility.
         *
         * DO NOT CALL THIS HOOK DIRECTLY.  Instead, use the helper functions
         * defined below.
         *
         *  (*perform)() returns -ENOENT if the 'what' parameter is not supported
         *  by the surface's implementation.
         *
         * The valid operations are:
         *     NATIVE_WINDOW_SET_USAGE
         *     NATIVE_WINDOW_CONNECT               (deprecated)
         *     NATIVE_WINDOW_DISCONNECT            (deprecated)
         *     NATIVE_WINDOW_SET_CROP              (private)
         *     NATIVE_WINDOW_SET_BUFFER_COUNT
         *     NATIVE_WINDOW_SET_BUFFERS_GEOMETRY  (deprecated)
         *     NATIVE_WINDOW_SET_BUFFERS_TRANSFORM
         *     NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP
         *     NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS
         *     NATIVE_WINDOW_SET_BUFFERS_FORMAT
         *     NATIVE_WINDOW_SET_SCALING_MODE       (private)
         *     NATIVE_WINDOW_LOCK                   (private)
         *     NATIVE_WINDOW_UNLOCK_AND_POST        (private)
         *     NATIVE_WINDOW_API_CONNECT            (private)
         *     NATIVE_WINDOW_API_DISCONNECT         (private)
         *     NATIVE_WINDOW_SET_BUFFERS_USER_DIMENSIONS (private)
         *     NATIVE_WINDOW_SET_POST_TRANSFORM_CROP (private)
         *
         */
    
        int     (*perform)(struct ANativeWindow* window,
                    int operation, ... );

    So perform is a varargs function pointer: int (*perform)(struct ANativeWindow* window, int operation, ...);
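    The C-style hook is bound to the C++ object by a static trampoline in SurfaceTextureClient that unpacks the variable arguments into a va_list. Its approximate shape (simplified from SurfaceTextureClient.cpp) is:

    // Approximate shape of the trampoline installed into ANativeWindow::perform;
    // it recovers the SurfaceTextureClient from the ANativeWindow pointer and
    // forwards to the member function that takes a va_list (shown below).
    int SurfaceTextureClient::hook_perform(ANativeWindow* window, int operation, ...) {
        va_list args;
        va_start(args, operation);
        SurfaceTextureClient* c = getSelf(window);
        int result = c->perform(operation, args);
        va_end(args);
        return result;
    }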

    @./frameworks/native/libs/gui/SurfaceTextureClient.cpp

    int SurfaceTextureClient::perform(int operation, va_list args)
    {
        int res = NO_ERROR;
        switch (operation) {
        case NATIVE_WINDOW_CONNECT:
            // deprecated. must return NO_ERROR.
            break;
        case NATIVE_WINDOW_DISCONNECT:
            // deprecated. must return NO_ERROR.
            break;
        case NATIVE_WINDOW_SET_USAGE:
            res = dispatchSetUsage(args);
            break;
        case NATIVE_WINDOW_SET_CROP:
            res = dispatchSetCrop(args);
            break;
        case NATIVE_WINDOW_SET_BUFFER_COUNT:
            res = dispatchSetBufferCount(args);
            break;
        case NATIVE_WINDOW_SET_BUFFERS_GEOMETRY:
            res = dispatchSetBuffersGeometry(args);
            break;
        case NATIVE_WINDOW_SET_BUFFERS_TRANSFORM:
            res = dispatchSetBuffersTransform(args);
            break;
        case NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP:
            res = dispatchSetBuffersTimestamp(args);
            break;
        case NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS:
            res = dispatchSetBuffersDimensions(args);
            break;
        case NATIVE_WINDOW_SET_BUFFERS_USER_DIMENSIONS:
            res = dispatchSetBuffersUserDimensions(args);
            break;
        case NATIVE_WINDOW_SET_BUFFERS_FORMAT:
            res = dispatchSetBuffersFormat(args);
            break;
        case NATIVE_WINDOW_LOCK:
            res = dispatchLock(args);
            break;
        case NATIVE_WINDOW_UNLOCK_AND_POST:
            res = dispatchUnlockAndPost(args);
            break;
        case NATIVE_WINDOW_SET_SCALING_MODE:
            res = dispatchSetScalingMode(args);
            break;
        case NATIVE_WINDOW_API_CONNECT:
            res = dispatchConnect(args);
            break;
        case NATIVE_WINDOW_API_DISCONNECT:
            res = dispatchDisconnect(args);
            break;
        default:
            res = NAME_NOT_FOUND;
            break;
        }
        return res;
    }
    int SurfaceTextureClient::dispatchSetBuffersGeometry(va_list args) {
        int w = va_arg(args, int);
        int h = va_arg(args, int);
        int f = va_arg(args, int);
        int err = setBuffersDimensions(w, h);
        if (err != 0) {
            return err;
        }
        return setBuffersFormat(f);
    }
    int SurfaceTextureClient::setBuffersFormat(int format)
    {
        ALOGV("SurfaceTextureClient::setBuffersFormat");
    
        if (format<0)
            return BAD_VALUE;
    
        Mutex::Autolock lock(mMutex);
        mReqFormat = format;
        return NO_ERROR;
    }

    Not much to see here: mReqFormat is simply stored, with no real validation. Let's look at another function:

    int SurfaceTextureClient::dequeueBuffer(android_native_buffer_t** buffer) {
        ATRACE_CALL();
        ALOGV("SurfaceTextureClient::dequeueBuffer");
        Mutex::Autolock lock(mMutex);
        int buf = -1;
        int reqW = mReqWidth ? mReqWidth : mUserWidth;
        int reqH = mReqHeight ? mReqHeight : mUserHeight;
        status_t result = mSurfaceTexture->dequeueBuffer(&buf, reqW, reqH,
                mReqFormat, mReqUsage);
        ...

    mSurfaceTexture is of type ISurfaceTexture; it is the client end of the SurfaceTexture client/service (binder) pair.

    The proxy side is BpSurfaceTexture, in ISurfaceTexture.cpp:

    class BpSurfaceTexture : public BpInterface<ISurfaceTexture>
    {
    public:
        virtual status_t dequeueBuffer(int *buf, uint32_t w, uint32_t h,
                uint32_t format, uint32_t usage) {
            Parcel data, reply;                                                                                                                
            data.writeInterfaceToken(ISurfaceTexture::getInterfaceDescriptor());
            data.writeInt32(w);
            data.writeInt32(h);
            data.writeInt32(format);
            data.writeInt32(usage);
            status_t result = remote()->transact(DEQUEUE_BUFFER, data, &reply);
            if (result != NO_ERROR) {
                return result;
            }
            *buf = reply.readInt32();
            result = reply.readInt32();
            return result;
        }
    
    };

    Now let's see how BnSurfaceTexture handles DEQUEUE_BUFFER:

    status_t BnSurfaceTexture::onTransact(
        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
    {
        switch(code) {
            case DEQUEUE_BUFFER: {
                CHECK_INTERFACE(ISurfaceTexture, data, reply);
                uint32_t w      = data.readInt32();
                uint32_t h      = data.readInt32();
                uint32_t format = data.readInt32();
                uint32_t usage  = data.readInt32();
                int buf;
                int result = dequeueBuffer(&buf, w, h, format, usage);
                reply->writeInt32(buf);
                reply->writeInt32(result);
                return NO_ERROR;
            } break;
            ...
        }
    }

    dequeueBuffer is not implemented in BnSurfaceTexture itself; it is implemented in the subclass BufferQueue (@BufferQueue.h):

    class BufferQueue : public BnSurfaceTexture {
    public:
    
        // dequeueBuffer gets the next buffer slot index for the client to use. If a
        // buffer slot is available then that slot index is written to the location
        // pointed to by the buf argument and a status of OK is returned.  If no
        // slot is available then a status of -EBUSY is returned and buf is
        // unmodified.
        // The width and height parameters must be no greater than the minimum of
        // GL_MAX_VIEWPORT_DIMS and GL_MAX_TEXTURE_SIZE (see: glGetIntegerv).
        // An error due to invalid dimensions might not be reported until
        // updateTexImage() is called.
        virtual status_t dequeueBuffer(int *buf, uint32_t width, uint32_t height,
                uint32_t format, uint32_t usage);
    
    };

    The concrete implementation is in BufferQueue.cpp:

    status_t BufferQueue::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,
            uint32_t format, uint32_t usage) {
        ATRACE_CALL();
        ST_LOGV("dequeueBuffer: w=%d h=%d fmt=%#x usage=%#x", w, h, format, usage);
    
        if ((w && !h) || (!w && h)) {
            ST_LOGE("dequeueBuffer: invalid size: w=%u, h=%u", w, h);
            return BAD_VALUE;
        }
    
        status_t returnFlags(OK);
        EGLDisplay dpy = EGL_NO_DISPLAY;
        EGLSyncKHR fence = EGL_NO_SYNC_KHR;
    
        { // Scope for the lock
            Mutex::Autolock lock(mMutex);
    
            if (format == 0) {
                format = mDefaultBufferFormat;
            }
            // turn on usage bits the consumer requested
            usage |= mConsumerUsageBits;
    
            int found = -1;
            int foundSync = -1;
            int dequeuedCount = 0;
            bool tryAgain = true;
            while (tryAgain) {
                if (mAbandoned) {
                    ST_LOGE("dequeueBuffer: SurfaceTexture has been abandoned!");
                    return NO_INIT;
                }
    
                // We need to wait for the FIFO to drain if the number of buffer
                // needs to change.
                //
                // The condition "number of buffers needs to change" is true if
                // - the client doesn't care about how many buffers there are
                // - AND the actual number of buffer is different from what was
                //   set in the last setBufferCountServer()
                //                         - OR -
                //   setBufferCountServer() was set to a value incompatible with
                //   the synchronization mode (for instance because the sync mode
                //   changed since)
                //
                // As long as this condition is true AND the FIFO is not empty, we
                // wait on mDequeueCondition.
    
                const int minBufferCountNeeded = mSynchronousMode ?
                        mMinSyncBufferSlots : mMinAsyncBufferSlots;
    
                const bool numberOfBuffersNeedsToChange = !mClientBufferCount &&
                        ((mServerBufferCount != mBufferCount) ||
                                (mServerBufferCount < minBufferCountNeeded));
    
                if (!mQueue.isEmpty() && numberOfBuffersNeedsToChange) {
                    // wait for the FIFO to drain
                    mDequeueCondition.wait(mMutex);
                    // NOTE: we continue here because we need to reevaluate our
                    // whole state (eg: we could be abandoned or disconnected)
                    continue;
                }
    
                if (numberOfBuffersNeedsToChange) {
                    // here we're guaranteed that mQueue is empty
                    freeAllBuffersLocked();
                    mBufferCount = mServerBufferCount;
                    if (mBufferCount < minBufferCountNeeded)
                        mBufferCount = minBufferCountNeeded;
                    mBufferHasBeenQueued = false;
                    returnFlags |= ISurfaceTexture::RELEASE_ALL_BUFFERS;
                }
    
                // look for a free buffer to give to the client
                found = INVALID_BUFFER_SLOT;
                foundSync = INVALID_BUFFER_SLOT;
                dequeuedCount = 0;
                for (int i = 0; i < mBufferCount; i++) {
                    const int state = mSlots[i].mBufferState;
                    if (state == BufferSlot::DEQUEUED) {
                        dequeuedCount++;
                    }
    
                    // this logic used to be if (FLAG_ALLOW_DEQUEUE_CURRENT_BUFFER)
                    // but dequeuing the current buffer is disabled.
                    if (false) {
                        // This functionality has been temporarily removed so
                        // BufferQueue and SurfaceTexture can be refactored into
                        // separate objects
                    } else {
                        if (state == BufferSlot::FREE) {
                            /* We return the oldest of the free buffers to avoid
                             * stalling the producer if possible.  This is because
                             * the consumer may still have pending reads of the
                             * buffers in flight.
                             */
                            bool isOlder = mSlots[i].mFrameNumber <
                                    mSlots[found].mFrameNumber;
                            if (found < 0 || isOlder) {
                                foundSync = i;
                                found = i;
                            }
                        }
                    }
                }
    
                // clients are not allowed to dequeue more than one buffer
                // if they didn't set a buffer count.
                if (!mClientBufferCount && dequeuedCount) {
                    ST_LOGE("dequeueBuffer: can't dequeue multiple buffers without "
                            "setting the buffer count");
                    return -EINVAL;
                }
    
                // See whether a buffer has been queued since the last
                // setBufferCount so we know whether to perform the
                // mMinUndequeuedBuffers check below.
                if (mBufferHasBeenQueued) {
                    // make sure the client is not trying to dequeue more buffers
                    // than allowed.
                    const int avail = mBufferCount - (dequeuedCount+1);
                    if (avail < (mMinUndequeuedBuffers-int(mSynchronousMode))) {
                        ST_LOGE("dequeueBuffer: mMinUndequeuedBuffers=%d exceeded "
                                "(dequeued=%d)",
                                mMinUndequeuedBuffers-int(mSynchronousMode),
                                dequeuedCount);
                        return -EBUSY;
                    }
                }
    
                // if no buffer is found, wait for a buffer to be released
                tryAgain = found == INVALID_BUFFER_SLOT;
                if (tryAgain) {
                    mDequeueCondition.wait(mMutex);
                }
            }
    
    
            if (found == INVALID_BUFFER_SLOT) {
                // This should not happen.
                ST_LOGE("dequeueBuffer: no available buffer slots");
                return -EBUSY;
            }
    
            const int buf = found;
            *outBuf = found;
    
            ATRACE_BUFFER_INDEX(buf);
    
            const bool useDefaultSize = !w && !h;
            if (useDefaultSize) {
                // use the default size
                w = mDefaultWidth;
                h = mDefaultHeight;
            }
    
            const bool updateFormat = (format != 0);
            if (!updateFormat) {
                // keep the current (or default) format
                format = mPixelFormat;
            }
    
            // buffer is now in DEQUEUED (but can also be current at the same time,
            // if we're in synchronous mode)
            mSlots[buf].mBufferState = BufferSlot::DEQUEUED;
    
            const sp<GraphicBuffer>& buffer(mSlots[buf].mGraphicBuffer);
            if ((buffer == NULL) ||
                (uint32_t(buffer->width)  != w) ||
                (uint32_t(buffer->height) != h) ||
                (uint32_t(buffer->format) != format) ||
                ((uint32_t(buffer->usage) & usage) != usage))
            {
                status_t error;
                sp<GraphicBuffer> graphicBuffer(
                        mGraphicBufferAlloc->createGraphicBuffer(
                                w, h, format, usage, &error));
                if (graphicBuffer == 0) {
                    ST_LOGE("dequeueBuffer: SurfaceComposer::createGraphicBuffer "
                            "failed");
                    return error;
                }
                if (updateFormat) {
                    mPixelFormat = format;
                }
    
                mSlots[buf].mAcquireCalled = false;
                mSlots[buf].mGraphicBuffer = graphicBuffer;
                mSlots[buf].mRequestBufferCalled = false;
                mSlots[buf].mFence = EGL_NO_SYNC_KHR;
                mSlots[buf].mEglDisplay = EGL_NO_DISPLAY;
    
                returnFlags |= ISurfaceTexture::BUFFER_NEEDS_REALLOCATION;
            }
    
            dpy = mSlots[buf].mEglDisplay;
            fence = mSlots[buf].mFence;
            mSlots[buf].mFence = EGL_NO_SYNC_KHR;
            mDequeueUsage = usage;
        }  // end lock scope
    
        if (fence != EGL_NO_SYNC_KHR) {
            EGLint result = eglClientWaitSyncKHR(dpy, fence, 0, 1000000000);
            // If something goes wrong, log the error, but return the buffer without
            // synchronizing access to it.  It's too late at this point to abort the
            // dequeue operation.
            if (result == EGL_FALSE) {
                ST_LOGE("dequeueBuffer: error waiting for fence: %#x", eglGetError());
            } else if (result == EGL_TIMEOUT_EXPIRED_KHR) {
                ST_LOGE("dequeueBuffer: timeout waiting for fence");
            }
            eglDestroySyncKHR(dpy, fence);
        }
    
        ST_LOGV("dequeueBuffer: returning slot=%d buf=%p flags=%#x", *outBuf,
                mSlots[*outBuf].mGraphicBuffer->handle, returnFlags);
    
        return returnFlags;
    }

    The function above is long. The part that matters here: if the existing buffer's size, format, or usage does not match what was requested, a new GraphicBuffer has to be allocated:

                sp<GraphicBuffer> graphicBuffer(
                        mGraphicBufferAlloc->createGraphicBuffer(
                                w, h, format, usage, &error));
        // mGraphicBufferAlloc is the connection to SurfaceFlinger that is used to
        // allocate new GraphicBuffer objects.
        sp<IGraphicBufferAlloc> mGraphicBufferAlloc;
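    mGraphicBufferAlloc itself is obtained from SurfaceFlinger when the BufferQueue is constructed; roughly, simplified from the BufferQueue constructor of this Android generation:

        // ComposerService hands back the ISurfaceComposer binder for SurfaceFlinger,
        // which in turn creates the IGraphicBufferAlloc used for all allocations.
        sp<ISurfaceComposer> composer(ComposerService::getComposerService());
        mGraphicBufferAlloc = composer->createGraphicBufferAlloc();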

    That brings us to SurfaceFlinger, @SurfaceFlinger.cpp:

    sp<GraphicBuffer> GraphicBufferAlloc::createGraphicBuffer(uint32_t w, uint32_t h,
            PixelFormat format, uint32_t usage, status_t* error) {
        sp<GraphicBuffer> graphicBuffer(new GraphicBuffer(w, h, format, usage));
        status_t err = graphicBuffer->initCheck();
        *error = err;
        if (err != 0 || graphicBuffer->handle == 0) {
            if (err == NO_MEMORY) {
                GraphicBuffer::dumpAllocationsToSystemLog();
            }
            ALOGE("GraphicBufferAlloc::createGraphicBuffer(w=%d, h=%d) "
                 "failed (%s), handle=%p",
                    w, h, strerror(-err), graphicBuffer->handle);
            return 0;
        }
        return graphicBuffer;
    }

    GraphicBuffer is defined in frameworks/native/include/ui/GraphicBuffer.h.
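    Inside GraphicBuffer, the constructor just records the request and asks GraphicBufferAllocator (and through it the gralloc HAL) for the actual allocation. Simplified from GraphicBuffer.cpp of the same era:

        // Simplified: the constructor delegates to initSize(), which calls into
        // GraphicBufferAllocator and ultimately the gralloc HAL. width, height,
        // stride, format, usage and handle are inherited ANativeWindowBuffer fields.
        GraphicBuffer::GraphicBuffer(uint32_t w, uint32_t h,
                PixelFormat reqFormat, uint32_t reqUsage)
            : BASE(), mOwner(ownData), mBufferMapper(GraphicBufferMapper::get()),
              mInitCheck(NO_ERROR)
        {
            width = height = stride = format = usage = 0;
            handle = NULL;
            mInitCheck = initSize(w, h, reqFormat, reqUsage);
        }

        status_t GraphicBuffer::initSize(uint32_t w, uint32_t h,
                PixelFormat format, uint32_t reqUsage)
        {
            GraphicBufferAllocator& allocator = GraphicBufferAllocator::get();
            status_t err = allocator.alloc(w, h, format, reqUsage, &handle, &stride);
            if (err == NO_ERROR) {
                this->width  = w;
                this->height = h;
                this->format = format;   // the PixelFormat requested by BufferQueue
                this->usage  = reqUsage;
            }
            return err;
        }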

    Its constructor takes the format parameter as a PixelFormat:

    namespace android {
    
    enum {
        //
        // these constants need to match those
        // in graphics/PixelFormat.java & pixelflinger/format.h
        //
        PIXEL_FORMAT_UNKNOWN    =   0,
        PIXEL_FORMAT_NONE       =   0,
    
        // logical pixel formats used by the SurfaceFlinger -----------------------
        PIXEL_FORMAT_CUSTOM         = -4,
            // Custom pixel-format described by a PixelFormatInfo structure
    
        PIXEL_FORMAT_TRANSLUCENT    = -3,
            // System chooses a format that supports translucency (many alpha bits)
    
        PIXEL_FORMAT_TRANSPARENT    = -2,
            // System chooses a format that supports transparency
            // (at least 1 alpha bit)
    
        PIXEL_FORMAT_OPAQUE         = -1,
            // System chooses an opaque format (no alpha bits required)
    
        // real pixel formats supported for rendering -----------------------------
    
        PIXEL_FORMAT_RGBA_8888   = HAL_PIXEL_FORMAT_RGBA_8888,  // 4x8-bit RGBA
        PIXEL_FORMAT_RGBX_8888   = HAL_PIXEL_FORMAT_RGBX_8888,  // 4x8-bit RGB0
        PIXEL_FORMAT_RGB_888     = HAL_PIXEL_FORMAT_RGB_888,    // 3x8-bit RGB
        PIXEL_FORMAT_RGB_565     = HAL_PIXEL_FORMAT_RGB_565,    // 16-bit RGB
        PIXEL_FORMAT_BGRA_8888   = HAL_PIXEL_FORMAT_BGRA_8888,  // 4x8-bit BGRA
        PIXEL_FORMAT_RGBA_5551   = HAL_PIXEL_FORMAT_RGBA_5551,  // 16-bit ARGB
        PIXEL_FORMAT_RGBA_4444   = HAL_PIXEL_FORMAT_RGBA_4444,  // 16-bit ARGB
        PIXEL_FORMAT_A_8         = 8,                           // 8-bit A
    };
    
    typedef int32_t PixelFormat;
    
    struct PixelFormatInfo {
        enum {
            INDEX_ALPHA   = 0,
            INDEX_RED     = 1,
            INDEX_GREEN   = 2,
            INDEX_BLUE    = 3
        };
    
        enum { // components
            ALPHA   = 1,
            RGB     = 2,
            RGBA    = 3,
            L       = 4,
            LA      = 5,
            OTHER   = 0xFF
        };
    
        struct szinfo {
            uint8_t h;
            uint8_t l;
        };
    
        inline PixelFormatInfo() : version(sizeof(PixelFormatInfo)) { }
        size_t getScanlineSize(unsigned int width) const;
        size_t getSize(size_t ci) const {
            return (ci <= 3) ? (cinfo[ci].h - cinfo[ci].l) : 0;
        }
        size_t      version;
        PixelFormat format;
        size_t      bytesPerPixel;
        size_t      bitsPerPixel;
        union {
            szinfo      cinfo[4];
            struct {
                uint8_t     h_alpha;
                uint8_t     l_alpha;
                uint8_t     h_red;
                uint8_t     l_red;
                uint8_t     h_green;
                uint8_t     l_green;
                uint8_t     h_blue;
                uint8_t     l_blue;
            };
        };
        uint8_t     components;
        uint8_t     reserved0[3];
        uint32_t    reserved1;
    };

    At first glance this looks inconsistent: the PixelFormat values here don't match the HAL definitions below one-to-one. PixelFormat is really an extension layered on top of the HAL formats: the positive values alias the HAL_PIXEL_FORMAT_* constants, while the negative ones are logical formats that only SurfaceFlinger interprets.

    HAL_PIXEL_FORMAT_YCrCb_420_SP itself is defined in ./system/core/include/system/graphics.h:

    enum {
        HAL_PIXEL_FORMAT_RGBA_8888          = 1,
        HAL_PIXEL_FORMAT_RGBX_8888          = 2,
        HAL_PIXEL_FORMAT_RGB_888            = 3,
        HAL_PIXEL_FORMAT_RGB_565            = 4,
        HAL_PIXEL_FORMAT_BGRA_8888          = 5,
        HAL_PIXEL_FORMAT_RGBA_5551          = 6,
        HAL_PIXEL_FORMAT_RGBA_4444          = 7,
    
        /* 0x8 - 0xFF range unavailable */
    
        /*
         * 0x100 - 0x1FF
         *
         * This range is reserved for pixel formats that are specific to the HAL
         * implementation.  Implementations can use any value in this range to
         * communicate video pixel formats between their HAL modules.  These formats
         * must not have an alpha channel.  Additionally, an EGLimage created from a
         * gralloc buffer of one of these formats must be supported for use with the
         * GL_OES_EGL_image_external OpenGL ES extension.
         */
    
        /*
         * Android YUV format:
         *
         * This format is exposed outside of the HAL to software decoders and
         * applications.  EGLImageKHR must support it in conjunction with the
         * OES_EGL_image_external extension.
         *
         * YV12 is a 4:2:0 YCrCb planar format comprised of a WxH Y plane followed
         * by (W/2) x (H/2) Cr and Cb planes.
         *
         * This format assumes
         * - an even width
         * - an even height
         * - a horizontal stride multiple of 16 pixels
         * - a vertical stride equal to the height
         *
         *   y_size = stride * height
         *   c_stride = ALIGN(stride/2, 16)
         *   c_size = c_stride * height/2
         *   size = y_size + c_size * 2
         *   cr_offset = y_size
         *   cb_offset = y_size + c_size
         *
         */
        HAL_PIXEL_FORMAT_YV12   = 0x32315659, // YCrCb 4:2:0 Planar
    
        /*
         * Android RAW sensor format:
         *
         * This format is exposed outside of the HAL to applications.
         *
         * RAW_SENSOR is a single-channel 16-bit format, typically representing raw
         * Bayer-pattern images from an image sensor, with minimal processing.
         *
         * The exact pixel layout of the data in the buffer is sensor-dependent, and
         * needs to be queried from the camera device.
         *
         * Generally, not all 16 bits are used; more common values are 10 or 12
         * bits. All parameters to interpret the raw data (black and white points,
         * color space, etc) must be queried from the camera device.
         *
         * This format assumes
         * - an even width
         * - an even height
         * - a horizontal stride multiple of 16 pixels (32 bytes).
         */
        HAL_PIXEL_FORMAT_RAW_SENSOR = 0x20,
    
        /* Legacy formats (deprecated), used by ImageFormat.java */
        HAL_PIXEL_FORMAT_YCbCr_422_SP       = 0x10, // NV16
        HAL_PIXEL_FORMAT_YCrCb_420_SP       = 0x11, // NV21
        HAL_PIXEL_FORMAT_YCbCr_422_I        = 0x14, // YUY2
    };
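    As a quick sanity check of the YV12 layout rules quoted in that header comment, here is a small standalone worked example (not part of the original post) that computes the plane sizes for a 640x480 buffer:

    // Worked example of the YV12 size math from the graphics.h comment above
    // (the horizontal stride is assumed to be aligned to 16 pixels).
    #include <cstdint>
    #include <cstdio>

    static uint32_t align(uint32_t x, uint32_t a) { return (x + a - 1) & ~(a - 1); }

    int main()
    {
        uint32_t width = 640, height = 480;
        uint32_t stride   = align(width, 16);        // horizontal stride, multiple of 16
        uint32_t y_size   = stride * height;
        uint32_t c_stride = align(stride / 2, 16);
        uint32_t c_size   = c_stride * height / 2;
        uint32_t size     = y_size + c_size * 2;

        printf("YV12 %ux%u: stride=%u y_size=%u c_stride=%u c_size=%u total=%u bytes\n",
               width, height, stride, y_size, c_stride, c_size, size);
        return 0;
    }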