Displaying a YUV Image in Android

In my application, we need to display video frames received from a server in our Android app.
The server sends video data at 50 frames per second, encoded in WebM, i.e. using libvpx to encode and decode the images.

Now, after getting the YUV data from the libvpx decoder, we can display it in the image layout.

The current implementation is something like this:

In the JNI / native C++ code, we convert the YUV data to RGB data, and in the Android framework we call

public Bitmap createImgae(byte[] bits, int width, int height, int scan) {
    Bitmap bitmap = null;
    System.out.println("video: creating bitmap");
    //try {
    bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    bitmap.copyPixelsFromBuffer(ByteBuffer.wrap(bits));
    //} catch (OutOfMemoryError ex) {
    //}
    System.out.println("video: bitmap created");
    return bitmap;
}

to create the bitmap image,

and use the following code to display the image on the ImageView:

img = createImgae(imgRaw, imgInfo[0], imgInfo[1], 1);
if (img != null && !img.isRecycled()) {
    iv.setImageBitmap(img);
    //img.recycle();
    img = null;
    System.out.println("video: image displayed");
}

My question is this: end to end, this path takes about 40 ms per frame, which is double the 20 ms budget that a 50 fps stream allows. Is there any way to optimize it?
1 – Is there a way to display the YUV data on the ImageView directly?

2 – Is there any other way to create an image (a bitmap) from the RGB data?

3 – I believe I am creating a new image every time, but I suspect I should create the bitmap only once and keep feeding it new buffers as they arrive (a sketch of what I mean follows below).
Please share your views.
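
For point 3, what I have in mind is roughly the following sketch. FrameSink is just an illustrative name, and it assumes the native side keeps delivering ARGB buffers of a fixed width * height * 4 bytes:

import java.nio.ByteBuffer;
import android.graphics.Bitmap;
import android.widget.ImageView;

// Hypothetical frame sink: allocate the Bitmap once, refill it for every frame.
class FrameSink {
    private final Bitmap frameBitmap; // created once, reused for each frame
    private final ImageView iv;

    FrameSink(ImageView iv, int width, int height) {
        this.iv = iv;
        this.frameBitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    }

    // Call on the UI thread with the ARGB bytes of one decoded frame.
    void onFrame(byte[] rgbBytes) {
        frameBitmap.copyPixelsFromBuffer(ByteBuffer.wrap(rgbBytes));
        iv.setImageBitmap(frameBitmap);
        iv.invalidate(); // the Bitmap instance is unchanged, so force a redraw
    }
}

Would something along these lines avoid the per-frame allocation cost?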


The following code may solve your problem, and it should take less time for YUV-format data, because the YuvImage class ships with the Android SDK.

You can try this:

ByteArrayOutputStream out = new ByteArrayOutputStream();
YuvImage yuvImage = new YuvImage(data, ImageFormat.NV21, width, height, null);
yuvImage.compressToJpeg(new Rect(0, 0, width, height), 50, out);
byte[] imageBytes = out.toByteArray();
Bitmap image = BitmapFactory.decodeByteArray(imageBytes, 0, imageBytes.length);
iv.setImageBitmap(image);
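
Note that this route compresses every frame to JPEG and decodes it back, which costs both CPU time and image quality. If that turns out to be too slow at 50 fps, one alternative on API 17+ is the ScriptIntrinsicYuvToRGB RenderScript intrinsic (deprecated since API 31). Below is a minimal sketch under the assumption of NV21 input; the YuvConverter class name is my own:

import android.content.Context;
import android.graphics.Bitmap;
import android.renderscript.Allocation;
import android.renderscript.Element;
import android.renderscript.RenderScript;
import android.renderscript.ScriptIntrinsicYuvToRGB;
import android.renderscript.Type;

// Converts NV21 frames to a reusable ARGB_8888 bitmap via the YUV-to-RGB intrinsic.
public final class YuvConverter {
    private final RenderScript rs;
    private final ScriptIntrinsicYuvToRGB script;
    private final Allocation in;
    private final Allocation out;
    private final Bitmap bitmap;

    public YuvConverter(Context context, int width, int height) {
        rs = RenderScript.create(context);
        script = ScriptIntrinsicYuvToRGB.create(rs, Element.U8_4(rs));
        // An NV21 frame is width * height * 3 / 2 bytes.
        Type yuvType = new Type.Builder(rs, Element.U8(rs))
                .setX(width * height * 3 / 2).create();
        in = Allocation.createTyped(rs, yuvType, Allocation.USAGE_SCRIPT);
        Type rgbaType = new Type.Builder(rs, Element.RGBA_8888(rs))
                .setX(width).setY(height).create();
        out = Allocation.createTyped(rs, rgbaType, Allocation.USAGE_SCRIPT);
        bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    }

    // Reuses the same allocations and bitmap for every frame.
    public Bitmap convert(byte[] nv21) {
        in.copyFrom(nv21);
        script.setInput(in);
        script.forEach(out);
        out.copyTo(bitmap);
        return bitmap;
    }
}

Create one YuvConverter when the frame size is known, then call convert(data) per frame and pass the returned bitmap to iv.setImageBitmap().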

Or:

void yourFunction(byte[] data, int mWidth, int mHeight) {
    int[] mIntArray = new int[mWidth * mHeight];

    // Decode the YUV data into an integer array of ARGB pixels
    decodeYUV420SP(mIntArray, data, mWidth, mHeight);

    // Initialize the bitmap from the decoded pixels
    Bitmap bmp = Bitmap.createBitmap(mIntArray, mWidth, mHeight, Bitmap.Config.ARGB_8888);

    // Draw the bitmap
    iv.setImageBitmap(bmp);
}

static public void decodeYUV420SP(int[] rgba, byte[] yuv420sp, int width, int height) {
    final int frameSize = width * height;

    for (int j = 0, yp = 0; j < height; j++) {
        int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
        for (int i = 0; i < width; i++, yp++) {
            int y = (0xff & ((int) yuv420sp[yp])) - 16;
            if (y < 0) y = 0;
            if ((i & 1) == 0) {
                v = (0xff & yuv420sp[uvp++]) - 128;
                u = (0xff & yuv420sp[uvp++]) - 128;
            }

            int y1192 = 1192 * y;
            int r = (y1192 + 1634 * v);
            int g = (y1192 - 833 * v - 400 * u);
            int b = (y1192 + 2066 * u);

            if (r < 0) r = 0; else if (r > 262143) r = 262143;
            if (g < 0) g = 0; else if (g > 262143) g = 262143;
            if (b < 0) b = 0; else if (b > 262143) b = 262143;

            // Pack into ARGB_8888 order; each 18-bit channel is reduced to
            // 8 bits by the shifts (effectively a divide by 2^10).
            rgba[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00)
                    | ((b >> 10) & 0xff);
        }
    }
}

Create a bitmap in onCreate, once you know the preview width and height:

 editedBitmap = Bitmap.createBitmap(widthPreview, heightPreview, android.graphics.Bitmap.Config.ARGB_8888); 

and fill it in onPreviewFrame:

int[] rgbData = decodeGreyscale(aNv21Byte, widthPreview, heightPreview);
editedBitmap.setPixels(rgbData, 0, widthPreview, 0, 0, widthPreview, heightPreview);

private int[] decodeGreyscale(byte[] nv21, int width, int height) {
    int pixelCount = width * height;
    int[] out = new int[pixelCount];
    for (int i = 0; i < pixelCount; ++i) {
        int luminance = nv21[i] & 0xFF;
        // out[i] = Color.argb(0xFF, luminance, luminance, luminance);
        // No need to create a Color value for each pixel; pack ARGB directly.
        out[i] = 0xff000000 | (luminance << 16) | (luminance << 8) | luminance;
    }
    return out;
}
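
For completeness, here is how these pieces might fit together. This is only a sketch and assumes the surrounding code uses the legacy android.hardware.Camera preview callback with the fields shown above:

// Assumes fields from the snippets above: camera, iv, editedBitmap,
// widthPreview, heightPreview (android.hardware.Camera API).
camera.setPreviewCallback(new Camera.PreviewCallback() {
    @Override
    public void onPreviewFrame(byte[] data, Camera camera) {
        // data holds one NV21 preview frame; reuse editedBitmap from onCreate
        int[] rgbData = decodeGreyscale(data, widthPreview, heightPreview);
        editedBitmap.setPixels(rgbData, 0, widthPreview, 0, 0, widthPreview, heightPreview);
        iv.setImageBitmap(editedBitmap);
    }
});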

And as a bonus, rotate the frame for the front-facing camera:

Matrix matrix = new Matrix(); // android.graphics.Matrix
if (cameraId == CameraInfo.CAMERA_FACING_FRONT) {
    matrix.setRotate(270F);
}
finalBitmap = Bitmap.createBitmap(editedBitmap, 0, 0, widthPreview, heightPreview, matrix, true);