MediaRecorder with Google Vision API

I am using the FaceTracker sample from the Android Vision API. However, I am having trouble recording video while drawing overlays on top of it.

One way is to store the bitmaps as images and merge them into a video using FFmpeg or Xuggler, but I am wondering whether there is a better solution that lets us record the video at runtime, while the preview is being projected.

Update 1: I updated the class below to use MediaRecorder, but recording still does not work. The following error is thrown when I call the triggerRecording() function:

MediaRecorder: start called in an invalid state: 4

I have the external storage permission in the manifest file.

Update 2:

I fixed the problem above in my code and moved setUpMediaRecorder() into the onSurfaceCreated callback. However, when I stop the recording it throws a RuntimeException. According to the documentation, a RuntimeException is thrown if no valid video/audio data was received.
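For reference, the documentation suggests catching that exception and discarding the output. A minimal sketch of that pattern (mOutputFile is a hypothetical field holding the path that was passed to setOutputFile()):

    private void stopRecordingVideoSafely() {
        mIsRecordingVideo = false;
        try {
            mMediaRecorder.stop();
        } catch (RuntimeException e) {
            // stop() throws if no valid audio/video data was received,
            // which leaves an unusable output file behind; delete it.
            new File(mOutputFile).delete();   // mOutputFile: hypothetical path field
        } finally {
            mMediaRecorder.reset();
        }
    }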

So, what am I missing here?

public class CameraSourcePreview extends ViewGroup {
    private static final String TAG = "CameraSourcePreview";

    private static final SparseIntArray ORIENTATIONS = new SparseIntArray();

    static {
        ORIENTATIONS.append(Surface.ROTATION_0, 90);
        ORIENTATIONS.append(Surface.ROTATION_90, 0);
        ORIENTATIONS.append(Surface.ROTATION_180, 270);
        ORIENTATIONS.append(Surface.ROTATION_270, 180);
    }

    private MediaRecorder mMediaRecorder;

    /**
     * Whether the app is recording video now
     */
    private boolean mIsRecordingVideo;

    private Context mContext;
    private SurfaceView mSurfaceView;
    private boolean mStartRequested;
    private boolean mSurfaceAvailable;
    private CameraSource mCameraSource;
    private GraphicOverlay mOverlay;

    public CameraSourcePreview(Context context, AttributeSet attrs) {
        super(context, attrs);
        mContext = context;
        mStartRequested = false;
        mSurfaceAvailable = false;

        mSurfaceView = new SurfaceView(context);
        mSurfaceView.getHolder().addCallback(new SurfaceCallback());
        addView(mSurfaceView);

        mMediaRecorder = new MediaRecorder();
    }

    private void setUpMediaRecorder() throws IOException {
        mMediaRecorder.setPreviewDisplay(mSurfaceView.getHolder().getSurface());
        mMediaRecorder.setAudioSource(MediaRecorder.AudioSource.MIC);
        mMediaRecorder.setVideoSource(MediaRecorder.VideoSource.SURFACE);
        mMediaRecorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4);
        mMediaRecorder.setOutputFile(Environment.getExternalStorageDirectory() + File.separator
                + Environment.DIRECTORY_DCIM + File.separator + System.currentTimeMillis() + ".mp4");
        mMediaRecorder.setVideoEncodingBitRate(10000000);
        mMediaRecorder.setVideoFrameRate(30);
        mMediaRecorder.setVideoSize(480, 640);
        mMediaRecorder.setVideoEncoder(MediaRecorder.VideoEncoder.H264);
        mMediaRecorder.setAudioEncoder(MediaRecorder.AudioEncoder.AAC);
        //int rotation = mContext.getWindowManager().getDefaultDisplay().getRotation();
        //int orientation = ORIENTATIONS.get(rotation);
        mMediaRecorder.setOrientationHint(ORIENTATIONS.get(0));
        mMediaRecorder.prepare();
        mMediaRecorder.setOnErrorListener(new MediaRecorder.OnErrorListener() {
            @Override
            public void onError(MediaRecorder mr, int what, int extra) {
                Timber.d(mr.toString() + " : what[" + what + "]" + " Extras[" + extra + "]");
            }
        });
    }

    public void start(CameraSource cameraSource) throws IOException {
        if (cameraSource == null) {
            stop();
        }

        mCameraSource = cameraSource;

        if (mCameraSource != null) {
            mStartRequested = true;
            startIfReady();
        }
    }

    public void start(CameraSource cameraSource, GraphicOverlay overlay) throws IOException {
        mOverlay = overlay;
        start(cameraSource);
    }

    public void stop() {
        if (mCameraSource != null) {
            mCameraSource.stop();
        }
    }

    public void release() {
        if (mCameraSource != null) {
            mCameraSource.release();
            mCameraSource = null;
        }
    }

    private void startIfReady() throws IOException {
        if (mStartRequested && mSurfaceAvailable) {
            mCameraSource.start(mSurfaceView.getHolder());

            if (mOverlay != null) {
                Size size = mCameraSource.getPreviewSize();
                int min = Math.min(size.getWidth(), size.getHeight());
                int max = Math.max(size.getWidth(), size.getHeight());
                if (isPortraitMode()) {
                    // Swap width and height sizes when in portrait, since it will be rotated by
                    // 90 degrees
                    mOverlay.setCameraInfo(min, max, mCameraSource.getCameraFacing());
                } else {
                    mOverlay.setCameraInfo(max, min, mCameraSource.getCameraFacing());
                }
                mOverlay.clear();
            }

            mStartRequested = false;
        }
    }

    private class SurfaceCallback implements SurfaceHolder.Callback {
        @Override
        public void surfaceCreated(SurfaceHolder surface) {
            mSurfaceAvailable = true;
            surface.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);

            // set up the media recorder
            try {
                setUpMediaRecorder();
            } catch (IOException e) {
                e.printStackTrace();
            }

            try {
                startIfReady();
            } catch (IOException e) {
                Timber.e(TAG, "Could not start camera source.", e);
            }
        }

        @Override
        public void surfaceDestroyed(SurfaceHolder surface) {
            mSurfaceAvailable = false;
        }

        @Override
        public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
        }
    }

    @Override
    protected void onLayout(boolean changed, int left, int top, int right, int bottom) {
        int width = 320;
        int height = 240;
        if (mCameraSource != null) {
            Size size = mCameraSource.getPreviewSize();
            if (size != null) {
                width = size.getWidth();
                height = size.getHeight();
            }
        }

        // Swap width and height sizes when in portrait, since it will be rotated 90 degrees
        if (isPortraitMode()) {
            int tmp = width;
            width = height;
            height = tmp;
        }

        final int layoutWidth = right - left;
        final int layoutHeight = bottom - top;

        // Computes height and width for potentially doing fit width.
        int childWidth = layoutWidth;
        int childHeight = (int) (((float) layoutWidth / (float) width) * height);

        // If height is too tall using fit width, does fit height instead.
        if (childHeight > layoutHeight) {
            childHeight = layoutHeight;
            childWidth = (int) (((float) layoutHeight / (float) height) * width);
        }

        for (int i = 0; i < getChildCount(); ++i) {
            getChildAt(i).layout(0, 0, childWidth, childHeight);
        }

        try {
            startIfReady();
        } catch (IOException e) {
            Timber.e(TAG, "Could not start camera source.", e);
        }
    }

    private boolean isPortraitMode() {
        int orientation = mContext.getResources().getConfiguration().orientation;
        if (orientation == Configuration.ORIENTATION_LANDSCAPE) {
            return false;
        }
        if (orientation == Configuration.ORIENTATION_PORTRAIT) {
            return true;
        }

        Timber.d(TAG, "isPortraitMode returning false by default");
        return false;
    }

    private void startRecordingVideo() {
        try {
            // Start recording
            mMediaRecorder.start();
            mIsRecordingVideo = true;
        } catch (IllegalStateException e) {
            e.printStackTrace();
        }
    }

    private void stopRecordingVideo() {
        // UI
        mIsRecordingVideo = false;

        // Stop recording
        mMediaRecorder.stop();
        mMediaRecorder.reset();
    }

    public void triggerRecording() {
        if (mIsRecordingVideo) {
            stopRecordingVideo();
            Timber.d("Recording stopped");
        } else {
            startRecordingVideo();
            Timber.d("Recording starting");
        }
    }
}

Solutions

Solution 1: In Android Lollipop, the MediaProjection API was introduced, which can be used together with MediaRecorder to save the contents of a SurfaceView to a video file. This sample shows how to output a SurfaceView to a video file.
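A minimal sketch of that approach, assuming mMediaRecorder is already configured with VideoSource.SURFACE and prepared (so that getSurface() is valid); the request code, display name, and size values are placeholders:

    // Inside an Activity: record the mirrored screen (preview + overlays)
    // through MediaProjection into the MediaRecorder's input surface.
    private static final int REQUEST_SCREEN_CAPTURE = 1; // hypothetical request code

    private MediaProjectionManager mProjectionManager;
    private MediaProjection mMediaProjection;
    private VirtualDisplay mVirtualDisplay;

    private void requestCapture() {
        mProjectionManager =
                (MediaProjectionManager) getSystemService(Context.MEDIA_PROJECTION_SERVICE);
        // Prompts the user for permission to capture the screen
        startActivityForResult(mProjectionManager.createScreenCaptureIntent(),
                REQUEST_SCREEN_CAPTURE);
    }

    @Override
    protected void onActivityResult(int requestCode, int resultCode, Intent data) {
        if (requestCode == REQUEST_SCREEN_CAPTURE && resultCode == RESULT_OK) {
            mMediaProjection = mProjectionManager.getMediaProjection(resultCode, data);
            // Mirror the screen into the recorder's input surface.
            // Size and density values here are placeholders.
            mVirtualDisplay = mMediaProjection.createVirtualDisplay("FaceTrackerCapture",
                    480, 640, getResources().getDisplayMetrics().densityDpi,
                    DisplayManager.VIRTUAL_DISPLAY_FLAG_AUTO_MIRROR,
                    mMediaRecorder.getSurface(), null, null);
            mMediaRecorder.start();
        }
    }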

Solution 2: Alternatively, you could use one of the neat encoder classes provided in the Grafika repository. Note that this will require you to port the FaceTracker application so that it does all of its rendering with OpenGL. This is because the Grafika samples take advantage of the OpenGL pipeline for fast reads and writes of texture data.

There is a minimal example that achieves exactly what you want in the ContinuousCaptureActivity class, using CircularEncoder. It demonstrates frame blitting: displaying the frame-buffer data on the screen and writing it to a video at the same time.

The major change would be to use a Grafika WindowSurface instead of the SurfaceView in the FaceTracker application; this sets up the EGL context and lets you save frame-buffer data to a file through the encoder. Once you can render everything to the WindowSurface, setting up recording the same way as the ContinuousCaptureActivity class is trivial.
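A rough sketch of the per-frame loop in that style, following the pattern from ContinuousCaptureActivity (EglCore, WindowSurface and CircularEncoder are Grafika classes; drawPreviewAndOverlays() is a hypothetical method standing in for whatever GL rendering the ported FaceTracker would do):

    // Per-frame loop in the ContinuousCaptureActivity style: render each frame
    // twice, once to the screen and once to the encoder's input surface.
    private EglCore mEglCore;
    private WindowSurface mDisplaySurface;   // wraps the on-screen surface
    private WindowSurface mEncoderSurface;   // wraps mCircEncoder.getInputSurface()
    private CircularEncoder mCircEncoder;

    private void onFrameAvailable() {
        // Draw to the screen.
        mDisplaySurface.makeCurrent();
        drawPreviewAndOverlays();            // hypothetical: camera frame + face overlays
        mDisplaySurface.swapBuffers();

        // Draw the same frame into the encoder's input surface.
        mEncoderSurface.makeCurrent();
        drawPreviewAndOverlays();
        mCircEncoder.frameAvailableSoon();   // tell the encoder a frame is coming
        mEncoderSurface.swapBuffers();
    }

Saving the recording would then be a matter of calling mCircEncoder.saveVideo(outputFile) when the user triggers it, as ContinuousCaptureActivity does.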