How do I rotate the live object detection screen on Temi Robot?


I am currently using android-demo-app/ObjectDetection/ on a Temi robot. The preloaded images work fine so far, but when I press "Live" to switch to the live object detection screen, the camera feed is rotated 90 degrees to the right.

The Temi robot only has a front-facing camera, on the same side as the screen.

I have tried changing textureView.setTransform(), ImageAnalysisConfig.Builder().setTargetRotation(), and imageAnalysis.setTargetRotation(), but to no avail.

I also tried changing android:screenOrientation under the activity tag in AndroidManifest.xml to fullSensor or landscape, but nothing changed.

I have been looking up and down the Android Developer CameraX pages for an answer (first link, second link) but can't find one. Maybe I'm just not seeing it.
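
For completeness, the pattern those docs describe for keeping setTargetRotation up to date is an OrientationEventListener. A minimal sketch (this assumes an imageAnalysis field like the one in the code below; note that on a fixed-screen device like Temi the listener may simply never fire, which would explain why setTargetRotation appears to do nothing):

    // Documented CameraX pattern: drive setTargetRotation from device orientation.
    OrientationEventListener orientationEventListener = new OrientationEventListener(this) {
        @Override
        public void onOrientationChanged(int orientation) {
            int rotation;
            if (orientation >= 45 && orientation < 135) {
                rotation = Surface.ROTATION_270;
            } else if (orientation >= 135 && orientation < 225) {
                rotation = Surface.ROTATION_180;
            } else if (orientation >= 225 && orientation < 315) {
                rotation = Surface.ROTATION_90;
            } else {
                rotation = Surface.ROTATION_0;
            }
            imageAnalysis.setTargetRotation(rotation);
        }
    };
    orientationEventListener.enable();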

Any help is much appreciated!

AbstractCameraXActivity.java

    private void setupCameraX() {
        final TextureView textureView = getCameraPreviewTextureView();
        final PreviewConfig previewConfig = new PreviewConfig.Builder().build();
        final Preview preview = new Preview(previewConfig);
//        Matrix m = new Matrix();
//        m.postRotate(180);
//        textureView.setTransform(m); // not working
        preview.setOnPreviewOutputUpdateListener(output -> textureView.setSurfaceTexture(output.getSurfaceTexture()));

        final var imageAnalysisConfig =
                new ImageAnalysisConfig.Builder()
                        .setTargetResolution(new Size(500, 500))
                        .setCallbackHandler(mBackgroundHandler)
                        .setImageReaderMode(ImageAnalysis.ImageReaderMode.ACQUIRE_LATEST_IMAGE)
                        //.setTargetRotation(Surface.ROTATION_0) // not working
                        .build();

        imageAnalysis = new ImageAnalysis(imageAnalysisConfig);
        imageAnalysis.setAnalyzer((image, rotationDegrees) -> {
            // Throttle analysis to at most one frame every 500 ms
            if (SystemClock.elapsedRealtime() - mLastAnalysisResultTime < 500) {
                return;
            }

            final R result = analyzeImage(image, rotationDegrees);
            if (result != null) {
                mLastAnalysisResultTime = SystemClock.elapsedRealtime();
                runOnUiThread(() -> applyToUiAnalyzeImageResult(result));
            }
        });
        //imageAnalysis.setTargetRotation(Surface.ROTATION_180); // not working
        CameraX.bindToLifecycle(this, preview, imageAnalysis);
    }
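
For reference, the CameraX alpha codelab handles preview rotation by reattaching the TextureView on each output update and then applying a Matrix transform centered on the view. A sketch of that pattern, adapted to use the output's reported rotation (this assumes the TextureView sits directly inside a ViewGroup, which I have not verified against this layout):

    preview.setOnPreviewOutputUpdateListener(output -> {
        // Re-attach the TextureView so the new SurfaceTexture takes effect
        ViewGroup parent = (ViewGroup) textureView.getParent();
        parent.removeView(textureView);
        parent.addView(textureView, 0);
        textureView.setSurfaceTexture(output.getSurfaceTexture());

        // Rotate the preview around the view's center to undo the sensor rotation
        Matrix matrix = new Matrix();
        float centerX = textureView.getWidth() / 2f;
        float centerY = textureView.getHeight() / 2f;
        matrix.postRotate(-output.getRotationDegrees(), centerX, centerY);
        textureView.setTransform(matrix);
    });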

ObjectDetectionActivity.java

@Override
@WorkerThread
@Nullable
protected AnalysisResult analyzeImage(ImageProxy image, int rotationDegrees) {
    try {
        if (mModule == null) {
            mModule = LiteModuleLoader.load(MainActivity.assetFilePath(getApplicationContext(), "yolov5s.torchscript.ptl"));
        }
    } catch (IOException e) {
        Log.e("Object Detection", "Error reading assets", e);
        return null;
    }

    // Convert the camera frame to a Bitmap, rotate it 90 degrees to compensate
    // for the sensor orientation, then scale it to the model's input size
    Bitmap bitmap = imgToBitmap(Objects.requireNonNull(image.getImage()));
    Matrix matrix = new Matrix();
    matrix.postRotate(90.0f);
    bitmap = Bitmap.createBitmap(bitmap, 0, 0, bitmap.getWidth(), bitmap.getHeight(), matrix, true);
    Bitmap resizedBitmap = Bitmap.createScaledBitmap(bitmap, PrePostProcessor.mInputWidth, PrePostProcessor.mInputHeight, true);

    final Tensor inputTensor = TensorImageUtils.bitmapToFloat32Tensor(resizedBitmap, PrePostProcessor.NO_MEAN_RGB, PrePostProcessor.NO_STD_RGB);
    IValue[] outputTuple = mModule.forward(IValue.from(inputTensor)).toTuple();
    final Tensor outputTensor = outputTuple[0].toTensor();
    final float[] outputs = outputTensor.getDataAsFloatArray();

    float imgScaleX = (float)bitmap.getWidth() / PrePostProcessor.mInputWidth;
    float imgScaleY = (float)bitmap.getHeight() / PrePostProcessor.mInputHeight;
    float ivScaleX = (float)mResultView.getWidth() / bitmap.getWidth();
    float ivScaleY = (float)mResultView.getHeight() / bitmap.getHeight();

    final ArrayList<Result> results = PrePostProcessor.outputsToNMSPredictions(outputs, imgScaleX, imgScaleY, ivScaleX, ivScaleY, 0, 0);
    return new AnalysisResult(results);
}
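
Incidentally, the analyzer callback already reports each frame's rotation, so the hard-coded 90 above could use that value instead; a small sketch of the substitution:

    // Use the rotation CameraX reports for this frame rather than a constant,
    // so the code still works if the reported sensor orientation ever differs
    Matrix matrix = new Matrix();
    matrix.postRotate((float) rotationDegrees);
    bitmap = Bitmap.createBitmap(bitmap, 0, 0, bitmap.getWidth(), bitmap.getHeight(), matrix, true);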

AndroidManifest.xml

<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
    package="org.pytorch.demo.objectdetection">

    <uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE" />
    <uses-permission android:name="android.permission.CAMERA" />

    <application
        android:allowBackup="true"
        android:icon="@mipmap/ic_launcher"
        android:label="@string/app_name"
        android:roundIcon="@mipmap/ic_launcher_round"
        android:supportsRtl="true"
        android:theme="@style/AppTheme">
        <activity android:name=".MainActivity"
            android:configChanges="orientation"
            android:screenOrientation="fullSensor">
            <intent-filter>
                <action android:name="android.intent.action.MAIN" />
                <category android:name="android.intent.category.LAUNCHER" />
            </intent-filter>
        </activity>
        <activity
            android:name=".ObjectDetectionActivity"
            android:configChanges="orientation"
            android:screenOrientation="fullSensor">
        </activity>
    </application>

</manifest>
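
One detail that may explain why the manifest change had no visible effect: since API level 13, android:configChanges must list screenSize together with orientation for the activity to handle rotation changes itself. A possible tweak (untested on Temi's fixed screen):

    <activity
        android:name=".ObjectDetectionActivity"
        android:configChanges="orientation|screenSize"
        android:screenOrientation="fullSensor">
    </activity>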

Update

I think I may know the problem now. In AbstractCameraXActivity's setupCameraX() method I should manipulate the textureView, and manipulating the pivot of the matrix transform is what I need. I began to see some of the camera view on screen. However, I don't know what x and y values this parameter needs...

    final TextureView textureView = getCameraPreviewTextureView();
    final PreviewConfig previewConfig = new PreviewConfig.Builder().build();
    final Preview preview = new Preview(previewConfig);
    Matrix m = new Matrix();
    m.postRotate(180, x, y); // potential solution here, but what should x and y be?
    textureView.setTransform(m);
    preview.setOnPreviewOutputUpdateListener(output -> textureView.setSurfaceTexture(output.getSurfaceTexture()));
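
A natural choice for the pivot (my assumption, not something from the original post) is the center of the view, applied only after layout so that getWidth() and getHeight() are non-zero:

    // Rotate around the TextureView's center; defer until after layout,
    // because the view's width and height are 0 before the first layout pass
    textureView.post(() -> {
        Matrix m = new Matrix();
        float px = textureView.getWidth() / 2f;
        float py = textureView.getHeight() / 2f;
        m.postRotate(180, px, py);
        textureView.setTransform(m);
    });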

1 Answer

Ryan (best answer):

I changed the CameraX version from 1.0.0-alpha5 to 1.0.0 and rewrote setupCameraX() against the new API:

private void setupCameraX() {
    ListenableFuture<ProcessCameraProvider> cameraProviderFuture =
            ProcessCameraProvider.getInstance(this);

    cameraProviderFuture.addListener(() -> {
        try {
            ProcessCameraProvider cameraProvider = cameraProviderFuture.get();
            PreviewView previewView = getCameraPreviewTextureView();
            final Preview preview = new Preview.Builder()
                    .setTargetRotation(Surface.ROTATION_270) // working nicely
                    .build();
            //TODO: Check if result_view can render over preview_view

            CameraSelector cameraSelector = new CameraSelector
                    .Builder()
                    .requireLensFacing(CameraSelector.LENS_FACING_FRONT)
                    .build();

            preview.setSurfaceProvider(previewView.getSurfaceProvider());

            executor = Executors.newSingleThreadExecutor();
            imageAnalysis = new ImageAnalysis.Builder()
                    .setTargetResolution(new Size(500, 500))
                    .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
                    .build();
            imageAnalysis.setAnalyzer(executor,
                    image -> {
                        Log.d("image analyzer","Entered Analyse method");
                        if (SystemClock.elapsedRealtime() - mLastAnalysisResultTime < 500) {
                            return;
                        }

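                        // Note: CameraX 1.0.0 requires image.close() to be called once
                        // analysis is done, or no further frames are delivered. Also,
                        // 90 is hard-coded here; image.getImageInfo().getRotationDegrees()
                        // would be the general-purpose way to get the frame's rotation.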
                        final T result = analyzeImage(image, 90);
                        if (result != null) {
                            mLastAnalysisResultTime = SystemClock.elapsedRealtime();
                            runOnUiThread(() -> applyToUiAnalyzeImageResult(result));
                        }
                    });
            camera = cameraProvider.bindToLifecycle(
                    this,
                    cameraSelector,
                    imageAnalysis,
                    preview);
        } catch (InterruptedException | ExecutionException e) {
            new AlertDialog
                    .Builder(this)
                    .setTitle("Camera setup error")
                    .setMessage(e.getMessage())
                    .setPositiveButton("Ok",
                            (dialog, which) -> {
                            })
                    .show();
        }
    }, ContextCompat.getMainExecutor(this));
}

Note: getCameraPreviewTextureView() is a method that inflates a ViewStub; I am just following the PyTorch Android example.

@Override
protected PreviewView getCameraPreviewTextureView() {
    mResultView = findViewById(R.id.resultView);
    return ((ViewStub) findViewById(R.id.preview_view_stub))
            .inflate()
            .findViewById(R.id.preview_view);
}