Here is my simple react component attempting to record "screen with option to record tab audio" and "mic audio":
unction ScreenRecorder({
recAudio = false,
setRecOptions,
recording,
setRecording,
isStartingTranscribeJob,
setIsUploadingToSupabase,
setRecordingVideoUrl,
setSupabaseAssetPath,
recOptions,
mediaStream,
setMediaStream,
handleStopRecording,
}: {
recAudio: boolean;
setRecOptions: React.Dispatch<React.SetStateAction<TRecordOption>>;
recording: boolean;
setRecording: React.Dispatch<React.SetStateAction<boolean>>;
isStartingTranscribeJob: boolean;
setIsUploadingToSupabase: React.Dispatch<React.SetStateAction<boolean>>;
setRecordingVideoUrl: React.Dispatch<React.SetStateAction<any>>;
setSupabaseAssetPath: React.Dispatch<React.SetStateAction<string>>;
recOptions: TRecordOption;
mediaStream: MediaStream | null;
setMediaStream: React.Dispatch<React.SetStateAction<MediaStream | null>>;
handleStopRecording: () => void;
}) {
const router = useRouter();
const { context } = useStore();
const supabase = createClientComponentClient();
const screenVideoRef = useRef(null);
const mediaChunks: any[] = [];
const {
context: { currentTenant },
} = useStore();
useEffect(() => {
startRecording();
}, []);
const startRecording = async () => {
try {
let audioStream = null;
if (recAudio) {
audioStream = await navigator.mediaDevices.getUserMedia({
audio: true,
});
}
// let screenStream = await navigator.mediaDevices.getDisplayMedia({
// video: true,
// audio: true,
// });
let screenStream = null;
try {
screenStream = await navigator.mediaDevices.getDisplayMedia({
video: true,
audio: true,
});
} catch (error) {
router.reload();
}
const combinedStream = new MediaStream();
screenStream &&
screenStream
.getTracks()
.forEach((track) => combinedStream.addTrack(track));
if (recAudio && audioStream) {
console.log('adding audio stream');
audioStream
.getAudioTracks()
.forEach((track) => {
console.log('adding audio track');
combinedStream.addTrack(track)
});
}
screenStream &&
screenStream
.getTracks()
.forEach((track) => combinedStream.addTrack(track));
setMediaStream(combinedStream);
if (screenVideoRef.current) {
(screenVideoRef.current as HTMLVideoElement).srcObject = combinedStream;
}
const mediaRecorder = new MediaRecorder(combinedStream, {
mimeType: 'video/webm;codecs=VP9',
});
mediaRecorder.ondataavailable = (e) => {
if (e.data.size > 0) {
mediaChunks.push(e.data);
}
};
combinedStream?.getTracks().forEach((track) => {
track.onended = () => {
// Stop all tracks
combinedStream.getTracks().forEach((track) => track.stop());
// Clear the video element's source
if (screenVideoRef.current) {
(screenVideoRef.current as HTMLVideoElement).srcObject = null;
}
// Update state
setRecording(false);
};
});
// to downlssd the captured video
mediaRecorder.onstop = async () => {
setIsUploadingToSupabase(true);
const blob = new Blob(mediaChunks, { type: 'video/mp4' });
const fileName = 'screen-recording.mp4';
try {
const uploadedFile = await fixHeadersAndUploadBlob({
blob,
fileName,
mimeType: 'video/mp4',
tenantId: currentTenant?.id,
setRecordingVideoUrl,
});
setSupabaseAssetPath(uploadedFile.path);
setIsUploadingToSupabase(false);
} catch (e) {
setIsUploadingToSupabase(false);
toast.error('Error processing recording');
return;
}
// onstop final tasks
if (screenVideoRef.current) {
(screenVideoRef.current as any).srcObject = null;
}
setRecOptions({
recAudio: false,
recVideo: false,
recDisplayMedia: false,
});
};
mediaRecorder.start();
setRecording(true);
} catch (error) {
console.error('Error starting recording:', error);
}
};
return (
<div className='flex w-[600px] flex-col gap-5 '>
<video
ref={screenVideoRef}
muted
autoPlay
className={`border-gray max-h-[380px] w-full rounded-lg border `}
/>
</div>
);
}
It simply try to combine both the streams (displayMedia and userMedia) and then process this combined stream.
But it is not working.
Please assist me with this. There is no need to modify my code if don't feel like; instead you can, provide a solution or any references that address this issue, or share your code if applicable.