I am trying to implement voice chat on a Mumble server using Flutter and the dumble package. I have tried many, many things and ended up with the code below, which of course does not work ;) so I am here to get assistance and advice on how to set everything up correctly.
On the sender side I am using the mic_stream package to record the voice.
Based on some code from the dumble demo:
_client = await MumbleClient.connect(
    options: connectionOptions,
    onBadCertificate: (X509Certificate certificate) {
      // Accept every certificate
      return true;
    });
// Open an outgoing Opus audio stream towards the server
audioOutput = _client.audio.sendAudio(codec: AudioCodec.opus);
// Load the native Opus library and initialize opus_dart with it
var lib = await opus_flutter.load();
initOpus(lib);
// Encoder that turns raw PCM bytes into Opus packets
encoder = StreamOpusEncoder.bytes(
    frameTime: frameTime,
    floatInput: false,
    sampleRate: inputSampleRate,
    channels: channels,
    application: Application.voip);
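In case it matters, these are the values I am assuming for the constants referenced above and in the snippets below (`FrameTime` comes from opus_dart; the receiver-side constants are included as well):

// Assumed constant values (defined elsewhere in my code):
const int inputSampleRate = 48000;  // must match the mic capture rate
const int outputSampleRate = 48000; // decoder output rate on the receiver side
const int channels = 1;             // mono
const FrameTime frameTime = FrameTime.ms20; // 20 ms Opus frames
// 16-bit PCM, to match floatInput: false on the encoder
const AUDIO_FORMAT = mic_stream.AudioFormat.ENCODING_PCM_16BIT;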
Future<bool> startRecording() async {
  mic_stream.MicStream.shouldRequestPermission(true);
  Stream<List<int>> microphoneStream = (await mic_stream.MicStream.microphone(
          audioSource: mic_stream.AudioSource.DEFAULT,
          sampleRate: 48000,
          channelConfig: mic_stream.ChannelConfig.CHANNEL_IN_MONO,
          audioFormat: AUDIO_FORMAT))!
      .cast<List<int>>();
  // Encode the raw PCM chunks to Opus and pipe them into the Mumble audio sink
  microphoneStream
      .transform(encoder)
      .map((Uint8List audioBytes) => AudioFrame.outgoing(frame: audioBytes))
      .pipe(audioOutput);
  setState(() {
    _isRecording = true;
  });
  // Debug subscription, only there to verify that mic data is coming in
  listener = microphoneStream.listen(
    (data) {
      print(data);
    },
  );
  return true;
}
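One thing I am unsure about here is that the microphone stream is listened to twice: once by the encoder pipeline and once by the debug listener. If mic_stream hands back a single-subscription stream, the second listen would throw; here is a minimal sketch that avoids the issue by converting to a broadcast stream first (same `encoder` and `audioOutput` as above):

// Sketch: a broadcast stream lets both the encoder pipeline and a
// debug listener subscribe without an "already listened to" error.
final broadcastMic = microphoneStream.asBroadcastStream();

// Pipeline: raw PCM -> Opus -> AudioFrame -> Mumble
broadcastMic
    .transform(encoder)
    .map((Uint8List bytes) => AudioFrame.outgoing(frame: bytes))
    .pipe(audioOutput);

// Independent debug subscription
listener = broadcastMic.listen(
    (chunk) => print('mic chunk: ${chunk.length} bytes'));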
With that, I can see the data flowing in the console.
On the receiver side:
_client.audio.add(StreamAudioListener());
class StreamAudioListener with AudioListener {
  final AudioPlayer _audioPlayer = AudioPlayer();

  @override
  Future<void> onAudioReceived(Stream<AudioFrame> voiceData, AudioCodec codec,
      User? speaker, TalkMode talkMode) async {
    if (codec == AudioCodec.opus) {
      // Decode the incoming Opus packets back into raw PCM bytes
      StreamOpusDecoder decoder = StreamOpusDecoder.bytes(
          floatOutput: false, sampleRate: outputSampleRate, channels: channels);
      var audioStream = voiceData
          .map<Uint8List>(
              (AudioFrame frame) => frame.frame) // we only need the bytes
          .cast<Uint8List?>()
          .transform(decoder)
          .cast<Uint8List>();
      audioStream.listen(
        (data) async {
          // Hand each decoded PCM chunk to just_audio
          try {
            await _audioPlayer.setAudioSource(
              CustomStreamAudioSource(data),
            );
            // await _audioPlayer.play();
          } catch (e) {
            print('exception: $e');
            print(data);
          }
        },
        onError: (err) {
          // Executed when an error event is emitted on the stream
          print('Error: $err');
        },
        cancelOnError: false, // keep the subscription alive on errors
        onDone: () {
          print('StreamDone!');
        },
      );
    } else {
      print("But we don't know how to decode $codec");
    }
  }
}
Note that in the `audioStream.listen` callback I am using the just_audio package to try to play the decoded audio bytes from the stream.
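My suspicion is that this is where it breaks: `setAudioSource` expects a complete, seekable source (with a container/header), not a continuous stream of raw PCM chunks, so creating a new audio source per chunk probably cannot work. One alternative I am considering is pushing the PCM into a stream player such as flutter_sound; a minimal sketch, assuming 48 kHz mono 16-bit PCM and the flutter_sound stream API (`startPlayerFromStream` / `foodSink`):

import 'dart:typed_data';
import 'package:flutter_sound/flutter_sound.dart';

final FlutterSoundPlayer _pcmPlayer = FlutterSoundPlayer();

Future<void> startPcmPlayback() async {
  await _pcmPlayer.openPlayer();
  // Player that consumes raw PCM16 chunks pushed into its food sink
  await _pcmPlayer.startPlayerFromStream(
    codec: Codec.pcm16,
    numChannels: 1,
    sampleRate: 48000,
  );
}

// Would be called from audioStream.listen for every decoded chunk
void feedPcm(Uint8List data) {
  _pcmPlayer.foodSink?.add(FoodData(data));
}

With that, the listen callback would just call feedPcm(data) instead of creating a new audio source per chunk, but I have not managed to verify this end to end.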
The problem is that I can't hear any sound and I periodically get an onError callback. However, I can see the stream bytes flowing in the listen event, so the data is being received from the sender peer.
I can see both users connected in the Mumble client application, but no sound is transferred between them.
Any help, idea, or suggestion is welcome.
Thanks.
PS: I know about WebRTC ;)