I am trying to send audio captured from the iPhone microphone with AVAudioEngine to another listening device via GCDAsyncUdpSocket. I receive the audio on the other iOS device and can play it in the getComingAudio method below, but it comes through with background noise and echo. How can I avoid the background noise and echo? Thanks.
func setupAudio() {
    self.audioEngine = AVAudioEngine()
    self.mixer = AVAudioMixerNode()
    self.mixer.volume = 0
    self.audioEngine.attach(mixer)
    self.socket = GCDAsyncUdpSocket(delegate: self, delegateQueue: DispatchQueue.main)
    audioFormat = AVAudioFormat(standardFormatWithSampleRate: Double(sampleRate), channels: 1)
}
// Sending audio
func sendAudio() {
    try! AVAudioSession.sharedInstance().setCategory(AVAudioSession.Category.playAndRecord)
    try! AVAudioSession.sharedInstance().setActive(true)
    let format = AVAudioFormat(commonFormat: .pcmFormatInt16,
                               sampleRate: 44100.0,
                               channels: 1,
                               interleaved: true)
    self.audioEngine.connect(self.audioEngine.inputNode, to: self.mixer, format: format)
    self.audioEngine.connect(self.mixer, to: self.audioEngine.mainMixerNode, format: format)
    // Voice processing has to be switched while the engine is stopped, before start().
    try! self.audioEngine.inputNode.setVoiceProcessingEnabled(true)
    DispatchQueue.global(qos: .background).async { [weak self] in
        guard let self = self else { return }
        do {
            self.socket.setIPv4Enabled(true)
            self.socket.setIPv6Enabled(false)
            try self.socket.connect(toHost: "235.10.10.100", onPort: 4646)
            try self.socket.beginReceiving()
            print("Socket started")
        } catch {
            print("Socket start error: \(error)")
        }
    }
    self.mixer.installTap(onBus: 0, bufferSize: 2048, format: format) { buffer, _ in
        // frameLength counts frames; each mono Int16 sample is 2 bytes.
        let data = Data(bytes: buffer.int16ChannelData![0],
                        count: Int(buffer.frameLength) * MemoryLayout<Int16>.size)
        print(buffer)
        DispatchQueue.global(qos: .background).async { [weak self] in
            // send(_:withTimeout:tag:) is asynchronous and does not throw.
            self?.socket.send(data, withTimeout: 0, tag: 0)
        }
    }
    audioEngine.prepare()
    do {
        try audioEngine.start()
    } catch {
        print("Can't start the engine: \(error)")
    }
}
// Listening audio
func udpSocket(_ sock: GCDAsyncUdpSocket, didReceive data: Data, fromAddress address: Data, withFilterContext filterContext: Any?) {
    audioPlayer.scheduleBuffer(getComingAudio(with: data), completionHandler: nil)
}
func getComingAudio(with data: Data) -> AVAudioPCMBuffer {
    let audioBuffer = AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: UInt32(data.count) / 2)!
    data.withUnsafeBytes { (bufferPointer: UnsafeRawBufferPointer) in
        let int16Array = Array(bufferPointer.bindMemory(to: Int16.self))
        let floatArray = int16Array.map { Float($0) / Float(Int16.max) }
        floatArray.withUnsafeBufferPointer {
            audioBuffer.floatChannelData!.pointee.assign(from: $0.baseAddress!, count: floatArray.count)
        }
    }
    audioBuffer.frameLength = audioBuffer.frameCapacity
    return audioBuffer
}
The very short answer is to turn on voice processing:
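A minimal sketch, assuming the audioEngine property from your setupAudio and that the call happens while the engine is stopped, before audioEngine.start():

// setVoiceProcessingEnabled(true) turns on Apple's voice-processing I/O
// (echo cancellation, noise suppression, automatic gain control) for the
// microphone path. It throws, so handle the error rather than force-try.
do {
    try audioEngine.inputNode.setVoiceProcessingEnabled(true)
} catch {
    print("Could not enable voice processing: \(error)")
}

Enabling it on the input node also enables the paired processing on the engine's output node, and the API requires iOS 13 or later. (Your sendAudio above already attempts this with try!; the key point is that it has to happen before audioEngine.start().)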
For more information (and you'll likely want more information because these things can be tricky at times), see WWDC2019/510 What's New in AVAudioEngine and the relevant sample code, Using Voice Processing.
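Not from the answer above, but a related knob you could try if the node-level switch alone isn't enough: asking the audio session for voice-chat processing when you configure it in sendAudio. This is only a sketch, assuming it replaces the bare setCategory(.playAndRecord) call there:

// The .voiceChat mode tells the system this is two-way voice audio,
// so it applies its voice-optimized signal processing to the session's I/O.
do {
    try AVAudioSession.sharedInstance().setCategory(.playAndRecord,
                                                    mode: .voiceChat,
                                                    options: [.defaultToSpeaker, .allowBluetooth])
    try AVAudioSession.sharedInstance().setActive(true)
} catch {
    print("Audio session configuration error: \(error)")
}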