NAudio Record Driver for Xamarin.Android. This code is based on https://gist.github.com/neilt6/6d07322070470536ea0ba409c343c2a5 and on WaveInEvent.cs from NAudio's source.
using Android.Media;
using NAudio.CoreAudioApi;
using System;
using System.Threading;

namespace NAudio.Wave
{
    public class AndroidAudioRecorder : IWaveIn
    {
        private readonly SynchronizationContext? _synchronizationContext;
        private CaptureState _captureState;
        private AudioRecord? _audioRecord;

        public WaveFormat WaveFormat { get; set; }
        public int BufferMilliseconds { get; set; }
        public AudioSource AudioSource { get; set; }

        public event EventHandler<WaveInEventArgs>? DataAvailable;
        public event EventHandler<StoppedEventArgs>? RecordingStopped;

        public AndroidAudioRecorder()
        {
            _synchronizationContext = SynchronizationContext.Current;
            AudioSource = AudioSource.Mic;
            WaveFormat = new WaveFormat(8000, 16, 1);
            BufferMilliseconds = 100;
            _captureState = CaptureState.Stopped;
        }
        public void StartRecording()
        {
            //Check if we are already recording, before touching the recorder,
            //so a second call cannot close the recorder the capture thread is using.
            if (_captureState == CaptureState.Capturing)
            {
                return;
            }
            //Make sure that we have some format to use.
            if (WaveFormat == null)
            {
                throw new ArgumentNullException(nameof(WaveFormat));
            }
            //Open the recorder and start the capture procedure.
            OpenRecorder();
            _captureState = CaptureState.Starting;
            _audioRecord?.StartRecording();
            ThreadPool.QueueUserWorkItem((state) => RecordThread(), null);
        }
        public void StopRecording()
        {
            if (_audioRecord == null)
            {
                return;
            }
            //Check if it has already been stopped.
            if (_captureState != CaptureState.Stopped)
            {
                _captureState = CaptureState.Stopped;
                CloseRecorder();
            }
        }
        public void Dispose()
        {
            Dispose(true);
            GC.SuppressFinalize(this);
        }

        protected virtual void Dispose(bool disposing)
        {
            if (disposing)
            {
                if (_captureState != CaptureState.Stopped)
                {
                    StopRecording();
                }
                _audioRecord?.Release();
                _audioRecord?.Dispose();
                _audioRecord = null;
            }
        }
        private void OpenRecorder()
        {
            //We want to make sure the recorder is definitely closed.
            CloseRecorder();
            Encoding encoding;
            ChannelIn channelMask;
            //Set the encoding. 32-bit samples are only supported as IEEE float;
            //mapping 32-bit integer PCM to PcmFloat would misinterpret the data.
            if (WaveFormat.Encoding == WaveFormatEncoding.Pcm)
            {
                encoding = WaveFormat.BitsPerSample switch
                {
                    8 => Encoding.Pcm8bit,
                    16 => Encoding.Pcm16bit,
                    _ => throw new ArgumentException("Input wave provider must be 8-bit or 16-bit PCM", nameof(WaveFormat))
                };
            }
            else if (WaveFormat.Encoding == WaveFormatEncoding.IeeeFloat && WaveFormat.BitsPerSample == 32)
            {
                encoding = Encoding.PcmFloat;
            }
            else
            {
                throw new ArgumentException("Input wave provider must be 8-bit PCM, 16-bit PCM or 32-bit IEEE float", nameof(WaveFormat));
            }
            //Set the channel type. Only mono and stereo are accepted.
            channelMask = WaveFormat.Channels switch
            {
                1 => ChannelIn.Mono,
                2 => ChannelIn.Stereo,
                _ => throw new ArgumentException("Input wave provider must be mono or stereo", nameof(WaveFormat))
            };
            //Determine the buffer size, rounded down to a whole number of blocks.
            int bufferSize = BufferMilliseconds * WaveFormat.AverageBytesPerSecond / 1000;
            if (bufferSize % WaveFormat.BlockAlign != 0)
            {
                bufferSize -= bufferSize % WaveFormat.BlockAlign;
            }
            //Never go below the minimum buffer size the hardware requires.
            var minBufferSize = AudioRecord.GetMinBufferSize(WaveFormat.SampleRate, channelMask, encoding);
            if (bufferSize < minBufferSize)
            {
                bufferSize = minBufferSize;
            }
            //Create the AudioRecord object.
            _audioRecord = new AudioRecord(AudioSource, WaveFormat.SampleRate, channelMask, encoding, bufferSize);
        }
        private void CloseRecorder()
        {
            //Make sure that the recorder was opened before trying to stop it.
            if (_audioRecord != null && _audioRecord.RecordingState != RecordState.Stopped)
            {
                _audioRecord.Stop();
                _audioRecord.Release();
                _audioRecord.Dispose();
                _audioRecord = null;
            }
        }
        private void RecordThread()
        {
            Exception? exception = null;
            try
            {
                RecordingLogic();
            }
            catch (Exception ex)
            {
                exception = ex;
            }
            finally
            {
                _captureState = CaptureState.Stopped;
                RaiseRecordingStoppedEvent(exception);
            }
        }

        private void RaiseRecordingStoppedEvent(Exception? e)
        {
            var handler = RecordingStopped;
            if (handler != null)
            {
                //Raise the event on the captured synchronization context if one exists.
                if (_synchronizationContext == null)
                {
                    handler(this, new StoppedEventArgs(e));
                }
                else
                {
                    _synchronizationContext.Post(state => handler(this, new StoppedEventArgs(e)), null);
                }
            }
        }
        private void RecordingLogic()
        {
            //Initialize the wave buffer, rounded down to a whole number of blocks.
            int bufferSize = BufferMilliseconds * WaveFormat.AverageBytesPerSecond / 1000;
            if (bufferSize % WaveFormat.BlockAlign != 0)
            {
                bufferSize -= bufferSize % WaveFormat.BlockAlign;
            }
            _captureState = CaptureState.Capturing;
            //Run the record loop.
            while (_captureState != CaptureState.Stopped && _audioRecord != null)
            {
                if (_captureState != CaptureState.Capturing)
                {
                    Thread.Sleep(10);
                    continue;
                }
                if (WaveFormat.Encoding == WaveFormatEncoding.Pcm)
                {
                    byte[] byteBuffer = new byte[bufferSize];
                    var bytesRead = _audioRecord.Read(byteBuffer, 0, bufferSize);
                    if (bytesRead > 0)
                    {
                        DataAvailable?.Invoke(this, new WaveInEventArgs(byteBuffer, bytesRead));
                    }
                }
                else if (WaveFormat.Encoding == WaveFormatEncoding.IeeeFloat)
                {
                    float[] floatBuffer = new float[bufferSize / 4];
                    byte[] byteBuffer = new byte[bufferSize];
                    var floatsRead = _audioRecord.Read(floatBuffer, 0, floatBuffer.Length, 0);
                    if (floatsRead > 0)
                    {
                        //Only copy the floats that were actually read.
                        Buffer.BlockCopy(floatBuffer, 0, byteBuffer, 0, floatsRead * 4);
                        DataAvailable?.Invoke(this, new WaveInEventArgs(byteBuffer, floatsRead * 4));
                    }
                }
            }
        }
    }
}
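For reference, a minimal usage sketch, assuming the Android RecordAudio runtime permission has already been granted; "output.pcm" and the format values here are illustrative, not part of the gist:

using System.IO;
using NAudio.Wave;

//Minimal usage sketch: capture 16-bit mono PCM from the microphone and
//write the raw samples to a file. Assumes the RecordAudio runtime
//permission has already been granted; "output.pcm" is a hypothetical path.
var recorder = new AndroidAudioRecorder
{
    WaveFormat = new WaveFormat(16000, 16, 1), //16 kHz, 16-bit, mono
    BufferMilliseconds = 50
};
var output = File.Create("output.pcm");
recorder.DataAvailable += (s, e) => output.Write(e.Buffer, 0, e.BytesRecorded);
recorder.RecordingStopped += (s, e) =>
{
    output.Dispose();
    recorder.Dispose();
};
recorder.StartRecording();
//...later, when the capture should end:
recorder.StopRecording();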
Reminder when using this code: it does not account for differences in phone microphone hardware, so you may need to apply a hard limiter to normalize the volume of the captured samples.
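As a minimal sketch of that idea, assuming the 16-bit PCM format used above (the SampleLimiter class, HardLimit method and 0.8f threshold are illustrative, not part of the gist):

using System;

public static class SampleLimiter
{
    //Minimal hard-limiter sketch for 16-bit PCM buffers as delivered by
    //DataAvailable: clamp every sample to a fraction of full scale.
    //The 0.8f threshold is an arbitrary example value; tune it per device.
    public static void HardLimit(byte[] buffer, int bytesRecorded, float threshold = 0.8f)
    {
        short limit = (short)(short.MaxValue * threshold);
        for (int i = 0; i + 1 < bytesRecorded; i += 2)
        {
            short sample = BitConverter.ToInt16(buffer, i);
            if (sample > limit) sample = limit;
            else if (sample < -limit) sample = (short)-limit;
            byte[] bytes = BitConverter.GetBytes(sample);
            buffer[i] = bytes[0];
            buffer[i + 1] = bytes[1];
        }
    }
}

It can be wired in from the DataAvailable handler, e.g. recorder.DataAvailable += (s, e) => SampleLimiter.HardLimit(e.Buffer, e.BytesRecorded);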