Created
November 13, 2020 00:11
-
-
Save alekhinen/98720f01b3d1de6010d3e71c96f9a279 to your computer and use it in GitHub Desktop.
Patch for WebRTC M84 to not prompt mic permissions for "viewers" of a WebRTC call on iOS
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
diff --git a/sdk/objc/components/audio/RTCAudioSessionConfiguration.m b/sdk/objc/components/audio/RTCAudioSessionConfiguration.m | |
index 39e9ac13ec..a11a0037cd 100644 | |
--- a/sdk/objc/components/audio/RTCAudioSessionConfiguration.m | |
+++ b/sdk/objc/components/audio/RTCAudioSessionConfiguration.m | |
@@ -65,15 +65,16 @@ static RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *gWebRTCConfiguration = nil; | |
- (instancetype)init { | |
if (self = [super init]) { | |
+ AVAudioSession *session = [AVAudioSession sharedInstance]; | |
// Use a category which supports simultaneous recording and playback. | |
// By default, using this category implies that our app’s audio is | |
// nonmixable, hence activating the session will interrupt any other | |
- // audio sessions which are also nonmixable. | |
- _category = AVAudioSessionCategoryPlayAndRecord; | |
- _categoryOptions = AVAudioSessionCategoryOptionAllowBluetooth; | |
+ // audio sessions which are also nonmixable. | |
+ _category = session.category; | |
+ _categoryOptions = session.categoryOptions; | |
// Specify mode for two-way voice communication (e.g. VoIP). | |
- _mode = AVAudioSessionModeVoiceChat; | |
+ _mode = session.mode; | |
// Set the session's sample rate or the hardware sample rate. | |
// It is essential that we use the same sample rate as stream format | |
diff --git a/sdk/objc/native/src/audio/voice_processing_audio_unit.mm b/sdk/objc/native/src/audio/voice_processing_audio_unit.mm | |
index a2aa7f323b..52aac4f315 100644 | |
--- a/sdk/objc/native/src/audio/voice_processing_audio_unit.mm | |
+++ b/sdk/objc/native/src/audio/voice_processing_audio_unit.mm | |
@@ -110,16 +110,22 @@ bool VoiceProcessingAudioUnit::Init() { | |
} | |
// Enable input on the input scope of the input element. | |
- UInt32 enable_input = 1; | |
- result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, | |
- kAudioUnitScope_Input, kInputBus, &enable_input, | |
- sizeof(enable_input)); | |
- if (result != noErr) { | |
- DisposeAudioUnit(); | |
- RTCLogError(@"Failed to enable input on input scope of input element. " | |
- "Error=%ld.", | |
- (long)result); | |
- return false; | |
+ AVAudioSession *session = [AVAudioSession sharedInstance]; | |
+ if (session.category == AVAudioSessionCategoryPlayAndRecord) { | |
+ UInt32 enable_input = 1; | |
+ result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, | |
+ kAudioUnitScope_Input, kInputBus, &enable_input, | |
+ sizeof(enable_input)); | |
+ if (result != noErr) { | |
+ DisposeAudioUnit(); | |
+ RTCLogError(@"Failed to enable input on input scope of input element. " | |
+ "Error=%ld.", | |
+ (long)result); | |
+ return false; | |
+ } | |
+ } | |
+ else { | |
+ RTCLog(@"Not enabling input on the input scope of the input element."); | |
} | |
// Enable output on the output scope of the output element. | |
@@ -153,34 +159,44 @@ bool VoiceProcessingAudioUnit::Init() { | |
// Disable AU buffer allocation for the recorder, we allocate our own. | |
// TODO(henrika): not sure that it actually saves resource to make this call. | |
- UInt32 flag = 0; | |
- result = AudioUnitSetProperty( | |
- vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer, | |
- kAudioUnitScope_Output, kInputBus, &flag, sizeof(flag)); | |
- if (result != noErr) { | |
- DisposeAudioUnit(); | |
- RTCLogError(@"Failed to disable buffer allocation on the input bus. " | |
- "Error=%ld.", | |
- (long)result); | |
- return false; | |
+ if (session.category == AVAudioSessionCategoryPlayAndRecord) { | |
+ UInt32 flag = 0; | |
+ result = AudioUnitSetProperty( | |
+ vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer, | |
+ kAudioUnitScope_Output, kInputBus, &flag, sizeof(flag)); | |
+ if (result != noErr) { | |
+ DisposeAudioUnit(); | |
+ RTCLogError(@"Failed to disable buffer allocation on the input bus. " | |
+ "Error=%ld.", | |
+ (long)result); | |
+ return false; | |
+ } | |
+ } | |
+ else { | |
+ RTCLog(@"NOT disabling AU buffer allocation for the recorder; we allocate our own."); | |
} | |
// Specify the callback to be called by the I/O thread to us when input audio | |
// is available. The recorded samples can then be obtained by calling the | |
// AudioUnitRender() method. | |
- AURenderCallbackStruct input_callback; | |
- input_callback.inputProc = OnDeliverRecordedData; | |
- input_callback.inputProcRefCon = this; | |
- result = AudioUnitSetProperty(vpio_unit_, | |
- kAudioOutputUnitProperty_SetInputCallback, | |
- kAudioUnitScope_Global, kInputBus, | |
- &input_callback, sizeof(input_callback)); | |
- if (result != noErr) { | |
- DisposeAudioUnit(); | |
- RTCLogError(@"Failed to specify the input callback on the input bus. " | |
- "Error=%ld.", | |
- (long)result); | |
- return false; | |
+ if (session.category == AVAudioSessionCategoryPlayAndRecord) { | |
+ AURenderCallbackStruct input_callback; | |
+ input_callback.inputProc = OnDeliverRecordedData; | |
+ input_callback.inputProcRefCon = this; | |
+ result = AudioUnitSetProperty(vpio_unit_, | |
+ kAudioOutputUnitProperty_SetInputCallback, | |
+ kAudioUnitScope_Global, kInputBus, | |
+ &input_callback, sizeof(input_callback)); | |
+ if (result != noErr) { | |
+ DisposeAudioUnit(); | |
+ RTCLogError(@"Failed to specify the input callback on the input bus. " | |
+ "Error=%ld.", | |
+ (long)result); | |
+ return false; | |
+ } | |
+ } | |
+ else { | |
+ RTCLog(@"NOT specifying the input callback to be called by the I/O thread when input audio is available."); | |
} | |
state_ = kUninitialized; | |
@@ -203,14 +219,20 @@ bool VoiceProcessingAudioUnit::Initialize(Float64 sample_rate) { | |
#endif | |
// Set the format on the output scope of the input element/bus. | |
- result = | |
- AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, | |
- kAudioUnitScope_Output, kInputBus, &format, size); | |
- if (result != noErr) { | |
- RTCLogError(@"Failed to set format on output scope of input bus. " | |
- "Error=%ld.", | |
- (long)result); | |
- return false; | |
+ AVAudioSession *session = [AVAudioSession sharedInstance]; | |
+ if (session.category == AVAudioSessionCategoryPlayAndRecord) { | |
+ result = | |
+ AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, | |
+ kAudioUnitScope_Output, kInputBus, &format, size); | |
+ if (result != noErr) { | |
+ RTCLogError(@"Failed to set format on output scope of input bus. " | |
+ "Error=%ld.", | |
+ (long)result); | |
+ return false; | |
+ } | |
+ } | |
+ else { | |
+ RTCLog(@"NOT setting the format on the output scope of the input element because it's movie mode"); | |
} | |
// Set the format on the input scope of the output element/bus. |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment