Committed by
GitHub
Signal local audio track feature updates (#456)
* Signal local audio track feature updates * Fix npe * Fix custom audio processing factory
Showing
13 changed files
with
366 additions
and
73 deletions
| @@ -16,28 +16,62 @@ | @@ -16,28 +16,62 @@ | ||
| 16 | 16 | ||
| 17 | package io.livekit.android.audio | 17 | package io.livekit.android.audio |
| 18 | 18 | ||
| 19 | +import io.livekit.android.util.FlowObservable | ||
| 20 | + | ||
| 19 | /** | 21 | /** |
| 20 | * Interface for controlling external audio processing. | 22 | * Interface for controlling external audio processing. |
| 21 | */ | 23 | */ |
| 22 | interface AudioProcessingController { | 24 | interface AudioProcessingController { |
| 23 | /** | 25 | /** |
| 24 | - * Set the audio processing to be used for capture post. | 26 | + * the audio processor to be used for capture post processing. |
| 27 | + */ | ||
| 28 | + @FlowObservable | ||
| 29 | + @get:FlowObservable | ||
| 30 | + var capturePostProcessor: AudioProcessorInterface? | ||
| 31 | + | ||
| 32 | + /** | ||
| 33 | + * the audio processor to be used for render pre processing. | ||
| 34 | + */ | ||
| 35 | + @FlowObservable | ||
| 36 | + @get:FlowObservable | ||
| 37 | + var renderPreProcessor: AudioProcessorInterface? | ||
| 38 | + | ||
| 39 | + /** | ||
| 40 | + * whether to bypass the render pre processing. | ||
| 41 | + */ | ||
| 42 | + @FlowObservable | ||
| 43 | + @get:FlowObservable | ||
| 44 | + var bypassRenderPreProcessing: Boolean | ||
| 45 | + | ||
| 46 | + /** | ||
| 47 | + * whether to bypass the capture post processing. | ||
| 48 | + */ | ||
| 49 | + @FlowObservable | ||
| 50 | + @get:FlowObservable | ||
| 51 | + var bypassCapturePostProcessing: Boolean | ||
| 52 | + | ||
| 53 | + /** | ||
| 54 | + * Set the audio processor to be used for capture post processing. | ||
| 25 | */ | 55 | */ |
| 56 | + @Deprecated("Use the capturePostProcessor variable directly instead") | ||
| 26 | fun setCapturePostProcessing(processing: AudioProcessorInterface?) | 57 | fun setCapturePostProcessing(processing: AudioProcessorInterface?) |
| 27 | 58 | ||
| 28 | /** | 59 | /** |
| 29 | - * Set whether to bypass mode the capture post processing. | 60 | + * Set whether to bypass the capture post processing. |
| 30 | */ | 61 | */ |
| 62 | + @Deprecated("Use the bypassCapturePostProcessing variable directly instead") | ||
| 31 | fun setBypassForCapturePostProcessing(bypass: Boolean) | 63 | fun setBypassForCapturePostProcessing(bypass: Boolean) |
| 32 | 64 | ||
| 33 | /** | 65 | /** |
| 34 | - * Set the audio processing to be used for render pre. | 66 | + * Set the audio processor to be used for render pre processing. |
| 35 | */ | 67 | */ |
| 68 | + @Deprecated("Use the renderPreProcessor variable directly instead") | ||
| 36 | fun setRenderPreProcessing(processing: AudioProcessorInterface?) | 69 | fun setRenderPreProcessing(processing: AudioProcessorInterface?) |
| 37 | 70 | ||
| 38 | /** | 71 | /** |
| 39 | - * Set whether to bypass mode the render pre processing. | 72 | + * Set whether to bypass the render pre processing. |
| 40 | */ | 73 | */ |
| 74 | + @Deprecated("Use the bypassRenderPreProcessing variable directly instead") | ||
| 41 | fun setBypassForRenderPreProcessing(bypass: Boolean) | 75 | fun setBypassForRenderPreProcessing(bypass: Boolean) |
| 42 | } | 76 | } |
| 43 | 77 |
| @@ -41,12 +41,3 @@ data class AudioProcessorOptions( | @@ -41,12 +41,3 @@ data class AudioProcessorOptions( | ||
| 41 | */ | 41 | */ |
| 42 | val renderPreBypass: Boolean = false, | 42 | val renderPreBypass: Boolean = false, |
| 43 | ) | 43 | ) |
| 44 | - | ||
| 45 | -internal fun AudioProcessorOptions.authenticateProcessors(url: String, token: String) { | ||
| 46 | - if (capturePostProcessor is AuthedAudioProcessorInterface) { | ||
| 47 | - capturePostProcessor.authenticate(url, token) | ||
| 48 | - } | ||
| 49 | - if (renderPreProcessor is AuthedAudioProcessorInterface) { | ||
| 50 | - renderPreProcessor.authenticate(url, token) | ||
| 51 | - } | ||
| 52 | -} |
| @@ -55,6 +55,7 @@ import kotlinx.coroutines.sync.Mutex | @@ -55,6 +55,7 @@ import kotlinx.coroutines.sync.Mutex | ||
| 55 | import kotlinx.coroutines.sync.withLock | 55 | import kotlinx.coroutines.sync.withLock |
| 56 | import kotlinx.coroutines.yield | 56 | import kotlinx.coroutines.yield |
| 57 | import livekit.LivekitModels | 57 | import livekit.LivekitModels |
| 58 | +import livekit.LivekitModels.AudioTrackFeature | ||
| 58 | import livekit.LivekitRtc | 59 | import livekit.LivekitRtc |
| 59 | import livekit.LivekitRtc.JoinResponse | 60 | import livekit.LivekitRtc.JoinResponse |
| 60 | import livekit.LivekitRtc.ReconnectResponse | 61 | import livekit.LivekitRtc.ReconnectResponse |
| @@ -348,6 +349,10 @@ internal constructor( | @@ -348,6 +349,10 @@ internal constructor( | ||
| 348 | client.sendMuteTrack(sid, muted) | 349 | client.sendMuteTrack(sid, muted) |
| 349 | } | 350 | } |
| 350 | 351 | ||
| 352 | + fun updateLocalAudioTrack(sid: String, features: Collection<AudioTrackFeature>) { | ||
| 353 | + client.sendUpdateLocalAudioTrack(sid, features) | ||
| 354 | + } | ||
| 355 | + | ||
| 351 | fun close(reason: String = "Normal Closure") { | 356 | fun close(reason: String = "Normal Closure") { |
| 352 | if (isClosed) { | 357 | if (isClosed) { |
| 353 | return | 358 | return |
| @@ -29,22 +29,34 @@ import io.livekit.android.util.CloseableCoroutineScope | @@ -29,22 +29,34 @@ import io.livekit.android.util.CloseableCoroutineScope | ||
| 29 | import io.livekit.android.util.Either | 29 | import io.livekit.android.util.Either |
| 30 | import io.livekit.android.util.LKLog | 30 | import io.livekit.android.util.LKLog |
| 31 | import io.livekit.android.webrtc.toProtoSessionDescription | 31 | import io.livekit.android.webrtc.toProtoSessionDescription |
| 32 | -import kotlinx.coroutines.* | 32 | +import kotlinx.coroutines.CancellableContinuation |
| 33 | +import kotlinx.coroutines.CoroutineDispatcher | ||
| 34 | +import kotlinx.coroutines.ExperimentalCoroutinesApi | ||
| 35 | +import kotlinx.coroutines.Job | ||
| 36 | +import kotlinx.coroutines.SupervisorJob | ||
| 37 | +import kotlinx.coroutines.delay | ||
| 33 | import kotlinx.coroutines.flow.MutableSharedFlow | 38 | import kotlinx.coroutines.flow.MutableSharedFlow |
| 39 | +import kotlinx.coroutines.launch | ||
| 40 | +import kotlinx.coroutines.suspendCancellableCoroutine | ||
| 34 | import kotlinx.serialization.decodeFromString | 41 | import kotlinx.serialization.decodeFromString |
| 35 | import kotlinx.serialization.encodeToString | 42 | import kotlinx.serialization.encodeToString |
| 36 | import kotlinx.serialization.json.Json | 43 | import kotlinx.serialization.json.Json |
| 37 | import livekit.LivekitModels | 44 | import livekit.LivekitModels |
| 45 | +import livekit.LivekitModels.AudioTrackFeature | ||
| 38 | import livekit.LivekitRtc | 46 | import livekit.LivekitRtc |
| 39 | import livekit.LivekitRtc.JoinResponse | 47 | import livekit.LivekitRtc.JoinResponse |
| 40 | import livekit.LivekitRtc.ReconnectResponse | 48 | import livekit.LivekitRtc.ReconnectResponse |
| 41 | import livekit.org.webrtc.IceCandidate | 49 | import livekit.org.webrtc.IceCandidate |
| 42 | import livekit.org.webrtc.PeerConnection | 50 | import livekit.org.webrtc.PeerConnection |
| 43 | import livekit.org.webrtc.SessionDescription | 51 | import livekit.org.webrtc.SessionDescription |
| 44 | -import okhttp3.* | 52 | +import okhttp3.OkHttpClient |
| 53 | +import okhttp3.Request | ||
| 54 | +import okhttp3.Response | ||
| 55 | +import okhttp3.WebSocket | ||
| 56 | +import okhttp3.WebSocketListener | ||
| 45 | import okio.ByteString | 57 | import okio.ByteString |
| 46 | import okio.ByteString.Companion.toByteString | 58 | import okio.ByteString.Companion.toByteString |
| 47 | -import java.util.* | 59 | +import java.util.Date |
| 48 | import javax.inject.Inject | 60 | import javax.inject.Inject |
| 49 | import javax.inject.Named | 61 | import javax.inject.Named |
| 50 | import javax.inject.Singleton | 62 | import javax.inject.Singleton |
| @@ -552,6 +564,19 @@ constructor( | @@ -552,6 +564,19 @@ constructor( | ||
| 552 | return time | 564 | return time |
| 553 | } | 565 | } |
| 554 | 566 | ||
| 567 | + fun sendUpdateLocalAudioTrack(trackSid: String, features: Collection<AudioTrackFeature>) { | ||
| 568 | + val request = with(LivekitRtc.SignalRequest.newBuilder()) { | ||
| 569 | + updateAudioTrack = with(LivekitRtc.UpdateLocalAudioTrack.newBuilder()) { | ||
| 570 | + setTrackSid(trackSid) | ||
| 571 | + addAllFeatures(features) | ||
| 572 | + build() | ||
| 573 | + } | ||
| 574 | + build() | ||
| 575 | + } | ||
| 576 | + | ||
| 577 | + sendRequest(request) | ||
| 578 | + } | ||
| 579 | + | ||
| 555 | private fun sendRequest(request: LivekitRtc.SignalRequest) { | 580 | private fun sendRequest(request: LivekitRtc.SignalRequest) { |
| 556 | val skipQueue = skipQueueTypes.contains(request.messageCase) | 581 | val skipQueue = skipQueueTypes.contains(request.messageCase) |
| 557 | 582 |
| @@ -47,8 +47,10 @@ import io.livekit.android.room.track.VideoCodec | @@ -47,8 +47,10 @@ import io.livekit.android.room.track.VideoCodec | ||
| 47 | import io.livekit.android.room.track.VideoEncoding | 47 | import io.livekit.android.room.track.VideoEncoding |
| 48 | import io.livekit.android.room.util.EncodingUtils | 48 | import io.livekit.android.room.util.EncodingUtils |
| 49 | import io.livekit.android.util.LKLog | 49 | import io.livekit.android.util.LKLog |
| 50 | +import io.livekit.android.util.flow | ||
| 50 | import io.livekit.android.webrtc.sortVideoCodecPreferences | 51 | import io.livekit.android.webrtc.sortVideoCodecPreferences |
| 51 | import kotlinx.coroutines.CoroutineDispatcher | 52 | import kotlinx.coroutines.CoroutineDispatcher |
| 53 | +import kotlinx.coroutines.Job | ||
| 52 | import kotlinx.coroutines.launch | 54 | import kotlinx.coroutines.launch |
| 53 | import livekit.LivekitModels | 55 | import livekit.LivekitModels |
| 54 | import livekit.LivekitRtc | 56 | import livekit.LivekitRtc |
| @@ -76,6 +78,7 @@ internal constructor( | @@ -76,6 +78,7 @@ internal constructor( | ||
| 76 | private val eglBase: EglBase, | 78 | private val eglBase: EglBase, |
| 77 | private val screencastVideoTrackFactory: LocalScreencastVideoTrack.Factory, | 79 | private val screencastVideoTrackFactory: LocalScreencastVideoTrack.Factory, |
| 78 | private val videoTrackFactory: LocalVideoTrack.Factory, | 80 | private val videoTrackFactory: LocalVideoTrack.Factory, |
| 81 | + private val audioTrackFactory: LocalAudioTrack.Factory, | ||
| 79 | private val defaultsManager: DefaultsManager, | 82 | private val defaultsManager: DefaultsManager, |
| 80 | @Named(InjectionNames.DISPATCHER_DEFAULT) | 83 | @Named(InjectionNames.DISPATCHER_DEFAULT) |
| 81 | coroutineDispatcher: CoroutineDispatcher, | 84 | coroutineDispatcher: CoroutineDispatcher, |
| @@ -94,6 +97,8 @@ internal constructor( | @@ -94,6 +97,8 @@ internal constructor( | ||
| 94 | .mapNotNull { it as? LocalTrackPublication } | 97 | .mapNotNull { it as? LocalTrackPublication } |
| 95 | .toList() | 98 | .toList() |
| 96 | 99 | ||
| 100 | + private val jobs = mutableMapOf<Any, Job>() | ||
| 101 | + | ||
| 97 | /** | 102 | /** |
| 98 | * Creates an audio track, recording audio through the microphone with the given [options]. | 103 | * Creates an audio track, recording audio through the microphone with the given [options]. |
| 99 | * | 104 | * |
| @@ -103,7 +108,7 @@ internal constructor( | @@ -103,7 +108,7 @@ internal constructor( | ||
| 103 | name: String = "", | 108 | name: String = "", |
| 104 | options: LocalAudioTrackOptions = audioTrackCaptureDefaults, | 109 | options: LocalAudioTrackOptions = audioTrackCaptureDefaults, |
| 105 | ): LocalAudioTrack { | 110 | ): LocalAudioTrack { |
| 106 | - return LocalAudioTrack.createTrack(context, peerConnectionFactory, options, name) | 111 | + return LocalAudioTrack.createTrack(context, peerConnectionFactory, options, audioTrackFactory, name) |
| 107 | } | 112 | } |
| 108 | 113 | ||
| 109 | /** | 114 | /** |
| @@ -295,7 +300,7 @@ internal constructor( | @@ -295,7 +300,7 @@ internal constructor( | ||
| 295 | } | 300 | } |
| 296 | }, | 301 | }, |
| 297 | ) | 302 | ) |
| 298 | - publishTrackImpl( | 303 | + val publication = publishTrackImpl( |
| 299 | track = track, | 304 | track = track, |
| 300 | options = options, | 305 | options = options, |
| 301 | requestConfig = { | 306 | requestConfig = { |
| @@ -306,6 +311,15 @@ internal constructor( | @@ -306,6 +311,15 @@ internal constructor( | ||
| 306 | encodings = encodings, | 311 | encodings = encodings, |
| 307 | publishListener = publishListener, | 312 | publishListener = publishListener, |
| 308 | ) | 313 | ) |
| 314 | + | ||
| 315 | + if (publication != null) { | ||
| 316 | + val job = scope.launch { | ||
| 317 | + track::features.flow.collect { | ||
| 318 | + engine.updateLocalAudioTrack(publication.sid, it) | ||
| 319 | + } | ||
| 320 | + } | ||
| 321 | + jobs[publication] = job | ||
| 322 | + } | ||
| 309 | } | 323 | } |
| 310 | 324 | ||
| 311 | /** | 325 | /** |
| @@ -379,14 +393,14 @@ internal constructor( | @@ -379,14 +393,14 @@ internal constructor( | ||
| 379 | requestConfig: AddTrackRequest.Builder.() -> Unit, | 393 | requestConfig: AddTrackRequest.Builder.() -> Unit, |
| 380 | encodings: List<RtpParameters.Encoding> = emptyList(), | 394 | encodings: List<RtpParameters.Encoding> = emptyList(), |
| 381 | publishListener: PublishListener? = null, | 395 | publishListener: PublishListener? = null, |
| 382 | - ): Boolean { | 396 | + ): LocalTrackPublication? { |
| 383 | @Suppress("NAME_SHADOWING") var options = options | 397 | @Suppress("NAME_SHADOWING") var options = options |
| 384 | 398 | ||
| 385 | @Suppress("NAME_SHADOWING") var encodings = encodings | 399 | @Suppress("NAME_SHADOWING") var encodings = encodings |
| 386 | 400 | ||
| 387 | if (localTrackPublications.any { it.track == track }) { | 401 | if (localTrackPublications.any { it.track == track }) { |
| 388 | publishListener?.onPublishFailure(TrackException.PublishException("Track has already been published")) | 402 | publishListener?.onPublishFailure(TrackException.PublishException("Track has already been published")) |
| 389 | - return false | 403 | + return null |
| 390 | } | 404 | } |
| 391 | 405 | ||
| 392 | val cid = track.rtcTrack.id() | 406 | val cid = track.rtcTrack.id() |
| @@ -435,7 +449,7 @@ internal constructor( | @@ -435,7 +449,7 @@ internal constructor( | ||
| 435 | 449 | ||
| 436 | if (transceiver == null) { | 450 | if (transceiver == null) { |
| 437 | publishListener?.onPublishFailure(TrackException.PublishException("null sender returned from peer connection")) | 451 | publishListener?.onPublishFailure(TrackException.PublishException("null sender returned from peer connection")) |
| 438 | - return false | 452 | + return null |
| 439 | } | 453 | } |
| 440 | 454 | ||
| 441 | track.statsGetter = engine.createStatsGetter(transceiver.sender) | 455 | track.statsGetter = engine.createStatsGetter(transceiver.sender) |
| @@ -475,7 +489,7 @@ internal constructor( | @@ -475,7 +489,7 @@ internal constructor( | ||
| 475 | internalListener?.onTrackPublished(publication, this) | 489 | internalListener?.onTrackPublished(publication, this) |
| 476 | eventBus.postEvent(ParticipantEvent.LocalTrackPublished(this, publication), scope) | 490 | eventBus.postEvent(ParticipantEvent.LocalTrackPublished(this, publication), scope) |
| 477 | 491 | ||
| 478 | - return true | 492 | + return publication |
| 479 | } | 493 | } |
| 480 | 494 | ||
| 481 | private fun computeVideoEncodings( | 495 | private fun computeVideoEncodings( |
| @@ -606,6 +620,12 @@ internal constructor( | @@ -606,6 +620,12 @@ internal constructor( | ||
| 606 | return | 620 | return |
| 607 | } | 621 | } |
| 608 | 622 | ||
| 623 | + val publicationJob = jobs[publication] | ||
| 624 | + if (publicationJob != null) { | ||
| 625 | + publicationJob.cancel() | ||
| 626 | + jobs.remove(publication) | ||
| 627 | + } | ||
| 628 | + | ||
| 609 | val sid = publication.sid | 629 | val sid = publication.sid |
| 610 | trackPublications = trackPublications.toMutableMap().apply { remove(sid) } | 630 | trackPublications = trackPublications.toMutableMap().apply { remove(sid) } |
| 611 | 631 |
| @@ -20,23 +20,50 @@ import android.Manifest | @@ -20,23 +20,50 @@ import android.Manifest | ||
| 20 | import android.content.Context | 20 | import android.content.Context |
| 21 | import android.content.pm.PackageManager | 21 | import android.content.pm.PackageManager |
| 22 | import androidx.core.content.ContextCompat | 22 | import androidx.core.content.ContextCompat |
| 23 | +import dagger.assisted.Assisted | ||
| 24 | +import dagger.assisted.AssistedFactory | ||
| 25 | +import dagger.assisted.AssistedInject | ||
| 26 | +import io.livekit.android.audio.AudioProcessingController | ||
| 27 | +import io.livekit.android.dagger.InjectionNames | ||
| 23 | import io.livekit.android.room.participant.LocalParticipant | 28 | import io.livekit.android.room.participant.LocalParticipant |
| 29 | +import io.livekit.android.util.FlowObservable | ||
| 30 | +import io.livekit.android.util.flow | ||
| 31 | +import io.livekit.android.util.flowDelegate | ||
| 24 | import io.livekit.android.webrtc.peerconnection.executeBlockingOnRTCThread | 32 | import io.livekit.android.webrtc.peerconnection.executeBlockingOnRTCThread |
| 33 | +import kotlinx.coroutines.CoroutineDispatcher | ||
| 34 | +import kotlinx.coroutines.CoroutineScope | ||
| 35 | +import kotlinx.coroutines.SupervisorJob | ||
| 36 | +import kotlinx.coroutines.flow.SharingStarted | ||
| 37 | +import kotlinx.coroutines.flow.combine | ||
| 38 | +import kotlinx.coroutines.flow.map | ||
| 39 | +import kotlinx.coroutines.flow.stateIn | ||
| 40 | +import livekit.LivekitModels.AudioTrackFeature | ||
| 25 | import livekit.org.webrtc.MediaConstraints | 41 | import livekit.org.webrtc.MediaConstraints |
| 26 | import livekit.org.webrtc.PeerConnectionFactory | 42 | import livekit.org.webrtc.PeerConnectionFactory |
| 27 | import livekit.org.webrtc.RtpSender | 43 | import livekit.org.webrtc.RtpSender |
| 28 | import livekit.org.webrtc.RtpTransceiver | 44 | import livekit.org.webrtc.RtpTransceiver |
| 29 | import java.util.UUID | 45 | import java.util.UUID |
| 46 | +import javax.inject.Named | ||
| 30 | 47 | ||
| 31 | /** | 48 | /** |
| 32 | * Represents a local audio track (generally using the microphone as input). | 49 | * Represents a local audio track (generally using the microphone as input). |
| 33 | * | 50 | * |
| 34 | * This class should not be constructed directly, but rather through [LocalParticipant.createAudioTrack]. | 51 | * This class should not be constructed directly, but rather through [LocalParticipant.createAudioTrack]. |
| 35 | */ | 52 | */ |
| 36 | -class LocalAudioTrack( | ||
| 37 | - name: String, | ||
| 38 | - mediaTrack: livekit.org.webrtc.AudioTrack | 53 | +class LocalAudioTrack |
| 54 | +@AssistedInject | ||
| 55 | +constructor( | ||
| 56 | + @Assisted name: String, | ||
| 57 | + @Assisted mediaTrack: livekit.org.webrtc.AudioTrack, | ||
| 58 | + @Assisted private val options: LocalAudioTrackOptions, | ||
| 59 | + private val audioProcessingController: AudioProcessingController, | ||
| 60 | + @Named(InjectionNames.DISPATCHER_DEFAULT) | ||
| 61 | + private val dispatcher: CoroutineDispatcher, | ||
| 39 | ) : AudioTrack(name, mediaTrack) { | 62 | ) : AudioTrack(name, mediaTrack) { |
| 63 | + /** | ||
| 64 | + * To only be used for flow delegate scoping, and should not be cancelled. | ||
| 65 | + **/ | ||
| 66 | + private val delegateScope = CoroutineScope(dispatcher + SupervisorJob()) | ||
| 40 | var enabled: Boolean | 67 | var enabled: Boolean |
| 41 | get() = executeBlockingOnRTCThread { rtcTrack.enabled() } | 68 | get() = executeBlockingOnRTCThread { rtcTrack.enabled() } |
| 42 | set(value) { | 69 | set(value) { |
| @@ -47,12 +74,52 @@ class LocalAudioTrack( | @@ -47,12 +74,52 @@ class LocalAudioTrack( | ||
| 47 | internal val sender: RtpSender? | 74 | internal val sender: RtpSender? |
| 48 | get() = transceiver?.sender | 75 | get() = transceiver?.sender |
| 49 | 76 | ||
| 77 | + /** | ||
| 78 | + * Changes can be observed by using [io.livekit.android.util.flow] | ||
| 79 | + */ | ||
| 80 | + @FlowObservable | ||
| 81 | + @get:FlowObservable | ||
| 82 | + val features by flowDelegate( | ||
| 83 | + stateFlow = combine( | ||
| 84 | + audioProcessingController::capturePostProcessor.flow, | ||
| 85 | + audioProcessingController::bypassCapturePostProcessing.flow, | ||
| 86 | + ) { processor, bypass -> | ||
| 87 | + processor to bypass | ||
| 88 | + } | ||
| 89 | + .map { | ||
| 90 | + val features = getConstantFeatures() | ||
| 91 | + val (processor, bypass) = it | ||
| 92 | + if (!bypass && processor?.getName() == "krisp_noise_cancellation") { | ||
| 93 | + features.add(AudioTrackFeature.TF_ENHANCED_NOISE_CANCELLATION) | ||
| 94 | + } | ||
| 95 | + return@map features | ||
| 96 | + } | ||
| 97 | + .stateIn(delegateScope, SharingStarted.Eagerly, emptySet()), | ||
| 98 | + ) | ||
| 99 | + | ||
| 100 | + private fun getConstantFeatures(): MutableSet<AudioTrackFeature> { | ||
| 101 | + val features = mutableSetOf<AudioTrackFeature>() | ||
| 102 | + | ||
| 103 | + if (options.echoCancellation) { | ||
| 104 | + features.add(AudioTrackFeature.TF_ECHO_CANCELLATION) | ||
| 105 | + } | ||
| 106 | + if (options.noiseSuppression) { | ||
| 107 | + features.add(AudioTrackFeature.TF_NOISE_SUPPRESSION) | ||
| 108 | + } | ||
| 109 | + if (options.autoGainControl) { | ||
| 110 | + features.add(AudioTrackFeature.TF_AUTO_GAIN_CONTROL) | ||
| 111 | + } | ||
| 112 | + // TODO: Handle getting other info from JavaAudioDeviceModule | ||
| 113 | + return features | ||
| 114 | + } | ||
| 115 | + | ||
| 50 | companion object { | 116 | companion object { |
| 51 | internal fun createTrack( | 117 | internal fun createTrack( |
| 52 | context: Context, | 118 | context: Context, |
| 53 | factory: PeerConnectionFactory, | 119 | factory: PeerConnectionFactory, |
| 54 | options: LocalAudioTrackOptions = LocalAudioTrackOptions(), | 120 | options: LocalAudioTrackOptions = LocalAudioTrackOptions(), |
| 55 | - name: String = "" | 121 | + audioTrackFactory: Factory, |
| 122 | + name: String = "", | ||
| 56 | ): LocalAudioTrack { | 123 | ): LocalAudioTrack { |
| 57 | if (ContextCompat.checkSelfPermission(context, Manifest.permission.RECORD_AUDIO) != | 124 | if (ContextCompat.checkSelfPermission(context, Manifest.permission.RECORD_AUDIO) != |
| 58 | PackageManager.PERMISSION_GRANTED | 125 | PackageManager.PERMISSION_GRANTED |
| @@ -74,7 +141,16 @@ class LocalAudioTrack( | @@ -74,7 +141,16 @@ class LocalAudioTrack( | ||
| 74 | val rtcAudioTrack = | 141 | val rtcAudioTrack = |
| 75 | factory.createAudioTrack(UUID.randomUUID().toString(), audioSource) | 142 | factory.createAudioTrack(UUID.randomUUID().toString(), audioSource) |
| 76 | 143 | ||
| 77 | - return LocalAudioTrack(name = name, mediaTrack = rtcAudioTrack) | 144 | + return audioTrackFactory.create(name = name, mediaTrack = rtcAudioTrack, options = options) |
| 78 | } | 145 | } |
| 79 | } | 146 | } |
| 147 | + | ||
| 148 | + @AssistedFactory | ||
| 149 | + interface Factory { | ||
| 150 | + fun create( | ||
| 151 | + name: String, | ||
| 152 | + mediaTrack: livekit.org.webrtc.AudioTrack, | ||
| 153 | + options: LocalAudioTrackOptions, | ||
| 154 | + ): LocalAudioTrack | ||
| 155 | + } | ||
| 80 | } | 156 | } |
| @@ -19,29 +19,43 @@ package io.livekit.android.webrtc | @@ -19,29 +19,43 @@ package io.livekit.android.webrtc | ||
| 19 | import io.livekit.android.audio.AudioProcessorInterface | 19 | import io.livekit.android.audio.AudioProcessorInterface |
| 20 | import io.livekit.android.audio.AudioProcessorOptions | 20 | import io.livekit.android.audio.AudioProcessorOptions |
| 21 | import io.livekit.android.audio.AuthedAudioProcessingController | 21 | import io.livekit.android.audio.AuthedAudioProcessingController |
| 22 | -import io.livekit.android.audio.authenticateProcessors | 22 | +import io.livekit.android.audio.AuthedAudioProcessorInterface |
| 23 | +import io.livekit.android.util.flowDelegate | ||
| 23 | import livekit.org.webrtc.AudioProcessingFactory | 24 | import livekit.org.webrtc.AudioProcessingFactory |
| 24 | import livekit.org.webrtc.ExternalAudioProcessingFactory | 25 | import livekit.org.webrtc.ExternalAudioProcessingFactory |
| 25 | import java.nio.ByteBuffer | 26 | import java.nio.ByteBuffer |
| 26 | 27 | ||
| 27 | -class CustomAudioProcessingFactory(private var audioProcessorOptions: AudioProcessorOptions) : AuthedAudioProcessingController { | 28 | +/** |
| 29 | + * @suppress | ||
| 30 | + */ | ||
| 31 | +internal class CustomAudioProcessingFactory() : AuthedAudioProcessingController { | ||
| 32 | + constructor(audioProcessorOptions: AudioProcessorOptions) : this() { | ||
| 33 | + capturePostProcessor = audioProcessorOptions.capturePostProcessor | ||
| 34 | + renderPreProcessor = audioProcessorOptions.renderPreProcessor | ||
| 35 | + bypassCapturePostProcessing = audioProcessorOptions.capturePostBypass | ||
| 36 | + bypassRenderPreProcessing = audioProcessorOptions.renderPreBypass | ||
| 37 | + } | ||
| 28 | 38 | ||
| 29 | private val externalAudioProcessor = ExternalAudioProcessingFactory() | 39 | private val externalAudioProcessor = ExternalAudioProcessingFactory() |
| 30 | 40 | ||
| 31 | - init { | ||
| 32 | - if (audioProcessorOptions.capturePostProcessor != null) { | ||
| 33 | - setCapturePostProcessing(audioProcessorOptions.capturePostProcessor) | ||
| 34 | - } else { | ||
| 35 | - setCapturePostProcessing(null) | ||
| 36 | - setBypassForCapturePostProcessing(false) | ||
| 37 | - } | ||
| 38 | - if (audioProcessorOptions.renderPreProcessor != null) { | ||
| 39 | - setRenderPreProcessing(audioProcessorOptions.renderPreProcessor) | ||
| 40 | - setBypassForRenderPreProcessing(audioProcessorOptions.renderPreBypass) | ||
| 41 | - } else { | ||
| 42 | - setRenderPreProcessing(null) | ||
| 43 | - setBypassForRenderPreProcessing(false) | ||
| 44 | - } | 41 | + override var capturePostProcessor: AudioProcessorInterface? by flowDelegate(null) { value, _ -> |
| 42 | + externalAudioProcessor.setCapturePostProcessing( | ||
| 43 | + value.toAudioProcessing(), | ||
| 44 | + ) | ||
| 45 | + } | ||
| 46 | + | ||
| 47 | + override var renderPreProcessor: AudioProcessorInterface? by flowDelegate(null) { value, _ -> | ||
| 48 | + externalAudioProcessor.setRenderPreProcessing( | ||
| 49 | + value.toAudioProcessing(), | ||
| 50 | + ) | ||
| 51 | + } | ||
| 52 | + | ||
| 53 | + override var bypassCapturePostProcessing: Boolean by flowDelegate(false) { value, _ -> | ||
| 54 | + externalAudioProcessor.setBypassFlagForCapturePost(value) | ||
| 55 | + } | ||
| 56 | + | ||
| 57 | + override var bypassRenderPreProcessing: Boolean by flowDelegate(false) { value, _ -> | ||
| 58 | + externalAudioProcessor.setBypassFlagForRenderPre(value) | ||
| 45 | } | 59 | } |
| 46 | 60 | ||
| 47 | fun getAudioProcessingFactory(): AudioProcessingFactory { | 61 | fun getAudioProcessingFactory(): AudioProcessingFactory { |
| @@ -49,31 +63,28 @@ class CustomAudioProcessingFactory(private var audioProcessorOptions: AudioProce | @@ -49,31 +63,28 @@ class CustomAudioProcessingFactory(private var audioProcessorOptions: AudioProce | ||
| 49 | } | 63 | } |
| 50 | 64 | ||
| 51 | override fun authenticate(url: String, token: String) { | 65 | override fun authenticate(url: String, token: String) { |
| 52 | - audioProcessorOptions.authenticateProcessors(url, token) | 66 | + (capturePostProcessor as? AuthedAudioProcessorInterface)?.authenticate(url, token) |
| 67 | + (renderPreProcessor as? AuthedAudioProcessorInterface)?.authenticate(url, token) | ||
| 53 | } | 68 | } |
| 54 | 69 | ||
| 70 | + @Deprecated("Use the capturePostProcessor variable directly instead", ReplaceWith("capturePostProcessor = processing")) | ||
| 55 | override fun setCapturePostProcessing(processing: AudioProcessorInterface?) { | 71 | override fun setCapturePostProcessing(processing: AudioProcessorInterface?) { |
| 56 | - audioProcessorOptions = audioProcessorOptions.copy(capturePostProcessor = processing) | ||
| 57 | - externalAudioProcessor.setCapturePostProcessing( | ||
| 58 | - processing.toAudioProcessing(), | ||
| 59 | - ) | 72 | + capturePostProcessor = processing |
| 60 | } | 73 | } |
| 61 | 74 | ||
| 62 | - override fun setBypassForCapturePostProcessing(bypass: Boolean) { | ||
| 63 | - audioProcessorOptions = audioProcessorOptions.copy(capturePostBypass = bypass) | ||
| 64 | - externalAudioProcessor.setBypassFlagForCapturePost(bypass) | 75 | + @Deprecated("Use the renderPreProcessor variable directly instead", ReplaceWith("renderPreProcessor = processing")) |
| 76 | + override fun setRenderPreProcessing(processing: AudioProcessorInterface?) { | ||
| 77 | + renderPreProcessor = processing | ||
| 65 | } | 78 | } |
| 66 | 79 | ||
| 67 | - override fun setRenderPreProcessing(processing: AudioProcessorInterface?) { | ||
| 68 | - audioProcessorOptions = audioProcessorOptions.copy(renderPreProcessor = processing) | ||
| 69 | - externalAudioProcessor.setRenderPreProcessing( | ||
| 70 | - processing.toAudioProcessing(), | ||
| 71 | - ) | 80 | + @Deprecated("Use the bypassCapturePostProcessing variable directly instead", ReplaceWith("bypassCapturePostProcessing = bypass")) |
| 81 | + override fun setBypassForCapturePostProcessing(bypass: Boolean) { | ||
| 82 | + bypassCapturePostProcessing = bypass | ||
| 72 | } | 83 | } |
| 73 | 84 | ||
| 85 | + @Deprecated("Use the bypassRenderPreProcessing variable directly instead", ReplaceWith("bypassRenderPreProcessing = bypass")) | ||
| 74 | override fun setBypassForRenderPreProcessing(bypass: Boolean) { | 86 | override fun setBypassForRenderPreProcessing(bypass: Boolean) { |
| 75 | - audioProcessorOptions = audioProcessorOptions.copy(renderPreBypass = bypass) | ||
| 76 | - externalAudioProcessor.setBypassFlagForRenderPre(bypass) | 87 | + bypassRenderPreProcessing = bypass |
| 77 | } | 88 | } |
| 78 | 89 | ||
| 79 | private class AudioProcessingBridge( | 90 | private class AudioProcessingBridge( |
| @@ -18,8 +18,27 @@ package io.livekit.android.test.mock | @@ -18,8 +18,27 @@ package io.livekit.android.test.mock | ||
| 18 | 18 | ||
| 19 | import io.livekit.android.audio.AudioProcessingController | 19 | import io.livekit.android.audio.AudioProcessingController |
| 20 | import io.livekit.android.audio.AudioProcessorInterface | 20 | import io.livekit.android.audio.AudioProcessorInterface |
| 21 | +import io.livekit.android.util.FlowObservable | ||
| 22 | +import io.livekit.android.util.flowDelegate | ||
| 21 | 23 | ||
| 22 | class MockAudioProcessingController : AudioProcessingController { | 24 | class MockAudioProcessingController : AudioProcessingController { |
| 25 | + | ||
| 26 | + @FlowObservable | ||
| 27 | + @get:FlowObservable | ||
| 28 | + override var capturePostProcessor: AudioProcessorInterface? by flowDelegate(null) | ||
| 29 | + | ||
| 30 | + @FlowObservable | ||
| 31 | + @get:FlowObservable | ||
| 32 | + override var renderPreProcessor: AudioProcessorInterface? by flowDelegate(null) | ||
| 33 | + | ||
| 34 | + @FlowObservable | ||
| 35 | + @get:FlowObservable | ||
| 36 | + override var bypassRenderPreProcessing: Boolean by flowDelegate(false) | ||
| 37 | + | ||
| 38 | + @FlowObservable | ||
| 39 | + @get:FlowObservable | ||
| 40 | + override var bypassCapturePostProcessing: Boolean by flowDelegate(false) | ||
| 41 | + | ||
| 23 | override fun setCapturePostProcessing(processing: AudioProcessorInterface?) { | 42 | override fun setCapturePostProcessing(processing: AudioProcessorInterface?) { |
| 24 | } | 43 | } |
| 25 | 44 |
| @@ -17,14 +17,18 @@ | @@ -17,14 +17,18 @@ | ||
| 17 | package io.livekit.android.room | 17 | package io.livekit.android.room |
| 18 | 18 | ||
| 19 | import android.net.Network | 19 | import android.net.Network |
| 20 | -import io.livekit.android.events.* | 20 | +import io.livekit.android.events.DisconnectReason |
| 21 | +import io.livekit.android.events.RoomEvent | ||
| 22 | +import io.livekit.android.events.convert | ||
| 21 | import io.livekit.android.room.participant.ConnectionQuality | 23 | import io.livekit.android.room.participant.ConnectionQuality |
| 22 | import io.livekit.android.room.track.LocalAudioTrack | 24 | import io.livekit.android.room.track.LocalAudioTrack |
| 25 | +import io.livekit.android.room.track.LocalAudioTrackOptions | ||
| 23 | import io.livekit.android.room.track.Track | 26 | import io.livekit.android.room.track.Track |
| 24 | import io.livekit.android.test.MockE2ETest | 27 | import io.livekit.android.test.MockE2ETest |
| 25 | import io.livekit.android.test.assert.assertIsClassList | 28 | import io.livekit.android.test.assert.assertIsClassList |
| 26 | import io.livekit.android.test.events.EventCollector | 29 | import io.livekit.android.test.events.EventCollector |
| 27 | import io.livekit.android.test.events.FlowCollector | 30 | import io.livekit.android.test.events.FlowCollector |
| 31 | +import io.livekit.android.test.mock.MockAudioProcessingController | ||
| 28 | import io.livekit.android.test.mock.MockAudioStreamTrack | 32 | import io.livekit.android.test.mock.MockAudioStreamTrack |
| 29 | import io.livekit.android.test.mock.MockMediaStream | 33 | import io.livekit.android.test.mock.MockMediaStream |
| 30 | import io.livekit.android.test.mock.MockRtpReceiver | 34 | import io.livekit.android.test.mock.MockRtpReceiver |
| @@ -335,8 +339,11 @@ class RoomMockE2ETest : MockE2ETest() { | @@ -335,8 +339,11 @@ class RoomMockE2ETest : MockE2ETest() { | ||
| 335 | 339 | ||
| 336 | room.localParticipant.publishAudioTrack( | 340 | room.localParticipant.publishAudioTrack( |
| 337 | LocalAudioTrack( | 341 | LocalAudioTrack( |
| 338 | - "", | ||
| 339 | - MockAudioStreamTrack(id = TestData.LOCAL_TRACK_PUBLISHED.trackPublished.cid), | 342 | + name = "", |
| 343 | + mediaTrack = MockAudioStreamTrack(id = TestData.LOCAL_TRACK_PUBLISHED.trackPublished.cid), | ||
| 344 | + options = LocalAudioTrackOptions(), | ||
| 345 | + audioProcessingController = MockAudioProcessingController(), | ||
| 346 | + dispatcher = coroutineRule.dispatcher, | ||
| 340 | ), | 347 | ), |
| 341 | ) | 348 | ) |
| 342 | 349 | ||
| @@ -381,8 +388,11 @@ class RoomMockE2ETest : MockE2ETest() { | @@ -381,8 +388,11 @@ class RoomMockE2ETest : MockE2ETest() { | ||
| 381 | } | 388 | } |
| 382 | room.localParticipant.publishAudioTrack( | 389 | room.localParticipant.publishAudioTrack( |
| 383 | LocalAudioTrack( | 390 | LocalAudioTrack( |
| 384 | - "", | ||
| 385 | - MockAudioStreamTrack(id = TestData.LOCAL_TRACK_PUBLISHED.trackPublished.cid), | 391 | + name = "", |
| 392 | + mediaTrack = MockAudioStreamTrack(id = TestData.LOCAL_TRACK_PUBLISHED.trackPublished.cid), | ||
| 393 | + options = LocalAudioTrackOptions(), | ||
| 394 | + audioProcessingController = MockAudioProcessingController(), | ||
| 395 | + dispatcher = coroutineRule.dispatcher, | ||
| 386 | ), | 396 | ), |
| 387 | ) | 397 | ) |
| 388 | 398 |
| @@ -17,7 +17,9 @@ | @@ -17,7 +17,9 @@ | ||
| 17 | package io.livekit.android.room | 17 | package io.livekit.android.room |
| 18 | 18 | ||
| 19 | import io.livekit.android.room.track.LocalAudioTrack | 19 | import io.livekit.android.room.track.LocalAudioTrack |
| 20 | +import io.livekit.android.room.track.LocalAudioTrackOptions | ||
| 20 | import io.livekit.android.test.MockE2ETest | 21 | import io.livekit.android.test.MockE2ETest |
| 22 | +import io.livekit.android.test.mock.MockAudioProcessingController | ||
| 21 | import io.livekit.android.test.mock.MockAudioStreamTrack | 23 | import io.livekit.android.test.mock.MockAudioStreamTrack |
| 22 | import io.livekit.android.test.mock.TestData | 24 | import io.livekit.android.test.mock.TestData |
| 23 | import io.livekit.android.test.util.toPBByteString | 25 | import io.livekit.android.test.util.toPBByteString |
| @@ -103,8 +105,11 @@ class RoomReconnectionMockE2ETest : MockE2ETest() { | @@ -103,8 +105,11 @@ class RoomReconnectionMockE2ETest : MockE2ETest() { | ||
| 103 | // publish track | 105 | // publish track |
| 104 | room.localParticipant.publishAudioTrack( | 106 | room.localParticipant.publishAudioTrack( |
| 105 | LocalAudioTrack( | 107 | LocalAudioTrack( |
| 106 | - "", | ||
| 107 | - MockAudioStreamTrack(id = TestData.LOCAL_TRACK_PUBLISHED.trackPublished.cid), | 108 | + name = "", |
| 109 | + mediaTrack = MockAudioStreamTrack(id = TestData.LOCAL_TRACK_PUBLISHED.trackPublished.cid), | ||
| 110 | + options = LocalAudioTrackOptions(), | ||
| 111 | + audioProcessingController = MockAudioProcessingController(), | ||
| 112 | + dispatcher = coroutineRule.dispatcher, | ||
| 108 | ), | 113 | ), |
| 109 | ) | 114 | ) |
| 110 | 115 |
| @@ -21,10 +21,12 @@ import io.livekit.android.events.RoomEvent | @@ -21,10 +21,12 @@ import io.livekit.android.events.RoomEvent | ||
| 21 | import io.livekit.android.events.TrackPublicationEvent | 21 | import io.livekit.android.events.TrackPublicationEvent |
| 22 | import io.livekit.android.room.participant.AudioTrackPublishOptions | 22 | import io.livekit.android.room.participant.AudioTrackPublishOptions |
| 23 | import io.livekit.android.room.track.LocalAudioTrack | 23 | import io.livekit.android.room.track.LocalAudioTrack |
| 24 | +import io.livekit.android.room.track.LocalAudioTrackOptions | ||
| 24 | import io.livekit.android.room.track.Track | 25 | import io.livekit.android.room.track.Track |
| 25 | import io.livekit.android.test.MockE2ETest | 26 | import io.livekit.android.test.MockE2ETest |
| 26 | import io.livekit.android.test.assert.assertIsClass | 27 | import io.livekit.android.test.assert.assertIsClass |
| 27 | import io.livekit.android.test.events.EventCollector | 28 | import io.livekit.android.test.events.EventCollector |
| 29 | +import io.livekit.android.test.mock.MockAudioProcessingController | ||
| 28 | import io.livekit.android.test.mock.MockAudioStreamTrack | 30 | import io.livekit.android.test.mock.MockAudioStreamTrack |
| 29 | import io.livekit.android.test.mock.MockDataChannel | 31 | import io.livekit.android.test.mock.MockDataChannel |
| 30 | import io.livekit.android.test.mock.MockPeerConnection | 32 | import io.livekit.android.test.mock.MockPeerConnection |
| @@ -41,8 +43,11 @@ class RoomTranscriptionMockE2ETest : MockE2ETest() { | @@ -41,8 +43,11 @@ class RoomTranscriptionMockE2ETest : MockE2ETest() { | ||
| 41 | connect() | 43 | connect() |
| 42 | room.localParticipant.publishAudioTrack( | 44 | room.localParticipant.publishAudioTrack( |
| 43 | LocalAudioTrack( | 45 | LocalAudioTrack( |
| 44 | - "", | ||
| 45 | - MockAudioStreamTrack(id = TestData.LOCAL_TRACK_PUBLISHED.trackPublished.cid), | 46 | + name = "", |
| 47 | + mediaTrack = MockAudioStreamTrack(id = TestData.LOCAL_TRACK_PUBLISHED.trackPublished.cid), | ||
| 48 | + options = LocalAudioTrackOptions(), | ||
| 49 | + audioProcessingController = MockAudioProcessingController(), | ||
| 50 | + dispatcher = coroutineRule.dispatcher, | ||
| 46 | ), | 51 | ), |
| 47 | options = AudioTrackPublishOptions( | 52 | options = AudioTrackPublishOptions( |
| 48 | source = Track.Source.MICROPHONE, | 53 | source = Track.Source.MICROPHONE, |
| @@ -16,10 +16,12 @@ | @@ -16,10 +16,12 @@ | ||
| 16 | 16 | ||
| 17 | package io.livekit.android.room.participant | 17 | package io.livekit.android.room.participant |
| 18 | 18 | ||
| 19 | +import io.livekit.android.audio.AudioProcessorInterface | ||
| 19 | import io.livekit.android.events.ParticipantEvent | 20 | import io.livekit.android.events.ParticipantEvent |
| 20 | import io.livekit.android.events.RoomEvent | 21 | import io.livekit.android.events.RoomEvent |
| 21 | import io.livekit.android.room.DefaultsManager | 22 | import io.livekit.android.room.DefaultsManager |
| 22 | import io.livekit.android.room.track.LocalAudioTrack | 23 | import io.livekit.android.room.track.LocalAudioTrack |
| 24 | +import io.livekit.android.room.track.LocalAudioTrackOptions | ||
| 23 | import io.livekit.android.room.track.LocalVideoTrack | 25 | import io.livekit.android.room.track.LocalVideoTrack |
| 24 | import io.livekit.android.room.track.LocalVideoTrackOptions | 26 | import io.livekit.android.room.track.LocalVideoTrackOptions |
| 25 | import io.livekit.android.room.track.Track | 27 | import io.livekit.android.room.track.Track |
| @@ -28,6 +30,7 @@ import io.livekit.android.room.track.VideoCodec | @@ -28,6 +30,7 @@ import io.livekit.android.room.track.VideoCodec | ||
| 28 | import io.livekit.android.test.MockE2ETest | 30 | import io.livekit.android.test.MockE2ETest |
| 29 | import io.livekit.android.test.assert.assertIsClassList | 31 | import io.livekit.android.test.assert.assertIsClassList |
| 30 | import io.livekit.android.test.events.EventCollector | 32 | import io.livekit.android.test.events.EventCollector |
| 33 | +import io.livekit.android.test.mock.MockAudioProcessingController | ||
| 31 | import io.livekit.android.test.mock.MockAudioStreamTrack | 34 | import io.livekit.android.test.mock.MockAudioStreamTrack |
| 32 | import io.livekit.android.test.mock.MockEglBase | 35 | import io.livekit.android.test.mock.MockEglBase |
| 33 | import io.livekit.android.test.mock.MockVideoCapturer | 36 | import io.livekit.android.test.mock.MockVideoCapturer |
| @@ -36,19 +39,25 @@ import io.livekit.android.test.mock.TestData | @@ -36,19 +39,25 @@ import io.livekit.android.test.mock.TestData | ||
| 36 | import io.livekit.android.test.util.toPBByteString | 39 | import io.livekit.android.test.util.toPBByteString |
| 37 | import io.livekit.android.util.toOkioByteString | 40 | import io.livekit.android.util.toOkioByteString |
| 38 | import kotlinx.coroutines.ExperimentalCoroutinesApi | 41 | import kotlinx.coroutines.ExperimentalCoroutinesApi |
| 42 | +import kotlinx.coroutines.test.advanceUntilIdle | ||
| 39 | import livekit.LivekitModels | 43 | import livekit.LivekitModels |
| 44 | +import livekit.LivekitModels.AudioTrackFeature | ||
| 40 | import livekit.LivekitRtc | 45 | import livekit.LivekitRtc |
| 41 | import livekit.LivekitRtc.SubscribedCodec | 46 | import livekit.LivekitRtc.SubscribedCodec |
| 42 | import livekit.LivekitRtc.SubscribedQuality | 47 | import livekit.LivekitRtc.SubscribedQuality |
| 43 | import livekit.org.webrtc.RtpParameters | 48 | import livekit.org.webrtc.RtpParameters |
| 44 | import livekit.org.webrtc.VideoSource | 49 | import livekit.org.webrtc.VideoSource |
| 45 | -import org.junit.Assert.* | 50 | +import org.junit.Assert.assertEquals |
| 51 | +import org.junit.Assert.assertFalse | ||
| 52 | +import org.junit.Assert.assertNull | ||
| 53 | +import org.junit.Assert.assertTrue | ||
| 46 | import org.junit.Test | 54 | import org.junit.Test |
| 47 | import org.junit.runner.RunWith | 55 | import org.junit.runner.RunWith |
| 48 | import org.mockito.Mockito | 56 | import org.mockito.Mockito |
| 49 | import org.mockito.Mockito.mock | 57 | import org.mockito.Mockito.mock |
| 50 | import org.mockito.kotlin.argThat | 58 | import org.mockito.kotlin.argThat |
| 51 | import org.robolectric.RobolectricTestRunner | 59 | import org.robolectric.RobolectricTestRunner |
| 60 | +import java.nio.ByteBuffer | ||
| 52 | 61 | ||
| 53 | @ExperimentalCoroutinesApi | 62 | @ExperimentalCoroutinesApi |
| 54 | @RunWith(RobolectricTestRunner::class) | 63 | @RunWith(RobolectricTestRunner::class) |
| @@ -60,8 +69,11 @@ class LocalParticipantMockE2ETest : MockE2ETest() { | @@ -60,8 +69,11 @@ class LocalParticipantMockE2ETest : MockE2ETest() { | ||
| 60 | 69 | ||
| 61 | room.localParticipant.publishAudioTrack( | 70 | room.localParticipant.publishAudioTrack( |
| 62 | LocalAudioTrack( | 71 | LocalAudioTrack( |
| 63 | - "", | ||
| 64 | - MockAudioStreamTrack(id = TestData.LOCAL_TRACK_PUBLISHED.trackPublished.cid), | 72 | + name = "", |
| 73 | + mediaTrack = MockAudioStreamTrack(id = TestData.LOCAL_TRACK_PUBLISHED.trackPublished.cid), | ||
| 74 | + options = LocalAudioTrackOptions(), | ||
| 75 | + audioProcessingController = MockAudioProcessingController(), | ||
| 76 | + dispatcher = coroutineRule.dispatcher, | ||
| 65 | ), | 77 | ), |
| 66 | ) | 78 | ) |
| 67 | 79 | ||
| @@ -348,4 +360,80 @@ class LocalParticipantMockE2ETest : MockE2ETest() { | @@ -348,4 +360,80 @@ class LocalParticipantMockE2ETest : MockE2ETest() { | ||
| 348 | 360 | ||
| 349 | assertEquals(preference, transceiver.sender.parameters.degradationPreference) | 361 | assertEquals(preference, transceiver.sender.parameters.degradationPreference) |
| 350 | } | 362 | } |
| 363 | + | ||
| 364 | + @Test | ||
| 365 | + fun sendsInitialAudioTrackFeatures() = runTest { | ||
| 366 | + connect() | ||
| 367 | + | ||
| 368 | + wsFactory.ws.clearRequests() | ||
| 369 | + room.localParticipant.publishAudioTrack( | ||
| 370 | + LocalAudioTrack( | ||
| 371 | + name = "", | ||
| 372 | + mediaTrack = MockAudioStreamTrack(id = TestData.LOCAL_TRACK_PUBLISHED.trackPublished.cid), | ||
| 373 | + options = LocalAudioTrackOptions(), | ||
| 374 | + audioProcessingController = MockAudioProcessingController(), | ||
| 375 | + dispatcher = coroutineRule.dispatcher, | ||
| 376 | + ), | ||
| 377 | + ) | ||
| 378 | + | ||
| 379 | + advanceUntilIdle() | ||
| 380 | + assertEquals(2, wsFactory.ws.sentRequests.size) | ||
| 381 | + | ||
| 382 | + // Verify the update audio track request gets the proper publish options set. | ||
| 383 | + val requestString = wsFactory.ws.sentRequests[1].toPBByteString() | ||
| 384 | + val sentRequest = LivekitRtc.SignalRequest.newBuilder() | ||
| 385 | + .mergeFrom(requestString) | ||
| 386 | + .build() | ||
| 387 | + | ||
| 388 | + assertTrue(sentRequest.hasUpdateAudioTrack()) | ||
| 389 | + val features = sentRequest.updateAudioTrack.featuresList | ||
| 390 | + assertTrue(features.contains(AudioTrackFeature.TF_ECHO_CANCELLATION)) | ||
| 391 | + assertTrue(features.contains(AudioTrackFeature.TF_NOISE_SUPPRESSION)) | ||
| 392 | + assertTrue(features.contains(AudioTrackFeature.TF_AUTO_GAIN_CONTROL)) | ||
| 393 | + } | ||
| 394 | + | ||
| 395 | + @Test | ||
| 396 | + fun sendsUpdatedAudioTrackFeatures() = runTest { | ||
| 397 | + connect() | ||
| 398 | + | ||
| 399 | + val audioProcessingController = MockAudioProcessingController() | ||
| 400 | + room.localParticipant.publishAudioTrack( | ||
| 401 | + LocalAudioTrack( | ||
| 402 | + name = "", | ||
| 403 | + mediaTrack = MockAudioStreamTrack(id = TestData.LOCAL_TRACK_PUBLISHED.trackPublished.cid), | ||
| 404 | + options = LocalAudioTrackOptions(), | ||
| 405 | + audioProcessingController = audioProcessingController, | ||
| 406 | + dispatcher = coroutineRule.dispatcher, | ||
| 407 | + ), | ||
| 408 | + ) | ||
| 409 | + | ||
| 410 | + advanceUntilIdle() | ||
| 411 | + wsFactory.ws.clearRequests() | ||
| 412 | + | ||
| 413 | + audioProcessingController.capturePostProcessor = object : AudioProcessorInterface { | ||
| 414 | + override fun isEnabled(): Boolean = true | ||
| 415 | + | ||
| 416 | + override fun getName(): String = "krisp_noise_cancellation" | ||
| 417 | + | ||
| 418 | + override fun initializeAudioProcessing(sampleRateHz: Int, numChannels: Int) {} | ||
| 419 | + | ||
| 420 | + override fun resetAudioProcessing(newRate: Int) {} | ||
| 421 | + | ||
| 422 | + override fun processAudio(numBands: Int, numFrames: Int, buffer: ByteBuffer) {} | ||
| 423 | + } | ||
| 424 | + assertEquals(1, wsFactory.ws.sentRequests.size) | ||
| 425 | + | ||
| 426 | + // Verify the update audio track request gets the proper publish options set. | ||
| 427 | + val requestString = wsFactory.ws.sentRequests[0].toPBByteString() | ||
| 428 | + val sentRequest = LivekitRtc.SignalRequest.newBuilder() | ||
| 429 | + .mergeFrom(requestString) | ||
| 430 | + .build() | ||
| 431 | + | ||
| 432 | + assertTrue(sentRequest.hasUpdateAudioTrack()) | ||
| 433 | + val features = sentRequest.updateAudioTrack.featuresList | ||
| 434 | + assertTrue(features.contains(AudioTrackFeature.TF_ECHO_CANCELLATION)) | ||
| 435 | + assertTrue(features.contains(AudioTrackFeature.TF_NOISE_SUPPRESSION)) | ||
| 436 | + assertTrue(features.contains(AudioTrackFeature.TF_AUTO_GAIN_CONTROL)) | ||
| 437 | + assertTrue(features.contains(AudioTrackFeature.TF_ENHANCED_NOISE_CANCELLATION)) | ||
| 438 | + } | ||
| 351 | } | 439 | } |
| @@ -19,12 +19,13 @@ package io.livekit.android.room.participant | @@ -19,12 +19,13 @@ package io.livekit.android.room.participant | ||
| 19 | import io.livekit.android.events.ParticipantEvent | 19 | import io.livekit.android.events.ParticipantEvent |
| 20 | import io.livekit.android.events.RoomEvent | 20 | import io.livekit.android.events.RoomEvent |
| 21 | import io.livekit.android.room.track.LocalAudioTrack | 21 | import io.livekit.android.room.track.LocalAudioTrack |
| 22 | +import io.livekit.android.room.track.LocalAudioTrackOptions | ||
| 22 | import io.livekit.android.test.MockE2ETest | 23 | import io.livekit.android.test.MockE2ETest |
| 23 | import io.livekit.android.test.assert.assertIsClassList | 24 | import io.livekit.android.test.assert.assertIsClassList |
| 24 | import io.livekit.android.test.events.EventCollector | 25 | import io.livekit.android.test.events.EventCollector |
| 26 | +import io.livekit.android.test.mock.MockAudioProcessingController | ||
| 25 | import io.livekit.android.test.mock.MockAudioStreamTrack | 27 | import io.livekit.android.test.mock.MockAudioStreamTrack |
| 26 | import io.livekit.android.test.mock.TestData | 28 | import io.livekit.android.test.mock.TestData |
| 27 | -import io.livekit.android.test.util.toOkioByteString | ||
| 28 | import io.livekit.android.util.toOkioByteString | 29 | import io.livekit.android.util.toOkioByteString |
| 29 | import kotlinx.coroutines.ExperimentalCoroutinesApi | 30 | import kotlinx.coroutines.ExperimentalCoroutinesApi |
| 30 | import org.junit.Assert.assertEquals | 31 | import org.junit.Assert.assertEquals |
| @@ -43,8 +44,11 @@ class ParticipantMockE2ETest : MockE2ETest() { | @@ -43,8 +44,11 @@ class ParticipantMockE2ETest : MockE2ETest() { | ||
| 43 | // publish track | 44 | // publish track |
| 44 | room.localParticipant.publishAudioTrack( | 45 | room.localParticipant.publishAudioTrack( |
| 45 | LocalAudioTrack( | 46 | LocalAudioTrack( |
| 46 | - "", | ||
| 47 | - MockAudioStreamTrack(id = TestData.LOCAL_TRACK_PUBLISHED.trackPublished.cid), | 47 | + name = "", |
| 48 | + mediaTrack = MockAudioStreamTrack(id = TestData.LOCAL_TRACK_PUBLISHED.trackPublished.cid), | ||
| 49 | + options = LocalAudioTrackOptions(), | ||
| 50 | + audioProcessingController = MockAudioProcessingController(), | ||
| 51 | + dispatcher = coroutineRule.dispatcher, | ||
| 48 | ), | 52 | ), |
| 49 | ) | 53 | ) |
| 50 | 54 |
-
请 注册 或 登录 后发表评论