davidliu
Committed by GitHub

Make LocalVideoTrack options FlowObservable (#220)

* Change track options to be FlowObservable

* View-based sample update
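
A minimal sketch of how the flow-backed options property can be consumed from app code, using the ::options.flow accessor that the updated samples below rely on. The function name and the scope/setMirror parameters are illustrative placeholders, not part of this PR:

import io.livekit.android.room.track.CameraPosition
import io.livekit.android.room.track.LocalVideoTrack
import io.livekit.android.util.flow
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.launch

// Illustrative consumer: mirror a renderer whenever the local track switches
// to the front-facing camera. scope and setMirror stand in for whatever
// lifecycle scope and renderer the caller actually has.
fun observeCameraPosition(
    scope: CoroutineScope,
    track: LocalVideoTrack,
    setMirror: (Boolean) -> Unit,
) {
    scope.launch {
        // ::options.flow emits the track's options whenever they change,
        // e.g. after a camera switch completes.
        track::options.flow.collect { options ->
            setMirror(options.position == CameraPosition.FRONT)
        }
    }
}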
... ... @@ -5,17 +5,19 @@ import android.content.Context
import android.content.pm.PackageManager
import android.hardware.camera2.CameraManager
import androidx.core.content.ContextCompat
import com.github.ajalt.timberkt.Timber
import dagger.assisted.Assisted
import dagger.assisted.AssistedFactory
import dagger.assisted.AssistedInject
import io.livekit.android.memory.CloseableManager
import io.livekit.android.memory.SurfaceTextureHelperCloser
import io.livekit.android.room.DefaultsManager
import io.livekit.android.room.track.video.Camera1CapturerWithSize
import io.livekit.android.room.track.video.Camera2CapturerWithSize
import io.livekit.android.room.track.video.VideoCapturerWithSize
import io.livekit.android.room.track.video.*
import io.livekit.android.util.FlowObservable
import io.livekit.android.util.LKLog
import io.livekit.android.util.flowDelegate
import org.webrtc.*
import org.webrtc.CameraVideoCapturer.CameraEventsHandler
import java.util.*
... ... @@ -30,7 +32,7 @@ constructor(
@Assisted private var capturer: VideoCapturer,
@Assisted private var source: VideoSource,
@Assisted name: String,
@Assisted var options: LocalVideoTrackOptions,
@Assisted options: LocalVideoTrackOptions,
@Assisted rtcTrack: org.webrtc.VideoTrack,
private val peerConnectionFactory: PeerConnectionFactory,
private val context: Context,
... ... @@ -42,6 +44,10 @@ constructor(
override var rtcTrack: org.webrtc.VideoTrack = rtcTrack
internal set
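// Backed by flowDelegate so changes can be observed via ::options.flow.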
@FlowObservable
@get:FlowObservable
var options: LocalVideoTrackOptions by flowDelegate(options)
val dimensions: Dimensions
get() {
(capturer as? VideoCapturerWithSize)?.let { capturerWithSize ->
... ... @@ -116,13 +122,46 @@ constructor(
targetDeviceId = deviceNames[(currentIndex + 1) % deviceNames.size]
}
fun updateCameraOptions() {
val newOptions = options.copy(
deviceId = targetDeviceId,
position = enumerator.getCameraPosition(targetDeviceId)
)
options = newOptions
}
val cameraSwitchHandler = object : CameraVideoCapturer.CameraSwitchHandler {
override fun onCameraSwitchDone(isFrontFacing: Boolean) {
val newOptions = options.copy(
deviceId = targetDeviceId,
position = enumerator.getCameraPosition(targetDeviceId)
)
options = newOptions
// For cameras we control, wait until the first frame to ensure everything is okay.
if (cameraCapturer is CameraCapturerWithSize) {
cameraCapturer.cameraEventsDispatchHandler
.registerHandler(object : CameraEventsHandler {
override fun onFirstFrameAvailable() {
updateCameraOptions()
cameraCapturer.cameraEventsDispatchHandler.unregisterHandler(this)
}
override fun onCameraError(p0: String?) {
cameraCapturer.cameraEventsDispatchHandler.unregisterHandler(this)
}
override fun onCameraDisconnected() {
cameraCapturer.cameraEventsDispatchHandler.unregisterHandler(this)
}
override fun onCameraFreezed(p0: String?) {
}
override fun onCameraOpening(p0: String?) {
}
override fun onCameraClosed() {
cameraCapturer.cameraEventsDispatchHandler.unregisterHandler(this)
}
})
} else {
updateCameraOptions()
}
}
override fun onCameraSwitchError(errorDescription: String?) {
... ... @@ -154,6 +193,7 @@ constructor(
// sender owns rtcTrack, so it'll take care of disposing it.
oldRtcTrack.setEnabled(false)
// Close resources associated with the old track. New track resources are registered in createTrack.
val oldCloseable = closeableManager.unregisterResource(oldRtcTrack)
oldCloseable?.close()
... ... @@ -298,8 +338,9 @@ constructor(
enumerator: CameraEnumerator,
options: LocalVideoTrackOptions
): Pair<VideoCapturer, LocalVideoTrackOptions>? {
val cameraEventsDispatchHandler = CameraEventsDispatchHandler()
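// Shared events dispatcher: passed to the capturer so LocalVideoTrack can register
// CameraEventsHandlers later (e.g. to wait for the first frame after a camera switch).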
val targetDeviceName = enumerator.findCamera(options.deviceId, options.position) ?: return null
val targetVideoCapturer = enumerator.createCapturer(targetDeviceName, null)
val targetVideoCapturer = enumerator.createCapturer(targetDeviceName, cameraEventsDispatchHandler)
// Backfill any missing information
val newOptions = options.copy(
... ... @@ -310,7 +351,11 @@ constructor(
// Cache supported capture formats ahead of time to avoid future camera locks.
Camera1Helper.getSupportedFormats(Camera1Helper.getCameraId(newOptions.deviceId))
return Pair(
Camera1CapturerWithSize(targetVideoCapturer, targetDeviceName),
Camera1CapturerWithSize(
targetVideoCapturer,
targetDeviceName,
cameraEventsDispatchHandler
),
newOptions
)
}
... ... @@ -320,7 +365,8 @@ constructor(
Camera2CapturerWithSize(
targetVideoCapturer,
context.getSystemService(Context.CAMERA_SERVICE) as CameraManager,
targetDeviceName
targetDeviceName,
cameraEventsDispatchHandler
),
newOptions
)
... ... @@ -370,10 +416,7 @@ constructor(
private fun CameraEnumerator.findCamera(predicate: (deviceName: String) -> Boolean): String? {
for (deviceName in deviceNames) {
if (predicate(deviceName)) {
val videoCapturer = createCapturer(deviceName, null)
if (videoCapturer != null) {
return deviceName
}
return deviceName
}
}
return null
... ...
package io.livekit.android.room.track.video
import org.webrtc.CameraVideoCapturer.CameraEventsHandler
/**
* Dispatches CameraEventsHandler callbacks to registered handlers.
*
* @suppress
*/
internal class CameraEventsDispatchHandler : CameraEventsHandler {
private val handlers = mutableSetOf<CameraEventsHandler>()
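// Dispatch methods iterate over a snapshot of this set so that handlers can
// unregister themselves from within a callback without a ConcurrentModificationException.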
@Synchronized
fun registerHandler(handler: CameraEventsHandler) {
handlers.add(handler)
}
@Synchronized
fun unregisterHandler(handler: CameraEventsHandler) {
handlers.remove(handler)
}
override fun onCameraError(errorDescription: String) {
val handlersCopy = handlers.toMutableSet()
for (handler in handlersCopy) {
handler.onCameraError(errorDescription)
}
}
override fun onCameraDisconnected() {
val handlersCopy = handlers.toMutableSet()
for (handler in handlersCopy) {
handler.onCameraDisconnected()
}
}
override fun onCameraFreezed(errorDescription: String) {
val handlersCopy = handlers.toMutableSet()
for (handler in handlersCopy) {
handler.onCameraFreezed(errorDescription)
}
}
override fun onCameraOpening(cameraName: String) {
val handlersCopy = handlers.toMutableSet()
for (handler in handlersCopy) {
handler.onCameraOpening(cameraName)
}
}
override fun onFirstFrameAvailable() {
val handlersCopy = handlers.toMutableSet()
for (handler in handlersCopy) {
handler.onFirstFrameAvailable()
}
}
override fun onCameraClosed() {
val handlersCopy = handlers.toMutableSet()
for (handler in handlersCopy) {
handler.onCameraClosed()
}
}
}
\ No newline at end of file
... ...
... ... @@ -13,10 +13,19 @@ internal interface VideoCapturerWithSize : VideoCapturer {
/**
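 * Base class for camera capturers that carries a shared [CameraEventsDispatchHandler],
 * letting callers register camera event callbacks on any capturer implementation.
 *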
* @suppress
*/
internal abstract class CameraCapturerWithSize(
val cameraEventsDispatchHandler: CameraEventsDispatchHandler
) : VideoCapturerWithSize
/**
* @suppress
*/
internal class Camera1CapturerWithSize(
private val capturer: Camera1Capturer,
private val deviceName: String?
) : CameraVideoCapturer by capturer, VideoCapturerWithSize {
private val deviceName: String?,
cameraEventsDispatchHandler: CameraEventsDispatchHandler,
) : CameraCapturerWithSize(cameraEventsDispatchHandler), CameraVideoCapturer by capturer {
override fun findCaptureFormat(width: Int, height: Int): Size {
val cameraId = Camera1Helper.getCameraId(deviceName)
return Camera1Helper.findClosestCaptureFormat(cameraId, width, height)
... ... @@ -29,8 +38,9 @@ internal class Camera1CapturerWithSize(
internal class Camera2CapturerWithSize(
private val capturer: Camera2Capturer,
private val cameraManager: CameraManager,
private val deviceName: String?
) : CameraVideoCapturer by capturer, VideoCapturerWithSize {
private val deviceName: String?,
cameraEventsDispatchHandler: CameraEventsDispatchHandler,
) : CameraCapturerWithSize(cameraEventsDispatchHandler), CameraVideoCapturer by capturer {
override fun findCaptureFormat(width: Int, height: Int): Size {
return Camera2Helper.findClosestCaptureFormat(cameraManager, deviceName, width, height)
}
... ...
... ... @@ -69,9 +69,6 @@ class CallViewModel(
private val mutableCameraEnabled = MutableLiveData(true)
val cameraEnabled = mutableCameraEnabled.hide()
private val mutableFlipVideoButtonEnabled = MutableLiveData(true)
val flipButtonVideoEnabled = mutableFlipVideoButtonEnabled.hide()
private val mutableScreencastEnabled = MutableLiveData(false)
val screenshareEnabled = mutableScreencastEnabled.hide()
... ... @@ -102,8 +99,8 @@ class CallViewModel(
}
}
// Handle room events.
launch {
// Handle room events.
room.events.collect {
when (it) {
is RoomEvent.FailedToConnect -> mutableError.value = it.error
... ... @@ -118,6 +115,7 @@ class CallViewModel(
}
}
}
connectToRoom()
}
... ...
... ... @@ -69,7 +69,6 @@ class CallActivity : AppCompatActivity() {
val activeSpeakers by viewModel.activeSpeakers.collectAsState(initial = emptyList())
val micEnabled by viewModel.micEnabled.observeAsState(true)
val videoEnabled by viewModel.cameraEnabled.observeAsState(true)
val flipButtonEnabled by viewModel.flipButtonVideoEnabled.observeAsState(true)
val screencastEnabled by viewModel.screenshareEnabled.observeAsState(false)
val permissionAllowed by viewModel.permissionAllowed.collectAsState()
Content(
... ... @@ -79,7 +78,6 @@ class CallActivity : AppCompatActivity() {
activeSpeakers,
micEnabled,
videoEnabled,
flipButtonEnabled,
screencastEnabled,
audioSwitchHandler = viewModel.audioHandler,
permissionAllowed = permissionAllowed,
... ... @@ -128,7 +126,6 @@ class CallActivity : AppCompatActivity() {
activeSpeakers: List<Participant> = listOf(previewParticipant),
micEnabled: Boolean = true,
videoEnabled: Boolean = true,
flipButtonEnabled: Boolean = true,
screencastEnabled: Boolean = false,
permissionAllowed: Boolean = true,
audioSwitchHandler: AudioSwitchHandler? = null,
... ...
... ... @@ -10,6 +10,8 @@ import androidx.compose.ui.res.painterResource
import io.livekit.android.compose.VideoRenderer
import io.livekit.android.room.Room
import io.livekit.android.room.participant.Participant
import io.livekit.android.room.track.CameraPosition
import io.livekit.android.room.track.LocalVideoTrack
import io.livekit.android.room.track.Track
import io.livekit.android.room.track.VideoTrack
import io.livekit.android.util.flow
... ... @@ -22,7 +24,6 @@ fun VideoItemTrackSelector(
room: Room,
participant: Participant,
modifier: Modifier = Modifier,
mirror: Boolean = false,
) {
val videoTrackMap by participant::videoTracks.flow.collectAsState(initial = emptyList())
val videoPubs = videoTrackMap.filter { (pub) -> pub.subscribed }
... ... @@ -35,12 +36,22 @@ fun VideoItemTrackSelector(
?: videoPubs.firstOrNull()
val videoTrack = videoPub?.track as? VideoTrack
val videoMuted by
if (videoPub != null) {
videoPub::muted.flow.collectAsState()
} else {
remember(videoPub) {
derivedStateOf { false }
var videoMuted by remember { mutableStateOf(false) }
var cameraFacingFront by remember { mutableStateOf(false) }
// monitor muted state
LaunchedEffect(videoPub) {
if (videoPub != null) {
videoPub::muted.flow.collect { muted -> videoMuted = muted }
}
}
// monitor camera facing for local participant
LaunchedEffect(participant, videoTrack) {
if (room.localParticipant == participant && videoTrack as? LocalVideoTrack != null) {
videoTrack::options.flow.collect { options ->
cameraFacingFront = options.position == CameraPosition.FRONT
}
}
}
... ... @@ -48,7 +59,7 @@ fun VideoItemTrackSelector(
VideoRenderer(
room = room,
videoTrack = videoTrack,
mirror = mirror,
mirror = room.localParticipant == participant && cameraFacingFront,
modifier = modifier
)
} else {
... ...
@file:OptIn(ExperimentalCoroutinesApi::class)
package io.livekit.android.sample
import android.graphics.Color
import android.graphics.drawable.GradientDrawable
import android.view.View
import com.github.ajalt.timberkt.Timber
import com.xwray.groupie.viewbinding.BindableItem
... ... @@ -10,6 +10,8 @@ import com.xwray.groupie.viewbinding.GroupieViewHolder
import io.livekit.android.room.Room
import io.livekit.android.room.participant.ConnectionQuality
import io.livekit.android.room.participant.Participant
import io.livekit.android.room.track.CameraPosition
import io.livekit.android.room.track.LocalVideoTrack
import io.livekit.android.room.track.Track
import io.livekit.android.room.track.VideoTrack
import io.livekit.android.sample.databinding.ParticipantItemBinding
... ... @@ -17,7 +19,6 @@ import io.livekit.android.util.flow
import kotlinx.coroutines.*
import kotlinx.coroutines.flow.*
@OptIn(ExperimentalCoroutinesApi::class)
class ParticipantItem(
private val room: Room,
private val participant: Participant,
... ... @@ -92,29 +93,34 @@ class ParticipantItem(
}
coroutineScope?.launch {
videoTrackPubFlow
.flatMapLatest { pub ->
if (pub != null) {
pub::track.flow
} else {
flowOf(null)
}
}
.collectLatest { videoTrack ->
val videoTrackFlow = videoTrackPubFlow
.flatMapLatestOrNull { pub -> pub::track.flow }
// Configure video view with track
launch {
videoTrackFlow.collectLatest { videoTrack ->
setupVideoIfNeeded(videoTrack as? VideoTrack, viewBinding)
}
}
// For local participants, mirror camera if using front camera.
if (participant == room.localParticipant) {
launch {
videoTrackFlow
.flatMapLatestOrNull { track -> (track as LocalVideoTrack)::options.flow }
.collectLatest { options ->
viewBinding.renderer.setMirror(options?.position == CameraPosition.FRONT)
}
}
}
}
// Handle muted changes
coroutineScope?.launch {
videoTrackPubFlow
.flatMapLatest { pub ->
if (pub != null) {
pub::muted.flow
} else {
flowOf(true)
}
}
.flatMapLatestOrNull { pub -> pub::muted.flow }
.collectLatest { muted ->
viewBinding.renderer.visibleOrInvisible(!muted)
viewBinding.renderer.visibleOrInvisible(!(muted ?: true))
}
}
val existingTrack = getVideoTrack()
... ... @@ -175,3 +181,15 @@ private fun showFocus(binding: ParticipantItemBinding) {
private fun hideFocus(binding: ParticipantItemBinding) {
binding.speakingIndicator.visibility = View.INVISIBLE
}
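/**
 * Like flatMapLatest, but emits null whenever the upstream value is null,
 * and only applies [transform] to non-null values.
 */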
private inline fun <T, R> Flow<T?>.flatMapLatestOrNull(
crossinline transform: suspend (value: T) -> Flow<R>
): Flow<R?> {
return flatMapLatest {
if (it == null) {
flowOf(null)
} else {
transform(it)
}
}
}
\ No newline at end of file
... ...