Bind Webcam and WebRTC video stream to CallInterface
Add working mute and webcam on/off buttons.
Add rudimentary start WebRTC call functionality. This is only a first step; it will be improved in the future.
GitLab: #74
Change-Id: Ic3afde596a60fa2e9ea6199e3d632366078ec9fe
diff --git a/client/.env.development b/client/.env.development
index 6d453cd..503c414 100644
--- a/client/.env.development
+++ b/client/.env.development
@@ -1,2 +1,5 @@
ESLINT_NO_DEV_ERRORS=true
-VITE_API_URL=http://localhost:5000
\ No newline at end of file
+VITE_API_URL=http://localhost:5000
+
+# This is the url to the temporary socket server in `jami-web/routes/fakeServerForWebRTC.js`
+VITE_SOCKET_URL=
diff --git a/client/src/components/CallButtons.tsx b/client/src/components/CallButtons.tsx
index 6cc3cb0..815fade 100644
--- a/client/src/components/CallButtons.tsx
+++ b/client/src/components/CallButtons.tsx
@@ -19,7 +19,7 @@
import { styled } from '@mui/material/styles';
import React, { useContext } from 'react';
-import { CallContext } from '../contexts/CallProvider';
+import { WebRTCContext } from '../contexts/WebRTCProvider';
import { ToggleIconButton, ToggleIconButtonProps } from './Button';
import {
CallEndIcon,
@@ -100,7 +100,7 @@
};
export const CallingMicButton = (props: Partial<ToggleIconButtonProps>) => {
- const { micOn, setMicOn } = useContext(CallContext);
+ const { isAudioOn, setAudioStatus } = useContext(WebRTCContext);
return (
<ToggleIconButton
@@ -108,15 +108,15 @@
sx={{ color: 'white' }}
IconOn={MicroIcon}
IconOff={MicroOffIcon}
- selected={micOn}
- toggle={() => setMicOn((s) => !s)}
+ selected={isAudioOn}
+ toggle={() => setAudioStatus(!isAudioOn)}
{...props}
/>
);
};
export const CallingVideoCameraButton = (props: Partial<ToggleIconButtonProps>) => {
- const { camOn, setCamOn } = useContext(CallContext);
+ const { isVideoOn, setVideoStatus } = useContext(WebRTCContext);
return (
<ToggleIconButton
@@ -124,8 +124,8 @@
sx={{ color: 'white' }}
IconOn={VideoCameraIcon}
IconOff={VideoCameraOffIcon}
- selected={camOn}
- toggle={() => setCamOn((s) => !s)}
+ selected={isVideoOn}
+ toggle={() => setVideoStatus(!isVideoOn)}
{...props}
/>
);
diff --git a/client/src/contexts/CallProvider.tsx b/client/src/contexts/CallProvider.tsx
deleted file mode 100644
index 0716c8c..0000000
--- a/client/src/contexts/CallProvider.tsx
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2022 Savoir-faire Linux Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation; either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public
- * License along with this program. If not, see
- * <https://www.gnu.org/licenses/>.
- */
-import { createContext, useState } from 'react';
-
-import { SetState, WithChildren } from '../utils/utils';
-
-interface ICallContext {
- micOn: boolean;
- setMicOn: SetState<boolean>;
- camOn: boolean;
- setCamOn: SetState<boolean>;
-}
-
-const defaultCallContext: ICallContext = {
- micOn: false,
- setMicOn: () => {},
- camOn: false,
- setCamOn: () => {},
-};
-
-export const CallContext = createContext<ICallContext>(defaultCallContext);
-
-type CallProviderProps = WithChildren & {
- micOn?: boolean;
- camOn?: boolean;
-};
-
-export default ({
- children,
- micOn: _micOn = defaultCallContext.micOn,
- camOn: _camOn = defaultCallContext.camOn,
-}: CallProviderProps) => {
- const [micOn, setMicOn] = useState(_micOn);
- const [camOn, setCamOn] = useState(_camOn);
-
- return (
- <CallContext.Provider
- value={{
- micOn,
- setMicOn,
- camOn,
- setCamOn,
- }}
- >
- {children}
- </CallContext.Provider>
- );
-};
diff --git a/client/src/contexts/WebRTCProvider.tsx b/client/src/contexts/WebRTCProvider.tsx
index a18d24d..e8ee1d2 100644
--- a/client/src/contexts/WebRTCProvider.tsx
+++ b/client/src/contexts/WebRTCProvider.tsx
@@ -16,7 +16,7 @@
* <https://www.gnu.org/licenses/>.
*/
-import React, { createContext, useCallback, useRef } from 'react';
+import React, { createContext, useCallback, useEffect, useRef, useState } from 'react';
import { connect, Socket } from 'socket.io-client';
import { WithChildren } from '../utils/utils';
@@ -25,140 +25,203 @@
* TODO: This socket is temporary, it will be replaced by the real socket
* for communication with webrtc
* */
-const socket = connect('http://192.168.0.12:8080', { transports: ['websocket'] });
+const socket = connect(import.meta.env.VITE_SOCKET_URL, { transports: ['websocket'] });
interface IWebRTCContext {
localVideoRef: React.RefObject<HTMLVideoElement> | null;
remoteVideoRef: React.RefObject<HTMLVideoElement> | null;
- createWebRTCConnection: () => void;
- sendWebRTCOffer: () => void;
- sendWebRTCAnswer: (remoteSdp: RTCSessionDescriptionInit) => void;
- handleWebRTCAnswer: (remoteSdp: RTCSessionDescriptionInit) => void;
- addIceCandidate: (candidate: RTCIceCandidateInit) => void;
socket: Socket;
+
+ isAudioOn: boolean;
+ setAudioStatus: (isOn: boolean) => void;
+ isVideoOn: boolean;
+ setVideoStatus: (isOn: boolean) => void;
+ sendWebRTCOffer: () => void;
}
-const DefaultWebRTCContext: IWebRTCContext = {
+const defaultWebRTCContext: IWebRTCContext = {
localVideoRef: null,
remoteVideoRef: null,
- createWebRTCConnection: () => {},
+ socket,
+
+ isAudioOn: false,
+ setAudioStatus: () => {},
+ isVideoOn: false,
+ setVideoStatus: () => {},
+
sendWebRTCOffer: () => {},
- sendWebRTCAnswer: () => {},
- handleWebRTCAnswer: () => {},
- addIceCandidate: () => {},
- socket: socket,
};
-export const WebRTCContext = createContext<IWebRTCContext>(DefaultWebRTCContext);
+export const WebRTCContext = createContext<IWebRTCContext>(defaultWebRTCContext);
-export default ({ children }: WithChildren) => {
+type WebRTCProviderProps = WithChildren & {
+ isAudioOn?: boolean;
+ isVideoOn?: boolean;
+};
+
+// TODO: This is a WIP. The calling logic will be improved in other CRs
+export default ({
+ children,
+ isAudioOn: _isAudioOn = defaultWebRTCContext.isAudioOn,
+ isVideoOn: _isVideoOn = defaultWebRTCContext.isVideoOn,
+}: WebRTCProviderProps) => {
+ const [isAudioOn, setIsAudioOn] = useState(_isAudioOn);
+ const [isVideoOn, setIsVideoOn] = useState(_isVideoOn);
const localVideoRef = useRef<HTMLVideoElement>(null);
const remoteVideoRef = useRef<HTMLVideoElement>(null);
- const webRTCConnectionRef = useRef<RTCPeerConnection>();
+ const [webRTCConnection, setWebRTCConnection] = useState<RTCPeerConnection | undefined>();
+ const localStreamRef = useRef<MediaStream>();
- const createWebRTCConnection = useCallback(async () => {
- //TODO use SFL iceServers
- const iceConfig = { iceServers: [{ urls: 'stun:stun.l.google.com:19302' }] };
- webRTCConnectionRef.current = new RTCPeerConnection(iceConfig);
- const localStream = await navigator.mediaDevices.getUserMedia({
- video: true,
- audio: true,
- });
+ useEffect(() => {
+ if (!webRTCConnection) {
+ // TODO use SFL iceServers
+ const iceConfig = { iceServers: [{ urls: 'stun:stun.l.google.com:19302' }] };
+ setWebRTCConnection(new RTCPeerConnection(iceConfig));
+ }
+ }, [webRTCConnection]);
- if (localVideoRef.current) {
- localVideoRef.current.srcObject = localStream;
+ useEffect(() => {
+ if (!webRTCConnection) {
+ return;
}
- localStream.getTracks().forEach((track) => {
- if (webRTCConnectionRef.current) {
- webRTCConnectionRef.current.addTrack(track, localStream);
+ if (isVideoOn || isAudioOn) {
+ try {
+ // TODO: When toggling mute on/off, the camera flickers
+ // https://git.jami.net/savoirfairelinux/jami-web/-/issues/90
+ navigator.mediaDevices
+ .getUserMedia({
+ audio: true,
+ video: true,
+ })
+ .then((stream) => {
+ if (localVideoRef.current) {
+ localVideoRef.current.srcObject = stream;
+ }
+
+ stream.getTracks().forEach((track) => {
+ if (track.kind === 'audio') {
+ track.enabled = isAudioOn;
+ } else if (track.kind === 'video') {
+ track.enabled = isVideoOn;
+ }
+ webRTCConnection.addTrack(track, stream);
+ });
+ localStreamRef.current = stream;
+ });
+ } catch (e) {
+ console.error('Could not get media devices: ', e);
}
- });
- webRTCConnectionRef.current.addEventListener('icecandidate', (event) => {
- if (event.candidate && socket) {
+ }
+
+ const icecandidateEventListener = (event: RTCPeerConnectionIceEvent) => {
+ if (event.candidate) {
console.log('webRTCConnection : onicecandidate');
socket.emit('candidate', event.candidate);
}
- });
- webRTCConnectionRef.current.addEventListener('track', async (event) => {
+ };
+
+ const trackEventListener = (event: RTCTrackEvent) => {
+ console.log('remote TrackEvent');
if (remoteVideoRef.current) {
remoteVideoRef.current.srcObject = event.streams[0];
console.log('webRTCConnection : add remotetrack success');
}
+ };
+
+ webRTCConnection.addEventListener('icecandidate', icecandidateEventListener);
+ webRTCConnection.addEventListener('track', trackEventListener);
+
+ return () => {
+ webRTCConnection.removeEventListener('icecandidate', icecandidateEventListener);
+ webRTCConnection.removeEventListener('track', trackEventListener);
+ };
+ }, [webRTCConnection, isVideoOn, isAudioOn]);
+
+ useEffect(() => {
+ if (!webRTCConnection) {
+ return;
+ }
+
+ const sendWebRTCAnswer = async (remoteSdp: RTCSessionDescriptionInit) => {
+ await webRTCConnection.setRemoteDescription(new RTCSessionDescription(remoteSdp));
+ const mySdp = await webRTCConnection.createAnswer({
+ offerToReceiveAudio: true,
+ offerToReceiveVideo: true,
+ });
+ await webRTCConnection.setLocalDescription(new RTCSessionDescription(mySdp));
+ socket.emit('answer', mySdp);
+ };
+
+ const handleWebRTCAnswer = async (remoteSdp: RTCSessionDescriptionInit) => {
+ await webRTCConnection.setRemoteDescription(new RTCSessionDescription(remoteSdp));
+ };
+
+ const addIceCandidate = async (candidate: RTCIceCandidateInit) => {
+ await webRTCConnection.addIceCandidate(new RTCIceCandidate(candidate));
+ };
+
+ socket.on('getOffer', (remoteSdp: RTCSessionDescription) => {
+ sendWebRTCAnswer(remoteSdp);
+ console.log('get offer and aswering');
});
- }, [webRTCConnectionRef, localVideoRef, remoteVideoRef]);
+
+ socket.on('getAnswer', (remoteSdp: RTCSessionDescription) => {
+ handleWebRTCAnswer(remoteSdp);
+ console.log('get answer');
+ });
+
+ socket.on('getCandidate', (candidate: RTCIceCandidateInit) => {
+ addIceCandidate(candidate);
+ console.log('webRTCConnection : candidate add success');
+ });
+
+ return () => {
+ socket.off('getOffer');
+ socket.off('getAnswer');
+ socket.off('getCandidate');
+ };
+ }, [webRTCConnection]);
+
+ const setAudioStatus = useCallback((isOn: boolean) => {
+ setIsAudioOn(isOn);
+ localStreamRef.current?.getAudioTracks().forEach((track) => {
+ track.enabled = isOn;
+ });
+ }, []);
+
+ const setVideoStatus = useCallback((isOn: boolean) => {
+ setIsVideoOn(isOn);
+ localStreamRef.current?.getVideoTracks().forEach((track) => {
+ track.enabled = isOn;
+ });
+ }, []);
const sendWebRTCOffer = useCallback(async () => {
- try {
- if (webRTCConnectionRef.current && socket) {
- const sdp = await webRTCConnectionRef.current.createOffer({
+ if (webRTCConnection) {
+ webRTCConnection
+ .createOffer({
offerToReceiveAudio: true,
offerToReceiveVideo: true,
+ })
+ .then((sdp) => {
+ socket.emit('offer', sdp);
+ webRTCConnection.setLocalDescription(new RTCSessionDescription(sdp));
});
- await webRTCConnectionRef.current.setLocalDescription(new RTCSessionDescription(sdp));
- socket.emit('offer', sdp);
- }
- } catch (e) {
- console.error(e);
}
- }, [webRTCConnectionRef]);
-
- const sendWebRTCAnswer = useCallback(
- async (remoteSdp: RTCSessionDescriptionInit) => {
- try {
- if (webRTCConnectionRef.current && socket && remoteSdp) {
- await webRTCConnectionRef.current.setRemoteDescription(new RTCSessionDescription(remoteSdp));
- const mySdp = await webRTCConnectionRef.current.createAnswer({
- offerToReceiveAudio: true,
- offerToReceiveVideo: true,
- });
- await webRTCConnectionRef.current.setLocalDescription(new RTCSessionDescription(mySdp));
- socket.emit('answer', mySdp);
- }
- } catch (e) {
- console.error(e);
- }
- },
- [webRTCConnectionRef]
- );
-
- const handleWebRTCAnswer = useCallback(
- async (remoteSdp: RTCSessionDescriptionInit) => {
- try {
- if (webRTCConnectionRef.current && remoteSdp) {
- await webRTCConnectionRef.current.setRemoteDescription(new RTCSessionDescription(remoteSdp));
- }
- } catch (e) {
- console.error(e);
- }
- },
- [webRTCConnectionRef]
- );
-
- const addIceCandidate = useCallback(
- async (candidate: RTCIceCandidateInit) => {
- try {
- if (webRTCConnectionRef.current) {
- await webRTCConnectionRef.current.addIceCandidate(new RTCIceCandidate(candidate));
- }
- } catch (e) {
- console.error(e);
- }
- },
- [webRTCConnectionRef]
- );
+ }, [webRTCConnection]);
return (
<WebRTCContext.Provider
value={{
localVideoRef,
remoteVideoRef,
- createWebRTCConnection,
- sendWebRTCOffer,
- sendWebRTCAnswer,
- handleWebRTCAnswer,
- addIceCandidate,
socket,
+ isAudioOn,
+ setAudioStatus,
+ isVideoOn,
+ setVideoStatus,
+ sendWebRTCOffer,
}}
>
{children}
diff --git a/client/src/pages/CallInterface.tsx b/client/src/pages/CallInterface.tsx
index adc12c7..e729474 100644
--- a/client/src/pages/CallInterface.tsx
+++ b/client/src/pages/CallInterface.tsx
@@ -15,8 +15,8 @@
* License along with this program. If not, see
* <https://www.gnu.org/licenses/>.
*/
-import { Box, Card, Grid, Stack, Typography } from '@mui/material';
-import React from 'react';
+import { Box, Button, Card, Grid, Stack, Typography } from '@mui/material';
+import { useContext } from 'react';
import {
CallingChatButton,
@@ -30,7 +30,7 @@
CallingVideoCameraButton,
CallingVolumeButton,
} from '../components/CallButtons';
-import CallProvider from '../contexts/CallProvider';
+import WebRTCProvider, { WebRTCContext } from '../contexts/WebRTCProvider';
import { useUrlParams } from '../utils/hooks';
import { CallRouteParams } from './JamiMessenger';
@@ -40,18 +40,22 @@
} = useUrlParams<CallRouteParams>();
return (
- <CallProvider camOn={video === 'true'}>
+ <WebRTCProvider isVideoOn={video === 'true'}>
<CallInterface />
- </CallProvider>
+ </WebRTCProvider>
);
};
const CallInterface = () => {
+ const { localVideoRef, remoteVideoRef, isVideoOn } = useContext(WebRTCContext);
+
return (
<>
- <Box sx={{ backgroundColor: 'blue', width: '100%', height: '100%', position: 'absolute' }}>
- {/* Host video will be shown here */}
- </Box>
+ <video
+ ref={remoteVideoRef}
+ autoPlay
+ style={{ backgroundColor: 'black', width: '100%', height: '100%', position: 'absolute' }}
+ />
<Stack
position="absolute"
direction="column"
@@ -65,20 +69,22 @@
</Box>
{/* Guest video, with empty space to be moved around and stickied to walls */}
<Box height="100%">
- <Box
- sx={{
- aspectRatio: '16/9',
- position: 'absolute',
- right: 0,
- zIndex: 2,
- backgroundColor: 'white',
- borderRadius: '12px',
- minWidth: '25%',
- minHeight: '25%',
- maxWidth: '50%',
- maxHeight: '50%',
- }}
- />
+ {isVideoOn && (
+ <video
+ ref={localVideoRef}
+ autoPlay
+ style={{
+ position: 'absolute',
+ right: 0,
+ zIndex: 2,
+ borderRadius: '12px',
+ minWidth: '25%',
+ minHeight: '25%',
+ maxWidth: '50%',
+ maxHeight: '50%',
+ }}
+ />
+ )}
</Box>
{/* Bottom panel with calling buttons */}
<Grid container justifyContent="space-between">
@@ -109,8 +115,19 @@
};
const CallInterfacePrimaryButtons = () => {
+ const { sendWebRTCOffer } = useContext(WebRTCContext);
+
return (
<Card sx={{ backgroundColor: 'black', textAlign: 'center' }}>
+ <Button
+ variant="contained"
+ onClick={() => {
+ sendWebRTCOffer();
+ }}
+ >
+ {/* TODO: Remove this button and make calling automatic (https://git.jami.net/savoirfairelinux/jami-web/-/issues/91)*/}
+ Call
+ </Button>
<CallingMicButton />
<CallingEndButton />
<CallingVideoCameraButton />