Not able to use Azure AI Speech avatar in ReactJS

Jivi Health · 2024-05-06

Hello,

I am trying to implement a live chat avatar using ReactJS in my application. When I run the sample code, I get the following console logs:

is TURN server active?  yes
Avatar started.
Speech and avatar synthesized to video stream.
Unable to speak. Result ID: 4CF4A9751632486181334256923E67D8
0
Protocols.Core.BadClientRequestException: The request is throttled because you have exceeded the con
websocket error code: 1011

From what I can tell, the throttle message together with websocket close code 1011 suggests the concurrent session limit on the Speech resource is being exceeded, though I only ever start a single session here.

The code I am using for this is as follows:

import { useEffect } from "react";
import * as SpeechSDK from "microsoft-cognitiveservices-speech-sdk";

// Resolves true if the given TURN config yields a relay candidate within
// `timeout` ms (default 5000), false otherwise.
function checkTURNServer(turnConfig, timeout) {
    return new Promise(function (resolve) {
        var promiseResolved = false;

        // Give up if no relay candidate has been gathered in time.
        setTimeout(function () {
            if (promiseResolved) return;
            promiseResolved = true;
            resolve(false);
        }, timeout || 5000);

        var PeerConnection = window.RTCPeerConnection || window.mozRTCPeerConnection || window.webkitRTCPeerConnection;
        var pc = new PeerConnection({ iceServers: [turnConfig] });
        var noop = function () { };

        // Creating a data channel is enough to trigger ICE candidate gathering.
        pc.createDataChannel("");
        pc.createOffer(function (sdp) {
            // A 'typ relay' line in the SDP means the TURN server allocated a relay.
            if (sdp.sdp.indexOf('typ relay') > -1) {
                promiseResolved = true;
                resolve(true);
            }
            pc.setLocalDescription(sdp, noop, noop);
        }, noop);

        // Relay candidates can also surface asynchronously.
        pc.onicecandidate = function (ice) {
            if (promiseResolved || !ice || !ice.candidate || !ice.candidate.candidate || !(ice.candidate.candidate.indexOf('typ relay') > -1)) return;
            promiseResolved = true;
            resolve(true);
        };
    });
}
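
For context, I believe the official avatar browser sample fetches the TURN credentials from a relay token endpoint rather than hard-coding them. The sketch below is based on that sample; the endpoint path and the response field names (Urls, Username, Password) are assumptions taken from it and may differ by version:

// Sketch: fetch TURN relay credentials from the avatar relay token endpoint.
// Endpoint path and response shape follow the official Azure avatar sample;
// adjust if your SDK or service version differs.
async function fetchRelayCredentials(region, subscriptionKey) {
    const response = await fetch(
        `https://${region}.tts.speech.microsoft.com/cognitiveservices/avatar/relay/token/v1`,
        { headers: { "Ocp-Apim-Subscription-Key": subscriptionKey } }
    );
    const data = await response.json();
    return {
        urls: [data.Urls[0]],
        username: data.Username,
        credential: data.Password
    };
}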

useEffect(() => {
    checkTURNServer({
        urls: 'turn:<turn_server_url>',
        username: '<turn_server_username>',
        credential: '<turn_server_password>'
    }).then(function (bool) {
        console.log('is TURN server active? ', bool ? 'yes' : 'no');
        if (bool) {
            const speechConfig = SpeechSDK.SpeechConfig.fromSubscription("<subscription_key>", "westus2");
            speechConfig.speechSynthesisLanguage = "en-US";
            const avatarConfig = new SpeechSDK.AvatarConfig("lisa", "casual-sitting");

            const peerConnection = new RTCPeerConnection({
                iceServers: [{
                    urls: 'turn:<turn_server_url>',
                    username: '<turn_server_username>',
                    credential: '<turn_server_password>'
                }]
            });

            // Attach incoming WebRTC tracks to media elements. The elements must
            // be added to the DOM or nothing renders; 'avatarContainer' is a
            // placeholder id for a div in my JSX.
            peerConnection.ontrack = function (event) {
                const mediaElement = document.createElement(event.track.kind);
                mediaElement.id = event.track.kind === 'video' ? 'videoPlayer' : 'audioPlayer';
                mediaElement.srcObject = event.streams[0];
                mediaElement.autoplay = true;
                document.getElementById('avatarContainer').appendChild(mediaElement);
            };

            // Negotiate one video track and one audio track
            peerConnection.addTransceiver('video', { direction: 'sendrecv' });
            peerConnection.addTransceiver('audio', { direction: 'sendrecv' });

            // Create avatar synthesizer
            const avatarSynthesizer = new SpeechSDK.AvatarSynthesizer(speechConfig, avatarConfig);

            // Start avatar and establish WebRTC connection
            avatarSynthesizer.startAvatarAsync(peerConnection).then(
                () => {
                    console.log("Avatar started.");
                    const spokenText = "I'm excited to try text to speech avatar.";
                    avatarSynthesizer.speakTextAsync(spokenText).then(
                        (result) => {
                            if (result.reason === SpeechSDK.ResultReason.SynthesizingAudioCompleted) {
                                console.log("Speech and avatar synthesized to video stream.");
                            } else {
                                console.log("Unable to speak. Result ID: " + result.resultId);
                                if (result.reason === SpeechSDK.ResultReason.Canceled) {
                                    const cancellationDetails = SpeechSDK.CancellationDetails.fromResult(result);
                                    console.log(cancellationDetails.reason);
                                    if (cancellationDetails.reason === SpeechSDK.CancellationReason.Error) {
                                        console.log(cancellationDetails.errorDetails);
                                    }
                                }
                            }
                        }).catch((error) => {
                            console.log(error);
                            avatarSynthesizer.close();
                        });
                }
            ).catch(
                (error) => { console.log("Avatar failed to start. Error: " + error); }
            );
        }
    }).catch(console.error.bind(console));
}, [])
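
One thing I already suspect: under React 18 StrictMode the effect above runs twice in development, which would open two concurrent avatar sessions and might explain the throttle. Below is a minimal sketch of the cleanup I plan to add, assuming the synthesizer and peer connection are stored in refs (avatarSynthesizerRef and peerConnectionRef are my own names, not from the SDK):

// Sketch: close the avatar session on unmount so a StrictMode double-mount
// (or a page navigation) does not leave extra concurrent sessions open.
// avatarSynthesizerRef / peerConnectionRef are assumed to be useRef(null)
// values populated inside the effect body above.
useEffect(() => {
    // ...session setup as above, storing the objects in the refs...
    return () => {
        if (avatarSynthesizerRef.current) {
            avatarSynthesizerRef.current.close();
            avatarSynthesizerRef.current = null;
        }
        if (peerConnectionRef.current) {
            peerConnectionRef.current.close();
            peerConnectionRef.current = null;
        }
    };
}, []);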


Azure AI Speech
An Azure service that integrates speech processing into apps and services.
