
Quickstart: Add raw media access to your app

In this quickstart, you learn how to implement raw media access by using the Azure Communication Services Calling SDK for Unity. The Azure Communication Services Calling SDK offers APIs that let apps generate their own video frames to send, and render raw video frames from remote participants in a call. This quickstart builds on Quickstart: Add 1:1 video calling to your app for Unity.

Raw video access

Because the app generates the video frames, the app must inform the Azure Communication Services Calling SDK about the video formats that the app can generate. The SDK uses this information to pick the best video format configuration for the network conditions at that time.

Virtual video

Supported video resolutions

Aspect ratio    Resolution         Maximum FPS
16x9            1080p              30
16x9            720p               30
16x9            540p               30
16x9            480p               30
16x9            360p               30
16x9            270p               15
16x9            240p               15
16x9            180p               15
4x3             VGA (640x480)      30
4x3             424x320            15
4x3             QVGA (320x240)     15
4x3             212x160            15
  1. Follow the steps in Quickstart: Add 1:1 video calling to your app to create a Unity game. The goal is to obtain a CallAgent object that's ready to begin the call. Find the finalized code for this quickstart on GitHub.

  2. Create an array of VideoStreamFormat by using the VideoStreamPixelFormat values that the SDK supports. When multiple formats are available, the order of the formats in the list doesn't influence or prioritize which one is used. The criteria for format selection are based on external factors like network bandwidth.

    var videoStreamFormat = new VideoStreamFormat
    {
        Resolution = VideoStreamResolution.P360, // For VirtualOutgoingVideoStream the width/height should be set using VideoStreamResolution enum
        PixelFormat = VideoStreamPixelFormat.Rgba,
        FramesPerSecond = 15,
        Stride1 = 640 * 4 // It is times 4 because RGBA is a 32-bit format
    };
    VideoStreamFormat[] videoStreamFormats = { videoStreamFormat };
    
  3. Create RawOutgoingVideoStreamOptions and set Formats with the previously created object.

    var rawOutgoingVideoStreamOptions = new RawOutgoingVideoStreamOptions
    {
        Formats = videoStreamFormats
    };
    
  4. Create an instance of VirtualOutgoingVideoStream by using the RawOutgoingVideoStreamOptions instance that you created previously.

    var rawOutgoingVideoStream = new VirtualOutgoingVideoStream(rawOutgoingVideoStreamOptions);
    
  5. Subscribe to the RawOutgoingVideoStream.FormatChanged delegate. This event notifies whenever the VideoStreamFormat changes from one of the video formats provided in the list.

    rawOutgoingVideoStream.FormatChanged += (object sender, VideoStreamFormatChangedEventArgs args) =>
    {
        VideoStreamFormat videoStreamFormat = args.Format;
    };
    
  6. Subscribe to the RawOutgoingVideoStream.StateChanged delegate. This event notifies whenever the State changes.

    rawOutgoingVideoStream.StateChanged += (object sender, VideoStreamStateChangedEventArgs args) =>
    {
        CallVideoStream callVideoStream = args.Stream;
    
        switch (callVideoStream.Direction)
        {
            case StreamDirection.Outgoing:
                OnRawOutgoingVideoStreamStateChanged(callVideoStream as OutgoingVideoStream);
                break;
            case StreamDirection.Incoming:
                OnRawIncomingVideoStreamStateChanged(callVideoStream as IncomingVideoStream);
                break;
        }
    };
    
  7. Handle raw outgoing video stream state transitions such as Started and Stopped, and start generating custom video frames or suspend the frame-generating algorithm.

    private async void OnRawOutgoingVideoStreamStateChanged(OutgoingVideoStream outgoingVideoStream)
    {
        switch (outgoingVideoStream.State)
        {
            case VideoStreamState.Started:
                switch (outgoingVideoStream.Kind)
                {
                    case VideoStreamKind.VirtualOutgoing:
                        outgoingVideoPlayer.StartGenerateFrames(outgoingVideoStream); // This is where a background worker thread can be started to feed the outgoing video frames.
                        break;
                }
                break;
    
            case VideoStreamState.Stopped:
                switch (outgoingVideoStream.Kind)
                {
                    case VideoStreamKind.VirtualOutgoing:
                        break;
                }
                break;
        }
    }
    

    Here's an example of an outgoing video frame generator:

    private unsafe RawVideoFrame GenerateRawVideoFrame(RawOutgoingVideoStream rawOutgoingVideoStream)
    {
        var format = rawOutgoingVideoStream.Format;
        int w = format.Width;
        int h = format.Height;
        int rgbaCapacity = w * h * 4;
    
        var rgbaBuffer = new NativeBuffer(rgbaCapacity);
        rgbaBuffer.GetData(out IntPtr rgbaArrayBuffer, out rgbaCapacity);
    
        byte r = (byte)random.Next(1, 255);
        byte g = (byte)random.Next(1, 255);
        byte b = (byte)random.Next(1, 255);
    
        for (int y = 0; y < h; y++)
        {
            for (int x = 0; x < w*4; x += 4)
            {
                ((byte*)rgbaArrayBuffer)[(w * 4 * y) + x + 0] = (byte)(y % r);
                ((byte*)rgbaArrayBuffer)[(w * 4 * y) + x + 1] = (byte)(y % g);
                ((byte*)rgbaArrayBuffer)[(w * 4 * y) + x + 2] = (byte)(y % b);
                ((byte*)rgbaArrayBuffer)[(w * 4 * y) + x + 3] = 255;
            }
        }
    
        // Call ACS Unity SDK API to deliver the frame
        rawOutgoingVideoStream.SendRawVideoFrameAsync(new RawVideoFrameBuffer() {
            Buffers = new NativeBuffer[] { rgbaBuffer },
            StreamFormat = rawOutgoingVideoStream.Format,
            TimestampInTicks = rawOutgoingVideoStream.TimestampInTicks
        }).Wait();
    
        return new RawVideoFrameBuffer()
        {
            Buffers = new NativeBuffer[] { rgbaBuffer },
            StreamFormat = rawOutgoingVideoStream.Format
        };
    }
    

    Note

    This method uses the unsafe modifier because NativeBuffer requires access to native memory resources. Therefore, you also need to enable the Allow unsafe option in the Unity Editor.

  8. Similarly, we can handle incoming video frames in response to the video stream StateChanged event.

    private void OnRawIncomingVideoStreamStateChanged(IncomingVideoStream incomingVideoStream)
    {
        switch (incomingVideoStream.State)
        {
            case VideoStreamState.Available:
                {
                    var rawIncomingVideoStream = incomingVideoStream as RawIncomingVideoStream;
                    rawIncomingVideoStream.RawVideoFrameReceived += OnRawVideoFrameReceived;
                    rawIncomingVideoStream.Start();
                    break;
                }
            case VideoStreamState.Stopped:
                break;
            case VideoStreamState.NotAvailable:
                break;
        }
    }
    
    private void OnRawVideoFrameReceived(object sender, RawVideoFrameReceivedEventArgs e)
    {
        incomingVideoPlayer.RenderRawVideoFrame(e.Frame);
    }
    
    public void RenderRawVideoFrame(RawVideoFrame rawVideoFrame)
    {
        var videoFrameBuffer = rawVideoFrame as RawVideoFrameBuffer;
        pendingIncomingFrames.Enqueue(new PendingFrame() {
                frame = rawVideoFrame,
                kind = RawVideoFrameKind.Buffer });
    }
    
  9. It's strongly recommended to manage both incoming and outgoing video frames through a buffering mechanism to avoid overloading the MonoBehaviour.Update() callback method, which should be kept light and avoid CPU- or network-heavy work, to ensure a smoother video experience. This optional optimization is left to developers to decide what works best for their scenarios.

    Here's an example of how the incoming frames can be rendered to a Unity VideoTexture by calling Graphics.Blit out of an internal queue (a sketch of the queue type that these snippets assume follows this list):

    private void Update()
    {
        if (pendingIncomingFrames.TryDequeue(out PendingFrame pendingFrame))
        {
            switch (pendingFrame.kind)
            {
                case RawVideoFrameKind.Buffer:
                    var videoFrameBuffer = pendingFrame.frame as RawVideoFrameBuffer;
                    VideoStreamFormat videoFormat = videoFrameBuffer.StreamFormat;
                    int width = videoFormat.Width;
                    int height = videoFormat.Height;
                    var texture = new Texture2D(width, height, TextureFormat.RGBA32, mipChain: false);
    
                    var buffers = videoFrameBuffer.Buffers;
                    NativeBuffer buffer = buffers.Count > 0 ? buffers[0] : null;
                    buffer.GetData(out IntPtr bytes, out int signedSize);
    
                    texture.LoadRawTextureData(bytes, signedSize);
                    texture.Apply();
    
                    Graphics.Blit(source: texture, dest: rawIncomingVideoRenderTexture);
                    break;
    
                case RawVideoFrameKind.Texture:
                    break;
            }
            pendingFrame.frame.Dispose();
        }
    }
    
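The rendering snippets above rely on a pendingIncomingFrames queue and a PendingFrame type that this quickstart doesn't define. A minimal sketch of what they could look like, assuming a thread-safe queue is used to hand frames from the SDK callbacks to the Unity main thread (illustrative only, not part of the SDK):

    // Illustrative only: the queue and PendingFrame type assumed by the snippets above.
    // Adapt them to your own rendering pipeline.
    private struct PendingFrame
    {
        public RawVideoFrame frame;
        public RawVideoFrameKind kind;
    }

    // A thread-safe queue lets the SDK callback thread hand frames over to
    // MonoBehaviour.Update() on the Unity main thread without blocking either side.
    private readonly System.Collections.Concurrent.ConcurrentQueue<PendingFrame> pendingIncomingFrames =
        new System.Collections.Concurrent.ConcurrentQueue<PendingFrame>();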

In this quickstart, you learn how to implement raw media access by using the Azure Communication Services Calling SDK for Windows. The Azure Communication Services Calling SDK offers APIs that let apps generate their own video frames to send to remote participants in a call. This quickstart builds on Quickstart: Add 1:1 video calling to your app for Windows.

Raw audio access

Accessing raw audio media gives you access to the call's incoming audio stream, along with the ability to send custom outgoing audio streams during a call.

Send raw outgoing audio

Create an options object specifying the raw stream properties that we want to send.

    RawOutgoingAudioStreamProperties outgoingAudioProperties = new RawOutgoingAudioStreamProperties()
    {
        Format = ACSAudioStreamFormat.Pcm16Bit,
        SampleRate = AudioStreamSampleRate.Hz48000,
        ChannelMode = AudioStreamChannelMode.Stereo,
        BufferDuration = AudioStreamBufferDuration.InMs20
    };
    RawOutgoingAudioStreamOptions outgoingAudioStreamOptions = new RawOutgoingAudioStreamOptions()
    {
        Properties = outgoingAudioProperties
    };

Create a RawOutgoingAudioStream and attach it to the join call options. The stream automatically starts when the call is connected.

    JoinCallOptions options = new JoinCallOptions(); // or StartCallOptions()
    OutgoingAudioOptions outgoingAudioOptions = new OutgoingAudioOptions();
    RawOutgoingAudioStream rawOutgoingAudioStream = new RawOutgoingAudioStream(outgoingAudioStreamOptions);
    outgoingAudioOptions.Stream = rawOutgoingAudioStream;
    options.OutgoingAudioOptions = outgoingAudioOptions;
    // Start or Join call with those call options.

Attach the stream to a call

Or you can attach the stream to an existing Call instance instead:

    await call.StartAudio(rawOutgoingAudioStream);

Start sending raw samples

We can start sending data only once the stream state is AudioStreamState.Started. To observe changes to the audio stream state, add a listener for the StateChanged event.

    unsafe private void AudioStateChanged(object sender, AudioStreamStateChanged args)
    {
        if (args.AudioStreamState == AudioStreamState.Started)
        {
            // We can now start sending samples.
        }
    }
    outgoingAudioStream.StateChanged += AudioStateChanged;

When the stream starts, we can start sending MemoryBuffer audio samples to the call. The audio buffer format should match the specified stream properties.

    void Start()
    {
        RawOutgoingAudioStreamProperties properties = outgoingAudioStream.Properties;
        RawAudioBuffer buffer;
        new Thread(() =>
        {
            DateTime nextDeliverTime = DateTime.Now;
            while (true)
            {
                MemoryBuffer memoryBuffer = new MemoryBuffer((uint)outgoingAudioStream.ExpectedBufferSizeInBytes);
                using (IMemoryBufferReference reference = memoryBuffer.CreateReference())
                {
                    byte* dataInBytes;
                    uint capacityInBytes;
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);
                    // Use AudioGraph here to grab data from microphone if you want microphone data
                }
                nextDeliverTime = nextDeliverTime.AddMilliseconds(20);
                buffer = new RawAudioBuffer(memoryBuffer);
                outgoingAudioStream.SendOutgoingAudioBuffer(buffer);
                TimeSpan wait = nextDeliverTime - DateTime.Now;
                if (wait > TimeSpan.Zero)
                {
                    Thread.Sleep(wait);
                }
            }
        }).Start();
    }
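As a sanity check, ExpectedBufferSizeInBytes for the properties above (48 kHz, stereo, 16-bit PCM, 20 ms buffers) works out to 48000 × 2 × 2 × 0.02 = 3,840 bytes. The helper below only illustrates that arithmetic; the SDK already exposes the value through ExpectedBufferSizeInBytes:

    // Illustrative only: compute the expected PCM buffer size from the stream properties.
    // 48,000 samples/s * 2 channels * 2 bytes/sample * 0.020 s = 3,840 bytes per buffer.
    private static int ComputeExpectedBufferSizeInBytes(int sampleRateHz, int channelCount, int bytesPerSample, int bufferDurationInMs)
    {
        return sampleRateHz * channelCount * bytesPerSample * bufferDurationInMs / 1000;
    }

    // ComputeExpectedBufferSizeInBytes(48000, 2, 2, 20) returns 3840, which should match
    // outgoingAudioStream.ExpectedBufferSizeInBytes for the options created above.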

Receive raw incoming audio

We can also receive the call's audio stream samples as MemoryBuffer if we want to process the call's audio stream before playback. Create a RawIncomingAudioStreamOptions object specifying the raw stream properties that we want to receive.

    RawIncomingAudioStreamProperties properties = new RawIncomingAudioStreamProperties()
    {
        Format = AudioStreamFormat.Pcm16Bit,
        SampleRate = AudioStreamSampleRate.Hz44100,
        ChannelMode = AudioStreamChannelMode.Stereo
    };
    RawIncomingAudioStreamOptions options = new RawIncomingAudioStreamOptions()
    {
        Properties = properties
    };

Create a RawIncomingAudioStream and attach it to the join call options

    JoinCallOptions joinCallOptions = new JoinCallOptions(); // or StartCallOptions()
    RawIncomingAudioStream rawIncomingAudioStream = new RawIncomingAudioStream(options);
    IncomingAudioOptions incomingAudioOptions = new IncomingAudioOptions()
    {
        Stream = rawIncomingAudioStream
    };
    joinCallOptions.IncomingAudioOptions = incomingAudioOptions;

Or we can attach the stream to an existing Call instance instead:

    await call.StartAudio(rawIncomingAudioStream);

To start receiving raw audio buffers from the incoming stream, add listeners for the incoming stream state and buffer received events.

    unsafe private void OnAudioStateChanged(object sender, AudioStreamStateChanged args)
    {
        if (args.AudioStreamState == AudioStreamState.Started)
        {
            // When value is `AudioStreamState.STARTED` we'll be able to receive samples.
        }
    }
    private void OnRawIncomingMixedAudioBufferAvailable(object sender, IncomingMixedAudioEventArgs args)
    {
        // Received a raw audio buffers(MemoryBuffer).
        using (IMemoryBufferReference reference = args.IncomingAudioBuffer.Buffer.CreateReference())
        {
            byte* dataInBytes;
            uint capacityInBytes;
            ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);
            // Process the data using AudioGraph class
        }
    }
    rawIncomingAudioStream.StateChanged += OnAudioStateChanged;
    rawIncomingAudioStream.MixedAudioBufferReceived += OnRawIncomingMixedAudioBufferAvailable;
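If the incoming mixed audio needs to be handed to other audio APIs, the native buffer can be copied into a managed array. The following sketch is illustrative only and reuses the IMemoryBufferByteAccess COM interface declared later in this article; the helper name is ours, not part of the SDK:

    // Illustrative only: copy the received mixed audio into a managed byte array.
    // Reuses the IMemoryBufferByteAccess interface declared in the raw video section below.
    private unsafe byte[] CopyIncomingAudioToManagedArray(IncomingMixedAudioEventArgs args)
    {
        using (IMemoryBufferReference reference = args.IncomingAudioBuffer.Buffer.CreateReference())
        {
            byte* dataInBytes;
            uint capacityInBytes;
            ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);

            var managedBuffer = new byte[capacityInBytes];
            System.Runtime.InteropServices.Marshal.Copy((IntPtr)dataInBytes, managedBuffer, 0, (int)capacityInBytes);
            return managedBuffer;
        }
    }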

Raw video access

Because the app generates the video frames, the app must inform the Azure Communication Services Calling SDK about the video formats that the app can generate. The SDK uses this information to pick the best video format configuration for the network conditions at that time.

Virtual video

Supported video resolutions

Aspect ratio    Resolution         Maximum FPS
16x9            1080p              30
16x9            720p               30
16x9            540p               30
16x9            480p               30
16x9            360p               30
16x9            270p               15
16x9            240p               15
16x9            180p               15
4x3             VGA (640x480)      30
4x3             424x320            15
4x3             QVGA (320x240)     15
4x3             212x160            15
  1. Create an array of VideoStreamFormat by using the VideoStreamPixelFormat values that the SDK supports. When multiple formats are available, the order of the formats in the list doesn't influence or prioritize which one is used. The criteria for format selection are based on external factors like network bandwidth.

    var videoStreamFormat = new VideoStreamFormat
    {
        Resolution = VideoStreamResolution.P720, // For VirtualOutgoingVideoStream the width/height should be set using VideoStreamResolution enum
        PixelFormat = VideoStreamPixelFormat.Rgba,
        FramesPerSecond = 30,
        Stride1 = 1280 * 4 // It is times 4 because RGBA is a 32-bit format
    };
    VideoStreamFormat[] videoStreamFormats = { videoStreamFormat };
    
  2. Create RawOutgoingVideoStreamOptions and set Formats with the previously created object.

    var rawOutgoingVideoStreamOptions = new RawOutgoingVideoStreamOptions
    {
        Formats = videoStreamFormats
    };
    
  3. Create an instance of VirtualOutgoingVideoStream by using the RawOutgoingVideoStreamOptions instance that you created previously.

    var rawOutgoingVideoStream = new VirtualOutgoingVideoStream(rawOutgoingVideoStreamOptions);
    
  4. Subscribe to the RawOutgoingVideoStream.FormatChanged delegate. This event notifies whenever the VideoStreamFormat changes from one of the video formats provided in the list.

    rawOutgoingVideoStream.FormatChanged += (object sender, VideoStreamFormatChangedEventArgs args) =>
    {
        VideoStreamFormat videoStreamFormat = args.Format;
    };
    
  5. Create an instance of the following helper class to access the buffer data

    [ComImport]
    [Guid("5B0D3235-4DBA-4D44-865E-8F1D0E4FD04D")]
    [InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
    unsafe interface IMemoryBufferByteAccess
    {
        void GetBuffer(out byte* buffer, out uint capacity);
    }
    [ComImport]
    [Guid("905A0FEF-BC53-11DF-8C49-001E4FC686DA")]
    [InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
    unsafe interface IBufferByteAccess
    {
        void Buffer(out byte* buffer);
    }
    internal static class BufferExtensions
    {
        // For accessing MemoryBuffer
        public static unsafe byte* GetArrayBuffer(IMemoryBuffer memoryBuffer)
        {
            IMemoryBufferReference memoryBufferReference = memoryBuffer.CreateReference();
            var memoryBufferByteAccess = memoryBufferReference as IMemoryBufferByteAccess;
            memoryBufferByteAccess.GetBuffer(out byte* arrayBuffer, out uint arrayBufferCapacity);
            GC.AddMemoryPressure(arrayBufferCapacity);
            return arrayBuffer;
        }
        // For accessing MediaStreamSample
        public static unsafe byte* GetArrayBuffer(IBuffer buffer)
        {
            var bufferByteAccess = buffer as IBufferByteAccess;
            bufferByteAccess.Buffer(out byte* arrayBuffer);
            uint arrayBufferCapacity = buffer.Capacity;
            GC.AddMemoryPressure(arrayBufferCapacity);
            return arrayBuffer;
        }
    }
    
  6. Create an instance of the following helper class to generate random RawVideoFrame objects by using VideoStreamPixelFormat.Rgba

    public class VideoFrameSender
    {
        private RawOutgoingVideoStream rawOutgoingVideoStream;
        private RawVideoFrameKind rawVideoFrameKind;
        private Thread frameIteratorThread;
        private Random random = new Random();
        private volatile bool stopFrameIterator = false;
        public VideoFrameSender(RawVideoFrameKind rawVideoFrameKind, RawOutgoingVideoStream rawOutgoingVideoStream)
        {
            this.rawVideoFrameKind = rawVideoFrameKind;
            this.rawOutgoingVideoStream = rawOutgoingVideoStream;
        }
        public async void VideoFrameIterator()
        {
            while (!stopFrameIterator)
            {
                if (rawOutgoingVideoStream != null &&
                    rawOutgoingVideoStream.Format != null &&
                    rawOutgoingVideoStream.State == VideoStreamState.Started)
                {
                    await SendRandomVideoFrameRGBA();
                }
            }
        }
        private async Task SendRandomVideoFrameRGBA()
        {
            uint rgbaCapacity = (uint)(rawOutgoingVideoStream.Format.Width * rawOutgoingVideoStream.Format.Height * 4);
            RawVideoFrame videoFrame = null;
            switch (rawVideoFrameKind)
            {
                case RawVideoFrameKind.Buffer:
                    videoFrame = 
                        GenerateRandomVideoFrameBuffer(rawOutgoingVideoStream.Format, rgbaCapacity);
                    break;
                case RawVideoFrameKind.Texture:
                    videoFrame = 
                        GenerateRandomVideoFrameTexture(rawOutgoingVideoStream.Format, rgbaCapacity);
                    break;
            }
            try
            {
                using (videoFrame)
                {
                    await rawOutgoingVideoStream.SendRawVideoFrameAsync(videoFrame);
                }
            }
            catch (Exception ex)
            {
                string msg = ex.Message;
            }
            try
            {
                int delayBetweenFrames = (int)(1000.0 / rawOutgoingVideoStream.Format.FramesPerSecond);
                await Task.Delay(delayBetweenFrames);
            }
            catch (Exception ex)
            {
                string msg = ex.Message;
            }
        }
        private unsafe RawVideoFrame GenerateRandomVideoFrameBuffer(VideoStreamFormat videoFormat, uint rgbaCapacity)
        {
            var rgbaBuffer = new MemoryBuffer(rgbaCapacity);
            byte* rgbaArrayBuffer = BufferExtensions.GetArrayBuffer(rgbaBuffer);
            GenerateRandomVideoFrame(&rgbaArrayBuffer);
            return new RawVideoFrameBuffer()
            {
                Buffers = new MemoryBuffer[] { rgbaBuffer },
                StreamFormat = videoFormat
            };
        }
        private unsafe RawVideoFrame GenerateRandomVideoFrameTexture(VideoStreamFormat videoFormat, uint rgbaCapacity)
        {
            var timeSpan = new TimeSpan(rawOutgoingVideoStream.TimestampInTicks);
            var rgbaBuffer = new Buffer(rgbaCapacity)
            {
                Length = rgbaCapacity
            };
            byte* rgbaArrayBuffer = BufferExtensions.GetArrayBuffer(rgbaBuffer);
            GenerateRandomVideoFrame(&rgbaArrayBuffer);
            var mediaStreamSample = MediaStreamSample.CreateFromBuffer(rgbaBuffer, timeSpan);
            return new RawVideoFrameTexture()
            {
                Texture = mediaStreamSample,
                StreamFormat = videoFormat
            };
        }
        private unsafe void GenerateRandomVideoFrame(byte** rgbaArrayBuffer)
        {
            int w = rawOutgoingVideoStream.Format.Width;
            int h = rawOutgoingVideoStream.Format.Height;
            byte r = (byte)random.Next(1, 255);
            byte g = (byte)random.Next(1, 255);
            byte b = (byte)random.Next(1, 255);
            int rgbaStride = w * 4;
            for (int y = 0; y < h; y++)
            {
                for (int x = 0; x < rgbaStride; x += 4)
                {
                    (*rgbaArrayBuffer)[(w * 4 * y) + x + 0] = (byte)(y % r);
                    (*rgbaArrayBuffer)[(w * 4 * y) + x + 1] = (byte)(y % g);
                    (*rgbaArrayBuffer)[(w * 4 * y) + x + 2] = (byte)(y % b);
                    (*rgbaArrayBuffer)[(w * 4 * y) + x + 3] = 255;
                }
            }
        }
        public void Start()
        {
            frameIteratorThread = new Thread(VideoFrameIterator);
            frameIteratorThread.Start();
        }
        public void Stop()
        {
            try
            {
                if (frameIteratorThread != null)
                {
                    stopFrameIterator = true;
                    frameIteratorThread.Join();
                    frameIteratorThread = null;
                    stopFrameIterator = false;
                }
            }
            catch (Exception ex)
            {
                string msg = ex.Message;
            }
        }
    }
    
  7. Subscribe to the VideoStream.StateChanged delegate. This event informs you of the state of the current stream. Don't send frames if the state isn't VideoStreamState.Started.

    private VideoFrameSender videoFrameSender;
    rawOutgoingVideoStream.StateChanged += (object sender, VideoStreamStateChangedEventArgs args) =>
    {
        CallVideoStream callVideoStream = args.Stream;
        switch (callVideoStream.State)
            {
                case VideoStreamState.Available:
                    // VideoStream has been attached to the call
                    var frameKind = RawVideoFrameKind.Buffer; // Use the frameKind you prefer
                    //var frameKind = RawVideoFrameKind.Texture;
                    videoFrameSender = new VideoFrameSender(frameKind, rawOutgoingVideoStream);
                    break;
                case VideoStreamState.Started:
                    // Start sending frames
                    videoFrameSender.Start();
                    break;
                case VideoStreamState.Stopped:
                    // Stop sending frames
                    videoFrameSender.Stop();
                    break;
            }
    };
    

Screen share video

Because the Windows system generates the frames, you must implement your own foreground service to capture the frames and send them by using the Azure Communication Services Calling API.

Supported video resolutions

Aspect ratio    Resolution              Maximum FPS
Anything        Anything up to 1080p    30

Steps to create a screen share video stream

  1. Create an array of VideoStreamFormat by using the VideoStreamPixelFormat values that the SDK supports. When multiple formats are available, the order of the formats in the list doesn't influence or prioritize which one is used. The criteria for format selection are based on external factors like network bandwidth.
    var videoStreamFormat = new VideoStreamFormat
    {
        Width = 1280, // Width and height can be used for ScreenShareOutgoingVideoStream for custom resolutions or use one of the predefined values inside VideoStreamResolution
        Height = 720,
        //Resolution = VideoStreamResolution.P720,
        PixelFormat = VideoStreamPixelFormat.Rgba,
        FramesPerSecond = 30,
        Stride1 = 1280 * 4 // It is times 4 because RGBA is a 32-bit format.
    };
    VideoStreamFormat[] videoStreamFormats = { videoStreamFormat };
    
  2. Create RawOutgoingVideoStreamOptions and set Formats with the previously created object.
    var rawOutgoingVideoStreamOptions = new RawOutgoingVideoStreamOptions
    {
        Formats = videoStreamFormats
    };
    
  3. Create an instance of ScreenShareOutgoingVideoStream by using the RawOutgoingVideoStreamOptions instance that you created previously.
    var rawOutgoingVideoStream = new ScreenShareOutgoingVideoStream(rawOutgoingVideoStreamOptions);
    
  4. Capture and send the video frame in the following way (a simple driver loop that keeps sending frames while the stream is started is sketched after this list).
    private async Task SendRawVideoFrame()
    {
        RawVideoFrame videoFrame = null;
        switch (rawVideoFrameKind) //it depends on the frame kind you want to send
        {
            case RawVideoFrameKind.Buffer:
                MemoryBuffer memoryBuffer = // Fill it with the content you got from the Windows APIs
                videoFrame = new RawVideoFrameBuffer()
                {
                    Buffers = new MemoryBuffer[] { memoryBuffer }, // The number of buffers depends on the VideoStreamPixelFormat
                    StreamFormat = rawOutgoingVideoStream.Format
                };
                break;
            case RawVideoFrameKind.Texture:
                MediaStreamSample mediaStreamSample = // Fill it with the content you got from the Windows APIs
                videoFrame = new RawVideoFrameTexture()
                {
                    Texture = mediaStreamSample, // Texture only receive planar buffers
                    StreamFormat = rawOutgoingVideoStream.Format
                };
                break;
        }
    
        try
        {
            using (videoFrame)
            {
                await rawOutgoingVideoStream.SendRawVideoFrameAsync(videoFrame);
            }
        }
        catch (Exception ex)
        {
            string msg = ex.Message;
        }
    
        try
        {
            int delayBetweenFrames = (int)(1000.0 / rawOutgoingVideoStream.Format.FramesPerSecond);
            await Task.Delay(delayBetweenFrames);
        }
        catch (Exception ex)
        {
            string msg = ex.Message;
        }
    }
    
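This quickstart doesn't show how SendRawVideoFrame is driven. One possible approach, sketched below under that assumption, is a background loop that keeps sending frames while the stream is in the Started state; the actual screen capture still has to come from your own implementation:

    // Illustrative driver loop (not part of the SDK): keeps calling the SendRawVideoFrame
    // method above while the outgoing stream is started. SendRawVideoFrame already waits
    // roughly one frame interval between sends, so no extra pacing is needed here.
    private async Task ScreenShareFrameLoop()
    {
        while (rawOutgoingVideoStream.State == VideoStreamState.Started)
        {
            await SendRawVideoFrame();
        }
    }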

Raw incoming video

This feature gives you access to the video frames inside the IncomingVideoStream objects in order to manipulate those streams locally

  1. Create an instance of IncomingVideoOptions that sets VideoStreamKind.RawIncoming, and pass it through JoinCallOptions
    var frameKind = RawVideoFrameKind.Buffer;  // Use the frameKind you prefer to receive
    var incomingVideoOptions = new IncomingVideoOptions
    {
        StreamKind = VideoStreamKind.RawIncoming,
        FrameKind = frameKind
    };
    var joinCallOptions = new JoinCallOptions
    {
        IncomingVideoOptions = incomingVideoOptions
    };
    
  2. Once you receive a ParticipantsUpdatedEventArgs event, attach the RemoteParticipant.VideoStreamStateChanged delegate. This event informs you of the state of the IncomingVideoStream objects.
    private List<RemoteParticipant> remoteParticipantList;
    private void OnRemoteParticipantsUpdated(object sender, ParticipantsUpdatedEventArgs args)
    {
        foreach (RemoteParticipant remoteParticipant in args.AddedParticipants)
        {
            IReadOnlyList<IncomingVideoStream> incomingVideoStreamList = remoteParticipant.IncomingVideoStreams; // Check if there are IncomingVideoStreams already before attaching the delegate
            foreach (IncomingVideoStream incomingVideoStream in incomingVideoStreamList)
            {
                OnRawIncomingVideoStreamStateChanged(incomingVideoStream);
            }
            remoteParticipant.VideoStreamStateChanged += OnVideoStreamStateChanged;
            remoteParticipantList.Add(remoteParticipant); // If the RemoteParticipant ref is not kept alive the VideoStreamStateChanged events are going to be missed
        }
        foreach (RemoteParticipant remoteParticipant in args.RemovedParticipants)
        {
            remoteParticipant.VideoStreamStateChanged -= OnVideoStreamStateChanged;
            remoteParticipantList.Remove(remoteParticipant);
        }
    }
    private void OnVideoStreamStateChanged(object sender, VideoStreamStateChangedEventArgs args)
    {
        CallVideoStream callVideoStream = args.Stream;
        OnRawIncomingVideoStreamStateChanged(callVideoStream as RawIncomingVideoStream);
    }
    private void OnRawIncomingVideoStreamStateChanged(RawIncomingVideoStream rawIncomingVideoStream)
    {
        switch (rawIncomingVideoStream.State)
        {
            case VideoStreamState.Available:
                // There is a new IncomingVideoStream
                rawIncomingVideoStream.RawVideoFrameReceived += OnVideoFrameReceived;
                rawIncomingVideoStream.Start();
                break;
            case VideoStreamState.Started:
                // Will start receiving video frames
                break;
            case VideoStreamState.Stopped:
                // Will stop receiving video frames
                break;
            case VideoStreamState.NotAvailable:
                // The IncomingVideoStream should not be used anymore
                rawIncomingVideoStream.RawVideoFrameReceived -= OnVideoFrameReceived;
                break;
        }
    }
    
  3. At the point when the IncomingVideoStream has the VideoStreamState.Available state, attach the RawIncomingVideoStream.RawVideoFrameReceived delegate as shown in the previous step. That delegate provides the new RawVideoFrame objects (a sketch of how to read the pixel data of a buffer frame follows this list).
    private async void OnVideoFrameReceived(object sender, RawVideoFrameReceivedEventArgs args)
    {
        RawVideoFrame videoFrame = args.Frame;
        switch (videoFrame.Kind) // The type will be whatever was configured on the IncomingVideoOptions
        {
            case RawVideoFrameKind.Buffer:
                // Render/Modify/Save the video frame
                break;
            case RawVideoFrameKind.Texture:
                // Render/Modify/Save the video frame
                break;
        }
    }
    
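For the RawVideoFrameKind.Buffer case, the pixel data of the received frame can be read through the BufferExtensions helper defined earlier in this article. The following sketch is illustrative only; the method name is ours, not part of the SDK:

    // Illustrative only: read the RGBA bytes of an incoming buffer frame by using the
    // BufferExtensions helper from the virtual video section.
    private unsafe void ProcessBufferFrame(RawVideoFrameBuffer frameBuffer)
    {
        VideoStreamFormat format = frameBuffer.StreamFormat;
        int byteCount = format.Width * format.Height * 4; // RGBA uses 4 bytes per pixel

        MemoryBuffer rgbaBuffer = frameBuffer.Buffers[0]; // RGBA frames carry a single plane
        byte* rgbaBytes = BufferExtensions.GetArrayBuffer(rgbaBuffer);

        // Render, modify, or save the byteCount bytes starting at rgbaBytes here.
    }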

In this quickstart, you learn how to implement raw media access by using the Azure Communication Services Calling SDK for Android.

The Azure Communication Services Calling SDK offers APIs that let apps generate their own video frames to send to remote participants in a call.

This quickstart builds on Quickstart: Add 1:1 video calling to your app for Android.

Raw audio access

Accessing raw audio media gives you access to the call's incoming audio stream, along with the ability to view and send custom outgoing audio streams during a call.

Send raw outgoing audio

Create an options object specifying the raw stream properties that we want to send.

    RawOutgoingAudioStreamProperties outgoingAudioProperties = new RawOutgoingAudioStreamProperties()
                .setAudioFormat(AudioStreamFormat.PCM16_BIT)
                .setSampleRate(AudioStreamSampleRate.HZ44100)
                .setChannelMode(AudioStreamChannelMode.STEREO)
                .setBufferDuration(AudioStreamBufferDuration.IN_MS20);

    RawOutgoingAudioStreamOptions outgoingAudioStreamOptions = new RawOutgoingAudioStreamOptions()
                .setProperties(outgoingAudioProperties);

Create a RawOutgoingAudioStream and attach it to the join call options. The stream automatically starts when the call is connected.

    JoinCallOptions options = new JoinCallOptions(); // or StartCallOptions()

    OutgoingAudioOptions outgoingAudioOptions = new OutgoingAudioOptions();
    RawOutgoingAudioStream rawOutgoingAudioStream = new RawOutgoingAudioStream(outgoingAudioStreamOptions);

    outgoingAudioOptions.setStream(rawOutgoingAudioStream);
    options.setOutgoingAudioOptions(outgoingAudioOptions);

    // Start or Join call with those call options.

Attach the stream to a call

Or you can attach the stream to an existing Call instance instead:

    CompletableFuture<Void> result = call.startAudio(context, rawOutgoingAudioStream);

Start sending raw samples

We can start sending data only once the stream state is AudioStreamState.STARTED. To observe changes to the audio stream state, add an OnStateChangedListener event listener.

    private void onStateChanged(PropertyChangedEvent propertyChangedEvent) {
        // When value is `AudioStreamState.STARTED` we'll be able to send audio samples.
    }

    rawOutgoingAudioStream.addOnStateChangedListener(this::onStateChanged);

When the stream starts, we can start sending java.nio.ByteBuffer audio samples to the call.

The audio buffer format should match the specified stream properties.

    Thread thread = new Thread(){
        public void run() {
            RawAudioBuffer buffer;
            Calendar nextDeliverTime = Calendar.getInstance();
            while (true)
            {
                nextDeliverTime.add(Calendar.MILLISECOND, 20);
                byte data[] = new byte[outgoingAudioStream.getExpectedBufferSizeInBytes()];
                //can grab microphone data from AudioRecord
                ByteBuffer dataBuffer = ByteBuffer.allocateDirect(outgoingAudioStream.getExpectedBufferSizeInBytes());
                dataBuffer.rewind();
                buffer = new RawAudioBuffer(dataBuffer);
                outgoingAudioStream.sendOutgoingAudioBuffer(buffer);
                long wait = nextDeliverTime.getTimeInMillis() - Calendar.getInstance().getTimeInMillis();
                if (wait > 0)
                {
                    try {
                        Thread.sleep(wait);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                }
            }
        }
    };
    thread.start();

Receive raw incoming audio

We can also receive the call's audio stream samples as java.nio.ByteBuffer if we want to process the audio before playback.

Create a RawIncomingAudioStreamOptions object specifying the raw stream properties that we want to receive.

    RawIncomingAudioStreamOptions options = new RawIncomingAudioStreamOptions();
    RawIncomingAudioStreamProperties properties = new RawIncomingAudioStreamProperties()
                .setAudioFormat(AudioStreamFormat.PCM16_BIT)
                .setSampleRate(AudioStreamSampleRate.HZ44100)
                .setChannelMode(AudioStreamChannelMode.STEREO);
    options.setProperties(properties);

Create a RawIncomingAudioStream and attach it to the join call options

    JoinCallOptions joinCallOptions = new JoinCallOptions(); // or StartCallOptions()
    IncomingAudioOptions incomingAudioOptions = new IncomingAudioOptions();

    RawIncomingAudioStream rawIncomingAudioStream = new RawIncomingAudioStream(options);
    incomingAudioOptions.setStream(rawIncomingAudioStream);
    joinCallOptions.setIncomingAudioOptions(incomingAudioOptions);

Or we can attach the stream to an existing Call instance instead:


    CompletableFuture<Void> result = call.startAudio(context, rawIncomingAudioStream);

To start receiving raw audio buffers from the incoming stream, add listeners for the incoming stream state and buffer received events.

    private void onStateChanged(PropertyChangedEvent propertyChangedEvent) {
        // When value is `AudioStreamState.STARTED` we'll be able to receive samples.
    }

    private void onMixedAudioBufferReceived(IncomingMixedAudioEvent incomingMixedAudioEvent) {
        // Received a raw audio buffers(java.nio.ByteBuffer).
    }

    rawIncomingAudioStream.addOnStateChangedListener(this::onStateChanged);
    rawIncomingAudioStream.addMixedAudioBufferReceivedListener(this::onMixedAudioBufferReceived);

It's important to remember to stop the audio stream in the current call Call instance:


    CompletableFuture<Void> result = call.stopAudio(context, rawIncomingAudioStream);

Raw video access

Because the app generates the video frames, the app must inform the Azure Communication Services Calling SDK about the video formats that the app can generate. The SDK uses this information to pick the best video format configuration for the network conditions at that time.

Virtual video

Supported video resolutions

Aspect ratio    Resolution         Maximum FPS
16x9            1080p              30
16x9            720p               30
16x9            540p               30
16x9            480p               30
16x9            360p               30
16x9            270p               15
16x9            240p               15
16x9            180p               15
4x3             VGA (640x480)      30
4x3             424x320            15
4x3             QVGA (320x240)     15
4x3             212x160            15
  1. Create an array of VideoStreamFormat by using the VideoStreamPixelFormat values that the SDK supports.

    When multiple formats are available, the order of the formats in the list doesn't influence or prioritize which one is used. The criteria for format selection are based on external factors like network bandwidth.

    VideoStreamFormat videoStreamFormat = new VideoStreamFormat();
    videoStreamFormat.setResolution(VideoStreamResolution.P360);
    videoStreamFormat.setPixelFormat(VideoStreamPixelFormat.RGBA);
    videoStreamFormat.setFramesPerSecond(framerate);
    videoStreamFormat.setStride1(w * 4); // It is times 4 because RGBA is a 32-bit format
    
    List<VideoStreamFormat> videoStreamFormats = new ArrayList<>();
    videoStreamFormats.add(videoStreamFormat);
    
  2. Create RawOutgoingVideoStreamOptions and set Formats with the previously created object.

    RawOutgoingVideoStreamOptions rawOutgoingVideoStreamOptions = new RawOutgoingVideoStreamOptions();
    rawOutgoingVideoStreamOptions.setFormats(videoStreamFormats);
    
  3. Create an instance of VirtualOutgoingVideoStream by using the RawOutgoingVideoStreamOptions instance that you created previously.

    VirtualOutgoingVideoStream rawOutgoingVideoStream = new VirtualOutgoingVideoStream(rawOutgoingVideoStreamOptions);
    
  4. Subscribe to the RawOutgoingVideoStream.addOnFormatChangedListener delegate. This event notifies whenever the VideoStreamFormat changes from one of the video formats provided in the list.

    rawOutgoingVideoStream.addOnFormatChangedListener((VideoStreamFormatChangedEvent args) -> 
    {
        VideoStreamFormat videoStreamFormat = args.getFormat();
    });
    
  5. Create an instance of the following helper class to generate random RawVideoFrame objects by using VideoStreamPixelFormat.RGBA

    public class VideoFrameSender
    {
        private RawOutgoingVideoStream rawOutgoingVideoStream;
        private Thread frameIteratorThread;
        private Random random = new Random();
        private volatile boolean stopFrameIterator = false;
    
        public VideoFrameSender(RawOutgoingVideoStream rawOutgoingVideoStream)
        {
            this.rawOutgoingVideoStream = rawOutgoingVideoStream;
        }
    
        public void VideoFrameIterator()
        {
            while (!stopFrameIterator)
            {
                if (rawOutgoingVideoStream != null && 
                    rawOutgoingVideoStream.getFormat() != null && 
                    rawOutgoingVideoStream.getState() == VideoStreamState.STARTED)
                {
                    SendRandomVideoFrameRGBA();
                }
            }
        }
    
        private void SendRandomVideoFrameRGBA()
        {
            int rgbaCapacity = rawOutgoingVideoStream.getFormat().getWidth() * rawOutgoingVideoStream.getFormat().getHeight() * 4;
    
            RawVideoFrame videoFrame = GenerateRandomVideoFrameBuffer(rawOutgoingVideoStream.getFormat(), rgbaCapacity);
    
            try
            {
                rawOutgoingVideoStream.sendRawVideoFrame(videoFrame).get();
    
                int delayBetweenFrames = (int)(1000.0 / rawOutgoingVideoStream.getFormat().getFramesPerSecond());
                Thread.sleep(delayBetweenFrames);
            }
            catch (Exception ex)
            {
                String msg = ex.getMessage();
            }
            finally
            {
                videoFrame.close();
            }
        }
    
        private RawVideoFrame GenerateRandomVideoFrameBuffer(VideoStreamFormat videoStreamFormat, int rgbaCapacity)
        {
            ByteBuffer rgbaBuffer = ByteBuffer.allocateDirect(rgbaCapacity); // Only allocateDirect ByteBuffers are allowed
            rgbaBuffer.order(ByteOrder.nativeOrder());
    
            GenerateRandomVideoFrame(rgbaBuffer, rgbaCapacity);
    
            RawVideoFrameBuffer videoFrameBuffer = new RawVideoFrameBuffer();
            videoFrameBuffer.setBuffers(Arrays.asList(rgbaBuffer));
            videoFrameBuffer.setStreamFormat(videoStreamFormat);
    
            return videoFrameBuffer;
        }
    
        private void GenerateRandomVideoFrame(ByteBuffer rgbaBuffer, int rgbaCapacity)
        {
            int w = rawOutgoingVideoStream.getFormat().getWidth();
            int h = rawOutgoingVideoStream.getFormat().getHeight();
    
            byte rVal = (byte)random.nextInt(255);
            byte gVal = (byte)random.nextInt(255);
            byte bVal = (byte)random.nextInt(255);
            byte aVal = (byte)255;
    
            byte[] rgbaArrayBuffer = new byte[rgbaCapacity];
    
            int rgbaStride = w * 4;
    
            for (int y = 0; y < h; y++)
            {
                for (int x = 0; x < rgbaStride; x += 4)
                {
                    rgbaArrayBuffer[(w * 4 * y) + x + 0] = rVal;
                    rgbaArrayBuffer[(w * 4 * y) + x + 1] = gVal;
                    rgbaArrayBuffer[(w * 4 * y) + x + 2] = bVal;
                    rgbaArrayBuffer[(w * 4 * y) + x + 3] = aVal;
                }
            }
    
            rgbaBuffer.put(rgbaArrayBuffer);
            rgbaBuffer.rewind();
        }
    
        public void Start()
        {
            frameIteratorThread = new Thread(this::VideoFrameIterator);
            frameIteratorThread.start();
        }
    
        public void Stop()
        {
            try
            {
                if (frameIteratorThread != null)
                {
                    stopFrameIterator = true;
    
                    frameIteratorThread.join();
                    frameIteratorThread = null;
    
                    stopFrameIterator = false;
                }
            }
            catch (InterruptedException ex)
            {
                String msg = ex.getMessage();
            }
        }
    }
    
  6. Subscribe to the VideoStream.addOnStateChangedListener delegate. This delegate informs you of the state of the current stream. Don't send frames if the state isn't VideoStreamState.STARTED.

    private VideoFrameSender videoFrameSender;
    
    rawOutgoingVideoStream.addOnStateChangedListener((VideoStreamStateChangedEvent args) ->
    {
        CallVideoStream callVideoStream = args.getStream();
    
        switch (callVideoStream.getState())
        {
            case AVAILABLE:
                videoFrameSender = new VideoFrameSender(rawOutgoingVideoStream);
                break;
            case STARTED:
                // Start sending frames
                videoFrameSender.Start();
                break;
            case STOPPED:
                // Stop sending frames
                videoFrameSender.Stop();
                break;
        }
    });
    

Screen share video

Because the Android system generates the frames, you must implement your own foreground service to capture the frames and send them by using the Azure Communication Services Calling API.

Supported video resolutions

Aspect ratio    Resolution              Maximum FPS
Anything        Anything up to 1080p    30

Steps to create a screen share video stream

  1. Create an array of VideoStreamFormat by using the VideoStreamPixelFormat values that the SDK supports.

    When multiple formats are available, the order of the formats in the list doesn't influence or prioritize which one is used. The criteria for format selection are based on external factors like network bandwidth.

    VideoStreamFormat videoStreamFormat = new VideoStreamFormat();
    videoStreamFormat.setWidth(1280); // Width and height can be used for ScreenShareOutgoingVideoStream for custom resolutions or use one of the predefined values inside VideoStreamResolution
    videoStreamFormat.setHeight(720);
    //videoStreamFormat.setResolution(VideoStreamResolution.P360);
    videoStreamFormat.setPixelFormat(VideoStreamPixelFormat.RGBA);
    videoStreamFormat.setFramesPerSecond(framerate);
    videoStreamFormat.setStride1(w * 4); // It is times 4 because RGBA is a 32-bit format
    
    List<VideoStreamFormat> videoStreamFormats = new ArrayList<>();
    videoStreamFormats.add(videoStreamFormat);
    
  2. Create RawOutgoingVideoStreamOptions and set Formats with the previously created object.

    RawOutgoingVideoStreamOptions rawOutgoingVideoStreamOptions = new RawOutgoingVideoStreamOptions();
    rawOutgoingVideoStreamOptions.setFormats(videoStreamFormats);
    
  3. Create an instance of ScreenShareOutgoingVideoStream by using the RawOutgoingVideoStreamOptions instance that you created previously.

    ScreenShareOutgoingVideoStream rawOutgoingVideoStream = new ScreenShareOutgoingVideoStream(rawOutgoingVideoStreamOptions);
    
  4. Capture and send the video frame in the following way.

    private void SendRawVideoFrame()
    {
        ByteBuffer byteBuffer = // Fill it with the content you got from the Windows APIs
        RawVideoFrameBuffer videoFrame = new RawVideoFrameBuffer();
        videoFrame.setBuffers(Arrays.asList(byteBuffer)); // The number of buffers depends on the VideoStreamPixelFormat
        videoFrame.setStreamFormat(rawOutgoingVideoStream.getFormat());
    
        try
        {
            rawOutgoingVideoStream.sendRawVideoFrame(videoFrame).get();
        }
        catch (Exception ex)
        {
            String msg = ex.getMessage();
        }
        finally
        {
            videoFrame.close();
        }
    }
    

Raw incoming video

This feature gives you access to the video frames inside the IncomingVideoStream objects in order to manipulate those frames locally

  1. Create an instance of IncomingVideoOptions that sets VideoStreamKind.RAW_INCOMING, and pass it through JoinCallOptions

    IncomingVideoOptions incomingVideoOptions = new IncomingVideoOptions()
            .setStreamType(VideoStreamKind.RAW_INCOMING);
    
    JoinCallOptions joinCallOptions = new JoinCallOptions()
            .setIncomingVideoOptions(incomingVideoOptions);
    
  2. Once you receive a ParticipantsUpdatedEventArgs event, attach the RemoteParticipant.VideoStreamStateChanged delegate. This event informs you of the state of the IncomingVideoStream objects.

    private List<RemoteParticipant> remoteParticipantList;
    
    private void OnRemoteParticipantsUpdated(ParticipantsUpdatedEventArgs args)
    {
        for (RemoteParticipant remoteParticipant : args.getAddedParticipants())
        {
            List<IncomingVideoStream> incomingVideoStreamList = remoteParticipant.getIncomingVideoStreams(); // Check if there are IncomingVideoStreams already before attaching the delegate
            for (IncomingVideoStream incomingVideoStream : incomingVideoStreamList)
            {
                OnRawIncomingVideoStreamStateChanged(incomingVideoStream);
            }
    
            remoteParticipant.addOnVideoStreamStateChanged(this::OnVideoStreamStateChanged);
            remoteParticipantList.add(remoteParticipant); // If the RemoteParticipant ref is not kept alive the VideoStreamStateChanged events are going to be missed
        }
    
        for (RemoteParticipant remoteParticipant : args.getRemovedParticipants())
        {
            remoteParticipant.removeOnVideoStreamStateChanged(this::OnVideoStreamStateChanged);
            remoteParticipantList.remove(remoteParticipant);
        }
    }
    
    private void OnVideoStreamStateChanged(VideoStreamStateChangedEvent args)
    {
        CallVideoStream callVideoStream = args.getStream();
    
        OnRawIncomingVideoStreamStateChanged((RawIncomingVideoStream) callVideoStream);
    }
    
    private void OnRawIncomingVideoStreamStateChanged(RawIncomingVideoStream rawIncomingVideoStream)
    {
        switch (rawIncomingVideoStream.getState())
        {
            case AVAILABLE:
                // There is a new IncomingvideoStream
                rawIncomingVideoStream.addOnRawVideoFrameReceived(this::OnVideoFrameReceived);
                rawIncomingVideoStream.start();
    
                break;
            case STARTED:
                // Will start receiving video frames
                break;
            case STOPPED:
                // Will stop receiving video frames
                break;
            case NOT_AVAILABLE:
                // The IncomingvideoStream should not be used anymore
                rawIncomingVideoStream.removeOnRawVideoFrameReceived(this::OnVideoFrameReceived);
    
                break;
        }
    }
    
  3. At the point when the IncomingVideoStream has the VideoStreamState.AVAILABLE state, attach the RawIncomingVideoStream.RawVideoFrameReceived delegate as shown in the previous step. That delegate provides the new RawVideoFrame objects.

    private void OnVideoFrameReceived(RawVideoFrameReceivedEventArgs args)
    {
        // Render/Modify/Save the video frame
        RawVideoFrameBuffer videoFrame = (RawVideoFrameBuffer) args.getFrame();
    }
    

In this quickstart, you learn how to implement raw media access by using the Azure Communication Services Calling SDK for iOS.

The Azure Communication Services Calling SDK offers APIs that let apps generate their own video frames to send to remote participants in a call.

This quickstart builds on Quickstart: Add 1:1 video calling to your app for iOS.

Raw audio access

Accessing raw audio media gives you access to the call's incoming audio stream, along with the ability to send custom outgoing audio streams during a call.

Send raw outgoing audio

Create an options object specifying the raw stream properties that we want to send.

    let outgoingAudioStreamOptions = RawOutgoingAudioStreamOptions()
    let properties = RawOutgoingAudioStreamProperties()
    properties.sampleRate = .hz44100
    properties.bufferDuration = .inMs20
    properties.channelMode = .mono
    properties.format = .pcm16Bit
    outgoingAudioStreamOptions.properties = properties

Create a RawOutgoingAudioStream and attach it to the join call options. The stream automatically starts when the call is connected.

    let options = JoinCallOptions() // or StartCallOptions()

    let outgoingAudioOptions = OutgoingAudioOptions()
    self.rawOutgoingAudioStream = RawOutgoingAudioStream(rawOutgoingAudioStreamOptions: outgoingAudioStreamOptions)
    outgoingAudioOptions.stream = self.rawOutgoingAudioStream
    options.outgoingAudioOptions = outgoingAudioOptions

    // Start or Join call passing the options instance.

Attach the stream to a call

Or you can attach the stream to an existing Call instance instead:


    call.startAudio(stream: self.rawOutgoingAudioStream) { error in 
        // Stream attached to `Call`.
    }

Start sending raw samples

We can start sending data only once the stream state is AudioStreamState.started. To observe changes to the audio stream state, implement the RawOutgoingAudioStreamDelegate and set it as the stream delegate.

    func rawOutgoingAudioStream(_ rawOutgoingAudioStream: RawOutgoingAudioStream,
                                didChangeState args: AudioStreamStateChangedEventArgs) {
        // When value is `AudioStreamState.started` we will be able to send audio samples.
    }

    self.rawOutgoingAudioStream.delegate = DelegateImplementer()

Or use the closure-based approach

    self.rawOutgoingAudioStream.events.onStateChanged = { args in
        // When value is `AudioStreamState.started` we will be able to send audio samples.
    }

When the stream starts, we can start sending AVAudioPCMBuffer audio samples to the call.

The audio buffer format should match the specified stream properties.

    protocol SamplesProducer {
        func produceSamples(_ currentSample: Int, 
                            options: RawOutgoingAudioStreamOptions) -> AVAudioPCMBuffer
    }

    // Let's use a simple Tone data producer as example.
    // Producing PCM buffers.
    func produceSamples(_ currentSample: Int,
                        stream: RawOutgoingAudioStream,
                        options: RawOutgoingAudioStreamOptions) -> AVAudioPCMBuffer {
        let sampleRate = options.properties.sampleRate
        let channelMode = options.properties.channelMode
        let bufferDuration = options.properties.bufferDuration
        let numberOfChunks = UInt32(1000 / bufferDuration.value)
        let bufferFrameSize = UInt32(sampleRate.valueInHz) / numberOfChunks
        let frequency = 400

        guard let format = AVAudioFormat(commonFormat: .pcmFormatInt16,
                                         sampleRate: sampleRate.valueInHz,
                                         channels: channelMode.channelCount,
                                         interleaved: channelMode == .stereo) else {
            fatalError("Failed to create PCM Format")
        }

        guard let buffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: bufferFrameSize) else {
            fatalError("Failed to create PCM buffer")
        }

        buffer.frameLength = bufferFrameSize

        let factor: Double = ((2 as Double) * Double.pi) / (sampleRate.valueInHz/Double(frequency))
        var interval = 0
        for sampleIdx in 0..<Int(buffer.frameCapacity * channelMode.channelCount) {
            let sample = sin(factor * Double(currentSample + interval))
            // Scale to maximum amplitude. Int16.max is 32,767.
            let value = Int16(sample * Double(Int16.max))
            
            guard let underlyingByteBuffer = buffer.mutableAudioBufferList.pointee.mBuffers.mData else {
                continue
            }
            underlyingByteBuffer.assumingMemoryBound(to: Int16.self).advanced(by: sampleIdx).pointee = value
            interval += channelMode == .mono ? 2 : 1
        }

        return buffer
    }

    final class RawOutgoingAudioSender {
        let stream: RawOutgoingAudioStream
        let options: RawOutgoingAudioStreamOptions
        let producer: SamplesProducer

        private var timer: Timer?
        private var currentSample: Int = 0
        private var currentTimestamp: Int64 = 0

        init(stream: RawOutgoingAudioStream,
             options: RawOutgoingAudioStreamOptions,
             producer: SamplesProducer) {
            self.stream = stream
            self.options = options
            self.producer = producer
        }

        func start() {
            let properties = self.options.properties
            let interval = properties.bufferDuration.timeInterval

            let channelCount = AVAudioChannelCount(properties.channelMode.channelCount)
            let format = AVAudioFormat(commonFormat: .pcmFormatInt16,
                                       sampleRate: properties.sampleRate.valueInHz,
                                       channels: channelCount,
                                       interleaved: channelCount > 1)!
            self.timer = Timer.scheduledTimer(withTimeInterval: interval, repeats: true) { [weak self] _ in
                guard let self = self else { return }
                let sample = self.producer.produceSamples(self.currentSample, options: self.options)
                let rawBuffer = RawAudioBuffer()
                rawBuffer.buffer = sample
                rawBuffer.timestampInTicks = self.currentTimestamp
                self.stream.send(buffer: rawBuffer, completionHandler: { error in
                    if let error = error {
                        // Handle possible error.
                    }
                })

                self.currentTimestamp += Int64(properties.bufferDuration.value)
                self.currentSample += 1
            }
        }

        func stop() {
            self.timer?.invalidate()
            self.timer = nil
        }

        deinit {
            stop()
        }
    }

It's important to remember to stop the audio stream in the current call Call instance:


    call.stopAudio(stream: self.rawOutgoingAudioStream) { error in 
        // Stream detached from `Call` and stopped.
    }

Capture microphone samples

Using Apple's AVAudioEngine, we can capture microphone frames by tapping into the audio engine's input node. Capturing the microphone data, combined with the raw audio functionality, lets us process the audio before sending it to a call.

    import AVFoundation
    import AzureCommunicationCalling

    enum MicrophoneSenderError: Error {
        case notMatchingFormat
    }

    final class MicrophoneDataSender {
        private let stream: RawOutgoingAudioStream
        private let properties: RawOutgoingAudioStreamProperties
        private let format: AVAudioFormat
        private let audioEngine: AVAudioEngine = AVAudioEngine()

        init(properties: RawOutgoingAudioStreamProperties) throws {
            // This can be different depending on which device we are running or value set for
            // `try AVAudioSession.sharedInstance().setPreferredSampleRate(...)`.
            let nodeFormat = self.audioEngine.inputNode.outputFormat(forBus: 0)
            let matchingSampleRate = AudioSampleRate.allCases.first(where: { $0.valueInHz == nodeFormat.sampleRate })
            guard let inputNodeSampleRate = matchingSampleRate else {
                throw MicrophoneSenderError.notMatchingFormat
            }

            // Override the sample rate to one that matches audio session (Audio engine input node frequency).
            properties.sampleRate = inputNodeSampleRate

            let options = RawOutgoingAudioStreamOptions()
            options.properties = properties

            self.stream = RawOutgoingAudioStream(rawOutgoingAudioStreamOptions: options)
            let channelCount = AVAudioChannelCount(properties.channelMode.channelCount)
            self.format = AVAudioFormat(commonFormat: .pcmFormatInt16,
                                        sampleRate: properties.sampleRate.valueInHz,
                                        channels: channelCount,
                                        interleaved: channelCount > 1)!
            self.properties = properties
        }

        func start() throws {
            guard !self.audioEngine.isRunning else {
                return
            }

            // Install tap documentations states that we can get between 100 and 400 ms of data.
            let framesFor100ms = AVAudioFrameCount(self.format.sampleRate * 0.1)

            // Note that some formats may not be allowed by `installTap`, so we have to specify the 
            // correct properties.
            self.audioEngine.inputNode.installTap(onBus: 0, bufferSize: framesFor100ms, 
                                                  format: self.format) { [weak self] buffer, _ in
                guard let self = self else { return }
                
                let rawBuffer = RawAudioBuffer()
                rawBuffer.buffer = buffer
                // Although we specified either 10ms or 20ms, we allow sending up to 100ms of data
                // as long as it can be evenly divided by the specified size.
                self.stream.send(buffer: rawBuffer) { error in
                    if let error = error {
                        // Handle error
                    }
                }
            }

            try audioEngine.start()
        }

        func stop() {
            audioEngine.stop()
        }
    }

Note

The sample rate of the audio engine input node defaults to the preferred sample rate of the shared audio session, so we can't install a tap on that node with a different value. Therefore, we have to make sure that the RawOutgoingStream properties sample rate matches the one we get when tapping into the microphone samples, or convert the tap buffers to the format expected by the outgoing stream.

With this small sample, we learned how to capture the microphone data with AVAudioEngine and send those samples to a call by using the raw outgoing audio feature.

Receive raw incoming audio

We can also receive the call's audio stream samples as AVAudioPCMBuffer if we want to process the audio before playback.

Create a RawIncomingAudioStreamOptions object specifying the raw stream properties that we want to receive.

    let options = RawIncomingAudioStreamOptions()
    let properties = RawIncomingAudioStreamProperties()
    properties.format = .pcm16Bit
    properties.sampleRate = .hz44100
    properties.channelMode = .stereo
    options.properties = properties

Create a RawIncomingAudioStream and attach it to the join call options

    let joinCallOptions = JoinCallOptions() // or StartCallOptions()
    let incomingAudioOptions = IncomingAudioOptions()

    self.rawIncomingStream = RawIncomingAudioStream(rawIncomingAudioStreamOptions: options)
    incomingAudioOptions.stream = self.rawIncomingStream
    joinCallOptions.incomingAudioOptions = incomingAudioOptions

Or we can attach the stream to an existing Call instance instead:


    call.startAudio(stream: self.rawIncomingStream) { error in 
        // Stream attached to `Call`.
    }

To start receiving raw audio buffers from the incoming stream, implement the RawIncomingAudioStreamDelegate

    class RawIncomingReceiver: NSObject, RawIncomingAudioStreamDelegate {
        func rawIncomingAudioStream(_ rawIncomingAudioStream: RawIncomingAudioStream,
                                    didChangeState args: AudioStreamStateChangedEventArgs) {
            // To be notified when stream started and stopped.
        }
        
        func rawIncomingAudioStream(_ rawIncomingAudioStream: RawIncomingAudioStream,
                                    mixedAudioBufferReceived args: IncomingMixedAudioEventArgs) {
            // Receive raw audio buffers(AVAudioPCMBuffer) and process using AVAudioEngine API's.
        }
    }

    self.rawIncomingStream.delegate = RawIncomingReceiver()

    rawIncomingAudioStream.events.mixedAudioBufferReceived = { args in
        // Receive raw audio buffers(AVAudioPCMBuffer) and process them using AVAudioEngine API's.
    }

    rawIncomingAudioStream.events.onStateChanged = { args in
        // To be notified when stream started and stopped.
    }

Raw video access

Because the app generates the video frames, the app must inform the Azure Communication Services Calling SDK about the video formats that the app can generate. The SDK uses this information to pick the best video format configuration for the network conditions at that time.

Virtual video

Supported video resolutions

Aspect ratio    Resolution         Maximum FPS
16x9            1080p              30
16x9            720p               30
16x9            540p               30
16x9            480p               30
16x9            360p               30
16x9            270p               15
16x9            240p               15
16x9            180p               15
4x3             VGA (640x480)      30
4x3             424x320            15
4x3             QVGA (320x240)     15
4x3             212x160            15
  1. Create an array of VideoStreamFormat by using the VideoStreamPixelFormat values that the SDK supports. When multiple formats are available, the order of the formats in the list doesn't influence or prioritize which one is used. The criteria for format selection are based on external factors like network bandwidth.

    var videoStreamFormat = VideoStreamFormat()
    videoStreamFormat.resolution = VideoStreamResolution.p360
    videoStreamFormat.pixelFormat = VideoStreamPixelFormat.nv12
    videoStreamFormat.framesPerSecond = framerate
    videoStreamFormat.stride1 = w // w is the resolution width
    videoStreamFormat.stride2 = w / 2 // w is the resolution width
    
    var videoStreamFormats: [VideoStreamFormat] = [VideoStreamFormat]()
    videoStreamFormats.append(videoStreamFormat)
    
  2. Create RawOutgoingVideoStreamOptions and set Formats with the previously created object.

    var rawOutgoingVideoStreamOptions = RawOutgoingVideoStreamOptions()
    rawOutgoingVideoStreamOptions.formats = videoStreamFormats
    
  3. Create an instance of VirtualOutgoingVideoStream by using the RawOutgoingVideoStreamOptions instance that you created previously.

    var rawOutgoingVideoStream = VirtualOutgoingVideoStream(videoStreamOptions: rawOutgoingVideoStreamOptions)
    
  4. Implement the VirtualOutgoingVideoStreamDelegate delegate. The didChangeFormat event notifies whenever the VideoStreamFormat changes from one of the video formats provided in the list to another.

    virtualOutgoingVideoStream.delegate = /* Attach delegate and implement didChangeFormat */
    
  5. Create an instance of the following helper class to access the CVPixelBuffer data.

    final class BufferExtensions: NSObject {
        public static func getArrayBuffersUnsafe(cvPixelBuffer: CVPixelBuffer) -> Array<UnsafeMutableRawPointer?>
        {
            var bufferArrayList: Array<UnsafeMutableRawPointer?> = [UnsafeMutableRawPointer?]()
    
            let cvStatus: CVReturn = CVPixelBufferLockBaseAddress(cvPixelBuffer, .readOnly)
    
            if cvStatus == kCVReturnSuccess {
                let bufferListSize = CVPixelBufferGetPlaneCount(cvPixelBuffer)
                for i in 0..<bufferListSize {
                    let bufferRef = CVPixelBufferGetBaseAddressOfPlane(cvPixelBuffer, i)
                    bufferArrayList.append(bufferRef)
                }
            }
    
            return bufferArrayList
        }
    }
    
  6. Create an instance of the following helper class to generate random RawVideoFrameBuffer objects that use VideoStreamPixelFormat.nv12.

    final class VideoFrameSender : NSObject
    {
        private var rawOutgoingVideoStream: RawOutgoingVideoStream
        private var frameIteratorThread: Thread?
        private var stopFrameIterator: Bool = false

        public init(rawOutgoingVideoStream: RawOutgoingVideoStream)
        {
            self.rawOutgoingVideoStream = rawOutgoingVideoStream
        }
    
        @objc private func VideoFrameIterator()
        {
            while !stopFrameIterator {
                if rawOutgoingVideoStream.format != nil &&
                   rawOutgoingVideoStream.state == .started {
                    SendRandomVideoFrameNV12()
                }
           }
        }
    
        public func SendRandomVideoFrameNV12() -> Void
        {
            let videoFrameBuffer = GenerateRandomVideoFrameBuffer()
    
            rawOutgoingVideoStream.send(frame: videoFrameBuffer) { error in
                /*Handle error if non-nil*/
            }
    
            let rate = 1.0 / rawOutgoingVideoStream.format.framesPerSecond // Sleep long enough to roughly match the configured frame rate
            let second: Float = 1000000
            usleep(useconds_t(rate * second))
        }
    
        private func GenerateRandomVideoFrameBuffer() -> RawVideoFrame
        {
            var cvPixelBuffer: CVPixelBuffer? = nil
            guard CVPixelBufferCreate(kCFAllocatorDefault,
                                    rawOutgoingVideoStream.format.width,
                                    rawOutgoingVideoStream.format.height,
                                    kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
                                    nil,
                                    &cvPixelBuffer) == kCVReturnSuccess else {
                fatalError()
            }
    
            GenerateRandomVideoFrameNV12(cvPixelBuffer: cvPixelBuffer!)
    
            CVPixelBufferUnlockBaseAddress(cvPixelBuffer!, .readOnly)
    
            let videoFrameBuffer = RawVideoFrameBuffer()
            videoFrameBuffer.buffer = cvPixelBuffer!
            videoFrameBuffer.streamFormat = rawOutgoingVideoStream.format
    
            return videoFrameBuffer
        }
    
        private func GenerateRandomVideoFrameNV12(cvPixelBuffer: CVPixelBuffer) {
            let w = rawOutgoingVideoStream.format.width
            let h = rawOutgoingVideoStream.format.height

            let bufferArrayList = BufferExtensions.getArrayBuffersUnsafe(cvPixelBuffer: cvPixelBuffer)

            guard bufferArrayList.count >= 2, let yArrayBuffer = bufferArrayList[0], let uvArrayBuffer = bufferArrayList[1] else {
                return
            }

            // NV12 planes store one byte per sample, so write UInt8 values and stay within the plane bounds.
            let yVal = UInt8.random(in: 1..<255)
            let uvVal = UInt8.random(in: 1..<255)

            for y in 0..<h
            {
                for x in 0..<w
                {
                    yArrayBuffer.storeBytes(of: yVal, toByteOffset: Int((y * w) + x), as: UInt8.self)
                }
            }

            // The interleaved UV plane is half the height of the Y plane but has the same stride in bytes.
            for y in 0..<(h / 2)
            {
                for x in 0..<w
                {
                    uvArrayBuffer.storeBytes(of: uvVal, toByteOffset: Int((y * w) + x), as: UInt8.self)
                }
            }
        }
    
        public func Start() {
            stopFrameIterator = false
            frameIteratorThread = Thread(target: self, selector: #selector(VideoFrameIterator), object: "VideoFrameSender")
            frameIteratorThread?.start()
        }
    
        public func Stop() {
            if frameIteratorThread != nil {
                stopFrameIterator = true
                frameIteratorThread?.cancel()
                frameIteratorThread = nil
            }
        }
    }
    
  7. Implement the VirtualOutgoingVideoStreamDelegate. The didChangeState event reports the current state of the stream. Don't send frames if the state isn't equal to VideoStreamState.started.

    /* Delegate implementer */
    private var videoFrameSender: VideoFrameSender?
    func virtualOutgoingVideoStream(
        _ virtualOutgoingVideoStream: VirtualOutgoingVideoStream,
        didChangeState args: VideoStreamStateChangedEventArgs) {
        switch args.stream.state {
            case .available:
                videoFrameSender = VideoFrameSender(rawOutgoingVideoStream: rawOutgoingVideoStream)
                break
            case .started:
                /* Start sending frames */
                videoFrameSender?.Start()
                break
            case .stopped:
                /* Stop sending frames */
                videoFrameSender?.Stop()
                break
            default:
                break
        }
    }
    

ScreenShare video

Because the operating system generates the frames, you must implement your own foreground service to capture the frames and send them by using the Azure Communication Services Calling API.

Supported video resolutions

Aspect ratio   Resolution              Maximum FPS
Anything       Anything up to 1080p    30

Steps to create a screen-share video stream:

  1. Create an array of VideoStreamFormat by using the VideoStreamPixelFormat values that the SDK supports. When multiple formats are provided, the order of the formats in the list doesn't influence or prioritize which one is used. The criteria for format selection are based on external factors such as network bandwidth.

    let videoStreamFormat = VideoStreamFormat()
    videoStreamFormat.width = 1280 /* Width and height can be used for ScreenShareOutgoingVideoStream for custom resolutions or use one of the predefined values inside VideoStreamResolution */
    videoStreamFormat.height = 720
    /*videoStreamFormat.resolution = VideoStreamResolution.p360*/
    videoStreamFormat.pixelFormat = VideoStreamPixelFormat.rgba
    videoStreamFormat.framesPerSecond = framerate
    videoStreamFormat.stride1 = 1280 * 4 /* Stride is the width (1280 here) times 4 because RGBA is a 32-bit format */
    
    var videoStreamFormats: [VideoStreamFormat] = []
    videoStreamFormats.append(videoStreamFormat)
    
  2. Create RawOutgoingVideoStreamOptions and set Formats with the previously created object.

    var rawOutgoingVideoStreamOptions = RawOutgoingVideoStreamOptions()
    rawOutgoingVideoStreamOptions.formats = videoStreamFormats
    
  3. Create an instance of ScreenShareOutgoingVideoStream by using the RawOutgoingVideoStreamOptions instance that you created previously.

    var rawOutgoingVideoStream = ScreenShareOutgoingVideoStream(videoStreamOptions: rawOutgoingVideoStreamOptions)
    
  4. Capture and send the video frames in the following way.

    private func SendRawVideoFrame() -> Void
    {
        let cvPixelBuffer: CVPixelBuffer = /* Fill it with the frame content captured from the OS screen-capture APIs. The number of buffers depends on the VideoStreamPixelFormat. */
        let videoFrameBuffer = RawVideoFrameBuffer()
        videoFrameBuffer.buffer = cvPixelBuffer
        videoFrameBuffer.streamFormat = rawOutgoingVideoStream.format

        rawOutgoingVideoStream.send(frame: videoFrameBuffer) { error in
            /* Handle error if not nil */
        }
    }
    

Raw incoming video

This feature gives you access to the video frames inside the IncomingVideoStream objects so that you can manipulate those stream objects locally.

  1. Create an instance of IncomingVideoOptions that is set through JoinCallOptions and that specifies VideoStreamKind.rawIncoming.

    var incomingVideoOptions = IncomingVideoOptions()
    incomingVideoOptions.streamType = VideoStreamKind.rawIncoming
    var joinCallOptions = JoinCallOptions()
    joinCallOptions.incomingVideoOptions = incomingVideoOptions
    
  2. After you receive a ParticipantsUpdatedEventArgs event, attach the RemoteParticipant.delegate.didChangedVideoStreamState delegate. This event reports the state of the IncomingVideoStream objects.

    private var remoteParticipantList: [RemoteParticipant] = []
    
    func call(_ call: Call, didUpdateRemoteParticipant args: ParticipantsUpdatedEventArgs) {
        args.addedParticipants.forEach { remoteParticipant in
            remoteParticipant.incomingVideoStreams.forEach { incomingVideoStream in
                OnRawIncomingVideoStreamStateChanged(rawIncomingVideoStream: incomingVideoStream as! RawIncomingVideoStream)
            }
            remoteParticipant.delegate = /* Attach delegate OnVideoStreamStateChanged*/
        }
    
        args.removedParticipants.forEach { remoteParticipant in
            remoteParticipant.delegate = nil
        }
    }
    
    func remoteParticipant(_ remoteParticipant: RemoteParticipant, 
                           didVideoStreamStateChanged args: VideoStreamStateChangedEventArgs) {
        OnRawIncomingVideoStreamStateChanged(rawIncomingVideoStream: args.stream as! RawIncomingVideoStream)
    }
    
    func OnRawIncomingVideoStreamStateChanged(rawIncomingVideoStream: RawIncomingVideoStream) {
        switch rawIncomingVideoStream.state {
            case .available:
                /* There is a new IncomingVideoStream */
                rawIncomingVideoStream.delegate = /* Attach delegate OnVideoFrameReceived */
                rawIncomingVideoStream.start()
                break;
            case .started:
                /* Will start receiving video frames */
                break
            case .stopped:
                /* Will stop receiving video frames */
                break
            case .notAvailable:
                /* The IncomingVideoStream should not be used anymore */
                rawIncomingVideoStream.delegate = nil
                break
        }
    }
    
  3. When an IncomingVideoStream reaches the VideoStreamState.available state, attach the RawIncomingVideoStream.delegate.didReceivedRawVideoFrame delegate, as shown in the previous step. That event delivers the new RawVideoFrame objects.

    func rawIncomingVideoStream(_ rawIncomingVideoStream: RawIncomingVideoStream, 
                                didRawVideoFrameReceived args: RawVideoFrameReceivedEventArgs) {
        /* Render/Modify/Save the video frame */
        let videoFrame = args.frame as! RawVideoFrameBuffer
    }
    

As a developer, you can access the raw media for incoming and outgoing audio, video, and screen-sharing content during a call so that you can capture, analyze, and process it. Access to Azure Communication Services client-side raw audio, raw video, and raw screen share gives developers an almost unlimited ability to view and edit the audio, video, and screen-sharing content that travels through the Azure Communication Services Calling SDK. This quickstart shows you how to implement raw media access by using the Azure Communication Services Calling SDK for JavaScript.

For example:

  • You can access the call's audio and video streams directly on the call object, and send custom outgoing audio and video streams during the call.
  • You can inspect audio and video streams to run custom AI models for analysis. Such models might include natural language processing that analyzes conversations or that provides real-time insights and suggestions to boost agent productivity.
  • Organizations can use audio and video media streams to analyze sentiment when they're providing virtual care for patients, or to provide remote assistance during video calls that use mixed reality. This capability opens a path for developers to apply innovations that enhance interaction experiences.

Prerequisites

Important

The examples here are available in version 1.13.1 of the Calling SDK for JavaScript. Be sure to use that version or a later one when you try this quickstart.

Access raw audio

Access to raw audio media gives you access to the incoming call's audio stream, along with the ability to send custom outgoing audio streams during a call.

Access an incoming raw audio stream

Use the following code to access an incoming call's audio stream.

const userId = 'acs_user_id';
const call = callAgent.startCall(userId);
const callStateChangedHandler = async () => {
    if (call.state === "Connected") {
        const remoteAudioStream = call.remoteAudioStreams[0];
        const mediaStream = await remoteAudioStream.getMediaStream();
	// process the incoming call's audio media stream track
    }
};

callStateChangedHandler();
call.on("stateChanged", callStateChangedHandler);

Place a call with a custom audio stream

Use the following code to start a call with a custom audio stream instead of a user's microphone device.

const createBeepAudioStreamToSend = () => {
    const context = new AudioContext();
    const dest = context.createMediaStreamDestination();
    const os = context.createOscillator();
    os.type = 'sine';
    os.frequency.value = 500;
    os.connect(dest);
    os.start();
    const { stream } = dest;
    return stream;
};

...
const userId = 'acs_user_id';
const mediaStream = createBeepAudioStreamToSend();
const localAudioStream = new LocalAudioStream(mediaStream);
const callOptions = {
    audioOptions: {
        localAudioStreams: [localAudioStream]
    }
};
callAgent.startCall(userId, callOptions);

Switch to a custom audio stream during a call

Use the following code to switch an input device to a custom audio stream, instead of using a user's microphone device, during a call.

const createBeepAudioStreamToSend = () => {
    const context = new AudioContext();
    const dest = context.createMediaStreamDestination();
    const os = context.createOscillator();
    os.type = 'sine';
    os.frequency.value = 500;
    os.connect(dest);
    os.start();
    const { stream } = dest;
    return stream;
};

...

const userId = 'acs_user_id';
const mediaStream = createBeepAudioStreamToSend();
const localAudioStream = new LocalAudioStream(mediaStream);
const call = callAgent.startCall(userId);
const callStateChangedHandler = async () => {
    if (call.state === 'Connected') {
        await call.startAudio(localAudioStream);
    }
};

callStateChangedHandler();
call.on('stateChanged', callStateChangedHandler);

Stop a custom audio stream

Use the following code to stop sending a custom audio stream after it has been set during a call.

call.stopAudio();

Access raw video

Raw video media gives you an instance of a MediaStream object. (For more information, see the JavaScript documentation.) Raw video media gives access specifically to the MediaStream object for incoming and outgoing calls. For raw video, you can use that object to apply filters by using machine learning to process the frames of the video.

Processed raw outgoing video frames can be sent as the sender's outgoing video. Processed raw incoming video frames can be rendered on the receiver side.

Place a call with a custom video stream

You can access the raw video stream for an outgoing call. You can use MediaStream for the outgoing raw video stream to process frames by using machine learning and to apply filters. The processed outgoing video can then be sent as the sender's video stream.

This example sends canvas data to a user as outgoing video.

const createVideoMediaStreamToSend = () => {
    const canvas = document.createElement('canvas');
    const ctx = canvas.getContext('2d');
    canvas.width = 1500;
    canvas.height = 845;
    ctx.fillStyle = 'blue';
    ctx.fillRect(0, 0, canvas.width, canvas.height);

    const colors = ['red', 'yellow', 'green'];
    window.setInterval(() => {
        if (ctx) {
            ctx.fillStyle = colors[Math.floor(Math.random() * colors.length)];
            const x = Math.floor(Math.random() * canvas.width);
            const y = Math.floor(Math.random() * canvas.height);
            const size = 100;
            ctx.fillRect(x, y, size, size);
        }
    }, 1000 / 30);

    return canvas.captureStream(30);
};

...
const userId = 'acs_user_id';
const mediaStream = createVideoMediaStreamToSend();
const localVideoStream = new LocalVideoStream(mediaStream);
const callOptions = {
    videoOptions: {
        localVideoStreams: [localVideoStream]
    }
};
callAgent.startCall(userId, callOptions);

Switch to a custom video stream during a call

Use the following code to switch an input device to a custom video stream, instead of using a user's camera device, during a call.

const createVideoMediaStreamToSend = () => {
    const canvas = document.createElement('canvas');
    const ctx = canvas.getContext('2d');
    canvas.width = 1500;
    canvas.height = 845;
    ctx.fillStyle = 'blue';
    ctx.fillRect(0, 0, canvas.width, canvas.height);

    const colors = ['red', 'yellow', 'green'];
    window.setInterval(() => {
        if (ctx) {
            ctx.fillStyle = colors[Math.floor(Math.random() * colors.length)];
            const x = Math.floor(Math.random() * canvas.width);
            const y = Math.floor(Math.random() * canvas.height);
            const size = 100;
            ctx.fillRect(x, y, size, size);
	 }
    }, 1000 / 30);

    return canvas.captureStream(30);
};

...

const userId = 'acs_user_id';
const call = callAgent.startCall(userId);
const callStateChangedHandler = async () => {
    if (call.state === 'Connected') {    	
        const mediaStream = createVideoMediaStreamToSend();
        const localVideoStream = call.localVideoStreams.find((stream) => { return stream.mediaStreamType === 'Video' });
        await localVideoStream.setMediaStream(mediaStream);
    }
};

callStateChangedHandler();
call.on('stateChanged', callStateChangedHandler);

Stop a custom video stream

Use the following code to stop sending a custom video stream after it has been set during a call.

// Stop video by passing the same `localVideoStream` instance that was used to start video
await call.stopVideo(localVideoStream);

When you switch from a camera that has custom effects applied to another camera device, first stop the video, switch the source on LocalVideoStream, and then start the video again.

const cameras = await this.deviceManager.getCameras();
const newCameraDeviceInfo = cameras.find(cameraDeviceInfo => { return cameraDeviceInfo.id === '<another camera that you want to switch to>' });
// If current camera is using custom raw media stream and video is on
if (this.localVideoStream.mediaStreamType === 'RawMedia' && this.state.videoOn) {
	// Stop raw custom video first
	await this.call.stopVideo(this.localVideoStream);
	// Switch the local video stream's source to the new camera to use
	this.localVideoStream?.switchSource(newCameraDeviceInfo);
	// Start video with the new camera device
	await this.call.startVideo(this.localVideoStream);

// Else if current camera is using normal stream from camera device and video is on
} else if (this.localVideoStream.mediaStreamType === 'Video' && this.state.videoOn) {
	// You can just switch the source, no need to stop and start again. Sent video will automatically switch to the new camera to use
	this.localVideoStream?.switchSource(newCameraDeviceInfo);
}

Access an incoming video stream from a remote participant

You can access the raw video stream for an incoming call. You can use MediaStream for the incoming raw video stream to process frames by using machine learning and to apply filters. The processed incoming video can then be rendered on the receiver side.

const remoteVideoStream = remoteParticipants[0].videoStreams.find((stream) => { return stream.mediaStreamType === 'Video'});
const processMediaStream = async () => {
    if (remoteVideoStream.isAvailable) {
	// remote video stream is turned on, process the video's raw media stream.
	const mediaStream = await remoteVideoStream.getMediaStream();
    } else {
	// remote video stream is turned off, handle it
    }
};

remoteVideoStream.on('isAvailableChanged', async () => {
    await processMediaStream();
});

await processMediaStream();
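
As a minimal sketch, the following helper renders the incoming raw MediaStream in a video element. The helper name is an illustrative assumption; attaching a MediaStream to a video element through srcObject is standard web platform behavior rather than a Calling SDK API.

const renderIncomingVideoMediaStream = (mediaStream) => {
    // Plain DOM/HTMLMediaElement usage, independent of the Calling SDK
    const videoElement = document.createElement('video');
    videoElement.autoplay = true;
    videoElement.playsInline = true;
    videoElement.muted = true; // helps satisfy browser autoplay policies
    videoElement.srcObject = mediaStream;
    document.body.appendChild(videoElement);
    return videoElement;
};

For example, you could call renderIncomingVideoMediaStream(mediaStream) inside processMediaStream after getMediaStream() resolves.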

Important

This feature of Azure Communication Services is currently in preview.

Preview APIs and SDKs are provided without a service-level agreement. We recommend that you don't use them for production workloads. Some features might not be supported, or they might have constrained capabilities.

For more information, review Supplemental Terms of Use for Microsoft Azure Previews.

Raw screen-share access is in public preview and is available as part of version 1.15.1-beta.1+.

Access raw screen sharing

Raw screen-share media gives you access to the MediaStream object for incoming and outgoing screen-share streams. For raw screen sharing, you can use that object to apply filters by using machine learning to process the frames of the screen share.

Processed raw screen-share frames can be sent as the sender's outgoing screen share. Processed raw incoming screen-share frames can be rendered on the receiver side.

Note: Sending screen share is supported only on desktop browsers. If you need a quick runtime check, see the sketch that follows.
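
A hedged way to approximate that check at runtime is to test whether the browser exposes the standard screen-capture API. The helper below is a heuristic built on web platform APIs only; it isn't a Calling SDK API and doesn't replace the SDK's own capability checks.

// Heuristic only: desktop browsers that can capture a screen expose getDisplayMedia.
// Mobile browsers generally don't, which lines up with the note above.
const canLikelySendScreenShare = () =>
    typeof navigator !== 'undefined' &&
    !!navigator.mediaDevices &&
    typeof navigator.mediaDevices.getDisplayMedia === 'function';

if (!canLikelySendScreenShare()) {
    console.warn('Screen sharing is unlikely to be supported in this browser.');
}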

Start screen sharing with a custom screen-share stream

const createVideoMediaStreamToSend = () => {
    const canvas = document.createElement('canvas');
    const ctx = canvas.getContext('2d');
    canvas.width = 1500;
    canvas.height = 845;
    ctx.fillStyle = 'blue';
    ctx.fillRect(0, 0, canvas.width, canvas.height);

    const colors = ['red', 'yellow', 'green'];
    window.setInterval(() => {
        if (ctx) {
            ctx.fillStyle = colors[Math.floor(Math.random() * colors.length)];
            const x = Math.floor(Math.random() * canvas.width);
            const y = Math.floor(Math.random() * canvas.height);
            const size = 100;
            ctx.fillRect(x, y, size, size);
        }
    }, 1000 / 30);

    return canvas.captureStream(30);
};

...
const mediaStream = createVideoMediaStreamToSend();
const localScreenSharingStream = new LocalVideoStream(mediaStream);
// Will start screen sharing with custom raw media stream
await call.startScreenSharing(localScreenSharingStream);
console.log(localScreenSharingStream.mediaStreamType) // 'RawMedia'

Access the raw screen-share stream from a screen, browser tab, or app, and apply effects to the stream

The following example shows how to apply a black-and-white effect to the raw screen-share stream from a screen, browser tab, or app. Note: The canvas context filter = "grayscale(1)" API isn't supported on Safari.

let bwTimeout;
let bwVideoElem;

const applyBlackAndWhiteEffect = function (stream) {
	let width = 1280, height = 720;
	bwVideoElem = document.createElement("video");
	bwVideoElem.srcObject = stream;
	bwVideoElem.height = height;
	bwVideoElem.width = width;
	bwVideoElem.play();
	const canvas = document.createElement('canvas');
	const bwCtx = canvas.getContext('2d', { willReadFrequently: true });
	canvas.width = width;
	canvas.height = height;
	
	const FPS = 30;
	const processVideo = function () {
	    try {
		let begin = Date.now();
		// start processing.
		// NOTE: The Canvas context filter API is not supported in Safari
		bwCtx.filter = "grayscale(1)";
		bwCtx.drawImage(bwVideoElem, 0, 0, width, height);
		const imageData = bwCtx.getImageData(0, 0, width, height);
		bwCtx.putImageData(imageData, 0, 0);
		// schedule the next one.
		let delay = Math.abs(1000/FPS - (Date.now() - begin));
		bwTimeout = setTimeout(processVideo, delay);
	    } catch (err) {
		console.error(err);
	    }
	}
	
	// schedule the first one.
	bwTimeout = setTimeout(processVideo, 0);
	return canvas.captureStream(FPS);
}

// Call startScreenSharing API without passing any stream parameter. Browser will prompt the user to select the screen, browser tab, or app to share in the call.
await call.startScreenSharing();
const localScreenSharingStream = call.localVideoStreams.find( (stream) => { return stream.mediaStreamType === 'ScreenSharing' });
console.log(localScreenSharingStream.mediaStreamType); // 'ScreenSharing'
// Get the raw media stream from the screen, browser tab, or application
const rawMediaStream = await localScreenSharingStream.getMediaStream();
// Apply effects to the media stream as you wish
const blackAndWhiteMediaStream = applyBlackAndWhiteEffect(rawMediaStream);
// Set the media stream with effects on the local screen sharing stream
await localScreenSharingStream.setMediaStream(blackAndWhiteMediaStream);

// Stop screen sharing and clean up the black and white video filter
await call.stopScreenSharing();
clearTimeout(bwTimeout);
bwVideoElem.srcObject.getVideoTracks().forEach((track) => { track.stop(); });
bwVideoElem.srcObject = null;

Stop sending a screen-share stream

Use the following code to stop sending a custom screen-share stream after it has been set during a call.

// Stop sending raw screen sharing stream
await call.stopScreenSharing(localScreenSharingStream);

Access an incoming screen-share stream from a remote participant

You can access the raw screen-share stream from a remote participant. You can use MediaStream for the incoming raw screen-share stream to process frames by using machine learning and to apply filters. The processed incoming screen-share stream can then be rendered on the receiver side.

const remoteScreenSharingStream = remoteParticipants[0].videoStreams.find((stream) => { return stream.mediaStreamType === 'ScreenSharing'});
const processMediaStream = async () => {
    if (remoteScreenSharingStream.isAvailable) {
	// remote screen sharing stream is turned on, process the stream's raw media stream.
	const mediaStream = await remoteScreenSharingStream.getMediaStream();
    } else {
	// remote screen sharing stream is turned off, handle it
    }
};

remoteScreenSharingStream.on('isAvailableChanged', async () => {
    await processMediaStream();
});

await processMediaStream();
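
As a hedged sketch of processing those frames, the following helper grabs a single snapshot of the incoming screen-share MediaStream by drawing it onto a canvas, which you could then feed into your own analysis pipeline. The helper name is an illustrative assumption, and everything here uses standard web APIs rather than Calling SDK APIs.

const captureScreenShareSnapshot = async (mediaStream) => {
    // Play the stream in an off-screen video element so a frame can be drawn from it
    const videoElement = document.createElement('video');
    videoElement.muted = true;
    videoElement.srcObject = mediaStream;
    await videoElement.play();

    const canvas = document.createElement('canvas');
    canvas.width = videoElement.videoWidth;
    canvas.height = videoElement.videoHeight;
    canvas.getContext('2d').drawImage(videoElement, 0, 0, canvas.width, canvas.height);

    // Return the frame as a data URL, for example to send to an analysis service
    return canvas.toDataURL('image/png');
};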

Next steps

For more information, see the following articles: