

NSDictionary *param = @{@"api" : @"enablePictureInPictureFloatingWindow",
                        @"params" : @{@"enable" : @(true)}};
NSError *err = nil;
NSData *jsonData = [NSJSONSerialization dataWithJSONObject:param options:0 error:&err];
if (err) {
    NSLog(@"error: %@", err);
}
NSString *paramJsonString = [[NSString alloc] initWithData:jsonData encoding:NSUTF8StringEncoding];
[self.trtcCloud callExperimentalAPI:paramJsonString];
let param: [String: Any] = ["api": "enablePictureInPictureFloatingWindow", "params": ["enable": true]]
if let jsonData = try? JSONSerialization.data(withJSONObject: param, options: .fragmentsAllowed) {
    let paramJsonString = String(data: jsonData, encoding: .utf8) ?? ""
    trtcCloud.callExperimentalAPI(paramJsonString)
}

import UIKit
import AVKit
import CoreFoundation
import TXLiteAVSDK_Professional

class PipVC: UIViewController {
    // TRTCCloud is a singleton; its plain initializer is unavailable.
    let trtcCloud = TRTCCloud.sharedInstance()
    var pipController: AVPictureInPictureController?
    var combinedPixelBuffer: CVPixelBuffer?
    let pixelBufferLock = DispatchQueue(label: "com.demo.pip")
    var pipDisplayLayer: AVSampleBufferDisplayLayer!
}
func enterTrtcRoom() {
    let params = TRTCParams()
    params.sdkAppId = UInt32(SDKAppID)
    params.roomId = UInt32(roomId)
    params.userId = userId
    params.role = .audience
    params.userSig = GenerateTestUserSig.genTestUserSig(identifier: userId) as String
    trtcCloud.addDelegate(self)
    trtcCloud.enterRoom(params, appScene: .LIVE)
}
func setupAudioSession() {
    do {
        try AVAudioSession.sharedInstance().setCategory(.playback)
    } catch let error {
        print("+> error: \(error)")
        return
    }
    do {
        try AVAudioSession.sharedInstance().setActive(true)
    } catch let error {
        print("+> error: \(error)")
        return
    }
}

func enableBGDecode() {
    let param: [String: Any] = ["api": "enableBackgroundDecoding", "params": ["enable": true]]
    if let jsonData = try? JSONSerialization.data(withJSONObject: param, options: .fragmentsAllowed) {
        let paramJsonString = String(data: jsonData, encoding: .utf8) ?? ""
        trtcCloud.callExperimentalAPI(paramJsonString)
    }
}
func setupPipController() {
    let screenWidth = UIScreen.main.bounds.width
    let videoHeight = screenWidth / 2 / 9 * 16

    pipDisplayLayer = AVSampleBufferDisplayLayer()
    pipDisplayLayer.frame = CGRect(x: 0, y: 0, width: screenWidth, height: videoHeight) // Adjust size as needed
    pipDisplayLayer.videoGravity = .resizeAspect
    pipDisplayLayer.isOpaque = true
    pipDisplayLayer.backgroundColor = CGColor(red: 0, green: 0, blue: 0, alpha: 1)
    view.layer.addSublayer(pipDisplayLayer)

    if AVPictureInPictureController.isPictureInPictureSupported() {
        let contentSource = AVPictureInPictureController.ContentSource(
            sampleBufferDisplayLayer: pipDisplayLayer,
            playbackDelegate: self)
        pipController = AVPictureInPictureController(contentSource: contentSource)
        pipController?.delegate = self
        pipController?.canStartPictureInPictureAutomaticallyFromInline = true
    } else {
        print("+> PiP not supported")
    }
}
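For reference, a minimal sketch of how the setup methods above might be wired together in viewDidLoad. The call order here is an assumption, not part of the original sample; adapt it to your app:

// Hypothetical wiring of the methods above.
override func viewDidLoad() {
    super.viewDidLoad()
    setupAudioSession()   // the .playback category is required for PIP
    enableBGDecode()      // keep decoding while the app is backgrounded
    setupPipController()  // create the display layer and PIP controller
    enterTrtcRoom()       // join the TRTC room as an audience member
}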
._NV12 relates to the method used in Step 6 (Concatenate left and right frames). Different pixel formats require different concatenation methods; this example only shows left-right concatenation in the ._NV12 format (a sketch for a packed format follows the concatenation code below).

extension PipVC: TRTCCloudDelegate {
    func onUserVideoAvailable(_ userId: String, available: Bool) {
        if available {
            trtcCloud.startRemoteView(userId, streamType: .big, view: nil)
            trtcCloud.setRemoteVideoRenderDelegate(userId, delegate: self, pixelFormat: ._NV12, bufferType: .pixelBuffer)
        } else {
            trtcCloud.stopRemoteView(userId, streamType: .big)
        }
    }
}
func createCombinedPixelBuffer(from sourceBuffer: CVPixelBuffer) {
    let width = CVPixelBufferGetWidth(sourceBuffer) * 2
    let height = CVPixelBufferGetHeight(sourceBuffer)
    let pixelFormat = CVPixelBufferGetPixelFormatType(sourceBuffer)
    let attributes: [CFString: Any] = [
        kCVPixelBufferWidthKey: width,
        kCVPixelBufferHeightKey: height,
        kCVPixelBufferPixelFormatTypeKey: pixelFormat,
        kCVPixelBufferIOSurfacePropertiesKey: [:]
    ]
    CVPixelBufferCreate(kCFAllocatorDefault, width, height, pixelFormat, attributes as CFDictionary, &combinedPixelBuffer)
}

func updateCombinedPixelBuffer(with sourceBuffer: CVPixelBuffer, forLeft: Bool) {
    guard let combinedBuffer = combinedPixelBuffer else { print("+> error"); return }
    CVPixelBufferLockBaseAddress(combinedBuffer, [])
    CVPixelBufferLockBaseAddress(sourceBuffer, [])

    // Plane 0: Y/luma plane
    let combinedLumaBaseAddress = CVPixelBufferGetBaseAddressOfPlane(combinedBuffer, 0)!
    let sourceLumaBaseAddress = CVPixelBufferGetBaseAddressOfPlane(sourceBuffer, 0)!
    let combinedLumaBytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(combinedBuffer, 0)
    let sourceLumaBytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(sourceBuffer, 0)
    let widthLuma = CVPixelBufferGetWidthOfPlane(sourceBuffer, 0)
    let heightLuma = CVPixelBufferGetHeightOfPlane(sourceBuffer, 0)

    // Plane 1: UV/chroma plane
    let combinedChromaBaseAddress = CVPixelBufferGetBaseAddressOfPlane(combinedBuffer, 1)!
    let sourceChromaBaseAddress = CVPixelBufferGetBaseAddressOfPlane(sourceBuffer, 1)!
    let combinedChromaBytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(combinedBuffer, 1)
    let sourceChromaBytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(sourceBuffer, 1)
    let widthChroma = CVPixelBufferGetWidthOfPlane(sourceBuffer, 1)
    let heightChroma = CVPixelBufferGetHeightOfPlane(sourceBuffer, 1)

    for row in 0..<heightLuma {
        let combinedRow = combinedLumaBaseAddress.advanced(by: row * combinedLumaBytesPerRow + (forLeft ? 0 : widthLuma))
        let sourceRow = sourceLumaBaseAddress.advanced(by: row * sourceLumaBytesPerRow)
        memcpy(combinedRow, sourceRow, widthLuma)
    }

    // In ._NV12 the chroma plane is subsampled 2:1 horizontally and vertically,
    // with interleaved Cb/Cr samples (2 bytes per chroma sample).
    for row in 0..<heightChroma {
        let combinedRow = combinedChromaBaseAddress.advanced(by: row * combinedChromaBytesPerRow + (forLeft ? 0 : 2 * widthChroma))
        let sourceRow = sourceChromaBaseAddress.advanced(by: row * sourceChromaBytesPerRow)
        memcpy(combinedRow, sourceRow, 2 * widthChroma)
    }

    CVPixelBufferUnlockBaseAddress(sourceBuffer, [])
    CVPixelBufferUnlockBaseAddress(combinedBuffer, [])
}
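As noted above, other pixel formats need different copy logic. For comparison, a sketch of the same left-right concatenation for a packed kCVPixelFormatType_32BGRA buffer: a single non-planar plane with 4 bytes per pixel, so one row-by-row copy suffices. updateCombinedBGRABuffer is a hypothetical helper, not part of the original sample, and assumes combinedPixelBuffer was created as a double-width BGRA buffer the same way createCombinedPixelBuffer does for NV12:

// Hypothetical sketch: left-right concatenation for a packed 32BGRA buffer.
func updateCombinedBGRABuffer(with sourceBuffer: CVPixelBuffer, forLeft: Bool) {
    guard let combinedBuffer = combinedPixelBuffer else { return }
    CVPixelBufferLockBaseAddress(combinedBuffer, [])
    CVPixelBufferLockBaseAddress(sourceBuffer, [])
    let combinedBase = CVPixelBufferGetBaseAddress(combinedBuffer)!
    let sourceBase = CVPixelBufferGetBaseAddress(sourceBuffer)!
    let combinedBytesPerRow = CVPixelBufferGetBytesPerRow(combinedBuffer)
    let sourceBytesPerRow = CVPixelBufferGetBytesPerRow(sourceBuffer)
    let width = CVPixelBufferGetWidth(sourceBuffer)
    let height = CVPixelBufferGetHeight(sourceBuffer)
    for row in 0..<height {
        // Offset the destination by width * 4 bytes when writing the right half.
        let dst = combinedBase.advanced(by: row * combinedBytesPerRow + (forLeft ? 0 : width * 4))
        let src = sourceBase.advanced(by: row * sourceBytesPerRow)
        memcpy(dst, src, width * 4) // 4 bytes per BGRA pixel
    }
    CVPixelBufferUnlockBaseAddress(sourceBuffer, [])
    CVPixelBufferUnlockBaseAddress(combinedBuffer, [])
}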
func displayPixelBuffer(_ pixelBuffer: CVPixelBuffer, in layer: AVSampleBufferDisplayLayer) {
    var timing = CMSampleTimingInfo(duration: .invalid,
                                    presentationTimeStamp: .invalid,
                                    decodeTimeStamp: .invalid)
    var videoInfo: CMVideoFormatDescription? = nil
    var result = CMVideoFormatDescriptionCreateForImageBuffer(allocator: nil,
                                                              imageBuffer: pixelBuffer,
                                                              formatDescriptionOut: &videoInfo)
    if result != 0 { return }
    guard let videoInfo = videoInfo else { return }

    var sampleBuffer: CMSampleBuffer? = nil
    result = CMSampleBufferCreateForImageBuffer(allocator: kCFAllocatorDefault,
                                                imageBuffer: pixelBuffer,
                                                dataReady: true,
                                                makeDataReadyCallback: nil,
                                                refcon: nil,
                                                formatDescription: videoInfo,
                                                sampleTiming: &timing,
                                                sampleBufferOut: &sampleBuffer)
    if result != 0 { return }
    guard let sampleBuffer = sampleBuffer else { return }

    // Mark the sample for immediate display, since no timestamps are attached.
    guard let attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, createIfNecessary: true) else { return }
    CFDictionarySetValue(unsafeBitCast(CFArrayGetValueAtIndex(attachments, 0), to: CFMutableDictionary.self),
                         Unmanaged.passUnretained(kCMSampleAttachmentKey_DisplayImmediately).toOpaque(),
                         Unmanaged.passUnretained(kCFBooleanTrue).toOpaque())

    layer.enqueue(sampleBuffer)
    if layer.status == .failed {
        if let error = layer.error as? NSError {
            if error.code == -11847 {
                print("+> error")
            }
        }
    }
}
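Error -11847 typically indicates the layer's media services were interrupted, for example after the app returns from the background. A common recovery pattern (shown here as an assumption, not part of the original sample) is to flush the layer so that subsequent enqueues can succeed again:

// Hypothetical recovery helper: flushing resets a failed
// AVSampleBufferDisplayLayer so new samples can be enqueued.
func recoverDisplayLayerIfNeeded(_ layer: AVSampleBufferDisplayLayer) {
    if layer.status == .failed {
        layer.flush()
    }
}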
extension PipVC: TRTCVideoRenderDelegate {
    func onRenderVideoFrame(_ frame: TRTCVideoFrame, userId: String?, streamType: TRTCVideoStreamType) {
        guard let newPixelBuffer = frame.pixelBuffer else { print("+> error"); return }
        pixelBufferLock.sync {
            if combinedPixelBuffer == nil {
                createCombinedPixelBuffer(from: newPixelBuffer)
            }
            // Copy the left anchor's frame into the left half, all others into the right half.
            if userId == "left" {
                updateCombinedPixelBuffer(with: newPixelBuffer, forLeft: true)
            } else {
                updateCombinedPixelBuffer(with: newPixelBuffer, forLeft: false)
            }
        }
        if let combinedBuffer = combinedPixelBuffer {
            DispatchQueue.main.async {
                self.displayPixelBuffer(combinedBuffer, in: self.pipDisplayLayer)
            }
        }
    }
}
extension PipVC: AVPictureInPictureControllerDelegate {
    func pictureInPictureControllerWillStartPictureInPicture(_ pictureInPictureController: AVPictureInPictureController) {}
    func pictureInPictureControllerDidStartPictureInPicture(_ pictureInPictureController: AVPictureInPictureController) {}
    func pictureInPictureControllerDidStopPictureInPicture(_ pictureInPictureController: AVPictureInPictureController) {}
    func pictureInPictureController(_ pictureInPictureController: AVPictureInPictureController, restoreUserInterfaceForPictureInPictureStopWithCompletionHandler completionHandler: @escaping (Bool) -> Void) {
        completionHandler(true)
    }
    func pictureInPictureController(_ pictureInPictureController: AVPictureInPictureController, failedToStartPictureInPictureWithError error: any Error) {}
}

extension PipVC: AVPictureInPictureSampleBufferPlaybackDelegate {
    func pictureInPictureControllerTimeRangeForPlayback(_ pictureInPictureController: AVPictureInPictureController) -> CMTimeRange {
        // Report an infinite range so the system treats the content as a live stream.
        return CMTimeRange(start: .zero, duration: .positiveInfinity)
    }
    func pictureInPictureControllerIsPlaybackPaused(_ pictureInPictureController: AVPictureInPictureController) -> Bool {
        return false
    }
    func pictureInPictureController(_ pictureInPictureController: AVPictureInPictureController, setPlaying playing: Bool) {}
    func pictureInPictureController(_ pictureInPictureController: AVPictureInPictureController, didTransitionToRenderSize newRenderSize: CMVideoDimensions) {}
    func pictureInPictureController(_ pictureInPictureController: AVPictureInPictureController, skipByInterval skipInterval: CMTime) async {}
}
// Disable PIP.
pipController?.stopPictureInPicture()

// Enable PIP.
pipController?.startPictureInPicture()
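startPictureInPicture() can fail if it is called before the controller is ready, for example before the first frame has been enqueued on the display layer. One common pattern, shown here as an assumption rather than part of the original sample, is to observe the KVO-observable isPictureInPicturePossible property and start PIP once it becomes true:

// Hypothetical readiness check; keep the observation alive as a property.
var pipPossibleObservation: NSKeyValueObservation?

func startPipWhenPossible() {
    pipPossibleObservation = pipController?.observe(\.isPictureInPicturePossible,
                                                    options: [.initial, .new]) { controller, change in
        if change.newValue == true {
            controller.startPictureInPicture()
        }
    }
}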
<activity
    android:name="com.tencent.trtc.pictureinpicture.PictureInPictureActivity"
    android:theme="@style/Theme.AppCompat.Light.NoActionBar"
    android:configChanges="screenSize|smallestScreenSize|screenLayout|orientation"
    android:supportsPictureInPicture="true" />
android:supportsPictureInPicture="true" declares that the activity supports PIP.

private void startPictureInPicture() {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
        PictureInPictureParams.Builder pictureInPictureBuilder = new PictureInPictureParams.Builder();
        Rational aspectRatio = new Rational(mVideoView.getWidth(), mVideoView.getHeight());
        pictureInPictureBuilder.setAspectRatio(aspectRatio);
        // Enter the PIP mode.
        enterPictureInPictureMode(pictureInPictureBuilder.build());
    } else {
        Toast.makeText(this, R.string.picture_in_picture_not_supported, Toast.LENGTH_SHORT).show();
    }
}
pictureInPictureBuilder.setAspectRatio(aspectRatio); sets the aspect ratio of the PIP window; here it is set to the aspect ratio of the video playback view. enterPictureInPictureMode(pictureInPictureBuilder.build()); enters the PIP mode.

@Override
public void onPictureInPictureModeChanged(boolean isInPictureInPictureMode, Configuration configuration) {
    super.onPictureInPictureModeChanged(isInPictureInPictureMode, configuration);
    if (isInPictureInPictureMode) {
        // Hide unneeded views when entering the PIP mode.
    } else {
        // Restore the views after exiting the PIP mode.
    }
}

// mTRTCCloud renders remote user A into the left video view (TXCloudVideoView), with TRTC_VIDEO_RENDER_MODE_FIT set.
TRTCCloudDef.TRTCRenderParams param = new TRTCCloudDef.TRTCRenderParams();
param.fillMode = TRTCCloudDef.TRTC_VIDEO_RENDER_MODE_FIT;
mTRTCCloud.setRemoteRenderParams(remoteUserIdA, TRTCCloudDef.TRTC_VIDEO_STREAM_TYPE_BIG, param);
mTRTCCloud.startRemoteView(remoteUserIdA, TRTCCloudDef.TRTC_VIDEO_STREAM_TYPE_BIG, mTXCloudRemoteView);

// Render remote user B into the right video view (TXCloudVideoView).
mTRTCCloud.startRemoteView(remoteUserIdB, TRTCCloudDef.TRTC_VIDEO_STREAM_TYPE_BIG, mTXCloudRemoteView2);
<com.tencent.rtmp.ui.TXCloudVideoView
    android:id="@+id/video_view"
    android:layout_width="192dp"
    android:layout_height="108dp"
    android:layout_alignParentStart="true"
    android:background="#00BCD4" />

<com.tencent.rtmp.ui.TXCloudVideoView
    android:id="@+id/video_view2"
    android:layout_width="192dp"
    android:layout_height="108dp"
    android:layout_alignTop="@+id/video_view"
    android:layout_toEndOf="@+id/video_view"
    android:background="#3F51B5" />
@Override
public void onPictureInPictureModeChanged(boolean isInPictureInPictureMode, Configuration configuration) {
    super.onPictureInPictureModeChanged(isInPictureInPictureMode, configuration);
    if (isInPictureInPictureMode) {
        // Shrink mVideoView to 100dp wide while in the PIP mode.
        RelativeLayout.LayoutParams layoutParams = (RelativeLayout.LayoutParams) mVideoView.getLayoutParams();
        layoutParams.width = (int) TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, 100, getResources().getDisplayMetrics());
        mVideoView.setLayoutParams(layoutParams);
    } else {
        // Restore the 192dp width of video_view when exiting the PIP mode.
        RelativeLayout.LayoutParams layoutParams = (RelativeLayout.LayoutParams) mVideoView.getLayoutParams();
        layoutParams.width = (int) TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, 192, getResources().getDisplayMetrics());
        mVideoView.setLayoutParams(layoutParams);
    }
}
trtcCloud.callExperimentalAPI(jsonEncode({
  "api": "enablePictureInPictureFloatingWindow",
  "params": {"enable": true}
}));
var pipCode = await _livePlayer!.enablePictureInPicture(true);
if (pipCode != V2TXLIVE_OK) {
  print("error: $pipCode");
}
final channel = MethodChannel('flutter_ios_pip_demo');
await channel.invokeMethod('enablePip', {
  'marginTop': appBarHeight + topSafeAreaHeight,
  'pkLeft': pkLeftUserId,
  'pkRight': pkRightUserId,
});
var channel: FlutterMethodChannel?
let pipListener = PipRender()

guard let controller = window?.rootViewController as? FlutterViewController else {
    fatalError("Invalid root view controller")
}
channel = FlutterMethodChannel(name: "flutter_ios_pip_demo", binaryMessenger: controller.binaryMessenger)
channel?.setMethodCallHandler({ [weak self] call, result in
    guard let self = self else { return }
    switch call.method {
    case "enablePip":
        if let arg = call.arguments as? [String: Any] {
            let marginTop = arg["marginTop"] as? CGFloat ?? 0
            let pkLeft = arg["pkLeft"] as? String ?? ""
            let pkRight = arg["pkRight"] as? String ?? ""
            self.pipListener.enablePip(mainView: controller.view, mt: marginTop, pkLeft: pkLeft, pkRight: pkRight)
        }
        result(nil)
    case "disablePip":
        self.pipListener.disablePip()
        result(nil)
    default:
        break
    }
})
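For context, this registration typically runs in the Flutter app delegate after the engine has attached its root view controller. A minimal sketch of the placement, where setupPipMethodChannel is a hypothetical helper wrapping the channel setup shown above:

// Hypothetical placement inside a FlutterAppDelegate subclass.
override func application(_ application: UIApplication,
                          didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]?) -> Bool {
    GeneratedPluginRegistrant.register(with: self)
    setupPipMethodChannel() // wraps the channel setup shown above
    return super.application(application, didFinishLaunchingWithOptions: launchOptions)
}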
import UIKit
import AVKit
import TXLiteAVSDK_Professional

class PipRender: NSObject {
    // For other variables, see the Calling System APIs for Implementation section for the native iOS end.
    var mainView: UIView?
    var mt: CGFloat?
    // Since trtcCloud is a singleton, you can obtain it this way in your code.
    let trtcCloud = TRTCCloud.sharedInstance()

    func disablePip() {
        pipDisplayLayer?.removeFromSuperlayer()
        pipController?.stopPictureInPicture()
    }

    func enablePip(mainView: UIView, mt: CGFloat, pkLeft: String, pkRight: String) {
        self.mainView = mainView
        self.mt = mt
        trtcCloud.addDelegate(self)
        enableBGDecode()
        setupAudioSession()
        setupPipController()
        pipController?.startPictureInPicture()
        if pkLeft.count > 0 {
            trtcCloud.startRemoteView(pkLeft, streamType: .big, view: nil)
            trtcCloud.setRemoteVideoRenderDelegate(pkLeft, delegate: self, pixelFormat: ._NV12, bufferType: .pixelBuffer)
        }
        if pkRight.count > 0 {
            trtcCloud.startRemoteView(pkRight, streamType: .big, view: nil)
            trtcCloud.setRemoteVideoRenderDelegate(pkRight, delegate: self, pixelFormat: ._NV12, bufferType: .pixelBuffer)
        }
    }

    // In this method, adjust the PIP display position according to business needs so that it matches the display position in Flutter.
    func setupPipController() {
        let screenWidth = UIScreen.main.bounds.width
        let videoHeight = screenWidth / 2 / 9 * 16
        pipDisplayLayer = AVSampleBufferDisplayLayer()
        // Adjust the PIP display position here based on your actual needs.
        let tsa = self.mainView?.safeAreaInsets.top ?? 0
        let vmt = tsa + (self.mt ?? 0)
        pipDisplayLayer.frame = CGRect(x: 0, y: vmt, width: screenWidth, height: videoHeight) // Adjust size as needed
        pipDisplayLayer.videoGravity = .resizeAspect
        pipDisplayLayer.isOpaque = true
        pipDisplayLayer.backgroundColor = CGColor(red: 0, green: 0, blue: 0, alpha: 1)
        // Add the PIP layer to the mainView passed in by enablePip.
        mainView?.layer.addSublayer(pipDisplayLayer)
        if AVPictureInPictureController.isPictureInPictureSupported() {
            let contentSource = AVPictureInPictureController.ContentSource(
                sampleBufferDisplayLayer: pipDisplayLayer,
                playbackDelegate: self)
            pipController = AVPictureInPictureController(contentSource: contentSource)
            pipController?.delegate = self
            pipController?.canStartPictureInPictureAutomaticallyFromInline = true
        } else {
            print("+> PiP not supported")
        }
    }

    // All other methods follow the implementation in the Calling System APIs for Implementation section for the native iOS end.
}
// Trigger the PIP stop according to business needs.
trtcCloud.startRemoteView(pkLeftUserId, TRTCCloudDef.TRTC_VIDEO_STREAM_TYPE_BIG, pkLeftId);
trtcCloud.startRemoteView(pkRightUserId, TRTCCloudDef.TRTC_VIDEO_STREAM_TYPE_BIG, pkRightId);
await channel.invokeMethod('disablePip');
@override
dispose() {
  channel.invokeMethod('disablePip');
  super.dispose();
}
MethodChannel _channel = MethodChannel('samples.flutter.dev');
final int? result = await _channel.invokeMethod('pictureInPicture');
private void startPictureInPicture() {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
        PictureInPictureParams.Builder pictureInPictureBuilder = new PictureInPictureParams.Builder();
        // Set the specified PIP size based on specific business requirements.
        Rational aspectRatio = new Rational(100, 100);
        pictureInPictureBuilder.setAspectRatio(aspectRatio);
        // Enter the PIP mode.
        enterPictureInPictureMode(pictureInPictureBuilder.build());
    } else {
        Toast.makeText(this, R.string.picture_in_picture_not_supported, Toast.LENGTH_SHORT).show();
    }
}

@Override
public void configureFlutterEngine(@NonNull FlutterEngine flutterEngine) {
    super.configureFlutterEngine(flutterEngine);
    MethodChannel channel = new MethodChannel(flutterEngine.getDartExecutor().getBinaryMessenger(), "samples.flutter.dev");
    channel.setMethodCallHandler((call, result) -> {
        if (call.method.equals("pictureInPicture")) {
            startPictureInPicture();
            // Reply so that the awaiting Dart future completes.
            result.success(null);
        } else {
            result.notImplemented();
        }
    });
}
Set android:supportsPictureInPicture="true" for the activity in AndroidManifest.xml, as follows:

<activity
    android:name="example.android.app.src.main.java.com.tencent.live.example.MainActivity"
    android:supportsPictureInPicture="true"
    android:configChanges="orientation|keyboardHidden|keyboard|screenSize|smallestScreenSize|locale|layoutDirection|fontScale|screenLayout|density|uiMode">
    ...
</activity>