@p-larson (last active August 29, 2022)
import SwiftUI
import Foundation
import CoreGraphics
import AVFoundation
import VideoToolbox
import ComposableArchitecture

struct ContentView: View {
    let store: Store<CameraState, CameraAction>

    var body: some View {
        WithViewStore(self.store) { viewStore in
            if let image = viewStore.feed {
                GeometryReader { geometry in
                    Image(decorative: image, scale: 1.0, orientation: .upMirrored)
                        .resizable()
                        .scaledToFill()
                        .frame(
                            width: geometry.size.width,
                            height: geometry.size.height,
                            alignment: .center
                        )
                        .clipped()
                }
            } else {
                // Black placeholder until the first frame arrives; kicks off the feed.
                Color.black.onAppear(perform: {
                    viewStore.send(.open)
                })
            }
        }
    }
}
public struct CameraState: Equatable {
    var feed: CGImage?
    var isRecording: Bool
}

enum CameraAction: Equatable {
    case open
    case start
    case receive(CGImage)
    case authorizationResponse(AVAuthorizationStatus)
}

struct CameraClient {
    var requestAuthorization: @Sendable () async -> AVAuthorizationStatus
    var startFeed: @Sendable (AVCaptureSession, AVCaptureVideoDataOutput, DispatchQueue) async -> AsyncStream<CGImage>
}

// Bridges AVCaptureVideoDataOutput callbacks into an AsyncStream of CGImages.
private final class Delegate: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    let continuation: AsyncStream<CGImage>.Continuation

    init(continuation: AsyncStream<CGImage>.Continuation) {
        self.continuation = continuation
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        if let imageBuffer = sampleBuffer.imageBuffer {
            var image: CGImage?
            VTCreateCGImageFromCVPixelBuffer(imageBuffer, options: nil, imageOut: &image)
            if let image = image {
                self.continuation.yield(image)
            }
        }
    }
}
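
// Diagnostic sketch, an assumption rather than part of the original gist: the
// capture output also reports dropped frames, and the "Optional(OutOfBuffers)"
// line in the output below looks like the print from a handler such as this.
extension Delegate {
    func captureOutput(_ output: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // The attachment carries why the frame was dropped, e.g. OutOfBuffers
        // when the capture pool has no free buffers left to recycle.
        let reason = CMGetAttachment(
            sampleBuffer,
            key: kCMSampleBufferAttachmentKey_DroppedFrameReason,
            attachmentModeOut: nil
        )
        print(reason as Any)
    }
}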

private final actor Camera {
    var delegate: Delegate?

    func startFeed(_ session: AVCaptureSession, _ output: AVCaptureVideoDataOutput, _ queue: DispatchQueue) async -> AsyncStream<CGImage> {
        // The AsyncStream build closure runs synchronously during init, so
        // `self.delegate` is set before the deferred block wires it up.
        defer {
            session.beginConfiguration()
            output.setSampleBufferDelegate(self.delegate, queue: queue)
            session.commitConfiguration()
        }
        return AsyncStream<CGImage>(bufferingPolicy: .bufferingNewest(1)) { continuation in
            self.delegate = Delegate(continuation: continuation)
        }
    }
}

extension CameraClient {
    static var live: Self {
        let camera = Camera()
        return Self(
            requestAuthorization: {
                // NB: this reads the current status; it does not prompt the user.
                return AVCaptureDevice.authorizationStatus(for: .video)
            },
            startFeed: { session, output, queue in
                await camera.startFeed(session, output, queue)
            }
        )
    }
}

struct CameraEnvironment {
    var cameraClient: CameraClient
    var session = AVCaptureSession()
    var sessionQueue = DispatchQueue(label: "com.demo.camera", qos: .userInitiated, autoreleaseFrequency: .workItem)
    var videoOutput = AVCaptureVideoDataOutput()
}
let reducer = Reducer<CameraState, CameraAction, CameraEnvironment> { state, action, environment in
    switch action {
    case .open:
        return .run { send in
            let status = await environment.cameraClient.requestAuthorization()
            await send(.authorizationResponse(status))
            guard status == .authorized else {
                return
            }
            await send(.start)
            // Every frame is forwarded to the reducer as an action.
            for await frame in await environment.cameraClient.startFeed(environment.session, environment.videoOutput, environment.sessionQueue) {
                await send(.receive(frame))
            }
        }
    case .start:
        return .fireAndForget {
            environment.sessionQueue.async {
                environment.session.beginConfiguration()
                defer {
                    environment.session.commitConfiguration()
                    environment.session.startRunning()
                }
                let device = AVCaptureDevice.default(
                    .builtInWideAngleCamera,
                    for: .video,
                    position: .back
                )
                guard let camera = device else {
                    // TODO: Handle error
                    fatalError()
                }
                do {
                    let cameraInput = try AVCaptureDeviceInput(device: camera)
                    if environment.session.canAddInput(cameraInput) {
                        environment.session.addInput(cameraInput)
                    } else {
                        // TODO: Handle error
                        fatalError()
                    }
                } catch {
                    // TODO: Handle error
                    fatalError()
                }
                if environment.session.canAddOutput(environment.videoOutput) {
                    environment.session.addOutput(environment.videoOutput)
                    environment.videoOutput.videoSettings = [
                        kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA
                    ]
                    let videoConnection = environment.videoOutput.connection(with: .video)
                    videoConnection?.videoOrientation = .portrait
                } else {
                    // TODO: Handle error
                    fatalError()
                }
            }
        }
    case .receive(let live):
        state.feed = live
        // Buffer is not being released.
        return .none
    case .authorizationResponse(let status):
        // TODO: Handle response
        switch status {
        default:
            return .none
        }
    }
}
@p-larson (Author)

Problem

A CMSampleBuffer is being retained in memory somewhere during the async hand-off, leading to samples being dropped. This has taken my attention for the day; I'm seeking fresh eyes to shed light on what I'm doing wrong.

I greatly appreciate any corrections to my approach. Thank you for your time.
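
One workaround I'm considering (an untested sketch; it assumes the CGImage produced by VTCreateCGImageFromCVPixelBuffer shares storage with the capture buffer, which I haven't verified): deep-copy each frame before yielding it from the delegate, so the pool's CVPixelBuffer can be recycled immediately. `copyForDisplay` is a helper name I made up:

func copyForDisplay(_ image: CGImage) -> CGImage? {
    // Render into a fresh BGRA bitmap context so the result owns its own storage.
    let context = CGContext(
        data: nil,
        width: image.width,
        height: image.height,
        bitsPerComponent: 8,
        bytesPerRow: 0,
        space: CGColorSpaceCreateDeviceRGB(),
        bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue
            | CGBitmapInfo.byteOrder32Little.rawValue
    )
    context?.draw(image, in: CGRect(x: 0, y: 0, width: image.width, height: image.height))
    return context?.makeImage()
}

In captureOutput I would then yield copyForDisplay(image) instead of image.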

Side Quest

I'm potentially sending CameraAction.receive(CGImage) to the reducer very frequently. I couldn't find anything in the documentation saying I shouldn't, but it feels like I shouldn't.

Is there a more efficient approach to updating view-dependent state without running it through the reducer?
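
One mitigation I've sketched (my own idea, not a pattern I've seen documented for TCA): throttle frames inside the effect so at most roughly 30 actions per second reach the reducer, e.g. in the .open case:

var lastSent = Date.distantPast
for await frame in await environment.cameraClient.startFeed(environment.session, environment.videoOutput, environment.sessionQueue) {
    let now = Date()
    // Skip frames that arrive less than 1/30 s after the last one sent.
    guard now.timeIntervalSince(lastSent) >= 1.0 / 30.0 else { continue }
    lastSent = now
    await send(.receive(frame))
}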

Output

received action:
  CameraAction.open
  (No state changes)

received action:
  CameraAction.authorizationResponse(AVAuthorizationStatus.AVAuthorizationStatus)
  (No state changes)

received action:
  CameraAction.start
  (No state changes)

received action:
  CameraAction.receive(__NSCFType())
  CameraState(
    numberOfFrames: 0,
-   feed: nil,
+   feed: __NSCFType(),
    isRecording: false
  )

received action:
  CameraAction.receive(__NSCFType())
  CameraState(
    numberOfFrames: 0,
-   feed: __NSCFType()
+   feed: __NSCFType()
    isRecording: false
  )

(the receive block above repeats identically for each subsequent frame)

Optional(OutOfBuffers)
Wimyx_08262022/ContentView.swift:78: Fatal error
2022-08-28 18:53:12.202177-0500 Wimyx 08262022[48818:5157375] Wimyx_08262022/ContentView.swift:78: Fatal error
