import ComposableArchitecture
import XCTest

@testable import SpeechRecognition

final class SpeechRecognitionTests: XCTestCase {
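  // Denying speech recognition authorization presents an alert explaining why access is needed
  // and stops recording.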
  func testDenyAuthorization() async {
    let store = await TestStore(initialState: SpeechRecognition.State()) {
      SpeechRecognition()
    } withDependencies: {
      $0.speechClient.requestAuthorization = { .denied }
    }

    await store.send(.recordButtonTapped) {
      $0.isRecording = true
    }
    await store.receive(\.speechRecognizerAuthorizationStatusResponse) {
      $0.alert = AlertState {
        TextState(
          """
          You denied access to speech recognition. This app needs access to transcribe your speech.
          """
        )
      }
      $0.isRecording = false
    }
  }

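  // A restricted device presents a "does not allow speech recognition" alert and stops recording.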
  func testRestrictedAuthorization() async {
    let store = await TestStore(initialState: SpeechRecognition.State()) {
      SpeechRecognition()
    } withDependencies: {
      $0.speechClient.requestAuthorization = { .restricted }
    }

    await store.send(.recordButtonTapped) {
      $0.isRecording = true
    }
    await store.receive(\.speechRecognizerAuthorizationStatusResponse) {
      $0.alert = AlertState { TextState("Your device does not allow speech recognition.") }
      $0.isRecording = false
    }
  }

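  // With authorization granted, transcription results stream into state until the user stops
  // recording.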
  func testAllowAndRecord() async {
    let recognitionTask = AsyncThrowingStream.makeStream(of: SpeechRecognitionResult.self)
    let store = await TestStore(initialState: SpeechRecognition.State()) {
      SpeechRecognition()
    } withDependencies: {
      $0.speechClient.finishTask = { recognitionTask.continuation.finish() }
      $0.speechClient.startTask = { @Sendable _ in recognitionTask.stream }
      $0.speechClient.requestAuthorization = { .authorized }
    }

    let firstResult = SpeechRecognitionResult(
      bestTranscription: Transcription(
        formattedString: "Hello",
        segments: []
      ),
      isFinal: false,
      transcriptions: []
    )
    var secondResult = firstResult
    secondResult.bestTranscription.formattedString = "Hello world"

    await store.send(.recordButtonTapped) {
      $0.isRecording = true
    }

    await store.receive(\.speechRecognizerAuthorizationStatusResponse)

    recognitionTask.continuation.yield(firstResult)
    await store.receive(\.speech.success) {
      $0.transcribedText = "Hello"
    }

    recognitionTask.continuation.yield(secondResult)
    await store.receive(\.speech.success) {
      $0.transcribedText = "Hello world"
    }

    await store.send(.recordButtonTapped) {
      $0.isRecording = false
    }

    await store.finish()
  }

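  // A failure to configure the audio session surfaces as a "problem with audio device" alert.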
  func testAudioSessionFailure() async {
    let recognitionTask = AsyncThrowingStream.makeStream(of: SpeechRecognitionResult.self)
    let store = await TestStore(initialState: SpeechRecognition.State()) {
      SpeechRecognition()
    } withDependencies: {
      $0.speechClient.startTask = { @Sendable _ in recognitionTask.stream }
      $0.speechClient.requestAuthorization = { .authorized }
    }

    await store.send(.recordButtonTapped) {
      $0.isRecording = true
    }

    await store.receive(\.speechRecognizerAuthorizationStatusResponse)

    recognitionTask.continuation.finish(throwing: SpeechClient.Failure.couldntConfigureAudioSession)
    await store.receive(\.speech.failure) {
      $0.alert = AlertState { TextState("Problem with audio device. Please try again.") }
    }
  }

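  // A failure to start the audio engine surfaces as the same "problem with audio device" alert.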
  func testAudioEngineFailure() async {
    let recognitionTask = AsyncThrowingStream.makeStream(of: SpeechRecognitionResult.self)
    let store = await TestStore(initialState: SpeechRecognition.State()) {
      SpeechRecognition()
    } withDependencies: {
      $0.speechClient.startTask = { @Sendable _ in recognitionTask.stream }
      $0.speechClient.requestAuthorization = { .authorized }
    }

    await store.send(.recordButtonTapped) {
      $0.isRecording = true
    }

    await store.receive(\.speechRecognizerAuthorizationStatusResponse)

    recognitionTask.continuation.finish(throwing: SpeechClient.Failure.couldntStartAudioEngine)
    await store.receive(\.speech.failure) {
      $0.alert = AlertState { TextState("Problem with audio device. Please try again.") }
    }
  }
}