Authored by Ross-Fan, 2025-02-02 22:49:13 +0800
Committed by GitHub, 2025-02-02 22:49:13 +0800

Commit 1d950a88e53df4671bb347f9e53c6a5a4d1bda22 (1d950a88)
1 parent 59ff8542

Initialize the audio session for iOS ASR example (#1786)

Fixes #1784
Showing 1 changed file with 29 additions and 10 deletions.
ios-swiftui/SherpaOnnx/SherpaOnnx/SherpaOnnxViewModel.swift
@@ -5,8 +5,8 @@
 //  Created by knight on 2023/4/5.
 //
 
-import Foundation
 import AVFoundation
+import Foundation
 
 enum Status {
   case stop
@@ -22,6 +22,7 @@ class SherpaOnnxViewModel: ObservableObject {
   var audioEngine: AVAudioEngine? = nil
   var recognizer: SherpaOnnxRecognizer! = nil
+  private var audioSession: AVAudioSession!
   var lastSentence: String = ""
   let maxSentence: Int = 20
@@ -36,11 +37,16 @@ class SherpaOnnxViewModel: ObservableObject {
     let start = max(sentences.count - maxSentence, 0)
     if lastSentence.isEmpty {
-      return sentences.enumerated().map { (index, s) in "\(index): \(s.lowercased())" }[start...].joined(separator: "\n")
+      return sentences.enumerated().map { (index, s) in
+        "\(index): \(s.lowercased())"
+      }[start...].joined(separator: "\n")
     } else {
-      return sentences.enumerated().map { (index, s) in "\(index): \(s.lowercased())" }[start...].joined(separator: "\n") + "\n\(sentences.count): \(lastSentence.lowercased())"
+      return sentences.enumerated().map { (index, s) in
+        "\(index): \(s.lowercased())"
+      }[start...].joined(separator: "\n")
+        + "\n\(sentences.count): \(lastSentence.lowercased())"
     }
   }
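For context, the results property shown above numbers the finalized sentences, keeps only the most recent maxSentence of them, and appends the still-in-progress lastSentence on its own line; this hunk only changes the line wrapping. A toy, standalone illustration of that formatting (the sample strings below are made up, not project data):

    // Toy example of the formatting used by `results` (not project code).
    let sentences = ["HELLO WORLD", "HOW ARE YOU", "FINE THANKS"]
    let lastSentence = "AND YOU"
    let maxSentence = 2

    // Keep only the last `maxSentence` finalized sentences.
    let start = max(sentences.count - maxSentence, 0)

    // Number and lowercase each sentence, then join the kept ones.
    let finalized = sentences.enumerated().map { (index, s) in
      "\(index): \(s.lowercased())"
    }[start...].joined(separator: "\n")

    // Append the sentence that is still being recognized.
    print(finalized + "\n\(sentences.count): \(lastSentence.lowercased())")
    // 1: how are you
    // 2: fine thanks
    // 3: and you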
@@ -48,8 +54,20 @@ class SherpaOnnxViewModel: ObservableObject {
     self.subtitles = self.results
   }
 
+  func setupAudioSession() {
+    audioSession = AVAudioSession.sharedInstance()
+    do {
+      try audioSession.setCategory(.playAndRecord, mode: .default, options: [.defaultToSpeaker])
+      try audioSession.setActive(true)
+    } catch {
+      print("Failed to set up audio session: \(error)")
+    }
+  }
+
   init() {
     initRecognizer()
+    setupAudioSession()
     initRecorder()
   }
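In the hunk above, AVAudioSession.sharedInstance() returns the app-wide audio session; setCategory(.playAndRecord, mode: .default, options: [.defaultToSpeaker]) allows simultaneous capture and playback routed to the built-in speaker, and setActive(true) applies the configuration. The following is a minimal standalone sketch of the same setup; the helper name, the completion callback, and the explicit permission request are illustrative assumptions and not part of this commit (recording also requires an NSMicrophoneUsageDescription entry in Info.plist):

    import AVFoundation

    // Hypothetical helper (not part of this commit): configure the shared audio
    // session the same way as setupAudioSession() in the diff, then ask for
    // microphone permission before any recording starts.
    func prepareAudioSessionForRecording(completion: @escaping (Bool) -> Void) {
      let session = AVAudioSession.sharedInstance()
      do {
        // Play-and-record category, default mode, output routed to the speaker.
        try session.setCategory(.playAndRecord, mode: .default, options: [.defaultToSpeaker])
        try session.setActive(true)
      } catch {
        print("Failed to set up audio session: \(error)")
        completion(false)
        return
      }
      // Without permission the microphone delivers silence; the user is only
      // prompted the first time this runs.
      session.requestRecordPermission { granted in
        completion(granted)
      }
    }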
@@ -116,8 +134,8 @@ class SherpaOnnxViewModel: ObservableObject {
         pcmFormat: outputFormat,
         frameCapacity: AVAudioFrameCount(outputFormat.sampleRate)
-          * buffer.frameLength / AVAudioFrameCount(buffer.format.sampleRate))!
+          * buffer.frameLength / AVAudioFrameCount(buffer.format.sampleRate))!
 
       var error: NSError?
       let _ = converter.convert(
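This hunk only re-indents the buffer allocation, but the arithmetic it contains is worth spelling out: the converted buffer's frameCapacity is the input frame count scaled by the ratio of the output sample rate to the input sample rate. A standalone sketch of that conversion with AVAudioConverter, assuming a 16 kHz mono Float32 target (the helper name and the 16 kHz figure are illustrative assumptions, not taken from this diff):

    import AVFoundation

    // Hypothetical standalone sketch (not the project's code): resample an
    // input AVAudioPCMBuffer to 16 kHz mono Float32 using AVAudioConverter.
    func resampleTo16kHz(_ buffer: AVAudioPCMBuffer) -> AVAudioPCMBuffer? {
      guard
        let outputFormat = AVAudioFormat(
          commonFormat: .pcmFormatFloat32, sampleRate: 16000, channels: 1, interleaved: false),
        let converter = AVAudioConverter(from: buffer.format, to: outputFormat)
      else { return nil }

      // Same idea as the diff: scale the frame count by the sample-rate ratio
      // so the output buffer can hold all of the resampled audio.
      let capacity =
        AVAudioFrameCount(outputFormat.sampleRate) * buffer.frameLength
        / AVAudioFrameCount(buffer.format.sampleRate)
      guard let converted = AVAudioPCMBuffer(pcmFormat: outputFormat, frameCapacity: capacity)
      else { return nil }

      var error: NSError?
      var consumed = false
      _ = converter.convert(to: converted, error: &error) { _, outStatus in
        // Feed the single input buffer once, then report that no more data follows.
        if consumed {
          outStatus.pointee = .noDataNow
          return nil
        }
        consumed = true
        outStatus.pointee = .haveData
        return buffer
      }
      return error == nil ? converted : nil
    }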
@@ -129,7 +147,7 @@ class SherpaOnnxViewModel: ObservableObject {
       let array = convertedBuffer.array()
       if !array.isEmpty {
         self.recognizer.acceptWaveform(samples: array)
-        while (self.recognizer.isReady()) {
+        while self.recognizer.isReady() {
           self.recognizer.decode()
         }
         let isEndpoint = self.recognizer.isEndpoint()
@@ -141,7 +159,7 @@ class SherpaOnnxViewModel: ObservableObject {
           print(text)
         }
 
-        if isEndpoint {
+        if isEndpoint {
           if !text.isEmpty {
             let tmp = self.lastSentence
             self.lastSentence = ""
@@ -170,7 +188,8 @@ class SherpaOnnxViewModel: ObservableObject {
     do {
       try self.audioEngine?.start()
     } catch let error as NSError {
-      print("Got an error starting audioEngine: \(error.domain), \(error)")
+      print(
+        "Got an error starting audioEngine: \(error.domain), \(error)")
     }
     print("started")
   }