Dep (stt): use ffmpeg instead of portaudio
This commit is contained in:
@@ -6,19 +6,27 @@ services:
|
||||
ports:
|
||||
- "8081:8081"
|
||||
volumes:
|
||||
- whisper_models:/app/models
|
||||
- ./whisper.cpp/models/ggml-large-v3-turbo-q5_0.bin:/app/models/ggml-large-v3-turbo-q5_0.bin
|
||||
working_dir: /app
|
||||
entrypoint: ""
|
||||
command: >
|
||||
sh -c "
|
||||
if [ ! -f /app/models/ggml-large-v3-turbo.bin ]; then
|
||||
echo 'Downloading ggml-large-v3-turbo model...'
|
||||
./download-ggml-model.sh large-v3-turbo /app/models
|
||||
if [ ! -f /app/models/ggml-large-v3-turbo-q5_0.bin ]; then
|
||||
echo 'Downloading ggml-large-v3-turbo-q5_0 model...'
|
||||
curl -o /app/models/ggml-large-v3-turbo-q5_0.bin -L 'https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-large-v3-turbo-q5_0.bin?download=true'
|
||||
fi &&
|
||||
./build/bin/whisper-server -m /app/models/ggml-large-v3-turbo.bin -t 4 -p 1 --port 8081 --host 0.0.0.0
|
||||
./build/bin/whisper-server -m /app/models/ggml-large-v3-turbo-q5_0.bin -t 4 -p 1 --port 8081 --host 0.0.0.0
|
||||
"
|
||||
environment:
|
||||
- WHISPER_LOG_LEVEL=3
|
||||
# For GPU support, uncomment the following lines:
|
||||
deploy:
|
||||
resources:
|
||||
reservations:
|
||||
devices:
|
||||
- driver: nvidia
|
||||
count: 1
|
||||
capabilities: [gpu]
|
||||
# Restart policy in case the service fails
|
||||
restart: unless-stopped
|
||||
|
||||
@@ -45,7 +53,5 @@ services:
|
||||
volumes:
|
||||
models:
|
||||
driver: local
|
||||
audio:
|
||||
driver: local
|
||||
whisper_models:
|
||||
driver: local
|
||||
|
||||
Reference in New Issue
Block a user