services:
  kokoro:
    image: ghcr.io/remsky/kokoro-fastapi-cpu
    # image: ghcr.io/remsky/kokoro-fastapi-gpu # Uncomment this line if you have a GPU and want to use it
    container_name: kokoro
    environment: # Comment this section out if you are using the GPU image
      # ONNX optimization settings for vectorized operations
      - ONNX_NUM_THREADS=8 # Maximize core usage for vectorized ops
      - ONNX_INTER_OP_THREADS=4 # Higher inter-op parallelism for matrix operations
      - ONNX_EXECUTION_MODE=parallel
      - ONNX_OPTIMIZATION_LEVEL=all
      - ONNX_MEMORY_PATTERN=true
      - ONNX_ARENA_EXTEND_STRATEGY=kNextPowerOfTwo
    # environment: # Uncomment this section if you are using the GPU image
    #   - PYTHONPATH=/app:/app/api
    #   - USE_GPU=true
    #   - PYTHONUNBUFFERED=1
    ports:
      - 8880:8880
    healthcheck:
      test: ['CMD-SHELL', 'curl -f http://localhost:8880/health || exit 1']
      interval: 10s
      timeout: 5s
      retries: 5
    # deploy: # Uncomment this section if you are using the GPU image
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: all
    #           capabilities: [gpu]
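
  # To sanity-check the kokoro service on its own before running the full
  # pipeline, you can start it alone and poll the same /health endpoint the
  # healthcheck above uses (assumed workflow, not part of this compose file):
  #
  #   docker compose up -d kokoro
  #   curl -f http://localhost:8880/health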

  epub_to_audiobook:
    build:
      context: ./ # Directory containing the Dockerfile for epub_to_audiobook
      dockerfile: Dockerfile # Name of the Dockerfile (only needed if it differs from the default 'Dockerfile')
    container_name: epub_to_audiobook
    environment:
      - OPENAI_BASE_URL=http://kokoro:8880/v1
      - OPENAI_API_KEY=nope # Kokoro does not check the key, but the OpenAI client requires a non-empty value
      - PATH_TO_EPUB_FILE=${PATH_TO_EPUB_FILE}
      - OUTPUT_DIR=${OUTPUT_DIR:-audiobook_output}
      - VOICE_NAME=${VOICE_NAME:-af_bella}
    volumes:
      - ./:/app
      - ${PATH_TO_EPUB_FILE}:/epub_src # The epub is read from this mount path inside the container
    # command: tail -f /dev/null # Uncomment this line to keep the container running instead, then connect with `docker exec -it epub_to_audiobook /bin/bash`
    command: >
      python main.py
      "/epub_src"
      "${OUTPUT_DIR:-audiobook_output}"
      --tts openai
      --no_prompt
      --model_name tts-1
      --voice_name '${VOICE_NAME:-af_bella}'
    depends_on:
      kokoro:
        condition: service_healthy
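
# Usage sketch (assumed invocation; the variable values are examples):
#
#   PATH_TO_EPUB_FILE=/absolute/path/to/book.epub \
#   OUTPUT_DIR=audiobook_output \
#   VOICE_NAME=af_bella \
#   docker compose up --build
#
# Compose interpolates the three variables at launch time, waits for kokoro's
# healthcheck to pass (depends_on: service_healthy), then runs main.py. The
# finished audiobook should land in ./audiobook_output on the host via the
# ./:/app bind mount, assuming the container's working directory is /app.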