
Commit

chore: expose additional config options (#51) pt2
Fedir Zadniprovskyi authored and fedirz committed Aug 27, 2024
1 parent 2f6a1bc commit 210dab4
Showing 2 changed files with 5 additions and 2 deletions.
4 changes: 2 additions & 2 deletions faster_whisper_server/config.py
@@ -156,9 +156,9 @@ class WhisperConfig(BaseModel):
     You can find other supported models at https://huggingface.co/models?p=2&sort=trending&search=ctranslate2 and https://huggingface.co/models?sort=trending&search=ct2
     """
     inference_device: Device = Field(default=Device.AUTO)
-    compute_type: Quantization = Field(default=Quantization.DEFAULT)
     device_index: int | list[int] = 0
-    cpu_threads: int = 16
+    compute_type: Quantization = Field(default=Quantization.DEFAULT)
+    cpu_threads: int = 0
     num_workers: int = 1


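In the config.py hunk, `compute_type` moves below `device_index` and the `cpu_threads` default drops from 16 to 0; faster-whisper passes that value through to CTranslate2, where 0 means "use the library's default thread count". Below is a minimal, self-contained sketch of the resulting field block together with a direct override. The extra `Device`/`Quantization` enum members beyond `AUTO` and `DEFAULT`, and the omission of the other `WhisperConfig` fields, are assumptions made for illustration and are not part of this commit.

from enum import Enum

from pydantic import BaseModel, Field


class Device(str, Enum):
    AUTO = "auto"  # shown in the diff
    CPU = "cpu"    # assumed member, standard CTranslate2 device name
    CUDA = "cuda"  # assumed member, standard CTranslate2 device name


class Quantization(str, Enum):
    DEFAULT = "default"  # shown in the diff
    INT8 = "int8"        # assumed member, common CTranslate2 compute type
    FLOAT16 = "float16"  # assumed member, common CTranslate2 compute type


class WhisperConfig(BaseModel):
    # Field order and defaults after this commit (other fields omitted).
    inference_device: Device = Field(default=Device.AUTO)
    device_index: int | list[int] = 0  # single GPU index, or a list for multi-GPU
    compute_type: Quantization = Field(default=Quantization.DEFAULT)
    cpu_threads: int = 0  # 0 = let faster-whisper/CTranslate2 pick a default
    num_workers: int = 1  # parallel workers for concurrent transcriptions


# Illustrative override: run on two GPUs with float16 weights.
cfg = WhisperConfig(
    inference_device=Device.CUDA,
    device_index=[0, 1],
    compute_type=Quantization.FLOAT16,
)
print(cfg)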
3 changes: 3 additions & 0 deletions faster_whisper_server/main.py
@@ -67,7 +67,10 @@ def load_model(model_name: str) -> WhisperModel:
     whisper = WhisperModel(
         model_name,
         device=config.whisper.inference_device,
+        device_index=config.whisper.device_index,
         compute_type=config.whisper.compute_type,
+        cpu_threads=config.whisper.cpu_threads,
+        num_workers=config.whisper.num_workers,
     )
     logger.info(
         f"Loaded {model_name} loaded in {time.perf_counter() - start:.2f} seconds. {config.whisper.inference_device}({config.whisper.compute_type}) will be used for inference."  # noqa: E501
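The main.py hunk forwards the newly exposed settings to the `faster_whisper.WhisperModel` constructor, which accepts parameters of the same names. For reference, a standalone sketch of the equivalent call outside the server; the model name, audio path, and literal values are illustrative and not taken from the commit.

from faster_whisper import WhisperModel

# The same keyword arguments that load_model now forwards from config.whisper.
model = WhisperModel(
    "Systran/faster-whisper-small",  # illustrative model name
    device="auto",                   # config.whisper.inference_device
    device_index=0,                  # config.whisper.device_index (int or list of ints)
    compute_type="default",          # config.whisper.compute_type
    cpu_threads=0,                   # config.whisper.cpu_threads (0 = library default)
    num_workers=1,                   # config.whisper.num_workers
)

segments, info = model.transcribe("audio.wav")  # illustrative audio path
for segment in segments:
    print(f"[{segment.start:.2f} -> {segment.end:.2f}] {segment.text}")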
