diff --git a/python-api-examples/speaker-identification-with-vad-non-streaming-asr.py b/python-api-examples/speaker-identification-with-vad-non-streaming-asr.py
index fe735e17a..0534b80f5 100755
--- a/python-api-examples/speaker-identification-with-vad-non-streaming-asr.py
+++ b/python-api-examples/speaker-identification-with-vad-non-streaming-asr.py
@@ -65,7 +65,7 @@

 import numpy as np
 import sherpa_onnx
-import torchaudio
+import soundfile as sf

 try:
     import sounddevice as sd
@@ -357,8 +357,14 @@ def load_speaker_file(args) -> Dict[str, List[str]]:


 def load_audio(filename: str) -> Tuple[np.ndarray, int]:
-    samples, sample_rate = torchaudio.load(filename)
-    return samples[0].contiguous().numpy(), sample_rate
+    data, sample_rate = sf.read(
+        filename,
+        always_2d=True,
+        dtype="float32",
+    )
+    data = data[:, 0]  # use only the first channel
+    samples = np.ascontiguousarray(data)
+    return samples, sample_rate


 def compute_speaker_embedding(
diff --git a/python-api-examples/speaker-identification-with-vad.py b/python-api-examples/speaker-identification-with-vad.py
index afad458dd..8514ed58f 100755
--- a/python-api-examples/speaker-identification-with-vad.py
+++ b/python-api-examples/speaker-identification-with-vad.py
@@ -60,7 +60,7 @@

 import numpy as np
 import sherpa_onnx
-import torchaudio
+import soundfile as sf

 try:
     import sounddevice as sd
@@ -160,8 +160,14 @@ def load_speaker_file(args) -> Dict[str, List[str]]:


 def load_audio(filename: str) -> Tuple[np.ndarray, int]:
-    samples, sample_rate = torchaudio.load(filename)
-    return samples[0].contiguous().numpy(), sample_rate
+    data, sample_rate = sf.read(
+        filename,
+        always_2d=True,
+        dtype="float32",
+    )
+    data = data[:, 0]  # use only the first channel
+    samples = np.ascontiguousarray(data)
+    return samples, sample_rate


 def compute_speaker_embedding(
diff --git a/python-api-examples/speaker-identification.py b/python-api-examples/speaker-identification.py
index c09478d81..abfa45587 100755
--- a/python-api-examples/speaker-identification.py
+++ b/python-api-examples/speaker-identification.py
@@ -52,7 +52,7 @@

 import numpy as np
 import sherpa_onnx
-import torchaudio
+import soundfile as sf

 try:
     import sounddevice as sd
@@ -145,8 +145,14 @@ def load_speaker_file(args) -> Dict[str, List[str]]:


 def load_audio(filename: str) -> Tuple[np.ndarray, int]:
-    samples, sample_rate = torchaudio.load(filename)
-    return samples[0].contiguous().numpy(), sample_rate
+    data, sample_rate = sf.read(
+        filename,
+        always_2d=True,
+        dtype="float32",
+    )
+    data = data[:, 0]  # use only the first channel
+    samples = np.ascontiguousarray(data)
+    return samples, sample_rate


 def compute_speaker_embedding(
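
For reference, here is a minimal standalone sketch (not part of the patch) of the new soundfile-based load_audio, with a placeholder "test.wav" path. The channel indexing changes because sf.read with always_2d=True returns an array of shape (num_frames, num_channels), whereas torchaudio.load returns (num_channels, num_frames); hence the first channel is now data[:, 0] instead of samples[0].

    # Illustrative sketch only; "test.wav" is a placeholder path.
    import numpy as np
    import soundfile as sf


    def load_audio(filename: str):
        # sf.read returns (num_frames, num_channels) when always_2d=True,
        # so channel 0 is data[:, 0]; dtype="float32" matches what
        # sherpa-onnx expects for its float sample buffers.
        data, sample_rate = sf.read(filename, always_2d=True, dtype="float32")
        return np.ascontiguousarray(data[:, 0]), sample_rate


    if __name__ == "__main__":
        samples, sample_rate = load_audio("test.wav")
        print(samples.dtype, samples.shape, sample_rate)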