[server] Reduce log redundancy in server #2113

Merged 1 commit on Jul 5, 2022
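The change follows a single pattern: chatty per-request and per-chunk messages are demoted from logger.info to logger.debug, duplicated client-side dumps of the full server response are removed, and each engine keeps one INFO line at initialization that now also reports the device. A minimal sketch of the idea using the standard logging module (the handle_chunk function below is hypothetical and only illustrates the level split; PaddleSpeech routes these calls through its own logger wrapper):

import logging

# Handlers configured at INFO drop DEBUG records, so the demoted
# messages stay out of the server log during normal operation.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("paddlespeech.server")


def handle_chunk(samples: bytes) -> None:
    # Per-chunk detail: DEBUG, hidden unless debugging is enabled.
    logger.debug("received %d bytes of pcm data", len(samples))
    if not samples:
        # Real problems stay at ERROR.
        logger.error("empty audio chunk received")
        return
    # One concise INFO line per meaningful event is kept.
    logger.info("processed a chunk of %d bytes", len(samples))


handle_chunk(b"\x00\x01" * 160)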
2 changes: 1 addition & 1 deletion paddlespeech/cli/tts/infer.py
@@ -382,7 +382,7 @@ def infer(self,
text, merge_sentences=merge_sentences)
phone_ids = input_ids["phone_ids"]
else:
print("lang should in {'zh', 'en'}!")
logger.error("lang should in {'zh', 'en'}!")
self.frontend_time = time.time() - frontend_st

self.am_time = 0
2 changes: 0 additions & 2 deletions paddlespeech/server/bin/paddlespeech_client.py
@@ -123,7 +123,6 @@ def execute(self, argv: List[str]) -> bool:
time_end = time.time()
time_consume = time_end - time_start
response_dict = res.json()
- logger.info(response_dict["message"])
logger.info("Save synthesized audio successfully on %s." % (output))
logger.info("Audio duration: %f s." %
(response_dict['result']['duration']))
@@ -702,7 +701,6 @@ def execute(self, argv: List[str]) -> bool:
test_audio=args.test,
task=task)
time_end = time.time()
logger.info(f"The vector: {res}")
logger.info("Response time %f s." % (time_end - time_start))
return True
except Exception as e:
20 changes: 11 additions & 9 deletions paddlespeech/server/engine/acs/python/acs_engine.py
@@ -30,7 +30,7 @@ def __init__(self):
"""The ACSEngine Engine
"""
super(ACSEngine, self).__init__()
logger.info("Create the ACSEngine Instance")
logger.debug("Create the ACSEngine Instance")
self.word_list = []

def init(self, config: dict):
@@ -42,15 +42,15 @@ def init(self, config: dict):
Returns:
bool: The engine instance flag
"""
logger.info("Init the acs engine")
logger.debug("Init the acs engine")
try:
self.config = config
self.device = self.config.get("device", paddle.get_device())

# websocket default ping timeout is 20 seconds
self.ping_timeout = self.config.get("ping_timeout", 20)
paddle.set_device(self.device)
logger.info(f"ACS Engine set the device: {self.device}")
logger.debug(f"ACS Engine set the device: {self.device}")

except BaseException as e:
logger.error(
@@ -66,7 +66,9 @@ def init(self, config: dict):
self.url = "ws://" + self.config.asr_server_ip + ":" + str(
self.config.asr_server_port) + "/paddlespeech/asr/streaming"

logger.info("Init the acs engine successfully")
logger.info("Initialize acs server engine successfully on device: %s." %
(self.device))

return True

def read_search_words(self):
@@ -95,12 +97,12 @@ def get_asr_content(self, audio_data):
Returns:
_type_: _description_
"""
logger.info("send a message to the server")
logger.debug("send a message to the server")
if self.url is None:
logger.error("No asr server, please input valid ip and port")
return ""
ws = websocket.WebSocket()
logger.info(f"set the ping timeout: {self.ping_timeout} seconds")
logger.debug(f"set the ping timeout: {self.ping_timeout} seconds")
ws.connect(self.url, ping_timeout=self.ping_timeout)
audio_info = json.dumps(
{
@@ -123,7 +125,7 @@ def get_asr_content(self, audio_data):
logger.info(f"audio result: {msg}")

# 3. send chunk audio data to engine
logger.info("send the end signal")
logger.debug("send the end signal")
audio_info = json.dumps(
{
"name": "test.wav",
@@ -197,7 +199,7 @@ def get_macthed_word(self, msg):
start = max(time_stamp[m.start(0)]['bg'] - offset, 0)

end = min(time_stamp[m.end(0) - 1]['ed'] + offset, max_ed)
- logger.info(f'start: {start}, end: {end}')
+ logger.debug(f'start: {start}, end: {end}')
acs_result.append({'w': w, 'bg': start, 'ed': end})

return acs_result, asr_result
@@ -212,7 +214,7 @@ def run(self, audio_data):
Returns:
acs_result, asr_result: the acs result and the asr result
"""
logger.info("start to process the audio content search")
logger.debug("start to process the audio content search")
msg = self.get_asr_content(io.BytesIO(audio_data))

acs_result, asr_result = self.get_macthed_word(msg)
44 changes: 22 additions & 22 deletions paddlespeech/server/engine/asr/online/onnx/asr_engine.py
@@ -44,7 +44,7 @@ def __init__(self, asr_engine):
asr_engine (ASREngine): the global asr engine
"""
super().__init__()
- logger.info(
+ logger.debug(
"create an paddle asr connection handler to process the websocket connection"
)
self.config = asr_engine.config # server config
@@ -152,12 +152,12 @@ def reset(self):
self.output_reset()

def extract_feat(self, samples: ByteString):
logger.info("Online ASR extract the feat")
logger.debug("Online ASR extract the feat")
samples = np.frombuffer(samples, dtype=np.int16)
assert samples.ndim == 1

self.num_samples += samples.shape[0]
- logger.info(
+ logger.debug(
f"This package receive {samples.shape[0]} pcm data. Global samples:{self.num_samples}"
)

@@ -168,7 +168,7 @@ def extract_feat(self, samples: ByteString):
else:
assert self.remained_wav.ndim == 1 # (T,)
self.remained_wav = np.concatenate([self.remained_wav, samples])
- logger.info(
+ logger.debug(
f"The concatenation of remain and now audio samples length is: {self.remained_wav.shape}"
)

@@ -202,14 +202,14 @@ def extract_feat(self, samples: ByteString):
# update remained wav
self.remained_wav = self.remained_wav[self.n_shift * num_frames:]

- logger.info(
+ logger.debug(
f"process the audio feature success, the cached feat shape: {self.cached_feat.shape}"
)
- logger.info(
+ logger.debug(
f"After extract feat, the cached remain the audio samples: {self.remained_wav.shape}"
)
logger.info(f"global samples: {self.num_samples}")
logger.info(f"global frames: {self.num_frames}")
logger.debug(f"global samples: {self.num_samples}")
logger.debug(f"global frames: {self.num_frames}")

def decode(self, is_finished=False):
"""advance decoding
@@ -237,7 +237,7 @@ def decode(self, is_finished=False):
return

num_frames = self.cached_feat.shape[1]
- logger.info(
+ logger.debug(
f"Required decoding window {decoding_window} frames, and the connection has {num_frames} frames"
)

@@ -355,7 +355,7 @@ def update_config(self) -> None:

lm_url = self.task_resource.res_dict['lm_url']
lm_md5 = self.task_resource.res_dict['lm_md5']
logger.info(f"Start to load language model {lm_url}")
logger.debug(f"Start to load language model {lm_url}")
self.download_lm(
lm_url,
os.path.dirname(self.config.decode.lang_model_path), lm_md5)
@@ -367,7 +367,7 @@ def init_model(self) -> None:

if "deepspeech2" in self.model_type:
# AM predictor
logger.info("ASR engine start to init the am predictor")
logger.debug("ASR engine start to init the am predictor")
self.am_predictor = onnx_infer.get_sess(
model_path=self.am_model, sess_conf=self.am_predictor_conf)
else:
@@ -400,7 +400,7 @@ def _init_from_path(self,
self.num_decoding_left_chunks = num_decoding_left_chunks
# conf for paddleinference predictor or onnx
self.am_predictor_conf = am_predictor_conf
logger.info(f"model_type: {self.model_type}")
logger.debug(f"model_type: {self.model_type}")

sample_rate_str = '16k' if sample_rate == 16000 else '8k'
tag = model_type + '-' + lang + '-' + sample_rate_str
@@ -422,12 +422,11 @@ def _init_from_path(self,
# self.res_path, self.task_resource.res_dict[
# 'params']) if am_params is None else os.path.abspath(am_params)

logger.info("Load the pretrained model:")
logger.info(f" tag = {tag}")
logger.info(f" res_path: {self.res_path}")
logger.info(f" cfg path: {self.cfg_path}")
logger.info(f" am_model path: {self.am_model}")
# logger.info(f" am_params path: {self.am_params}")
logger.debug("Load the pretrained model:")
logger.debug(f" tag = {tag}")
logger.debug(f" res_path: {self.res_path}")
logger.debug(f" cfg path: {self.cfg_path}")
logger.debug(f" am_model path: {self.am_model}")

#Init body.
self.config = CfgNode(new_allowed=True)
@@ -436,7 +435,7 @@ def _init_from_path(self,
if self.config.spm_model_prefix:
self.config.spm_model_prefix = os.path.join(
self.res_path, self.config.spm_model_prefix)
logger.info(f"spm model path: {self.config.spm_model_prefix}")
logger.debug(f"spm model path: {self.config.spm_model_prefix}")

self.vocab = self.config.vocab_filepath

@@ -450,7 +449,7 @@ def _init_from_path(self,
# AM predictor
self.init_model()

logger.info(f"create the {model_type} model success")
logger.debug(f"create the {model_type} model success")
return True


@@ -501,15 +500,16 @@ def init(self, config: dict) -> bool:
"If all GPU or XPU is used, you can set the server to 'cpu'")
sys.exit(-1)

logger.info(f"paddlespeech_server set the device: {self.device}")
logger.debug(f"paddlespeech_server set the device: {self.device}")

if not self.init_model():
logger.error(
"Init the ASR server occurs error, please check the server configuration yaml"
)
return False

logger.info("Initialize ASR server engine successfully.")
logger.info("Initialize ASR server engine successfully on device: %s." %
(self.device))
return True

def new_handler(self):
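Taken together, errors and one-time initialization messages remain at ERROR or INFO, while routine per-chunk traces move to DEBUG. If the verbose traces are needed again for troubleshooting, raising the log level is enough; a minimal sketch with the standard logging module (the "paddlespeech" logger name is an assumption here, since PaddleSpeech's own logger wrapper may expose a different switch):

import logging

# Assumption: the server's loggers participate in the standard logging
# hierarchy; the exact switch in PaddleSpeech's logger wrapper may differ.
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("paddlespeech").setLevel(logging.DEBUG)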