Unified Download Progress for Huggingface and Modelscope (#394)
JingofXin authored Dec 19, 2024
1 parent 51f2923 commit 327b498
Showing 11 changed files with 224 additions and 57 deletions.
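Across the hunks below, the recurring edit chains an or '' fallback onto each ModelManager(source).download(...) call, so that downstream helpers such as get_model_name always receive a string even when the download step returns a falsy value. A minimal sketch of that fallback, using a hypothetical fake_download stand-in rather than the real ModelManager API:

# Minimal sketch of the or '' fallback pattern used throughout this commit.
# fake_download is a hypothetical stand-in for ModelManager(source).download,
# assumed here to return a falsy value (e.g. None) when the model cannot be resolved.
import os

def fake_download(model_name):
    return None  # simulate an unresolved or failed download

base_model = fake_download('bark') or ''       # falls back to an empty string
model_name = os.path.basename(base_model)      # safe: os.path.basename('') == ''
print(repr(base_model), repr(model_name))      # prints: '' ''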
2 changes: 1 addition & 1 deletion lazyllm/components/auto/autodeploy.py
@@ -18,7 +18,7 @@ class AutoDeploy(LazyLLMDeployBase):
 
     def __new__(cls, base_model, source=lazyllm.config['model_source'], trust_remote_code=True, max_token_num=1024,
                 launcher=launchers.remote(ngpus=1), stream=False, type=None, **kw):
-        base_model = ModelManager(source).download(base_model)
+        base_model = ModelManager(source).download(base_model) or ''
         model_name = get_model_name(base_model)
         if not type:
             type = ModelManager.get_model_type(model_name)
2 changes: 1 addition & 1 deletion lazyllm/components/auto/autofinetune.py
@@ -10,7 +10,7 @@
 class AutoFinetune(LazyLLMFinetuneBase):
     def __new__(cls, base_model, target_path, source=lazyllm.config['model_source'], merge_path=None, ctx_len=1024,
                 batch_size=32, lora_r=8, launcher=launchers.remote(ngpus=1), **kw):
-        base_model = ModelManager(source).download(base_model)
+        base_model = ModelManager(source).download(base_model) or ''
         model_name = get_model_name(base_model)
         model_type = ModelManager.get_model_type(model_name)
         if model_type in ['embed', 'tts', 'vlm', 'stt', 'sd']:
4 changes: 2 additions & 2 deletions lazyllm/components/embedding/embed.py
@@ -9,7 +9,7 @@ class LazyHuggingFaceEmbedding(object):
     def __init__(self, base_embed, source=None, init=False):
         from ..utils.downloader import ModelManager
         source = lazyllm.config['model_source'] if not source else source
-        self.base_embed = ModelManager(source).download(base_embed)
+        self.base_embed = ModelManager(source).download(base_embed) or ''
         self.embed = None
         self.tokenizer = None
         self.device = "cpu"
@@ -48,7 +48,7 @@ class LazyHuggingFaceRerank(object):
     def __init__(self, base_rerank, source=None, init=False):
         from ..utils.downloader import ModelManager
         source = lazyllm.config['model_source'] if not source else source
-        self.base_rerank = ModelManager(source).download(base_rerank)
+        self.base_rerank = ModelManager(source).download(base_rerank) or ''
         self.reranker = None
         self.init_flag = lazyllm.once_flag()
         if init:
2 changes: 1 addition & 1 deletion lazyllm/components/speech_to_text/sense_voice.py
@@ -20,7 +20,7 @@ def is_valid_path(path):
 class SenseVoice(object):
     def __init__(self, base_path, source=None, init=False):
         source = lazyllm.config['model_source'] if not source else source
-        self.base_path = ModelManager(source).download(base_path)
+        self.base_path = ModelManager(source).download(base_path) or ''
         self.model = None
         self.init_flag = lazyllm.once_flag()
         if init:
2 changes: 1 addition & 1 deletion lazyllm/components/stable_diffusion/stable_diffusion3.py
@@ -15,7 +15,7 @@
 class StableDiffusion3(object):
     def __init__(self, base_sd, source=None, embed_batch_size=30, trust_remote_code=True, save_path=None, init=False):
         source = lazyllm.config['model_source'] if not source else source
-        self.base_sd = ModelManager(source).download(base_sd)
+        self.base_sd = ModelManager(source).download(base_sd) or ''
         self.embed_batch_size = embed_batch_size
         self.trust_remote_code = trust_remote_code
         self.sd = None
2 changes: 1 addition & 1 deletion lazyllm/components/text_to_speech/bark.py
@@ -13,7 +13,7 @@ class Bark(object):
 
     def __init__(self, base_path, source=None, trust_remote_code=True, save_path=None, init=False):
         source = lazyllm.config['model_source'] if not source else source
-        self.base_path = ModelManager(source).download(base_path)
+        self.base_path = ModelManager(source).download(base_path) or ''
         self.trust_remote_code = trust_remote_code
         self.processor, self.bark = None, None
         self.init_flag = lazyllm.once_flag()
2 changes: 1 addition & 1 deletion lazyllm/components/text_to_speech/chattts.py
@@ -12,7 +12,7 @@ class ChatTTSModule(object):
 
     def __init__(self, base_path, source=None, save_path=None, init=False):
         source = lazyllm.config['model_source'] if not source else source
-        self.base_path = ModelManager(source).download(base_path)
+        self.base_path = ModelManager(source).download(base_path) or ''
         self.model, self.spk = None, None
         self.init_flag = lazyllm.once_flag()
         self.device = 'cpu'
2 changes: 1 addition & 1 deletion lazyllm/components/text_to_speech/musicgen.py
@@ -11,7 +11,7 @@ class MusicGen(object):
 
     def __init__(self, base_path, source=None, save_path=None, init=False):
         source = lazyllm.config['model_source'] if not source else source
-        self.base_path = ModelManager(source).download(base_path)
+        self.base_path = ModelManager(source).download(base_path) or ''
         self.model = None
         self.init_flag = lazyllm.once_flag()
         self.save_path = save_path or os.path.join(lazyllm.config['temp_dir'], 'musicgen')