diff --git a/garak/__main__.py b/garak/__main__.py
index 55e257a0..7603c4e4 100644
--- a/garak/__main__.py
+++ b/garak/__main__.py
@@ -2,7 +2,6 @@
 
 import sys
-sys.path.append("/home/sda/tianhaoli/garak")
 
 from garak import cli
 
 
diff --git a/garak/generators/huggingface.py b/garak/generators/huggingface.py
index e216285e..a3d8a3bf 100644
--- a/garak/generators/huggingface.py
+++ b/garak/generators/huggingface.py
@@ -84,7 +84,6 @@ def __init__(self, name, do_sample=True, generations=10, device=0):
             model=name,
             do_sample=do_sample,
             device=device,
-            proxy={'http': 'http://127.0.0.1:7890/', 'https': 'http://127.0.0.1:7890/'}
         )
         self.deprefix_prompt = name in models_to_deprefix
         if _config.loaded:
@@ -306,7 +305,6 @@ def _call_model(self, prompt: str, generations_this_call: int = 1) -> List[str]:
                 headers=self.headers,
                 json=payload,
                 timeout=(20, 90),  # (connect, read)
-                proxies={'http': 'http://127.0.0.1:7890/', 'https': 'http://127.0.0.1:7890/'}
             )
 
         if req_response.status_code == 503:
@@ -536,18 +534,14 @@ class LLaVA(Generator):
     ]
 
     def __init__(self, name="", generations=10):
-        proxies = {
-            'http': 'http://127.0.0.1:7890',
-            'https': 'http://127.0.0.1:7890'
-        }
         if name not in self.supported_models:
             raise ValueError(
                 f"Invalid modal name {name}, current support: {self.supported_models}."
             )
-        self.processor = LlavaNextProcessor.from_pretrained(name, proxies=proxies)
+        self.processor = LlavaNextProcessor.from_pretrained(name)
         self.model = LlavaNextForConditionalGeneration.from_pretrained(name,
                                                                        torch_dtype=torch.float16,
-                                                                       low_cpu_mem_usage=True, proxies=proxies)
+                                                                       low_cpu_mem_usage=True)
         if torch.cuda.is_available():
            self.model.to("cuda:0")
        else: