You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
the command: python run.py --model_name gpt2 -f pytorch --lambada_path /ampere/aml/natural_language_processing/text_generation/nanogpt/lambada_test_plain_text.txt
results in the following error:
`Traceback (most recent call last):
File "/ampere/mzd/ampere_model_library/utils/pytorch.py", line 65, in init
self._frozen_script = torch.jit.freeze(torch.jit.script(self._model), preserved_attrs=[func])
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_script.py", line 1284, in script
return torch.jit._recursive.create_script_module(
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 480, in create_script_module
return create_script_module_impl(nn_module, concrete_type, stubs_fn)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 542, in create_script_module_impl
script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_script.py", line 614, in _construct
init_fn(script_module)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 520, in init_fn
scripted = create_script_module_impl(orig_value, sub_concrete_type, stubs_fn)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 542, in create_script_module_impl
script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_script.py", line 614, in _construct
init_fn(script_module)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 520, in init_fn
scripted = create_script_module_impl(orig_value, sub_concrete_type, stubs_fn)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 542, in create_script_module_impl
script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_script.py", line 614, in _construct
init_fn(script_module)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 520, in init_fn
scripted = create_script_module_impl(orig_value, sub_concrete_type, stubs_fn)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 542, in create_script_module_impl
script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_script.py", line 614, in _construct
init_fn(script_module)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 520, in init_fn
scripted = create_script_module_impl(orig_value, sub_concrete_type, stubs_fn)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 546, in create_script_module_impl
create_methods_and_properties_from_stubs(concrete_type, method_stubs, property_stubs)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 397, in create_methods_and_properties_from_stubs
concrete_type._create_methods_and_properties(property_defs, property_rcbs, method_defs, method_rcbs, method_defaults)
RuntimeError:
Module 'CausalSelfAttention' has no attribute 'bias' :
File "/ampere/aml/natural_language_processing/text_generation/nanogpt/nanoGPT/model.py", line 78
# manual implementation of attention
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf'))
~~~~~~~~~ <--- HERE
att = F.softmax(att, dim=-1)
att = self.attn_dropout(att)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/ampere/aml/natural_language_processing/text_generation/nanogpt/run.py", line 78, in <module>
main()
File "/ampere/aml/natural_language_processing/text_generation/nanogpt/run.py", line 71, in main
run_pytorch_fp32(**vars(args))
File "/ampere/aml/natural_language_processing/text_generation/nanogpt/run.py", line 63, in run_pytorch_fp32
run_pytorch(model_name, batch_size, num_runs, timeout, lambada_path, disable_jit_freeze, **kwargs)
File "/ampere/aml/natural_language_processing/text_generation/nanogpt/run.py", line 57, in run_pytorch
runner = PyTorchRunner(model, disable_jit_freeze=disable_jit_freeze, func="generate")
File "/ampere/mzd/ampere_model_library/utils/pytorch.py", line 69, in init
self._frozen_script = torch.jit.freeze(torch.jit.trace(self._model, example_inputs))
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_trace.py", line 793, in trace
raise RuntimeError("example_kwarg_inputs should be a dict")
RuntimeError: example_kwarg_inputs should be a dict`
The text was updated successfully, but these errors were encountered:
the command:
python run.py --model_name gpt2 -f pytorch --lambada_path /ampere/aml/natural_language_processing/text_generation/nanogpt/lambada_test_plain_text.txt
results in the following error:
`Traceback (most recent call last):
File "/ampere/mzd/ampere_model_library/utils/pytorch.py", line 65, in init
self._frozen_script = torch.jit.freeze(torch.jit.script(self._model), preserved_attrs=[func])
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_script.py", line 1284, in script
return torch.jit._recursive.create_script_module(
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 480, in create_script_module
return create_script_module_impl(nn_module, concrete_type, stubs_fn)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 542, in create_script_module_impl
script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_script.py", line 614, in _construct
init_fn(script_module)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 520, in init_fn
scripted = create_script_module_impl(orig_value, sub_concrete_type, stubs_fn)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 542, in create_script_module_impl
script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_script.py", line 614, in _construct
init_fn(script_module)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 520, in init_fn
scripted = create_script_module_impl(orig_value, sub_concrete_type, stubs_fn)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 542, in create_script_module_impl
script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_script.py", line 614, in _construct
init_fn(script_module)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 520, in init_fn
scripted = create_script_module_impl(orig_value, sub_concrete_type, stubs_fn)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 542, in create_script_module_impl
script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_script.py", line 614, in _construct
init_fn(script_module)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 520, in init_fn
scripted = create_script_module_impl(orig_value, sub_concrete_type, stubs_fn)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 546, in create_script_module_impl
create_methods_and_properties_from_stubs(concrete_type, method_stubs, property_stubs)
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_recursive.py", line 397, in create_methods_and_properties_from_stubs
concrete_type._create_methods_and_properties(property_defs, property_rcbs, method_defs, method_rcbs, method_defaults)
RuntimeError:
Module 'CausalSelfAttention' has no attribute 'bias' :
File "/ampere/aml/natural_language_processing/text_generation/nanogpt/nanoGPT/model.py", line 78
# manual implementation of attention
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf'))
~~~~~~~~~ <--- HERE
att = F.softmax(att, dim=-1)
att = self.attn_dropout(att)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/ampere/aml/natural_language_processing/text_generation/nanogpt/run.py", line 78, in <module>
main()
File "/ampere/aml/natural_language_processing/text_generation/nanogpt/run.py", line 71, in main
run_pytorch_fp32(**vars(args))
File "/ampere/aml/natural_language_processing/text_generation/nanogpt/run.py", line 63, in run_pytorch_fp32
run_pytorch(model_name, batch_size, num_runs, timeout, lambada_path, disable_jit_freeze, **kwargs)
File "/ampere/aml/natural_language_processing/text_generation/nanogpt/run.py", line 57, in run_pytorch
runner = PyTorchRunner(model, disable_jit_freeze=disable_jit_freeze, func="generate")
File "/ampere/mzd/ampere_model_library/utils/pytorch.py", line 69, in init
self._frozen_script = torch.jit.freeze(torch.jit.trace(self._model, example_inputs))
File "/usr/local/lib/python3.10/dist-packages/torch/jit/_trace.py", line 793, in trace
raise RuntimeError("example_kwarg_inputs should be a dict")
RuntimeError: example_kwarg_inputs should be a dict`
The text was updated successfully, but these errors were encountered: