Commit

pickable Vocab / v3.0.2 (#2268)
vince62s authored Dec 7, 2022
1 parent 874e18a commit 9698acd
Showing 6 changed files with 23 additions and 13 deletions.
11 changes: 10 additions & 1 deletion CHANGELOG.md
@@ -4,7 +4,16 @@

## [Unreleased]

-## [3.0.0](https://github.com/OpenNMT/OpenNMT-py/tree/3.0.1) (2022-11-23)
+## [3.0.2](https://github.com/OpenNMT/OpenNMT-py/tree/3.0.2) (2022-12-07)
+* pyonmttok.Vocab is now pickable. dataloader switched to spawn. (MacOS/Windows compatible)
+* fix scoring with specific metrics (BLEU, TER)
+* fix tensorboard logging
+* fix dedup in batch iterator (only for TRAIN, was happening at inference also)
+* New: Change: tgt_prefix renamed to tgt_file_prefix
+* New: tgt_prefix / src_prefix used for "prefix" Transform (onmt/transforms/misc.py)
+* New: process transforms of buckets in batches (vs per example) / faster
+
+## [3.0.1](https://github.com/OpenNMT/OpenNMT-py/tree/3.0.1) (2022-11-23)

* fix dynamic scoring
* reinstate apex.amp level O1/O2 for benchmarking
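The first entry above pairs a picklable `pyonmttok.Vocab` with a dataloader that starts its workers via `spawn`, which is what makes the pipeline usable on macOS and Windows. As a minimal sketch of the constraint this introduces (a toy dict stands in for the real vocab; nothing here is OpenNMT-py code): under `spawn`, each worker is a fresh interpreter, so every object handed to it must survive a pickle round trip instead of being inherited through `fork`.

```python
# Toy illustration: "spawn" workers receive pickled copies of their arguments.
import multiprocessing as mp
import pickle


def count_tokens(vocab):
    # The worker operates on an unpickled copy of `vocab`.
    return len(vocab)


if __name__ == "__main__":
    vocab = {"hello": 0, "world": 1}      # hypothetical stand-in for a vocab object
    pickle.dumps(vocab)                   # spawn would fail later if this raised
    ctx = mp.get_context("spawn")         # the default start method on macOS/Windows
    with ctx.Pool(1) as pool:
        print(pool.apply(count_tokens, (vocab,)))   # -> 2
```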
2 changes: 1 addition & 1 deletion onmt/__init__.py
@@ -15,4 +15,4 @@
__all__ = [onmt.inputters, onmt.encoders, onmt.decoders, onmt.models,
onmt.utils, onmt.modules, "Trainer"]

__version__ = "3.0.1"
__version__ = "3.0.2"
2 changes: 1 addition & 1 deletion onmt/inputters/dynamic_iterator.py
@@ -341,7 +341,7 @@ def build_dynamic_dataset_iter(opt, transforms_cls, vocabs, copy=False,
else:
data_loader = DataLoader(data_iter, batch_size=None,
pin_memory=True,
multiprocessing_context="fork",
multiprocessing_context="spawn",
num_workers=data_iter.num_workers,
worker_init_fn=data_iter._init_datasets,
prefetch_factor=opt.prefetch_factor)
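The hunk above is where the change lands in the data pipeline: the `DataLoader` workers now start with the `spawn` context instead of `fork`. A reduced sketch under stated assumptions (a toy `IterableDataset` in place of the project's dynamic iterator, and a literal `prefetch_factor` in place of the option whose default the opts.py hunk below raises to 200):

```python
# Sketch of a DataLoader using the "spawn" start method, as in the diff above.
# The dataset object is pickled into each worker, hence the need for picklable
# vocabularies; this toy stream does not shard work across workers.
import torch
from torch.utils.data import DataLoader, IterableDataset


class ToyStream(IterableDataset):
    """Hypothetical stand-in for the dynamic dataset iterator."""

    def __iter__(self):
        for i in range(8):
            yield torch.tensor([i])


if __name__ == "__main__":
    loader = DataLoader(ToyStream(),
                        batch_size=None,              # batching is done upstream
                        num_workers=2,
                        multiprocessing_context="spawn",
                        prefetch_factor=2)            # batches pre-loaded per worker
    for batch in loader:
        print(batch)
```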
2 changes: 1 addition & 1 deletion onmt/opts.py
@@ -648,7 +648,7 @@ def _add_train_dynamic_data(parser):
type=int, default=0,
help="""The bucket size is incremented with this
amount of examples (optional)""")
group.add("-prefetch_factor", "--prefetch_factor", type=int, default=100,
group.add("-prefetch_factor", "--prefetch_factor", type=int, default=200,
help="""number of mini-batches loaded in advance to avoid the
GPU waiting during the refilling of the bucket.""")

15 changes: 8 additions & 7 deletions onmt/utils/scoring_utils.py
@@ -185,11 +185,12 @@ def translate(self, model, sources, refs, gpu_rank, step, mode):
file.write("PRED: {}\n\n".format(preds[i]))
# we deactivate the decoder's cache
# as we use teacher forcing at training time.
-        for layer in model.decoder.transformer_layers:
-            layer.self_attn.layer_cache = (False,
-                                           {'keys': torch.tensor([]),
-                                            'values': torch.tensor([])})
-            layer.context_attn.layer_cache = (False,
-                                              {'keys': torch.tensor([]),
-                                               'values': torch.tensor([])})
+        if hasattr(model.decoder, 'transformer_layers'):
+            for layer in model.decoder.transformer_layers:
+                layer.self_attn.layer_cache = (False,
+                                               {'keys': torch.tensor([]),
+                                                'values': torch.tensor([])})
+                layer.context_attn.layer_cache = (False,
+                                                  {'keys': torch.tensor([]),
+                                                   'values': torch.tensor([])})
return preds, texts_ref
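The guard added here matters because only Transformer decoders expose `transformer_layers`; an RNN-style decoder has no such attribute, so the unguarded loop would raise `AttributeError` when scoring during training with those models. A toy sketch of the same defensive pattern (the classes below are illustrative, not OpenNMT-py's):

```python
# Illustrative only: reset attention caches when the decoder actually has them.
import torch


class ToyAttention:
    def __init__(self):
        self.layer_cache = (True, {'keys': torch.tensor([1.]),
                                   'values': torch.tensor([1.])})


class ToyLayer:
    def __init__(self):
        self.self_attn = ToyAttention()
        self.context_attn = ToyAttention()


class ToyTransformerDecoder:
    def __init__(self):
        self.transformer_layers = [ToyLayer()]


class ToyRNNDecoder:
    pass  # no transformer_layers attribute


def reset_decoder_cache(decoder):
    # Same guard as the diff: decoders without transformer_layers are skipped.
    if hasattr(decoder, 'transformer_layers'):
        for layer in decoder.transformer_layers:
            layer.self_attn.layer_cache = (False, {'keys': torch.tensor([]),
                                                   'values': torch.tensor([])})
            layer.context_attn.layer_cache = (False, {'keys': torch.tensor([]),
                                                      'values': torch.tensor([])})


reset_decoder_cache(ToyTransformerDecoder())   # caches cleared
reset_decoder_cache(ToyRNNDecoder())           # no-op instead of AttributeError
```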
4 changes: 2 additions & 2 deletions setup.py
@@ -11,7 +11,7 @@
description='A python implementation of OpenNMT',
long_description=long_description,
long_description_content_type='text/markdown',
-version='3.0.1',
+version='3.0.2',
packages=find_packages(),
project_urls={
"Documentation": "http://opennmt.net/OpenNMT-py/",
@@ -27,7 +27,7 @@
"tensorboard>=2.3",
"flask",
"waitress",
"pyonmttok>=1.34,<2",
"pyonmttok>=1.35,<2",
"pyyaml",
"sacrebleu"
],
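The dependency floor moves to `pyonmttok>=1.35`, the release line whose `Vocab` objects the changelog describes as picklable; the spawn-based dataloader above relies on exactly that. A minimal, hedged check (the no-argument `pyonmttok.Vocab()` constructor is an assumption here; consult the pyonmttok documentation for your version):

```python
# Hedged sanity check: a pyonmttok.Vocab should survive a pickle round trip
# with pyonmttok >= 1.35, which is what spawn-based worker processes require.
import pickle

import pyonmttok

vocab = pyonmttok.Vocab()                      # assumed constructor signature
restored = pickle.loads(pickle.dumps(vocab))   # fails on older pyonmttok releases
print(type(restored))
```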
