diff --git a/CHANGELOG.md b/CHANGELOG.md index 47b86828..628da137 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,23 +25,19 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - `every_n_train_steps` has been renamed to `val_check_interval` in accordance to the corresponding Pytorch Lightning parameter. - Training batches are randomly shuffled. -### Fixed - -- Casanovo runs on CPU and can passes all tests. -- Enable gradients during prediction and validation to avoid NaNs from occuring as a temporary workaround until a new Pytorch version is available. -- Upgrade to depthcharge v0.2.3 for `PeptideTransformerDecoder` hotfix. -- Correctly report amino acid precision and recall during validation. - ### Removed - Remove config option for a custom Pytorch Lightning logger. +- Remove superfluous `custom_encoder` config option. ### Fixed -- Casanovo now runs on CPU and can passes all tests. -- Upgrade to Depthcharge v0.2.0 to fix sinusoidal encoding. +- Casanovo runs on CPU and can pass all tests. - Correctly refer to input peak files by their full file path. - Specifying custom residues to retrain Casanovo is now possible. +- Upgrade to depthcharge v0.2.3 to fix sinusoidal encoding and for the `PeptideTransformerDecoder` hotfix. +- Enable gradients during prediction and validation to avoid NaNs from occurring as a temporary workaround until a new Pytorch version is available. +- Correctly report amino acid precision and recall during validation. 
## [3.3.0] - 2023-04-04 diff --git a/casanovo/config.yaml b/casanovo/config.yaml index dedb1740..729e827d 100644 --- a/casanovo/config.yaml +++ b/casanovo/config.yaml @@ -79,9 +79,6 @@ dropout: 0.0 # Number of dimensions to use for encoding peak intensity # Projected up to ``dim_model`` by default and summed with the peak m/z encoding dim_intensity: -# Option to provide a pre-trained spectrum encoder when training -# Trained from scratch by default -custom_encoder: # Max decoded peptide length max_length: 100 # Number of warmup iterations for learning rate scheduler diff --git a/casanovo/denovo/model.py b/casanovo/denovo/model.py index 03baa594..5851aa76 100644 --- a/casanovo/denovo/model.py +++ b/casanovo/denovo/model.py @@ -43,9 +43,6 @@ class Spec2Pep(pl.LightningModule, ModelMixin): (``dim_model - dim_intensity``) are reserved for encoding the m/z value. If ``None``, the intensity will be projected up to ``dim_model`` using a linear layer, then summed with the m/z encoding for each peak. - custom_encoder : Optional[Union[SpectrumEncoder, PairedSpectrumEncoder]] - A pretrained encoder to use. The ``dim_model`` of the encoder must be - the same as that specified by the ``dim_model`` parameter here. max_length : int The maximum peptide length to decode. residues: Union[Dict[str, float], str] @@ -97,7 +94,6 @@ def __init__( n_layers: int = 9, dropout: float = 0.0, dim_intensity: Optional[int] = None, - custom_encoder: Optional[SpectrumEncoder] = None, max_length: int = 100, residues: Union[Dict[str, float], str] = "canonical", max_charge: int = 5, @@ -120,17 +116,14 @@ def __init__( self.save_hyperparameters() # Build the model. 
- if custom_encoder is not None: - self.encoder = custom_encoder - else: - self.encoder = SpectrumEncoder( - dim_model=dim_model, - n_head=n_head, - dim_feedforward=dim_feedforward, - n_layers=n_layers, - dropout=dropout, - dim_intensity=dim_intensity, - ) + self.encoder = SpectrumEncoder( + dim_model=dim_model, + n_head=n_head, + dim_feedforward=dim_feedforward, + n_layers=n_layers, + dropout=dropout, + dim_intensity=dim_intensity, + ) self.decoder = PeptideDecoder( dim_model=dim_model, n_head=n_head, diff --git a/casanovo/denovo/model_runner.py b/casanovo/denovo/model_runner.py index cbefd849..852860d3 100644 --- a/casanovo/denovo/model_runner.py +++ b/casanovo/denovo/model_runner.py @@ -212,7 +212,6 @@ def initialize_model(self, train: bool) -> None: n_layers=self.config.n_layers, dropout=self.config.dropout, dim_intensity=self.config.dim_intensity, - custom_encoder=self.config.custom_encoder, max_length=self.config.max_length, residues=self.config.residues, max_charge=self.config.max_charge,