
Commit 976a794

Merge pull request #236 from MannLabs/230-basic-linting-rule-f-remove-unused

230 basic linting rule f remove unused
mschwoer authored Jun 19, 2024
2 parents 03b8fb7 + 773f163 commit 976a794
Showing 7 changed files with 12 additions and 39 deletions.
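All seven files address the same finding: Pyflakes rule F841, "local variable is assigned to but never used". Earlier commits had marked each occurrence with a `# noqa: F841` suppression plus a TODO; this commit resolves most of those TODOs by deleting the dead stores outright, while the test files keep their suppressions (with a shortened TODO) because the calls double as smoke tests. A minimal sketch of what the rule flags and how the per-line suppression works, with a hypothetical function (assuming a Ruff- or flake8-style linter, e.g. `ruff check --select F841`):

```python
def summarize(values: list[float]) -> float:
    total = sum(values)
    n_values = len(values)  # F841: `n_values` is assigned but never used
    return total


def summarize_suppressed(values: list[float]) -> float:
    total = sum(values)
    n_values = len(values)  # noqa: F841  # suppressed per line, as in this diff
    return total
```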

alphadia/data/bruker.py (3 changes: 0 additions & 3 deletions)

@@ -693,9 +693,6 @@ def assemble_push_intensity(
         len(unique_precursor_index)
     )
 
-    relative_precursor_index = precursor_index_reverse[precursor_index] # noqa: F841 # TODO check for potential bug then remove this line
-
-    n_precursor_indices = len(unique_precursor_index) # noqa: F841 # TODO check for potential bug then remove this line
     n_tof_slices = len(tof_limits)
 
     # scan valuesa
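The deleted statements built a reverse lookup from raw precursor indices to their positions among the unique values; the result was never read afterwards, hence the removal. A hypothetical, self-contained sketch of that pattern (names borrowed from the diff; shapes and dtypes assumed, not the actual bruker.py implementation):

```python
import numpy as np

precursor_index = np.array([7, 3, 7, 9], dtype=np.int64)
unique_precursor_index = np.unique(precursor_index)  # [3, 7, 9]

# reverse map: raw precursor index -> position within the unique values
precursor_index_reverse = np.zeros(precursor_index.max() + 1, dtype=np.int64)
precursor_index_reverse[unique_precursor_index] = np.arange(len(unique_precursor_index))

relative_precursor_index = precursor_index_reverse[precursor_index]  # [1, 0, 1, 2]
```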

alphadia/fdr.py (2 changes: 0 additions & 2 deletions)

@@ -344,10 +344,8 @@ def plot_fdr(
     """
 
     y_test_proba = classifier.predict_proba(X_test)[:, 1]
-    y_test_pred = np.round(y_test_proba) # noqa: F841 # TODO check for potential bug then remove this line
 
     y_train_proba = classifier.predict_proba(X_train)[:, 1]
-    y_train_pred = np.round(y_train_proba) # noqa: F841 # TODO check for potential bug then remove this line
 
     fpr_test, tpr_test, thresholds_test = sklearn.metrics.roc_curve(
         y_test, y_test_proba
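Only the hard class labels (`np.round` of the probabilities) were unused; the probabilities themselves still feed the ROC computation below the deleted lines. A minimal sketch of the surviving code path with toy data:

```python
import numpy as np
import sklearn.metrics

y_test = np.array([0, 1, 1, 0, 1])
y_test_proba = np.array([0.2, 0.9, 0.7, 0.4, 0.6])

fpr_test, tpr_test, thresholds_test = sklearn.metrics.roc_curve(y_test, y_test_proba)

# Hard predictions can be recovered on demand if a later plot ever needs them
# (matches np.round for probabilities away from exactly 0.5):
y_test_pred = (y_test_proba >= 0.5).astype(int)
```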

alphadia/features.py (17 changes: 0 additions & 17 deletions)

@@ -465,14 +465,6 @@ def build_features(
         dense_precursors[0], p_expected_scan_center, p_expected_frame_center
     ).reshape(n_precursors, n_isotopes)
 
-    # sum precursor
-    sum_precursor_intensity = np.sum( # noqa: F841 # TODO check for potential bug then remove this line
-        np.sum(dense_precursors[0], axis=-1), axis=-1
-    ).astype(np.float32)
-    sum_fragment_intensity = np.sum( # noqa: F841 # TODO check for potential bug then remove this line
-        np.sum(dense_fragments[0], axis=-1), axis=-1
-    ).astype(np.float32)
-
     # (n_precursor, n_isotopes)
     mass_error_array = (observed_precursor_mz - isotope_mz) / isotope_mz * 1e6
 
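The deleted aggregation collapsed the last two axes of a dense intensity stack. The nested `np.sum` calls, rather than a single tuple `axis`, are typical for Numba nopython code, where `np.sum(a, axis=(-2, -1))` is not supported. A sketch with assumed shapes:

```python
import numpy as np

# (n_precursors, n_isotopes, n_scans, n_frames) -- shape assumed for illustration
dense = np.random.rand(4, 3, 8, 8).astype(np.float32)

# collapse the two trailing axes, as the removed lines did
sum_precursor_intensity = np.sum(np.sum(dense, axis=-1), axis=-1).astype(np.float32)
assert sum_precursor_intensity.shape == (4, 3)
```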
@@ -724,10 +716,6 @@ def fragment_features(
     quant_window: nb.uint32 = 3,
     quant_all: nb.boolean = False,
 ):
-    fragment_feature_dict = nb.typed.Dict.empty( # noqa: F841 # TODO check for potential bug then remove this line
-        key_type=nb.types.unicode_type, value_type=float_array
-    )
-
     n_observations = observation_importance.shape[0]
     n_fragments = dense_fragments.shape[1]
     feature_array[17] = float(n_observations)
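The removed constructor allocated a Numba typed dictionary that was never populated or read. For reference, the construction pattern itself is valid; a standalone sketch, where `float_array` is assumed to be a module-level type alias in features.py:

```python
import numba as nb
import numpy as np

float_array = nb.types.float32[:]  # assumed alias

fragment_feature_dict = nb.typed.Dict.empty(
    key_type=nb.types.unicode_type, value_type=float_array
)
fragment_feature_dict["intensity"] = np.zeros(4, dtype=np.float32)
```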
@@ -780,7 +768,6 @@
 
     # (quant_window * 2 + 1)
     frame_rt_quant = frame_rt[center - quant_window : center + quant_window + 1]
-    quant_duration = frame_rt_quant[-1] - frame_rt_quant[0] # noqa: F841 # TODO check for potential bug then remove this line
 
     # (quant_window * 2)
     delta_rt = frame_rt_quant[1:] - frame_rt_quant[:-1]
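`quant_duration` was the total retention-time span of the quantification window, but only the step-wise `delta_rt` is consumed afterwards, so the span was dead. Toy values (assumed) showing the relationship between the kept and removed quantities:

```python
import numpy as np

# quant_window = 3 -> window of 2 * 3 + 1 frames; RT values assumed
frame_rt_quant = np.array([10.0, 10.4, 10.9, 11.3, 11.8, 12.2, 12.7], dtype=np.float32)

delta_rt = frame_rt_quant[1:] - frame_rt_quant[:-1]      # kept: per-step spacing
quant_duration = frame_rt_quant[-1] - frame_rt_quant[0]  # removed: telescopes to the sum
assert np.isclose(quant_duration, delta_rt.sum())
```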
@@ -1019,8 +1006,6 @@ def profile_features(
     feature_array,
 ):
     n_observations = len(observation_importance)
-    # most intense observation across all observations
-    best_observation = np.argmax(observation_importance) # noqa: F841 # TODO check for potential bug then remove this line
 
     fragment_idx_sorted = np.argsort(fragment_intensity)[::-1]
 
@@ -1203,8 +1188,6 @@ def reference_features(
         key_type=nb.types.unicode_type, value_type=nb.types.float32
     )
 
-    n_observation = reference_observation_importance.shape[0] # noqa: F841 # TODO check for potential bug then remove this line
-    n_fragments = reference_fragments_scan_profile.shape[0] # noqa: F841 # TODO check for potential bug then remove this line
     fragment_idx_sorted = np.argsort(fragment_lib_intensity)[::-1]
 
     if (

alphadia/outputtransform.py (6 changes: 3 additions & 3 deletions)

@@ -400,9 +400,9 @@ def build_transfer_model(self):
         tune_mgr = FinetuneManager(
             device=device, settings=self.config["transfer_learning"]
         )
-        stats = tune_mgr.finetune_rt(transfer_lib.precursor_df) # noqa: F841 # TODO check for potential bug then remove this line
-        stats = tune_mgr.finetune_charge(transfer_lib.precursor_df) # noqa: F841 # TODO check for potential bug then remove this line
-        stats = tune_mgr.finetune_ms2( # noqa: F841 # TODO check for potential bug then remove this line
+        tune_mgr.finetune_rt(transfer_lib.precursor_df)
+        tune_mgr.finetune_charge(transfer_lib.precursor_df)
+        tune_mgr.finetune_ms2(
             transfer_lib.precursor_df.copy(), transfer_lib.fragment_intensity_df.copy()
         )
 
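Here the fix for F841 is to stop binding the return value rather than suppressing the warning: `stats` was overwritten twice and never read. When a discard should stay visible, an underscore binding also satisfies the rule under the linter's default dummy-variable pattern (the same convention that keeps `_ = grouping.perform_grouping(...)` alive in tests/unit_tests/test_grouping.py below). A runnable sketch of both options, using a hypothetical stand-in for FinetuneManager:

```python
class FinetuneManagerStub:
    """Hypothetical stand-in, just to illustrate the two discard styles."""

    def finetune_rt(self, precursor_df):
        return {"loss": 0.0}  # the stats the old code bound but never read


tune_mgr = FinetuneManagerStub()

# Option chosen in this commit: call purely for its side effects.
tune_mgr.finetune_rt(None)

# Equivalent explicit discard; '_' matches the dummy-variable pattern, so no F841.
_ = tune_mgr.finetune_rt(None)
```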

alphadia/utils.py (7 changes: 3 additions & 4 deletions)

@@ -423,9 +423,6 @@ def fourier_filter(dense_stack, kernel):
     """
 
-    k0 = kernel.shape[0] # noqa: F841 # TODO check for potential bug then remove this line
-    k1 = kernel.shape[1] # noqa: F841 # TODO check for potential bug then remove this line
-
     # make sure both dimensions are even
     scan_mod = dense_stack.shape[3] % 2
     frame_mod = dense_stack.shape[4] % 2
@@ -453,8 +450,10 @@ def fourier_filter(dense_stack, kernel):
 
     # with nb.objmode(smooth_output='float32[:,:,:,:]'):
     #    # roll back to original position
+    #    k0 = kernel.shape[0]
+    #    k1 = kernel.shape[1]
     #    smooth_output = np.roll(smooth_output, -k0//2, axis=2)
     #    smooth_output = np.roll(smooth_output, -k1//2, axis=3)
 
     return smooth_output
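The live `k0`/`k1` bindings were dead code, since the only consumer is the commented-out `nb.objmode` block; the commit moves the definitions into that comment so the block stays self-contained if it is ever revived. The roll-back it describes re-centers the result of an FFT-based convolution; a standalone sketch with assumed shapes and kernel size:

```python
import numpy as np

smooth_output = np.random.rand(2, 3, 16, 16).astype(np.float32)  # shape assumed
k0, k1 = 5, 7  # kernel.shape[0], kernel.shape[1] -- values assumed

# roll back to original position: shift by about half the kernel size per axis
smooth_output = np.roll(smooth_output, -k0 // 2, axis=2)
smooth_output = np.roll(smooth_output, -k1 // 2, axis=3)
```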

tests/unit_tests/test_fdr.py (12 changes: 6 additions & 6 deletions)

@@ -258,11 +258,11 @@ def test_feed_forward():
     # assert classifier.metrics["test_accuracy"][-1] > 0.99
     # assert classifier.metrics["train_accuracy"][-1] > 0.99
 
-    y_pred = classifier.predict(x) # noqa: F841 # TODO check for potential bug then remove this line
-    # assert np.all(y_pred == y) # TODO fix this test
+    y_pred = classifier.predict(x) # noqa: F841 # TODO fix this test
+    # assert np.all(y_pred == y)
 
-    y_proba = classifier.predict_proba(x)[:, 1] # noqa: F841 # TODO check for potential bug then remove this line
-    # assert np.all(np.round(y_proba) == y) # TODO fix this test
+    y_proba = classifier.predict_proba(x)[:, 1] # noqa: F841 # TODO fix this test
+    # assert np.all(np.round(y_proba) == y)
 
 
 def test_feed_forward_save():
@@ -285,5 +285,5 @@ def test_feed_forward_save():
         torch.load(os.path.join(tempfolder, "test_feed_forward_save.pth"))
     )
 
-    y_pred = new_classifier.predict(x) # noqa: F841 # TODO check for potential bug then remove this line
-    # assert np.all(y_pred == y) # TODO fix this test
+    y_pred = new_classifier.predict(x) # noqa: F841 # TODO fix this test
+    # assert np.all(y_pred == y)

tests/unit_tests/test_grouping.py (4 changes: 0 additions & 4 deletions)

@@ -1,4 +1,3 @@
-import time
 import pytest
 import numpy as np
 import pandas as pd
@@ -194,8 +193,5 @@ def test_grouping_fuzz(expected_time: int = 10):
         {"precursor_idx": precursor_idx, "proteins": proteins, "decoy": decoys}
     )
 
-    grouping_start_time = time.time()
     _ = grouping.perform_grouping(simulated_psm_data, genes_or_proteins="proteins")
-    grouping_end_time = time.time()
-    elapsed_time = grouping_end_time - grouping_start_time # noqa: F841 # TODO check for potential bug then remove this line
     assert True  # TODO fix this test
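The timing scaffold was deleted because `elapsed_time` never reached an assertion; the test currently ends in `assert True`. If the performance check is ever restored, binding the measurement to a name that is actually asserted satisfies F841 without any suppression. A hypothetical sketch of that intent (import path and fixture names assumed):

```python
import time

from alphadia import grouping  # import path assumed


def test_grouping_fuzz_timed(simulated_psm_data, expected_time: int = 10):
    # hypothetical restored version of the removed timing check
    start = time.perf_counter()
    _ = grouping.perform_grouping(simulated_psm_data, genes_or_proteins="proteins")
    elapsed_time = time.perf_counter() - start
    assert elapsed_time < expected_time
```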
