Remove np.asfarray, np.float_ usage to support numpy-2.x deprecation
suhas-r authored and etal committed Jan 16, 2025
1 parent dd834b0 commit 5cb6aea
Showing 11 changed files with 21 additions and 21 deletions.
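Every change in this commit follows the same two substitutions, needed because NumPy 2.0 removes np.asfarray and the np.float_ alias: np.asfarray(x) becomes np.asarray(x, dtype=float), and np.float_ becomes np.float64 (which it has always aliased). A minimal standalone sketch of the pattern, not taken from the repository:

```python
import numpy as np

# np.asfarray and np.float_ are gone in NumPy 2.x; these spellings work on both 1.x and 2.x.
values = [1, 2, 3.5]

# Before: arr = np.asfarray(values)
arr = np.asarray(values, dtype=float)   # same float64 array that asfarray produced

# Before: out = np.zeros(4, dtype=np.float_)
out = np.zeros(4, dtype=np.float64)     # np.float_ was simply an alias for float64

print(arr.dtype, out.dtype)             # float64 float64
```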
cnvlib/bintest.py (2 changes: 1 addition & 1 deletion)
@@ -88,7 +88,7 @@ def z_prob(cnarr):

def p_adjust_bh(p):
"""Benjamini-Hochberg p-value correction for multiple hypothesis testing."""
- p = np.asfarray(p)
+ p = np.asarray(p, dtype=float)
by_descend = p.argsort()[::-1]
by_orig = by_descend.argsort()
steps = float(len(p)) / np.arange(len(p), 0, -1)
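Only the first few lines of p_adjust_bh appear in the hunk above. For context, here is a self-contained sketch of the full step-down Benjamini-Hochberg adjustment using the NumPy-2.x-safe coercion; the lines beyond the visible hunk follow the standard procedure and are an assumption, not copied from cnvlib:

```python
import numpy as np

def p_adjust_bh(p):
    """Benjamini-Hochberg p-value correction for multiple hypothesis testing."""
    p = np.asarray(p, dtype=float)
    by_descend = p.argsort()[::-1]        # p-values from largest to smallest
    by_orig = by_descend.argsort()        # mapping back to the input order
    steps = float(len(p)) / np.arange(len(p), 0, -1)
    # Step-down: scale by n/rank, then enforce monotonicity with a running minimum.
    q = np.minimum(1, np.minimum.accumulate(steps * p[by_descend]))
    return q[by_orig]

print(p_adjust_bh([0.01, 0.04, 0.03, 0.20]))
# [0.04       0.05333333 0.05333333 0.2       ]
```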
cnvlib/call.py (4 changes: 2 additions & 2 deletions)
@@ -130,7 +130,7 @@ def absolute_threshold(cnarr, ploidy, thresholds, is_haploid_x_reference):
GAIN(3) >= +0.3
"""
- absolutes = np.zeros(len(cnarr), dtype=np.float_)
+ absolutes = np.zeros(len(cnarr), dtype=np.float64)
for idx, row in enumerate(cnarr):
ref_copies = _reference_copies_pure(row.chromosome, ploidy, is_haploid_x_reference)
if np.isnan(row.log2):
@@ -161,7 +161,7 @@ def absolute_clonal(cnarr, ploidy, purity, is_haploid_x_reference, diploid_parx_

def absolute_pure(cnarr, ploidy, is_haploid_x_reference):
"""Calculate absolute copy number values from segment or bin log2 ratios."""
- absolutes = np.zeros(len(cnarr), dtype=np.float_)
+ absolutes = np.zeros(len(cnarr), dtype=np.float64)
for i, row in enumerate(cnarr):
ref_copies = _reference_copies_pure(row.chromosome, ploidy, is_haploid_x_reference)
absolutes[i] = _log2_ratio_to_absolute_pure(row.log2, ref_copies)
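The helper _log2_ratio_to_absolute_pure, called in both hunks above, is not part of this diff. For a pure sample it presumably just rescales the reference copy number by two raised to the log2 ratio; the sketch below assumes that formula and signature and is illustrative only:

```python
def _log2_ratio_to_absolute_pure(log2_ratio, ref_copies):
    # Assumed behavior: log2 = 0 maps back to the reference copy number,
    # +1 doubles it, -1 halves it.
    return ref_copies * 2 ** log2_ratio

# A diploid autosome (ref_copies=2) with log2 of about +0.585 comes out near 3 copies.
print(_log2_ratio_to_absolute_pure(0.585, 2))  # ~3.0
```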
cnvlib/cnary.py (2 changes: 1 addition & 1 deletion)
@@ -492,7 +492,7 @@ def expect_flat_log2(self, is_haploid_x_reference=None, diploid_parx_genome=None
"""
if is_haploid_x_reference is None:
is_haploid_x_reference = not self.guess_xx(diploid_parx_genome=diploid_parx_genome, verbose=False)
- cvg = np.zeros(len(self), dtype=np.float_)
+ cvg = np.zeros(len(self), dtype=np.float64)
if is_haploid_x_reference:
# Single-copy X, Y
idx = self.chr_x_filter(diploid_parx_genome).values | (self.chr_y_filter(diploid_parx_genome)).values
cnvlib/descriptives.py (6 changes: 3 additions & 3 deletions)
@@ -21,7 +21,7 @@ def on_array(default=None):
def outer(f):
@wraps(f)
def wrapper(a, **kwargs):
- a = np.asfarray(a)
+ a = np.asarray(a, dtype=float)
a = a[~np.isnan(a)]
if not len(a):
return np.nan
@@ -52,8 +52,8 @@ def wrapper(a, w, **kwargs):
raise ValueError(f"Unequal array lengths: a={len(a)}, w={len(w)}")
if not len(a):
return np.nan
- a = np.asfarray(a)
- w = np.asfarray(w)
+ a = np.asarray(a, dtype=float)
+ w = np.asarray(w, dtype=float)
# Drop a's NaN indices from both arrays
a_nan = np.isnan(a)
if a_nan.any():
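The on_array decorator shown above coerces its input to a float array, drops NaNs, and bails out early on empty input before delegating to the wrapped statistic. Here is a minimal standalone sketch of that pattern with the NumPy-2.x-safe coercion; the real cnvlib decorator also uses the default argument in code not shown in this hunk:

```python
from functools import wraps

import numpy as np

def on_array(default=None):
    """Coerce input to float, drop NaNs, and short-circuit empty arrays."""
    def outer(f):
        @wraps(f)
        def wrapper(a, **kwargs):
            a = np.asarray(a, dtype=float)   # replaces the removed np.asfarray
            a = a[~np.isnan(a)]
            if not len(a):
                return np.nan
            return f(a, **kwargs)
        return wrapper
    return outer

@on_array()
def mean_stat(a):
    return a.mean()   # stand-in statistic for illustration

print(mean_stat([1.0, np.nan, 3.0, 5.0]))  # 3.0
```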
cnvlib/importers.py (2 changes: 1 addition & 1 deletion)
@@ -69,7 +69,7 @@ def do_import_theta(segarr, theta_results_fname, ploidy=2):
# Drop any segments where the C value is None
mask_drop = np.array([c is None for c in copies], dtype="bool")
segarr = segarr[~mask_drop].copy()
- ok_copies = np.asfarray([c for c in copies if c is not None])
+ ok_copies = np.asarray([c for c in copies if c is not None], dtype=float)
# Replace remaining segment values with these integers
segarr["cn"] = ok_copies.astype("int")
ok_copies[ok_copies == 0] = 0.5
cnvlib/reference.py (2 changes: 1 addition & 1 deletion)
@@ -575,7 +575,7 @@ def get_fasta_stats(cnarr, fa_fname):
calculate_gc_lo(subseq) for subseq in fasta_extract_regions(fa_fname, cnarr)
]
gc_vals, rm_vals = zip(*gc_rm_vals)
- return np.asfarray(gc_vals), np.asfarray(rm_vals)
+ return np.asarray(gc_vals, dtype=float), np.asarray(rm_vals, dtype=float)


def calculate_gc_lo(subseq):
cnvlib/segmentation/haar.py (6 changes: 3 additions & 3 deletions)
@@ -305,9 +305,9 @@ def HaarConv(
logging.debug(
"Error?: stepHalfSize (%s) > signalSize (%s)", stepHalfSize, signalSize
)
- return np.zeros(signalSize, dtype=np.float_)
+ return np.zeros(signalSize, dtype=np.float64)

- result = np.zeros(signalSize, dtype=np.float_)
+ result = np.zeros(signalSize, dtype=np.float64)
if weight is not None:
# Init weight sums
highWeightSum = weight[:stepHalfSize].sum()
@@ -490,7 +490,7 @@ def PulseConv(
pulseHeight = 1.0 / pulseSize

# Circular padding init
- result = np.zeros(signalSize, dtype=np.float_)
+ result = np.zeros(signalSize, dtype=np.float64)
for k in range((pulseSize + 1) // 2):
result[0] += signal[k]
for k in range(pulseSize // 2):
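HaarConv and PulseConv come from the HaarSeg segmentation method; at each position HaarConv responds to the difference between the two half-windows flanking that point, which peaks at copy-number breakpoints. A heavily simplified, unweighted stand-in is sketched below (plain truncation at the edges rather than HaarSeg's padding, and no weight handling), just to show the shape of the computation that the patched lines allocate buffers for:

```python
import numpy as np

def haar_step_response(signal, step_half_size):
    """Difference of the right and left half-window means at each position."""
    signal = np.asarray(signal, dtype=float)
    n = len(signal)
    result = np.zeros(n, dtype=np.float64)
    for k in range(step_half_size, n - step_half_size + 1):
        left = signal[k - step_half_size:k].mean()
        right = signal[k:k + step_half_size].mean()
        result[k] = right - left
    return result

print(haar_step_response([0, 0, 0, 1, 1, 1], step_half_size=2))
# [0.  0.  0.5 1.  0.5 0. ]  -- largest response at the change point
```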
cnvlib/segmetrics.py (6 changes: 3 additions & 3 deletions)
@@ -52,7 +52,7 @@ def do_segmetrics(
for statname in location_stats:
func = stat_funcs[statname]
segarr[statname] = np.fromiter(
- map(func, bins_log2s), np.float_, len(segarr)
+ map(func, bins_log2s), np.float64, len(segarr)
)
# Measures of spread
if spread_stats:
@@ -62,7 +62,7 @@
for statname in spread_stats:
func = stat_funcs[statname]
segarr[statname] = np.fromiter(
- map(func, deviations), np.float_, len(segarr)
+ map(func, deviations), np.float64, len(segarr)
)
# Interval calculations
weights = cnarr["weight"]
@@ -137,7 +137,7 @@ def confidence_interval_bootstrap(
samples = _smooth_samples_by_weight(values, samples)
# Recalculate segment means
seg_means = (np.average(val, weights=wt) for val, wt in samples)
- bootstrap_dist = np.fromiter(seg_means, np.float_, bootstraps)
+ bootstrap_dist = np.fromiter(seg_means, np.float64, bootstraps)
alphas = np.array([alpha / 2, 1 - alpha / 2])
if not smoothed:
# alphas = _bca_correct_alpha(values, weights, bootstrap_dist, alphas)
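The segmetrics changes all touch the same np.fromiter(iterable, dtype, count) call, where np.float64 is a drop-in replacement for the removed np.float_ alias. A small illustration with made-up data:

```python
import numpy as np

# One array of per-bin log2 ratios for each (hypothetical) segment.
bins_log2s = [np.array([0.1, 0.2]), np.array([0.0, -0.3, 0.1])]

# Build one summary value per segment from a generator, preallocating len() slots.
seg_stats = np.fromiter((b.mean() for b in bins_log2s), np.float64, len(bins_log2s))
print(seg_stats, seg_stats.dtype)   # [ 0.15       -0.06666667] float64
```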
cnvlib/smoothing.py (8 changes: 4 additions & 4 deletions)
@@ -16,7 +16,7 @@ def check_inputs(x, width, as_series=True, weights=None):
whole window. The output half-window size is truncated to the length of `x`
if needed.
"""
- x = np.asfarray(x)
+ x = np.asarray(x, dtype=float)
wing = _width2wing(width, x)
signal = _pad_array(x, wing)
if as_series:
@@ -63,21 +63,21 @@ def rolling_median(x, width):
rolled = signal.rolling(2 * wing + 1, 1, center=True).median()
# if rolled.hasnans:
# rolled = rolled.interpolate()
- return np.asfarray(rolled[wing:-wing])
+ return np.asarray(rolled[wing:-wing], dtype=float)


def rolling_quantile(x, width, quantile):
"""Rolling quantile (0--1) with mirrored edges."""
x, wing, signal = check_inputs(x, width)
rolled = signal.rolling(2 * wing + 1, 2, center=True).quantile(quantile)
- return np.asfarray(rolled[wing:-wing])
+ return np.asarray(rolled[wing:-wing], dtype=float)


def rolling_std(x, width):
"""Rolling quantile (0--1) with mirrored edges."""
x, wing, signal = check_inputs(x, width)
rolled = signal.rolling(2 * wing + 1, 2, center=True).std()
- return np.asfarray(rolled[wing:-wing])
+ return np.asarray(rolled[wing:-wing], dtype=float)


def convolve_weighted(window, signal, weights, n_iter=1):
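The smoothing helpers wrap the padded input in a pandas Series and call Series.rolling(...) before trimming the mirrored wings, coercing back to a float array at the end. Below is a compact sketch of that rolling-median idiom with the NumPy-2.x-safe conversions; the padding here is a simple mirror, not cnvlib's exact _pad_array:

```python
import numpy as np
import pandas as pd

def rolling_median_mirrored(x, wing):
    """Rolling median over a (2*wing + 1) window, with edges mirrored."""
    x = np.asarray(x, dtype=float)                       # replaces np.asfarray
    padded = np.concatenate([x[wing:0:-1], x, x[-2:-wing - 2:-1]])  # mirror both ends
    rolled = pd.Series(padded).rolling(2 * wing + 1, 1, center=True).median()
    return np.asarray(rolled[wing:-wing], dtype=float)   # trim the padding back off

print(rolling_median_mirrored([1, 2, 10, 3, 4], wing=1))  # [2. 2. 3. 4. 3.]
```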
scripts/guess_baits.py (2 changes: 1 addition & 1 deletion)
@@ -44,7 +44,7 @@ def filter_targets(target_bed, sample_bams, procs, fasta):
raise RuntimeError("Targets must be in BED format; try skg_convert.py")
logging.info("Loaded %d candidate regions from %s", len(baits), target_bed)
# Loop over BAMs to calculate weighted averages of bin coverage depths
- total_depths = np.zeros(len(baits), dtype=np.float_)
+ total_depths = np.zeros(len(baits), dtype=np.float64)
for bam_fname in sample_bams:
logging.info("Evaluating targets in %s", bam_fname)
sample = cnvlib.do_coverage(target_bed, bam_fname, processes=procs, fasta=fasta)
skgenome/intersect.py (2 changes: 1 addition & 1 deletion)
@@ -60,7 +60,7 @@ def into_ranges(
elem = source[src_col].iat[0]
if isinstance(elem, (str, np.string_)):
summary_func = join_strings
- elif isinstance(elem, (float, np.float_)):
+ elif isinstance(elem, (float, np.float64)):
summary_func = np.nanmedian
else:
summary_func = first_of
