Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Removing deprecated methods #305

Merged
merged 1 commit into from
Jan 5, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 6 additions & 2 deletions __tests__/dataframe.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -655,14 +655,18 @@ describe("dataframe", () => {
.median();
expect(actual.row(0)).toEqual([2, 7, null]);
});
test("melt", () => {
test("unpivot", () => {
const df = pl.DataFrame({
id: [1],
asset_key_1: ["123"],
asset_key_2: ["456"],
asset_key_3: ["abc"],
});
const actual = df.melt("id", ["asset_key_1", "asset_key_2", "asset_key_3"]);
const actual = df.unpivot("id", [
"asset_key_1",
"asset_key_2",
"asset_key_3",
]);
const expected = pl.DataFrame({
id: [1, 1, 1],
variable: ["asset_key_1", "asset_key_2", "asset_key_3"],
Expand Down
1 change: 0 additions & 1 deletion __tests__/series.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -330,7 +330,6 @@ describe("series", () => {
${numSeries()} | ${"isNotNaN"} | ${[]}
${numSeries()} | ${"isNumeric"} | ${[]}
${numSeries()} | ${"isUnique"} | ${[]}
${numSeries()} | ${"isUtf8"} | ${[]}
${numSeries()} | ${"kurtosis"} | ${[]}
${numSeries()} | ${"kurtosis"} | ${[{ fisher: true, bias: true }]}
${numSeries()} | ${"kurtosis"} | ${[{ bias: false }]}
Expand Down
38 changes: 0 additions & 38 deletions polars/dataframe.ts
Original file line number Diff line number Diff line change
Expand Up @@ -308,8 +308,6 @@ export interface DataFrame<T extends Record<string, Series> = any>
* ```
*/
describe(): DataFrame;
/** @deprecated *since 0.4.0* use {@link unique} */
distinct(maintainOrder?, subset?, keep?): DataFrame;
/**
* __Remove column from DataFrame and return as new.__
* ___
Expand Down Expand Up @@ -1001,11 +999,6 @@ export interface DataFrame<T extends Record<string, Series> = any>
* ```
*/
median(): DataFrame<T>;
/**
* Unpivot a DataFrame from wide to long format.
* @deprecated *since 0.13.0* use {@link unpivot}
*/
melt(idVars: ColumnSelection, valueVars: ColumnSelection): DataFrame;
/**
* Unpivot a DataFrame from wide to long format.
* ___
Expand Down Expand Up @@ -1604,11 +1597,6 @@ export interface DataFrame<T extends Record<string, Series> = any>
* ```
*/
tail(length?: number): DataFrame<T>;
/**
* @deprecated *since 0.4.0* use {@link writeCSV}
* @category Deprecated
*/
toCSV(destOrOptions?, options?);
/**
* Converts dataframe object into row oriented javascript objects
* @example
Expand Down Expand Up @@ -1653,17 +1641,6 @@ export interface DataFrame<T extends Record<string, Series> = any>
* @category IO
*/
toObject(): { [K in keyof T]: DTypeToJs<T[K]["dtype"] | null>[] };

/**
* @deprecated *since 0.4.0* use {@link writeIPC}
* @category IO Deprecated
*/
toIPC(destination?, options?);
/**
* @deprecated *since 0.4.0* use {@link writeParquet}
* @category IO Deprecated
*/
toParquet(destination?, options?);
toSeries(index?: number): T[keyof T];
toString(): string;
/**
Expand Down Expand Up @@ -2166,9 +2143,6 @@ export const _DataFrame = (_df: any): DataFrame => {
}
return wrap("dropNulls");
},
/**
 * Deprecated alias of `unique`.
 * Forwards ALL arguments — including `keep` — so behavior matches a direct
 * `unique(opts, subset, keep)` call; the original dropped `keep`, silently
 * ignoring a caller-supplied "last" strategy.
 */
distinct(opts: any = false, subset?, keep = "first") {
  return this.unique(opts, subset, keep);
},
unique(opts: any = false, subset?, keep = "first") {
const defaultOptions = {
maintainOrder: false,
Expand Down Expand Up @@ -2352,9 +2326,6 @@ export const _DataFrame = (_df: any): DataFrame => {
// Column-wise median: route through the lazy engine, then materialize.
median() {
  const lazyMedian = this.lazy().median();
  return lazyMedian.collectSync();
},
// Deprecated wide-to-long reshape; same underlying native op as `unpivot`.
melt(ids, values) {
  const idCols = columnOrColumns(ids);
  const valueCols = columnOrColumns(values);
  return wrap("unpivot", idCols, valueCols);
},
// Unpivot the frame from wide to long format over the given id/value columns.
unpivot(ids, values) {
  const idSelection = columnOrColumns(ids);
  const valueSelection = columnOrColumns(values);
  return wrap("unpivot", idSelection, valueSelection);
},
Expand Down Expand Up @@ -2546,9 +2517,6 @@ export const _DataFrame = (_df: any): DataFrame => {
// Serialize the frame in the requested format (delegates to the native impl).
serialize(format) {
  const serialized = _df.serialize(format);
  return serialized;
},
// Deprecated alias of `writeCSV`; forwards every argument unchanged.
toCSV(...args) {
  return this.writeCSV.apply(this, args);
},
writeCSV(dest?, options = {}) {
if (dest instanceof Writable || typeof dest === "string") {
return _df.writeCsv(dest, options) as any;
Expand Down Expand Up @@ -2632,9 +2600,6 @@ export const _DataFrame = (_df: any): DataFrame => {

return Buffer.concat(buffers);
},
// Deprecated alias of `writeParquet`.
toParquet(dest?, options?) {
  return this.writeParquet.call(this, dest, options);
},
writeParquet(dest?, options = { compression: "uncompressed" }) {
if (dest instanceof Writable || typeof dest === "string") {
return _df.writeParquet(dest, options.compression) as any;
Expand Down Expand Up @@ -2669,9 +2634,6 @@ export const _DataFrame = (_df: any): DataFrame => {

return Buffer.concat(buffers);
},
// Deprecated alias of `writeIPC`.
toIPC(dest?, options?) {
  return this.writeIPC.call(this, dest, options);
},
writeIPC(dest?, options = { compression: "uncompressed" }) {
if (dest instanceof Writable || typeof dest === "string") {
return _df.writeIpc(dest, options.compression) as any;
Expand Down
13 changes: 2 additions & 11 deletions polars/groupby.ts
Original file line number Diff line number Diff line change
Expand Up @@ -44,11 +44,6 @@ export interface GroupBy {
*/
agg(...columns: Expr[]): DataFrame;
agg(columns: Record<string, keyof Expr | (keyof Expr)[]>): DataFrame;
/**
* Count the number of values in each group.
* @deprecated *since 0.10.0* use {@link len}
*/
count(): DataFrame;
/**
* Return the number of rows in each group.
*/
Expand Down Expand Up @@ -164,9 +159,8 @@ export interface GroupBy {

export type PivotOps = Pick<
GroupBy,
"count" | "first" | "max" | "mean" | "median" | "min" | "sum"
"len" | "first" | "max" | "mean" | "median" | "min" | "sum"
> & { [inspect](): string };

/** @ignore */
export function _GroupBy(df: any, by: string[], maintainOrder = false) {
const customInspect = () =>
Expand Down Expand Up @@ -212,9 +206,6 @@ export function _GroupBy(df: any, by: string[], maintainOrder = false) {
agg,
pivot,
aggList: () => agg(exclude(by as any)),
// Deprecated: row count per group (same native "count" op as `len`).
count() {
  const grouped = df.groupby([by].flat(), by, "count");
  return _DataFrame(grouped);
},
// Number of rows in each group.
len() {
  const keyCols = [by].flat();
  return _DataFrame(df.groupby(keyCols, by, "count"));
},
Expand Down Expand Up @@ -254,7 +245,7 @@ function PivotOps(
min: pivot("min"),
max: pivot("max"),
mean: pivot("mean"),
count: pivot("count"),
len: pivot("len"),
median: pivot("median"),
};
}
Expand Down
28 changes: 0 additions & 28 deletions polars/lazy/dataframe.ts
Original file line number Diff line number Diff line change
Expand Up @@ -74,24 +74,6 @@ export interface LazyDataFrame extends Serialize, GroupByOps<LazyGroupBy> {
drop(name: string): LazyDataFrame;
drop(names: string[]): LazyDataFrame;
drop(name: string, ...names: string[]): LazyDataFrame;
/**
* Drop duplicate rows from this DataFrame.
* Note that this fails if there is a column of type `List` in the DataFrame.
* @param maintainOrder
* @param subset - subset to drop duplicates for
* @param keep "first" | "last"
* @deprecated *since 0.4.0* use {@link unique}
*/
distinct(
maintainOrder?: boolean,
subset?: ColumnSelection,
keep?: "first" | "last",
): LazyDataFrame;
distinct(opts: {
maintainOrder?: boolean;
subset?: ColumnSelection;
keep?: "first" | "last";
}): LazyDataFrame;
/**
* Drop rows with null values from this DataFrame.
* This method only drops nulls row-wise if any single value of the row is null.
Expand Down Expand Up @@ -370,9 +352,7 @@ export interface LazyDataFrame extends Serialize, GroupByOps<LazyGroupBy> {
median(): LazyDataFrame;
/**
* @see {@link DataFrame.unpivot}
* @deprecated *since 0.13.0* use {@link unpivot}
*/
melt(idVars: ColumnSelection, valueVars: ColumnSelection): LazyDataFrame;
unpivot(idVars: ColumnSelection, valueVars: ColumnSelection): LazyDataFrame;
/**
* @see {@link DataFrame.min}
Expand Down Expand Up @@ -687,9 +667,6 @@ export const _LazyDataFrame = (_ldf: any): LazyDataFrame => {
drop(...cols) {
return _LazyDataFrame(_ldf.dropColumns(cols.flat(2)));
},
// Deprecated alias of `unique`; forwards all arguments to the native call,
// keeping `_ldf` as the receiver.
distinct(...args: any[]) {
  const uniqueFn = _ldf.unique as any;
  return _LazyDataFrame(uniqueFn.apply(_ldf, args));
},
unique(opts: any = false, subset?, keep = "first") {
const defaultOptions = {
maintainOrder: false,
Expand Down Expand Up @@ -993,11 +970,6 @@ export const _LazyDataFrame = (_ldf: any): LazyDataFrame => {
// Median of every column, staying in the lazy query plan.
median() {
  const plan = _ldf.median();
  return _LazyDataFrame(plan);
},
// Deprecated wide-to-long reshape; identical to `unpivot`.
melt(ids, values) {
  const idCols = columnOrColumnsStrict(ids);
  const valueCols = columnOrColumnsStrict(values);
  return _LazyDataFrame(_ldf.unpivot(idCols, valueCols));
},
unpivot(ids, values) {
return _LazyDataFrame(
_ldf.unpivot(columnOrColumnsStrict(ids), columnOrColumnsStrict(values)),
Expand Down
28 changes: 0 additions & 28 deletions polars/lazy/expr/string.ts
Original file line number Diff line number Diff line change
Expand Up @@ -113,31 +113,6 @@ export interface StringNamespace extends StringFunctions<Expr> {
* Throw errors if encounter invalid JSON strings.
* @params Not implemented ATM
* @returns DF with struct
* @deprecated *since 0.8.4* use {@link jsonDecode}
* @example

* >>> df = pl.DataFrame( {json: ['{"a":1, "b": true}', null, '{"a":2, "b": false}']} )
* >>> df.select(pl.col("json").str.jsonExtract())
* shape: (3, 1)
* ┌─────────────┐
* │ json │
* │ --- │
* │ struct[2] │
* ╞═════════════╡
* │ {1,true} │
* │ {null,null} │
* │ {2,false} │
* └─────────────┘
* See Also
* ----------
* jsonPathMatch : Extract the first match of json string with provided JSONPath expression.
*/
jsonExtract(dtype?: DataType, inferSchemaLength?: number): Expr;
/**
* Parse string values as JSON.
* Throw errors if encounter invalid JSON strings.
* @params Not implemented ATM
* @returns DF with struct
* @example

* >>> df = pl.DataFrame( {json: ['{"a":1, "b": true}', null, '{"a":2, "b": false}']} )
Expand Down Expand Up @@ -369,9 +344,6 @@ export const ExprStringFunctions = (_expr: any): StringNamespace => {
// Extract capture group `groupIndex` of the regex `pat` from each string.
extract(pat: any, groupIndex: number) {
  const patternExpr = exprToLitOrExpr(pat, true)._expr;
  return wrap("strExtract", patternExpr, groupIndex);
},
// Deprecated: parse JSON strings (same native "strJsonDecode" op as `jsonDecode`).
jsonExtract(dtype?: DataType, inferSchemaLength?: number) {
  const schemaDtype = dtype;
  const schemaLen = inferSchemaLength;
  return wrap("strJsonDecode", schemaDtype, schemaLen);
},
// Parse string values as JSON; optional target dtype and schema-inference length.
jsonDecode(dtype?: DataType, inferSchemaLength?: number) {
  const targetDtype = dtype;
  return wrap("strJsonDecode", targetDtype, inferSchemaLength);
},
Expand Down
19 changes: 0 additions & 19 deletions polars/series/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -526,12 +526,6 @@ export interface Series<T extends DataType = any, Name extends string = string>
* ```
*/
isUnique(): Series;
/**
* Checks if this Series datatype is a Utf8.
* @deprecated *since 0.8.4*
* @see Use `Series.dtype.equals(pl.String)` instead.
*/
isUtf8(): boolean;
/**
* Checks if this Series datatype is a String.
*/
Expand Down Expand Up @@ -805,13 +799,6 @@ export interface Series<T extends DataType = any, Name extends string = string>
nullEqual?: boolean,
strict?: boolean,
): boolean;
/**
* __Set values at the given indices__
* @param indices positions in the Series to overwrite
* @param value value to write at those positions
* @deprecated *since 0.8.4* use {@link scatter}
*/
setAtIdx(indices: number[] | Series, value: any): void;
/**
* __Set masked values__
* @param filter Boolean mask
Expand Down Expand Up @@ -1526,9 +1513,6 @@ export function _Series(_s: any): Series {
// True when this Series' dtype is String.
isString() {
  const { dtype } = this;
  return dtype.equals(DataType.String);
},
// Deprecated dtype check: True when this Series' dtype is Utf8.
isUtf8() {
  const currentDtype = this.dtype;
  return currentDtype.equals(DataType.Utf8);
},
kurtosis(fisher: any = true, bias = true) {
if (typeof fisher === "boolean") {
return _s.kurtosis(fisher, bias);
Expand Down Expand Up @@ -1715,9 +1699,6 @@ export function _Series(_s: any): Series {
// Clip values to bounds via the shared expression-op helper; all arguments
// are forwarded untouched to the "clip" expression.
clip(...args) {
  return expr_op("clip", ...args);
},
/**
 * Deprecated alias of `scatter`.
 * Delegates through `this.scatter` so Series-valued `indices` receive the
 * same UInt32 cast `scatter` applies — the original called the native
 * `_s.scatter` directly and skipped that normalization.
 */
setAtIdx(indices, value) {
  this.scatter(indices, value);
},
scatter(indices, value) {
indices = Series.isSeries(indices)
? indices.cast(DataType.UInt32)
Expand Down
19 changes: 0 additions & 19 deletions polars/series/string.ts
Original file line number Diff line number Diff line change
Expand Up @@ -94,22 +94,6 @@ export interface StringNamespace extends StringFunctions<Series> {
* ```
*/
extract(pattern: any, groupIndex: number): Series;
/***
* Parse string values as JSON.
* @returns Utf8 array. Contain null if original value is null or the `jsonPath` return nothing.
* @deprecated *since 0.8.4* use {@link jsonDecode}
* @example
* s = pl.Series("json", ['{"a":1, "b": true}', null, '{"a":2, "b": false}']);
* s.str.jsonExtract().as("json");
* shape: (3,)
* Series: 'json' [struct[2]]
* [
* {1,true}
* {null,null}
* {2,false}
* ]
*/
jsonExtract(dtype?: DataType, inferSchemaLength?: number): Series;
/***
* Parse string values as JSON.
* @returns Utf8 array. Contain null if original value is null or the `jsonPath` return nothing.
Expand Down Expand Up @@ -322,9 +306,6 @@ export const SeriesStringFunctions = (_s: any): StringNamespace => {
.select(col(s.name).str.extract(pat, groupIndex).as(s.name))
.getColumn(s.name);
},
// Deprecated: decode JSON strings (same native op as `jsonDecode`).
jsonExtract(dtype?: DataType, inferSchemaLength?: number) {
  const decodeDtype = dtype;
  return wrap("strJsonDecode", decodeDtype, inferSchemaLength);
},
// Parse each string value as JSON; optional dtype and schema-inference length.
jsonDecode(dtype?: DataType, inferSchemaLength?: number) {
  const inferLen = inferSchemaLength;
  return wrap("strJsonDecode", dtype, inferLen);
},
Expand Down
4 changes: 2 additions & 2 deletions src/conversion.rs
Original file line number Diff line number Diff line change
Expand Up @@ -830,8 +830,8 @@ impl FromNapiValue for Wrap<CsvWriterOptions> {
let obj = Object::from_napi_value(env, napi_val)?;
let include_bom = obj.get::<_, bool>("includeBom")?.unwrap_or(false);
let include_header = obj.get::<_, bool>("includeHeader")?.unwrap_or(true);
let batch_size =
NonZero::new(obj.get::<_, i64>("batchSize")?.unwrap_or(1024) as usize).ok_or_else(|| napi::Error::from_reason("Invalid batch size"))?;
let batch_size = NonZero::new(obj.get::<_, i64>("batchSize")?.unwrap_or(1024) as usize)
.ok_or_else(|| napi::Error::from_reason("Invalid batch size"))?;
let maintain_order = obj.get::<_, bool>("maintainOrder")?.unwrap_or(true);
let date_format = obj.get::<_, String>("dateFormat")?;
let time_format = obj.get::<_, String>("timeFormat")?;
Expand Down
Loading