diff --git a/docs/api/SUMMARY.md b/docs/api/SUMMARY.md index 59d5b4d50..0b5cfda1a 100644 --- a/docs/api/SUMMARY.md +++ b/docs/api/SUMMARY.md @@ -22,6 +22,7 @@ search: - [Cell](safeds/data/tabular/containers/Cell.md) - [Column](safeds/data/tabular/containers/Column.md) - [Row](safeds/data/tabular/containers/Row.md) + - [StringCell](safeds/data/tabular/containers/StringCell.md) - [Table](safeds/data/tabular/containers/Table.md) - plotting - [ColumnPlotter](safeds/data/tabular/plotting/ColumnPlotter.md) @@ -94,15 +95,11 @@ search: - nn - converters - [InputConversion](safeds/ml/nn/converters/InputConversion.md) - - [InputConversionImage](safeds/ml/nn/converters/InputConversionImage.md) + - [InputConversionImageToColumn](safeds/ml/nn/converters/InputConversionImageToColumn.md) + - [InputConversionImageToImage](safeds/ml/nn/converters/InputConversionImageToImage.md) + - [InputConversionImageToTable](safeds/ml/nn/converters/InputConversionImageToTable.md) - [InputConversionTable](safeds/ml/nn/converters/InputConversionTable.md) - [InputConversionTimeSeries](safeds/ml/nn/converters/InputConversionTimeSeries.md) - - [OutputConversion](safeds/ml/nn/converters/OutputConversion.md) - - [OutputConversionImageToColumn](safeds/ml/nn/converters/OutputConversionImageToColumn.md) - - [OutputConversionImageToImage](safeds/ml/nn/converters/OutputConversionImageToImage.md) - - [OutputConversionImageToTable](safeds/ml/nn/converters/OutputConversionImageToTable.md) - - [OutputConversionTable](safeds/ml/nn/converters/OutputConversionTable.md) - - [OutputConversionTimeSeries](safeds/ml/nn/converters/OutputConversionTimeSeries.md) - layers - [AveragePooling2DLayer](safeds/ml/nn/layers/AveragePooling2DLayer.md) - [Convolutional2DLayer](safeds/ml/nn/layers/Convolutional2DLayer.md) @@ -112,5 +109,9 @@ search: - [Layer](safeds/ml/nn/layers/Layer.md) - [LSTMLayer](safeds/ml/nn/layers/LSTMLayer.md) - [MaxPooling2DLayer](safeds/ml/nn/layers/MaxPooling2DLayer.md) + - typing + - [ConstantImageSize](safeds/ml/nn/typing/ConstantImageSize.md) + - [ModelImageSize](safeds/ml/nn/typing/ModelImageSize.md) + - [VariableImageSize](safeds/ml/nn/typing/VariableImageSize.md) - [NeuralNetworkClassifier](safeds/ml/nn/NeuralNetworkClassifier.md) - [NeuralNetworkRegressor](safeds/ml/nn/NeuralNetworkRegressor.md) diff --git a/docs/api/safeds/data/image/containers/ImageList.md b/docs/api/safeds/data/image/containers/ImageList.md index 8b705ad2b..2ded7669f 100644 --- a/docs/api/safeds/data/image/containers/ImageList.md +++ b/docs/api/safeds/data/image/containers/ImageList.md @@ -14,29 +14,29 @@ To create an `ImageList` call one of the following static methods: ```sds linenums="17" class ImageList { /** - * Return the number of images in this image list. + * The number of images in this image list. */ - @PythonName("number_of_images") attr imageCount: Int + @PythonName("image_count") attr imageCount: Int /** - * Return a list of all widths in this image list. + * A list of all widths in this image list. */ attr widths: List /** - * Return a list of all heights in this image list. + * A list of all heights in this image list. */ attr heights: List /** - * Return the channel of all images. + * The channel of all images. */ attr channel: Int /** - * Return the sizes of all images. + * The sizes of all images. */ attr sizes: List /** - * Return the number of different sizes of images in this image list. + * The number of different sizes of images in this image list. 
*/ - @PythonName("number_of_sizes") attr sizeCount: Int + @PythonName("size_count") attr sizeCount: Int /** * Create an ImageList from a list of images. @@ -515,37 +515,37 @@ To create an `ImageList` call one of the following static methods: ## `channel` {#safeds.data.image.containers.ImageList.channel data-toc-label='[attribute] channel'} -Return the channel of all images. +The channel of all images. **Type:** [`Int`][safeds.lang.Int] ## `heights` {#safeds.data.image.containers.ImageList.heights data-toc-label='[attribute] heights'} -Return a list of all heights in this image list. +A list of all heights in this image list. **Type:** [`List`][safeds.lang.List] ## `imageCount` {#safeds.data.image.containers.ImageList.imageCount data-toc-label='[attribute] imageCount'} -Return the number of images in this image list. +The number of images in this image list. **Type:** [`Int`][safeds.lang.Int] ## `sizeCount` {#safeds.data.image.containers.ImageList.sizeCount data-toc-label='[attribute] sizeCount'} -Return the number of different sizes of images in this image list. +The number of different sizes of images in this image list. **Type:** [`Int`][safeds.lang.Int] ## `sizes` {#safeds.data.image.containers.ImageList.sizes data-toc-label='[attribute] sizes'} -Return the sizes of all images. +The sizes of all images. **Type:** [`List`][safeds.lang.List] ## `widths` {#safeds.data.image.containers.ImageList.widths data-toc-label='[attribute] widths'} -Return a list of all widths in this image list. +A list of all widths in this image list. **Type:** [`List`][safeds.lang.List] diff --git a/docs/api/safeds/data/image/typing/ImageSize.md b/docs/api/safeds/data/image/typing/ImageSize.md index 66d1da467..c820b2ce5 100644 --- a/docs/api/safeds/data/image/typing/ImageSize.md +++ b/docs/api/safeds/data/image/typing/ImageSize.md @@ -1,51 +1,18 @@ -# `ImageSize` {#safeds.data.image.typing.ImageSize data-toc-label='[class] ImageSize'} +--- +search: + boost: 0.5 +--- -A container for image size data. +# :test_tube:{ title="Experimental" } `ImageSize` {#safeds.data.image.typing.ImageSize data-toc-label='[class] ImageSize'} -**Parameters:** +A container for image size data. -| Name | Type | Description | Default | -|------|------|-------------|---------| -| `width` | [`Int`][safeds.lang.Int] | the width of the image | - | -| `height` | [`Int`][safeds.lang.Int] | the height of the image | - | -| `channel` | [`Int`][safeds.lang.Int] | the channel of the image | - | -| `ignoreInvalidChannel` | [`Boolean`][safeds.lang.Boolean] | - | `#!sds false` | +**Parent type:** [`ConstantImageSize`][safeds.ml.nn.typing.ConstantImageSize] ??? quote "Stub code in `ImageSize.sdsstub`" - ```sds linenums="12" - class ImageSize( - width: Int, - height: Int, - channel: Int, - @PythonName("_ignore_invalid_channel") ignoreInvalidChannel: Boolean = false - ) { - /** - * Get the width of this `ImageSize` in pixels. - */ - attr width: Int - /** - * Get the height of this `ImageSize` in pixels. - */ - attr height: Int - /** - * Get the channel of this `ImageSize` in pixels. - */ - attr channel: Int - - /** - * Create a `ImageSize` of a given image. 
- * - * @param image the given image for the `ImageSize` - * - * @result imageSize the calculated `ImageSize` - */ - @Pure - @PythonName("from_image") - static fun fromImage( - image: Image - ) -> imageSize: ImageSize - } + ```sds linenums="15" + class ImageSize sub ConstantImageSize ``` ## `channel` {#safeds.data.image.typing.ImageSize.channel data-toc-label='[attribute] channel'} @@ -65,29 +32,3 @@ Get the height of this `ImageSize` in pixels. Get the width of this `ImageSize` in pixels. **Type:** [`Int`][safeds.lang.Int] - -## `fromImage` {#safeds.data.image.typing.ImageSize.fromImage data-toc-label='[static-function] fromImage'} - -Create a `ImageSize` of a given image. - -**Parameters:** - -| Name | Type | Description | Default | -|------|------|-------------|---------| -| `image` | [`Image`][safeds.data.image.containers.Image] | the given image for the `ImageSize` | - | - -**Results:** - -| Name | Type | Description | -|------|------|-------------| -| `imageSize` | [`ImageSize`][safeds.data.image.typing.ImageSize] | the calculated `ImageSize` | - -??? quote "Stub code in `ImageSize.sdsstub`" - - ```sds linenums="38" - @Pure - @PythonName("from_image") - static fun fromImage( - image: Image - ) -> imageSize: ImageSize - ``` diff --git a/docs/api/safeds/data/labeled/containers/Dataset.md b/docs/api/safeds/data/labeled/containers/Dataset.md index a0465a92d..4def9eba2 100644 --- a/docs/api/safeds/data/labeled/containers/Dataset.md +++ b/docs/api/safeds/data/labeled/containers/Dataset.md @@ -7,13 +7,21 @@ search: A dataset is used as input to machine learning models. +**Type parameters:** + +| Name | Upper Bound | Description | Default | +|------|-------------|-------------|---------| +| `I` | [`Any?`][safeds.lang.Any] | The type of the input data. | - | +| `O` | [`Any?`][safeds.lang.Any] | The type of the output data. | - | + **Inheritors:** - [`ImageDataset`][safeds.data.labeled.containers.ImageDataset] - [`TabularDataset`][safeds.data.labeled.containers.TabularDataset] +- [`TimeSeriesDataset`][safeds.data.labeled.containers.TimeSeriesDataset] ??? quote "Stub code in `Dataset.sdsstub`" - ```sds linenums="6" - class Dataset + ```sds linenums="9" + class Dataset ``` diff --git a/docs/api/safeds/data/labeled/containers/ImageDataset.md b/docs/api/safeds/data/labeled/containers/ImageDataset.md index d2ca754d7..10655b785 100644 --- a/docs/api/safeds/data/labeled/containers/ImageDataset.md +++ b/docs/api/safeds/data/labeled/containers/ImageDataset.md @@ -2,14 +2,14 @@ A Dataset for ImageLists as input and ImageLists, Tables or Columns as output. -**Parent type:** [`Dataset`][safeds.data.labeled.containers.Dataset] +**Parent type:** [`Dataset`][safeds.data.labeled.containers.Dataset] **Parameters:** | Name | Type | Description | Default | |------|------|-------------|---------| | `inputData` | [`ImageList`][safeds.data.image.containers.ImageList] | the input ImageList | - | -| `outputData` | `#!sds T` | the output data | - | +| `outputData` | `#!sds O` | the output data | - | | `batchSize` | [`Int`][safeds.lang.Int] | the batch size used for training | `#!sds 1` | | `shuffle` | [`Boolean`][safeds.lang.Boolean] | weather the data should be shuffled after each epoch of training | `#!sds false` | @@ -17,17 +17,17 @@ A Dataset for ImageLists as input and ImageLists, Tables or Columns as output. | Name | Upper Bound | Description | Default | |------|-------------|-------------|---------| -| `T` | [`Any?`][safeds.lang.Any] | - | - | +| `O` | [`Any?`][safeds.lang.Any] | - | - | ??? 
quote "Stub code in `ImageDataset.sdsstub`" ```sds linenums="17" - class ImageDataset( + class ImageDataset( @PythonName("input_data") inputData: ImageList, - @PythonName("output_data") outputData: T, + @PythonName("output_data") outputData: O, @PythonName("batch_size") batchSize: Int = 1, shuffle: Boolean = false - ) sub Dataset { + ) sub Dataset { /** * Get the input `ImageSize` of this dataset. */ @@ -53,7 +53,7 @@ A Dataset for ImageLists as input and ImageLists, Tables or Columns as output. */ @Pure @PythonName("get_output") - fun getOutput() -> output: T + fun getOutput() -> output: O /** * Return a new `ImageDataset` with shuffled data. @@ -63,7 +63,7 @@ A Dataset for ImageLists as input and ImageLists, Tables or Columns as output. * @result imageDataset the shuffled `ImageDataset` */ @Pure - fun shuffle() -> imageDataset: ImageDataset + fun shuffle() -> imageDataset: ImageDataset } ``` @@ -105,14 +105,14 @@ Get the output data of this dataset. | Name | Type | Description | |------|------|-------------| -| `output` | `#!sds T` | the output data of this dataset | +| `output` | `#!sds O` | the output data of this dataset | ??? quote "Stub code in `ImageDataset.sdsstub`" ```sds linenums="46" @Pure @PythonName("get_output") - fun getOutput() -> output: T + fun getOutput() -> output: O ``` ## `shuffle` {#safeds.data.labeled.containers.ImageDataset.shuffle data-toc-label='[function] shuffle'} @@ -125,11 +125,11 @@ The original dataset list is not modified. | Name | Type | Description | |------|------|-------------| -| `imageDataset` | [`ImageDataset`][safeds.data.labeled.containers.ImageDataset] | the shuffled `ImageDataset` | +| `imageDataset` | [`ImageDataset`][safeds.data.labeled.containers.ImageDataset] | the shuffled `ImageDataset` | ??? quote "Stub code in `ImageDataset.sdsstub`" ```sds linenums="57" @Pure - fun shuffle() -> imageDataset: ImageDataset + fun shuffle() -> imageDataset: ImageDataset ``` diff --git a/docs/api/safeds/data/labeled/containers/TabularDataset.md b/docs/api/safeds/data/labeled/containers/TabularDataset.md index 37be167fc..4d098a295 100644 --- a/docs/api/safeds/data/labeled/containers/TabularDataset.md +++ b/docs/api/safeds/data/labeled/containers/TabularDataset.md @@ -12,14 +12,14 @@ Columns in a tabular dataset are divided into three categories: Feature columns are implicitly defined as all columns except the target and extra columns. If no extra columns are specified, all columns except the target column are used as features. -**Parent type:** [`Dataset`][safeds.data.labeled.containers.Dataset] +**Parent type:** [`Dataset>`][safeds.data.labeled.containers.Dataset] **Parameters:** | Name | Type | Description | Default | |------|------|-------------|---------| | `data` | `#!sds union>, Table>` | The data. | - | -| `targetName` | [`String`][safeds.lang.String] | Name of the target column. | - | +| `targetName` | [`String`][safeds.lang.String] | The name of the target column. | - | | `extraNames` | [`List`][safeds.lang.List] | Names of the columns that are neither features nor target. If null, no extra columns are used, i.e. all but the target column are used as features. | `#!sds []` | **Examples:** @@ -44,7 +44,7 @@ pipeline example { data: union>, Table>, @PythonName("target_name") targetName: String, @PythonName("extra_names") extraNames: List = [] - ) sub Dataset { + ) sub Dataset { /** * The feature columns of the tabular dataset. 
*/ diff --git a/docs/api/safeds/data/labeled/containers/TimeSeriesDataset.md b/docs/api/safeds/data/labeled/containers/TimeSeriesDataset.md index 707818ee9..bb67198c1 100644 --- a/docs/api/safeds/data/labeled/containers/TimeSeriesDataset.md +++ b/docs/api/safeds/data/labeled/containers/TimeSeriesDataset.md @@ -1,41 +1,47 @@ # :test_tube:{ title="Experimental" } `TimeSeriesDataset` {#safeds.data.labeled.containers.TimeSeriesDataset data-toc-label='[class] TimeSeriesDataset'} -A time series dataset maps feature and time columns to a target column. Not like the TabularDataset a TimeSeries needs to contain one target and one time column, but can have empty features. +A time series dataset maps feature and time columns to a target column. -Create a time series dataset from a mapping of column names to their values. +Unlike a TabularDataset, a TimeSeries needs to contain one target and one time column, but can have empty features. + +**Parent type:** [`Dataset>`][safeds.data.labeled.containers.Dataset] **Parameters:** | Name | Type | Description | Default | |------|------|-------------|---------| | `data` | `#!sds union>, Table>` | The data. | - | -| `targetName` | [`String`][safeds.lang.String] | Name of the target column. | - | -| `timeName` | [`String`][safeds.lang.String] | Name of the time column. | - | -| `extraNames` | [`List?`][safeds.lang.List] | Names of the columns that are neither features nor target. If null, no extra columns are used, i.e. all but the target column are used as features. | `#!sds null` | +| `targetName` | [`String`][safeds.lang.String] | The name of the target column. | - | +| `timeName` | [`String`][safeds.lang.String] | The name of the time column. | - | +| `windowSize` | [`Int`][safeds.lang.Int] | The number of consecutive samples to use as input for prediction. | - | +| `extraNames` | [`List?`][safeds.lang.List] | Names of the columns that are neither features nor target. If null, no extra columns are used, i.e. all but the target column are used as features. | `#!sds null` | +| `forecastHorizon` | [`Int`][safeds.lang.Int] | The number of time steps to predict into the future. | `#!sds 1` | **Examples:** -```sds hl_lines="3" +```sds hl_lines="2" pipeline example { - // from safeds.data.labeled.containers import TabularDataset // dataset = TimeSeriesDataset( // {"id": [1, 2, 3], "feature": [4, 5, 6], "target": [1, 2, 3], "error":[0,0,1]}, // target_name="target", // time_name = "id", - // extra_names=["error"] + // window_size=1, + // extra_names=["error"], // ) } ``` ??? quote "Stub code in `TimeSeriesDataset.sdsstub`" - ```sds linenums="28" + ```sds linenums="30" class TimeSeriesDataset( data: union>, Table>, @PythonName("target_name") targetName: String, @PythonName("time_name") timeName: String, - @PythonName("extra_names") extraNames: List? = null - ) { + @PythonName("window_size") windowSize: Int, + @PythonName("extra_names") extraNames: List? = null, + @PythonName("forecast_horizon") forecastHorizon: Int = 1 + ) sub Dataset { /** * The feature columns of the time series dataset. */ attr features: Table /** * The target column of the time series dataset. */ attr target: Column /** * The time column of the time series dataset. */ attr time: Column + /** + * The number of consecutive samples to use as input for prediction. + */ + @PythonName("window_size") attr windowSize: Int + /** + * The number of time steps to predict into the future. + */ + @PythonName("forecast_horizon") attr forecastHorizon: Int /** * Additional columns of the time series dataset that are neither features, target nor time.
* @@ -82,6 +96,12 @@ The feature columns of the time series dataset. **Type:** [`Table`][safeds.data.tabular.containers.Table] +## `forecastHorizon` {#safeds.data.labeled.containers.TimeSeriesDataset.forecastHorizon data-toc-label='[attribute] forecastHorizon'} + +The number of time steps to predict into the future. + +**Type:** [`Int`][safeds.lang.Int] + ## `target` {#safeds.data.labeled.containers.TimeSeriesDataset.target data-toc-label='[attribute] target'} The target column of the time series dataset. @@ -94,6 +114,12 @@ The time column of the time series dataset. **Type:** [`Column`][safeds.data.tabular.containers.Column] +## `windowSize` {#safeds.data.labeled.containers.TimeSeriesDataset.windowSize data-toc-label='[attribute] windowSize'} + +The number of consecutive sample to use as input for prediction. + +**Type:** [`Int`][safeds.lang.Int] + ## `toTable` {#safeds.data.labeled.containers.TimeSeriesDataset.toTable data-toc-label='[function] toTable'} Return a new `Table` containing the feature columns, the target column, the time column and the extra columns. @@ -108,7 +134,7 @@ The original `TimeSeriesDataset` is not modified. ??? quote "Stub code in `TimeSeriesDataset.sdsstub`" - ```sds linenums="60" + ```sds linenums="72" @Pure @PythonName("to_table") fun toTable() -> table: Table diff --git a/docs/api/safeds/data/tabular/containers/Cell.md b/docs/api/safeds/data/tabular/containers/Cell.md index 1a67e14dc..fd7cd6375 100644 --- a/docs/api/safeds/data/tabular/containers/Cell.md +++ b/docs/api/safeds/data/tabular/containers/Cell.md @@ -19,6 +19,11 @@ This class cannot be instantiated directly. It is only used for arguments of cal ```sds linenums="8" class Cell { + /** + * Namespace for operations on strings. + */ + attr str: StringCell + /** * Negate a boolean. This WILL LATER BE equivalent to the ^not operator. * @@ -147,6 +152,21 @@ This class cannot be instantiated directly. It is only used for arguments of cal other: Any ) -> result: Cell + /** + * Divide by a value. This WILL LATER BE equivalent to the `/` operator. + * + * @example + * pipeline example { + * val column = Column("example", [6, 8]); + * val result = column.transform((cell) -> cell.div(2)); + * // Column("example", [3, 4]) + * } + */ + @Pure + fun div( + other: Any + ) -> result: Cell + /** * Perform a modulo operation. * @@ -207,21 +227,6 @@ This class cannot be instantiated directly. It is only used for arguments of cal other: Any ) -> result: Cell - /** - * Divide by a value. This WILL LATER BE equivalent to the `/` operator. - * - * @example - * pipeline example { - * val column = Column("example", [6, 8]); - * val result = column.transform((cell) -> cell.div(2)); - * // Column("example", [3, 4]) - * } - */ - @Pure - fun div( - other: Any - ) -> result: Cell - /** * Check if equal to a value. This WILL LATER BE equivalent to the `==` operator. * @@ -299,6 +304,12 @@ This class cannot be instantiated directly. It is only used for arguments of cal } ``` +## `str` {#safeds.data.tabular.containers.Cell.str data-toc-label='[attribute] str'} + +Namespace for operations on strings. + +**Type:** [`StringCell`][safeds.data.tabular.containers.StringCell] + ## `abs` {#safeds.data.tabular.containers.Cell.abs data-toc-label='[function] abs'} Get the absolute value. @@ -321,7 +332,7 @@ pipeline example { ??? quote "Stub code in `Cell.sdsstub`" - ```sds linenums="80" + ```sds linenums="85" @Pure fun abs() -> result: Cell ``` @@ -354,7 +365,7 @@ pipeline example { ??? 
quote "Stub code in `Cell.sdsstub`" - ```sds linenums="132" + ```sds linenums="137" @Pure fun add( other: Any @@ -389,7 +400,7 @@ pipeline example { ??? quote "Stub code in `Cell.sdsstub`" - ```sds linenums="33" + ```sds linenums="38" @Pure @PythonName("and_") fun ^and( @@ -419,7 +430,7 @@ pipeline example { ??? quote "Stub code in `Cell.sdsstub`" - ```sds linenums="93" + ```sds linenums="98" @Pure fun ceil() -> result: Cell ``` @@ -452,7 +463,7 @@ pipeline example { ??? quote "Stub code in `Cell.sdsstub`" - ```sds linenums="207" + ```sds linenums="152" @Pure fun div( other: Any @@ -487,7 +498,7 @@ pipeline example { ??? quote "Stub code in `Cell.sdsstub`" - ```sds linenums="222" + ```sds linenums="227" @Pure fun eq( other: Any @@ -516,7 +527,7 @@ pipeline example { ??? quote "Stub code in `Cell.sdsstub`" - ```sds linenums="106" + ```sds linenums="111" @Pure fun floor() -> result: Cell ``` @@ -549,7 +560,7 @@ pipeline example { ??? quote "Stub code in `Cell.sdsstub`" - ```sds linenums="237" + ```sds linenums="242" @Pure fun ge( other: Any @@ -584,7 +595,7 @@ pipeline example { ??? quote "Stub code in `Cell.sdsstub`" - ```sds linenums="252" + ```sds linenums="257" @Pure fun gt( other: Any @@ -619,7 +630,7 @@ pipeline example { ??? quote "Stub code in `Cell.sdsstub`" - ```sds linenums="267" + ```sds linenums="272" @Pure fun le( other: Any @@ -654,7 +665,7 @@ pipeline example { ??? quote "Stub code in `Cell.sdsstub`" - ```sds linenums="282" + ```sds linenums="287" @Pure fun lt( other: Any @@ -689,7 +700,7 @@ pipeline example { ??? quote "Stub code in `Cell.sdsstub`" - ```sds linenums="147" + ```sds linenums="167" @Pure fun mod( other: Any @@ -724,7 +735,7 @@ pipeline example { ??? quote "Stub code in `Cell.sdsstub`" - ```sds linenums="162" + ```sds linenums="182" @Pure fun mul( other: Any @@ -753,7 +764,7 @@ pipeline example { ??? quote "Stub code in `Cell.sdsstub`" - ```sds linenums="119" + ```sds linenums="124" @Pure fun neg() -> result: Cell ``` @@ -780,7 +791,7 @@ pipeline example { ??? quote "Stub code in `Cell.sdsstub`" - ```sds linenums="19" + ```sds linenums="24" @Pure @PythonName("not_") fun ^not() -> result: Cell @@ -814,7 +825,7 @@ pipeline example { ??? quote "Stub code in `Cell.sdsstub`" - ```sds linenums="49" + ```sds linenums="54" @Pure @PythonName("or_") fun ^or( @@ -850,7 +861,7 @@ pipeline example { ??? quote "Stub code in `Cell.sdsstub`" - ```sds linenums="177" + ```sds linenums="197" @Pure fun pow( other: union @@ -885,7 +896,7 @@ pipeline example { ??? quote "Stub code in `Cell.sdsstub`" - ```sds linenums="192" + ```sds linenums="212" @Pure fun ^sub( other: Any @@ -920,7 +931,7 @@ pipeline example { ??? quote "Stub code in `Cell.sdsstub`" - ```sds linenums="65" + ```sds linenums="70" @Pure fun xor( other: union> diff --git a/docs/api/safeds/data/tabular/containers/Column.md b/docs/api/safeds/data/tabular/containers/Column.md index 9e18f8f77..07fc5fc98 100644 --- a/docs/api/safeds/data/tabular/containers/Column.md +++ b/docs/api/safeds/data/tabular/containers/Column.md @@ -7,7 +7,7 @@ A named, one-dimensional collection of homogeneous values. | Name | Type | Description | Default | |------|------|-------------|---------| | `name` | [`String`][safeds.lang.String] | The name of the column. | - | -| `data` | [`List?`][safeds.lang.List] | The data of the column. If null, an empty column is created. | `#!sds null` | +| `data` | [`List`][safeds.lang.List] | The data of the column. 
| `#!sds []` | **Type parameters:** @@ -28,7 +28,7 @@ pipeline example { ```sds linenums="18" class Column( name: String, - data: List? = null + data: List = [] ) { /** * Whether the column is numeric. @@ -45,7 +45,7 @@ pipeline example { /** * The number of rows in the column. */ - @PythonName("number_of_rows") attr rowCount: Int + @PythonName("row_count") attr rowCount: Int /** * The plotter for the column. */ @@ -55,8 +55,28 @@ pipeline example { */ attr type: DataType + /* + * Return the distinct values in the column. + * + * @param ignoreMissingValues Whether to ignore missing values. + * + * @result distinctValues The distinct values in the column. + * + * @example + * pipeline example { + * val column = Column("test", [1, 2, 3, 2]); + * val result = column.getDistinctValues(); + * // [1, 2, 3] + * } + */ + @Pure + @PythonName("get_distinct_values") + fun getDistinctValues( + @PythonName("ignore_missing_values") ignoreMissingValues: Boolean = true, + ) -> distinctValues: List + /** - * Return the column value at specified index. + * Return the column value at specified index. This WILL LATER BE equivalent to the `[]` operator (indexed access). * * Nonnegative indices are counted from the beginning (starting at 0), negative indices from the end (starting at * -1). @@ -77,10 +97,177 @@ pipeline example { index: Int ) -> value: T + /** + * Return whether all values in the column satisfy the predicate. + * + * The predicate can return one of three values: + * + * - true, if the value satisfies the predicate. + * - false, if the value does not satisfy the predicate. + * - null, if the truthiness of the predicate is unknown, e.g. due to missing values. + * + * By default, cases where the truthiness of the predicate is unknown are ignored and this method returns: + * + * - true, if the predicate always returns true or null. + * - false, if the predicate returns false at least once. + * + * You can instead enable Kleene logic by setting `ignoreUnknown = false`. In this case, this method returns: + * + * - true, if the predicate always returns true. + * - false, if the predicate returns false at least once. + * - null, if the predicate never returns false, but at least once null. + * + * @param predicate The predicate to apply to each value. + * @param ignoreUnknown Whether to ignore cases where the truthiness of the predicate is unknown. + * + * @result allSatisfyPredicate Whether all values in the column satisfy the predicate. + * + * @example + * pipeline example { + * val column = Column("test", [1, 2, 3]); + * val result = column.all((cell) -> cell.gt(0)); // true + * } + * + * @example + * pipeline example { + * val column = Column("test", [1, 2, 3]); + * val result = column.all((cell) -> cell.lt(3)); // false + * } + */ + @Pure + fun all( + predicate: (cell: Cell) -> satisfiesPredicate: Cell, + @PythonName("ignore_unknown") ignoreUnknown: Boolean = true, + ) -> allSatisfyPredicate: Boolean? + + /** + * Return whether any value in the column satisfies the predicate. + * + * The predicate can return one of three values: + * + * - true, if the value satisfies the predicate. + * - false, if the value does not satisfy the predicate. + * - null, if the truthiness of the predicate is unknown, e.g. due to missing values. + * + * By default, cases where the truthiness of the predicate is unknown are ignored and this method returns: + * + * - true, if the predicate returns true at least once. + * - false, if the predicate always returns false or null. 
+ * + * You can instead enable Kleene logic by setting `ignoreUnknown = false`. In this case, this method returns: + * + * - true, if the predicate returns true at least once. + * - false, if the predicate always returns false. + * - null, if the predicate never returns true, but at least once null. + * + * @param predicate The predicate to apply to each value. + * @param ignoreUnknown Whether to ignore cases where the truthiness of the predicate is unknown. + * + * @result anySatisfyPredicate Whether any value in the column satisfies the predicate. + * + * @example + * pipeline example { + * val column = Column("test", [1, 2, 3]); + * val result = column.any((cell) -> cell.gt(2)); // true + * } + * + * @example + * pipeline example { + * val column = Column("test", [1, 2, 3]); + * val result = column.any((cell) -> cell.lt(0)); // false + * } + */ + @Pure + fun any( + predicate: (cell: Cell) -> satisfiesPredicate: Cell, + @PythonName("ignore_unknown") ignoreUnknown: Boolean = true, + ) -> anySatisfyPredicate: Boolean? + + /** + * Return how many values in the column satisfy the predicate. + * + * The predicate can return one of three results: + * + * - true, if the value satisfies the predicate. + * - false, if the value does not satisfy the predicate. + * - null, if the truthiness of the predicate is unknown, e.g. due to missing values. + * + * By default, cases where the truthiness of the predicate is unknown are ignored and this method returns how + * often the predicate returns true. + * + * You can instead enable Kleene logic by setting `ignore_unknown = False`. In this case, this method returns null + * if the predicate returns null at least once. Otherwise, it still returns how often the predicate returns true. + * + * @param predicate The predicate to apply to each value. + * @param ignoreUnknown Whether to ignore cases where the truthiness of the predicate is unknown. + * + * @result count The number of values in the column that satisfy the predicate. + * + * @example + * pipeline example { + * val column = Column("test", [1, 2, 3]); + * val result = column.countIf((cell) -> cell.gt(1)); // 2 + * } + * + * @example + * pipeline example { + * val column = Column("test", [1, 2, 3]); + * val result = column.countIf((cell) -> cell.lt(0)); // 0 + * } + */ + @Pure + fun countIf( + predicate: (cell: Cell) -> satisfiesPredicate: Cell, + @PythonName("ignore_unknown") ignoreUnknown: Boolean = true, + ) -> count: Int? + + /** + * Return whether no value in the column satisfies the predicate. + * + * The predicate can return one of three values: + * + * - true, if the value satisfies the predicate. + * - false, if the value does not satisfy the predicate. + * - null, if the truthiness of the predicate is unknown, e.g. due to missing values. + * + * By default, cases where the truthiness of the predicate is unknown are ignored and this method returns: + * + * - true, if the predicate always returns false or null. + * - false, if the predicate returns true at least once. + * + * You can instead enable Kleene logic by setting `ignoreUnknown = false`. In this case, this method returns: + * + * - true, if the predicate always returns false. + * - false, if the predicate returns true at least once. + * - null, if the predicate never returns true, but at least once null. + * + * @param predicate The predicate to apply to each value. + * @param ignoreUnknown Whether to ignore cases where the truthiness of the predicate is unknown. 
+ * + * @result noneSatisfyPredicate Whether no value in the column satisfies the predicate. + * + * @example + * pipeline example { + * val column = Column("test", [1, 2, 3]); + * val result = column.none((cell) -> cell.lt(0)); // true + * } + * + * @example + * pipeline example { + * val column = Column("test", [1, 2, 3]); + * val result = column.none((cell) -> cell.gt(2)); // false + * } + */ + @Pure + fun none( + predicate: (cell: Cell) -> satisfiesPredicate: Cell, + @PythonName("ignore_unknown") ignoreUnknown: Boolean = true, + ) -> noneSatisfyPredicate: Int? + /** * Return a new column with a new name. * - * The original column is not modified. + * **Note:** The original column is not modified. * * @param newName The new name of the column. * @@ -101,7 +288,7 @@ pipeline example { /** * Return a new column with values transformed by the transformer. * - * The original column is not modified. + * **Note:** The original column is not modified. * * @param transformer The transformer to apply to each value. * @@ -310,6 +497,27 @@ pipeline example { @PythonName("missing_value_ratio") fun missingValueRatio() -> missingValueRatio: Float + /** + * Return the mode of the values in the column. + * + * The mode is the value that appears most frequently in the column. If multiple values occur equally often, all + * of them are returned. The values are sorted in ascending order. + * + * @param ignoreMissingValues Whether to ignore missing values. + * + * @result mode The mode of the values in the column. + * + * @example + * pipeline example { + * val column = Column("test", [3, 1, 2, 1, 3]); + * val result = column.mode(); // [1, 3] + * } + */ + @Pure + fun mode( + @PythonName("ignore_missing_values") ignoreMissingValues: Boolean = true, + ) -> mode: List + /** * Return the stability of the column. * @@ -434,6 +642,124 @@ The type of the column. **Type:** [`DataType`][safeds.data.tabular.typing.DataType] +## `all` {#safeds.data.tabular.containers.Column.all data-toc-label='[function] all'} + +Return whether all values in the column satisfy the predicate. + +The predicate can return one of three values: + +- true, if the value satisfies the predicate. +- false, if the value does not satisfy the predicate. +- null, if the truthiness of the predicate is unknown, e.g. due to missing values. + +By default, cases where the truthiness of the predicate is unknown are ignored and this method returns: + +- true, if the predicate always returns true or null. +- false, if the predicate returns false at least once. + +You can instead enable Kleene logic by setting `ignoreUnknown = false`. In this case, this method returns: + +- true, if the predicate always returns true. +- false, if the predicate returns false at least once. +- null, if the predicate never returns false, but at least once null. + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `predicate` | `#!sds (cell: Cell) -> (satisfiesPredicate: Cell)` | The predicate to apply to each value. | - | +| `ignoreUnknown` | [`Boolean`][safeds.lang.Boolean] | Whether to ignore cases where the truthiness of the predicate is unknown. | `#!sds true` | + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `allSatisfyPredicate` | [`Boolean?`][safeds.lang.Boolean] | Whether all values in the column satisfy the predicate. 
| + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("test", [1, 2, 3]); + val result = column.all((cell) -> cell.gt(0)); // true +} +``` +```sds hl_lines="3" +pipeline example { + val column = Column("test", [1, 2, 3]); + val result = column.all((cell) -> cell.lt(3)); // false +} +``` + +??? quote "Stub code in `Column.sdsstub`" + + ```sds linenums="126" + @Pure + fun all( + predicate: (cell: Cell) -> satisfiesPredicate: Cell, + @PythonName("ignore_unknown") ignoreUnknown: Boolean = true, + ) -> allSatisfyPredicate: Boolean? + ``` + +## `any` {#safeds.data.tabular.containers.Column.any data-toc-label='[function] any'} + +Return whether any value in the column satisfies the predicate. + +The predicate can return one of three values: + +- true, if the value satisfies the predicate. +- false, if the value does not satisfy the predicate. +- null, if the truthiness of the predicate is unknown, e.g. due to missing values. + +By default, cases where the truthiness of the predicate is unknown are ignored and this method returns: + +- true, if the predicate returns true at least once. +- false, if the predicate always returns false or null. + +You can instead enable Kleene logic by setting `ignoreUnknown = false`. In this case, this method returns: + +- true, if the predicate returns true at least once. +- false, if the predicate always returns false. +- null, if the predicate never returns true, but at least once null. + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `predicate` | `#!sds (cell: Cell) -> (satisfiesPredicate: Cell)` | The predicate to apply to each value. | - | +| `ignoreUnknown` | [`Boolean`][safeds.lang.Boolean] | Whether to ignore cases where the truthiness of the predicate is unknown. | `#!sds true` | + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `anySatisfyPredicate` | [`Boolean?`][safeds.lang.Boolean] | Whether any value in the column satisfies the predicate. | + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("test", [1, 2, 3]); + val result = column.any((cell) -> cell.gt(2)); // true +} +``` +```sds hl_lines="3" +pipeline example { + val column = Column("test", [1, 2, 3]); + val result = column.any((cell) -> cell.lt(0)); // false +} +``` + +??? quote "Stub code in `Column.sdsstub`" + + ```sds linenums="169" + @Pure + fun any( + predicate: (cell: Cell) -> satisfiesPredicate: Cell, + @PythonName("ignore_unknown") ignoreUnknown: Boolean = true, + ) -> anySatisfyPredicate: Boolean? + ``` + ## `correlationWith` {#safeds.data.tabular.containers.Column.correlationWith data-toc-label='[function] correlationWith'} Calculate the Pearson correlation between this column and another column. @@ -476,7 +802,7 @@ pipeline example { ??? quote "Stub code in `Column.sdsstub`" - ```sds linenums="154" + ```sds linenums="341" @Pure @PythonName("correlation_with") fun correlationWith( @@ -484,6 +810,60 @@ pipeline example { ) -> correlation: Float ``` +## `countIf` {#safeds.data.tabular.containers.Column.countIf data-toc-label='[function] countIf'} + +Return how many values in the column satisfy the predicate. + +The predicate can return one of three results: + +- true, if the value satisfies the predicate. +- false, if the value does not satisfy the predicate. +- null, if the truthiness of the predicate is unknown, e.g. due to missing values. 
+ +By default, cases where the truthiness of the predicate is unknown are ignored and this method returns how +often the predicate returns true. + +You can instead enable Kleene logic by setting `ignore_unknown = False`. In this case, this method returns null +if the predicate returns null at least once. Otherwise, it still returns how often the predicate returns true. + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `predicate` | `#!sds (cell: Cell) -> (satisfiesPredicate: Cell)` | The predicate to apply to each value. | - | +| `ignoreUnknown` | [`Boolean`][safeds.lang.Boolean] | Whether to ignore cases where the truthiness of the predicate is unknown. | `#!sds true` | + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `count` | [`Int?`][safeds.lang.Int] | The number of values in the column that satisfy the predicate. | + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("test", [1, 2, 3]); + val result = column.countIf((cell) -> cell.gt(1)); // 2 +} +``` +```sds hl_lines="3" +pipeline example { + val column = Column("test", [1, 2, 3]); + val result = column.countIf((cell) -> cell.lt(0)); // 0 +} +``` + +??? quote "Stub code in `Column.sdsstub`" + + ```sds linenums="207" + @Pure + fun countIf( + predicate: (cell: Cell) -> satisfiesPredicate: Cell, + @PythonName("ignore_unknown") ignoreUnknown: Boolean = true, + ) -> count: Int? + ``` + ## `distinctValueCount` {#safeds.data.tabular.containers.Column.distinctValueCount data-toc-label='[function] distinctValueCount'} Return the number of distinct values in the column. @@ -511,7 +891,7 @@ pipeline example { ??? quote "Stub code in `Column.sdsstub`" - ```sds linenums="173" + ```sds linenums="360" @Pure @PythonName("distinct_value_count") fun distinctValueCount( @@ -519,9 +899,33 @@ pipeline example { ) -> distinctValueCount: Int ``` +## `getDistinctValues` {#safeds.data.tabular.containers.Column.getDistinctValues data-toc-label='[function] getDistinctValues'} + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `ignoreMissingValues` | [`Boolean`][safeds.lang.Boolean] | - | `#!sds true` | + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `distinctValues` | [`List`][safeds.lang.List] | - | + +??? quote "Stub code in `Column.sdsstub`" + + ```sds linenums="61" + @Pure + @PythonName("get_distinct_values") + fun getDistinctValues( + @PythonName("ignore_missing_values") ignoreMissingValues: Boolean = true, + ) -> distinctValues: List + ``` + ## `getValue` {#safeds.data.tabular.containers.Column.getValue data-toc-label='[function] getValue'} -Return the column value at specified index. +Return the column value at specified index. This WILL LATER BE equivalent to the `[]` operator (indexed access). Nonnegative indices are counted from the beginning (starting at 0), negative indices from the end (starting at -1). @@ -549,7 +953,7 @@ pipeline example { ??? quote "Stub code in `Column.sdsstub`" - ```sds linenums="63" + ```sds linenums="83" @Pure @PythonName("get_value") fun getValue( @@ -590,7 +994,7 @@ pipeline example { ??? quote "Stub code in `Column.sdsstub`" - ```sds linenums="202" + ```sds linenums="389" @Pure fun idness() -> idness: Float ``` @@ -616,7 +1020,7 @@ pipeline example { ??? quote "Stub code in `Column.sdsstub`" - ```sds linenums="216" + ```sds linenums="403" @Pure fun max() -> max: T? ``` @@ -644,7 +1048,7 @@ pipeline example { ??? 
quote "Stub code in `Column.sdsstub`" - ```sds linenums="232" + ```sds linenums="419" @Pure fun mean() -> mean: T ``` @@ -673,7 +1077,7 @@ pipeline example { ??? quote "Stub code in `Column.sdsstub`" - ```sds linenums="249" + ```sds linenums="436" @Pure fun median() -> median: T ``` @@ -699,7 +1103,7 @@ pipeline example { ??? quote "Stub code in `Column.sdsstub`" - ```sds linenums="263" + ```sds linenums="450" @Pure fun min() -> min: T? ``` @@ -725,7 +1129,7 @@ pipeline example { ??? quote "Stub code in `Column.sdsstub`" - ```sds linenums="277" + ```sds linenums="464" @Pure @PythonName("missing_value_count") fun missingValueCount() -> missingValueCount: Int @@ -758,17 +1162,113 @@ pipeline example { ??? quote "Stub code in `Column.sdsstub`" - ```sds linenums="298" + ```sds linenums="485" @Pure @PythonName("missing_value_ratio") fun missingValueRatio() -> missingValueRatio: Float ``` +## `mode` {#safeds.data.tabular.containers.Column.mode data-toc-label='[function] mode'} + +Return the mode of the values in the column. + +The mode is the value that appears most frequently in the column. If multiple values occur equally often, all +of them are returned. The values are sorted in ascending order. + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `ignoreMissingValues` | [`Boolean`][safeds.lang.Boolean] | Whether to ignore missing values. | `#!sds true` | + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `mode` | [`List`][safeds.lang.List] | The mode of the values in the column. | + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("test", [3, 1, 2, 1, 3]); + val result = column.mode(); // [1, 3] +} +``` + +??? quote "Stub code in `Column.sdsstub`" + + ```sds linenums="505" + @Pure + fun mode( + @PythonName("ignore_missing_values") ignoreMissingValues: Boolean = true, + ) -> mode: List + ``` + +## `none` {#safeds.data.tabular.containers.Column.none data-toc-label='[function] none'} + +Return whether no value in the column satisfies the predicate. + +The predicate can return one of three values: + +- true, if the value satisfies the predicate. +- false, if the value does not satisfy the predicate. +- null, if the truthiness of the predicate is unknown, e.g. due to missing values. + +By default, cases where the truthiness of the predicate is unknown are ignored and this method returns: + +- true, if the predicate always returns false or null. +- false, if the predicate returns true at least once. + +You can instead enable Kleene logic by setting `ignoreUnknown = false`. In this case, this method returns: + +- true, if the predicate always returns false. +- false, if the predicate returns true at least once. +- null, if the predicate never returns true, but at least once null. + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `predicate` | `#!sds (cell: Cell) -> (satisfiesPredicate: Cell)` | The predicate to apply to each value. | - | +| `ignoreUnknown` | [`Boolean`][safeds.lang.Boolean] | Whether to ignore cases where the truthiness of the predicate is unknown. | `#!sds true` | + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `noneSatisfyPredicate` | [`Int?`][safeds.lang.Int] | Whether no value in the column satisfies the predicate. 
| + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("test", [1, 2, 3]); + val result = column.none((cell) -> cell.lt(0)); // true +} +``` +```sds hl_lines="3" +pipeline example { + val column = Column("test", [1, 2, 3]); + val result = column.none((cell) -> cell.gt(2)); // false +} +``` + +??? quote "Stub code in `Column.sdsstub`" + + ```sds linenums="250" + @Pure + fun none( + predicate: (cell: Cell) -> satisfiesPredicate: Cell, + @PythonName("ignore_unknown") ignoreUnknown: Boolean = true, + ) -> noneSatisfyPredicate: Int? + ``` + ## `rename` {#safeds.data.tabular.containers.Column.rename data-toc-label='[function] rename'} Return a new column with a new name. -The original column is not modified. +**Note:** The original column is not modified. **Parameters:** @@ -794,7 +1294,7 @@ pipeline example { ??? quote "Stub code in `Column.sdsstub`" - ```sds linenums="85" + ```sds linenums="272" @Pure fun rename( @PythonName("new_name") newName: String @@ -828,7 +1328,7 @@ pipeline example { ??? quote "Stub code in `Column.sdsstub`" - ```sds linenums="319" + ```sds linenums="527" @Pure fun stability() -> stability: Float ``` @@ -856,7 +1356,7 @@ pipeline example { ??? quote "Stub code in `Column.sdsstub`" - ```sds linenums="336" + ```sds linenums="544" @Pure @PythonName("standard_deviation") fun standardDeviation() -> standardDeviation: Float @@ -883,7 +1383,7 @@ pipeline example { ??? quote "Stub code in `Column.sdsstub`" - ```sds linenums="122" + ```sds linenums="309" @Pure @PythonName("summarize_statistics") fun summarizeStatistics() -> statistics: Table @@ -910,7 +1410,7 @@ pipeline example { ??? quote "Stub code in `Column.sdsstub`" - ```sds linenums="368" + ```sds linenums="576" @Pure @PythonName("to_list") fun toList() -> values: List @@ -938,7 +1438,7 @@ pipeline example { ??? quote "Stub code in `Column.sdsstub`" - ```sds linenums="384" + ```sds linenums="592" @Pure @PythonName("to_table") fun toTable() -> table: Table @@ -948,7 +1448,7 @@ pipeline example { Return a new column with values transformed by the transformer. -The original column is not modified. +**Note:** The original column is not modified. **Parameters:** @@ -980,7 +1480,7 @@ pipeline example { ??? quote "Stub code in `Column.sdsstub`" - ```sds linenums="106" + ```sds linenums="293" @Pure fun transform( transformer: (cell: Cell) -> transformedCell: Cell @@ -1010,7 +1510,7 @@ pipeline example { ??? quote "Stub code in `Column.sdsstub`" - ```sds linenums="354" + ```sds linenums="562" @Pure fun variance() -> variance: Float ``` diff --git a/docs/api/safeds/data/tabular/containers/Row.md b/docs/api/safeds/data/tabular/containers/Row.md index 33997bd3d..43d84d165 100644 --- a/docs/api/safeds/data/tabular/containers/Row.md +++ b/docs/api/safeds/data/tabular/containers/Row.md @@ -20,18 +20,25 @@ This class cannot be instantiated directly. It is only used for arguments of cal /** * The number of columns in the row. */ - @PythonName("number_of_columns") attr columnCount: Int + @PythonName("column_count") attr columnCount: Int /** * The schema of the row. */ attr ^schema: Schema /** - * Get the value of the specified column. + * Get the value of the specified column. This WILL LATER BE equivalent to using the `[]` operator (indexed access). * * @param name The name of the column. * * @result value The value of the column. 
+ * + * @example + * pipeline example { + * val table = Table({"col1": [1, 2], "col2": [3, 4]}); + * val result = table.removeRows((row) -> row.getValue("col1").eq(1)); + * // Table({"col1": [2], "col2": [4]}) + * } */ @Pure @PythonName("get_value") @@ -103,7 +110,7 @@ Get the type of the specified column. ??? quote "Stub code in `Row.sdsstub`" - ```sds linenums="45" + ```sds linenums="52" @Pure @PythonName("get_column_type") fun getColumnType( @@ -113,7 +120,7 @@ Get the type of the specified column. ## `getValue` {#safeds.data.tabular.containers.Row.getValue data-toc-label='[function] getValue'} -Get the value of the specified column. +Get the value of the specified column. This WILL LATER BE equivalent to using the `[]` operator (indexed access). **Parameters:** @@ -127,9 +134,19 @@ Get the value of the specified column. |------|------|-------------| | `value` | [`Cell`][safeds.data.tabular.containers.Cell] | The value of the column. | +**Examples:** + +```sds hl_lines="3" +pipeline example { + val table = Table({"col1": [1, 2], "col2": [3, 4]}); + val result = table.removeRows((row) -> row.getValue("col1").eq(1)); + // Table({"col1": [2], "col2": [4]}) +} +``` + ??? quote "Stub code in `Row.sdsstub`" - ```sds linenums="32" + ```sds linenums="39" @Pure @PythonName("get_value") fun getValue( @@ -155,7 +172,7 @@ Check if the row has a column with the specified name. ??? quote "Stub code in `Row.sdsstub`" - ```sds linenums="58" + ```sds linenums="65" @Pure @PythonName("has_column") fun hasColumn( diff --git a/docs/api/safeds/data/tabular/containers/StringCell.md b/docs/api/safeds/data/tabular/containers/StringCell.md new file mode 100644 index 000000000..22ad2b660 --- /dev/null +++ b/docs/api/safeds/data/tabular/containers/StringCell.md @@ -0,0 +1,830 @@ +--- +search: + boost: 0.5 +--- + +# `StringCell` {#safeds.data.tabular.containers.StringCell data-toc-label='[class] StringCell'} + +Namespace for operations on strings. + +This class cannot be instantiated directly. It can only be accessed using the `str` attribute of a cell. + +**Examples:** + +```sds +pipeline example { + val column = Column("example", ["ab", "bc", "cd"]); + val result = column.transform((cell) -> cell.str.toUppercase()); + // Column("example", ["AB", "BC", "CD"]) +} +``` + +??? quote "Stub code in `StringCell.sdsstub`" + + ```sds linenums="15" + class StringCell { + /** + * Check if the string value in the cell contains the substring. + * + * @param substring The substring to search for. + * + * @result contains Whether the string value contains the substring. + * + * @example + * pipeline example { + * val column = Column("example", ["ab", "bc", "cd"]); + * val result = column.countIf((cell) -> cell.str.contains("b")); // 2 + * } + */ + @Pure + fun contains( + substring: String + ) -> contains: Cell + + /** + * Check if the string value in the cell ends with the suffix. + * + * @param suffix The suffix to search for. + * + * @result endsWith Whether the string value ends with the suffix. + * + * @example + * pipeline example { + * val column = Column("example", ["ab", "bc", "cd"]); + * val result = column.countIf((cell) -> cell.str.endsWith("c")); // 1 + * } + */ + @Pure + @PythonName("ends_with") + fun endsWith( + suffix: String + ) -> endsWith: Cell + + /** + * Get the index of the first occurrence of the substring in the string value in the cell. + * + * @param substring The substring to search for. + * + * @result indexOf The index of the first occurrence of the substring. 
If the substring is not found, null is returned. + * + * @example + * pipeline example { + *     val column = Column("example", ["ab", "bc", "cd"]); + *     val result = column.transform((cell) -> cell.str.indexOf("b")); + *     // Column("example", [1, 0, null]) + * } + */ + @Pure + @PythonName("index_of") + fun indexOf( + substring: String + ) -> indexOf: Cell + + /** + * Get the number of characters of the string value in the cell. + * + * @param optimizeForAscii Greatly speed up this operation if the string is ASCII-only. If the string contains non-ASCII characters, + * this option will return incorrect results, though. + * + * @result length The length of the string value. + * + * @example + * pipeline example { + *     val column = Column("example", ["", "a", "abc"]); + *     val result = column.transform((cell) -> cell.str.length()); + *     // Column("example", [0, 1, 3]) + * } + */ + @Pure + fun length( + @PythonName("optimize_for_ascii") optimizeForAscii: Boolean = false + ) -> length: Cell + + /** + * Replace occurrences of the old substring with the new substring in the string value in the cell. + * + * @param old The substring to replace. + * @param new The substring to replace with. + * + * @result replacedString The string value with the occurrences replaced. + * + * @example + * pipeline example { + *     val column = Column("example", ["ab", "bc", "cd"]); + *     val result = column.transform((cell) -> cell.str.replace("b", "z")); + *     // Column("example", ["az", "zc", "cd"]) + * } + */ + @Pure + fun replace( + old: String, + new: String + ) -> replacedString: Cell + + /** + * Check if the string value in the cell starts with the prefix. + * + * @param prefix The prefix to search for. + * + * @result startsWith Whether the string value starts with the prefix. + * + * @example + * pipeline example { + *     val column = Column("example", ["ab", "bc", "cd"]); + *     val result = column.countIf((cell) -> cell.str.startsWith("a")); // 1 + * } + */ + @Pure + @PythonName("starts_with") + fun startsWith( + prefix: String + ) -> startsWith: Cell + + /** + * Get a substring of the string value in the cell. + * + * @param start The start index of the substring. + * @param length The length of the substring. If null, the substring contains all characters starting from `start`. Must be greater than + * or equal to 0. + * + * @result substring The substring of the string value. + * + * @example + * pipeline example { + *     val column = Column("example", ["abc", "def", "ghi"]); + *     val result = column.transform((cell) -> cell.str.substring(1, 2)); + *     // Column("example", ["bc", "ef", "hi"]) + * } + */ + @Pure + fun substring( + start: Int = 0, + length: Int? = null + ) -> substring: Cell + + /** + * Convert the string value in the cell to a date. Requires the string to be in the ISO 8601 format. + * + * @result date The date value. If the string cannot be converted to a date, null is returned. + * + * @example + * pipeline example { + *     val column = Column("example", ["2021-01-01", "2021-02-01", "abc"]); + *     val result = column.transform((cell) -> cell.str.toDate()); + * } + */ + @Pure + @PythonName("to_date") + fun toDate() -> date: Cell // TODO: Add builtin type for date + + /** + * Convert the string value in the cell to a datetime. Requires the string to be in the ISO 8601 format. + * + * @result datetime The datetime value. If the string cannot be converted to a datetime, null is returned.
+ * + * @example + * pipeline example { + * val column = Column("example", ["2021-01-01T00:00:00z", "2021-02-01T00:00:00z", "abc"]); + * val result = column.transform((cell) -> cell.str.toDatetime()); + * } + */ + @Pure + @PythonName("to_datetime") + fun toDatetime() -> datetime: Cell // TODO: Add builtin type for datetime + + /** + * Convert the string value in the cell to a float. + * + * @result float The float value. If the string cannot be converted to a float, null is returned. + * + * @example + * pipeline example { + * val column = Column("example", ["1", "3.4", "5.6", "abc"]); + * val result = column.transform((cell) -> cell.str.toFloat()); + * // Column("example", [1.0, 3.4, 5.6, null]) + * } + */ + @Pure + @PythonName("to_float") + fun toFloat() -> float: Cell + + /** + * Convert the string value in the cell to an integer. + * + * @param base The base of the integer. + * + * @result int The integer value. If the string cannot be converted to an integer, null is returned. + * + * @example + * pipeline example { + * val column = Column("example", ["1", "2", "3", "abc"]); + * val result = column.transform((cell) -> cell.str.toInt()); + * // Column("example", [1, 2, 3, null]) + * } + * + * @example + * pipeline example { + * val column = Column("example", ["1", "10", "11", "abc"]); + * val result = column.transform((cell) -> cell.str.toInt(base = 2)); + * // Column("example", [1, 2, 3, null]) + * } + */ + @Pure + @PythonName("to_int") + fun toInt( + base: Int = 10 + ) -> int: Cell + + /** + * Convert the string value in the cell to lowercase. + * + * @result lowercase The string value in lowercase. + * + * @example + * pipeline example { + * val column = Column("example", ["AB", "BC", "CD"]); + * val result = column.transform((cell) -> cell.str.toLowercase()); + * // Column("example", ["ab", "bc", "cd"]) + * } + */ + @Pure + @PythonName("to_lowercase") + fun toLowercase() -> lowercase: Cell + + /** + * Convert the string value in the cell to uppercase. + * + * @result uppercase The string value in uppercase. + * + * @example + * pipeline example { + * val column = Column("example", ["ab", "bc", "cd"]); + * val result = column.transform((cell) -> cell.str.toUppercase()); + * // Column("example", ["AB", "BC", "CD"]) + * } + */ + @Pure + @PythonName("to_uppercase") + fun toUppercase() -> uppercase: Cell + + /** + * Remove whitespace from the start and end of the string value in the cell. + * + * @result trimmed The string value without whitespace at the start and end. + * + * @example + * pipeline example { + * val column = Column("example", ["", " abc", "abc ", " abc "]); + * val result = column.transform((cell) -> cell.str.trim()); + * // Column("example", ["", "abc", "abc", "abc"]) + * } + */ + @Pure + fun trim() -> trimmed: Cell + + /** + * Remove whitespace from the end of the string value in the cell. + * + * @result trimmed The string value without whitespace at the end. + * + * @example + * pipeline example { + * val column = Column("example", ["", " abc", "abc ", " abc "]); + * val result = column.transform((cell) -> cell.str.trimEnd()); + * // Column("example", ["", " abc", "abc", " abc"]) + * } + */ + @Pure + @PythonName("trim_end") + fun trimEnd() -> trimmed: Cell + + /** + * Remove whitespace from the start of the string value in the cell. + * + * @result trimmed The string value without whitespace at the start. 
+ * + * @example + * pipeline example { + * val column = Column("example", ["", " abc", "abc ", " abc "]); + * val result = column.transform((cell) -> cell.str.trimStart()); + * // Column("example", ["", "abc", "abc ", "abc "]) + * } + */ + @Pure + @PythonName("trim_start") + fun trimStart() -> trimmed: Cell + } + ``` + +## `contains` {#safeds.data.tabular.containers.StringCell.contains data-toc-label='[function] contains'} + +Check if the string value in the cell contains the substring. + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `substring` | [`String`][safeds.lang.String] | The substring to search for. | - | + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `contains` | [`Cell`][safeds.data.tabular.containers.Cell] | Whether the string value contains the substring. | + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("example", ["ab", "bc", "cd"]); + val result = column.countIf((cell) -> cell.str.contains("b")); // 2 +} +``` + +??? quote "Stub code in `StringCell.sdsstub`" + + ```sds linenums="29" + @Pure + fun contains( + substring: String + ) -> contains: Cell + ``` + +## `endsWith` {#safeds.data.tabular.containers.StringCell.endsWith data-toc-label='[function] endsWith'} + +Check if the string value in the cell ends with the suffix. + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `suffix` | [`String`][safeds.lang.String] | The suffix to search for. | - | + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `endsWith` | [`Cell`][safeds.data.tabular.containers.Cell] | Whether the string value ends with the suffix. | + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("example", ["ab", "bc", "cd"]); + val result = column.countIf((cell) -> cell.str.endsWith("c")); // 1 +} +``` + +??? quote "Stub code in `StringCell.sdsstub`" + + ```sds linenums="47" + @Pure + @PythonName("ends_with") + fun endsWith( + suffix: String + ) -> endsWith: Cell + ``` + +## `indexOf` {#safeds.data.tabular.containers.StringCell.indexOf data-toc-label='[function] indexOf'} + +Get the index of the first occurrence of the substring in the string value in the cell. + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `substring` | [`String`][safeds.lang.String] | The substring to search for. | - | + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `indexOf` | [`Cell`][safeds.data.tabular.containers.Cell] | The index of the first occurrence of the substring. If the substring is not found, null is returned. | + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("example", ["ab", "bc", "cd"]); + val result = column.transform((cell) -> cell.str.indexOf("b")); + // Column("example", [1, 0, null]) +} +``` + +??? quote "Stub code in `StringCell.sdsstub`" + + ```sds linenums="67" + @Pure + @PythonName("index_of") + fun indexOf( + substring: String + ) -> indexOf: Cell + ``` + +## `length` {#safeds.data.tabular.containers.StringCell.length data-toc-label='[function] length'} + +Get the number of characters of the string value in the cell. + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `optimizeForAscii` | [`Boolean`][safeds.lang.Boolean] | Greatly speed up this operation if the string is ASCII-only. 
If the string contains non-ASCII characters, this option will return incorrect results, though. | `#!sds false` | + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `length` | [`Cell`][safeds.data.tabular.containers.Cell] | The length of the string value. | + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("example", ["", "a", "abc"]); + val result = column.transform((cell) -> cell.str.length()); + // Column("example", [0, 1, 3]) +} +``` + +??? quote "Stub code in `StringCell.sdsstub`" + + ```sds linenums="88" + @Pure + fun length( + @PythonName("optimize_for_ascii") optimizeForAscii: Boolean = false + ) -> length: Cell + ``` + +## `replace` {#safeds.data.tabular.containers.StringCell.replace data-toc-label='[function] replace'} + +Replace occurrences of the old substring with the new substring in the string value in the cell. + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `old` | [`String`][safeds.lang.String] | The substring to replace. | - | +| `new` | [`String`][safeds.lang.String] | The substring to replace with. | - | + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `replacedString` | [`Cell`][safeds.data.tabular.containers.Cell] | The string value with the occurrences replaced. | + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("example", ["ab", "bc", "cd"]); + val result = column.transform((cell) -> cell.str.replace("b", "z")); + // Column("example", ["az", "zc", "cd"]) +} +``` + +??? quote "Stub code in `StringCell.sdsstub`" + + ```sds linenums="108" + @Pure + fun replace( + old: String, + new: String + ) -> replacedString: Cell + ``` + +## `startsWith` {#safeds.data.tabular.containers.StringCell.startsWith data-toc-label='[function] startsWith'} + +Check if the string value in the cell starts with the prefix. + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `prefix` | [`String`][safeds.lang.String] | The prefix to search for. | - | + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `startsWith` | [`Cell`][safeds.data.tabular.containers.Cell] | Whether the string value starts with the prefix. | + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("example", ["ab", "bc", "cd"]); + val result = column.countIf((cell) -> cell.str.startsWith("a")); // 1 +} +``` + +??? quote "Stub code in `StringCell.sdsstub`" + + ```sds linenums="127" + @Pure + @PythonName("starts_with") + fun startsWith( + prefix: String + ) -> startsWith: Cell + ``` + +## `substring` {#safeds.data.tabular.containers.StringCell.substring data-toc-label='[function] substring'} + +Get a substring of the string value in the cell. + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `start` | [`Int`][safeds.lang.Int] | The start index of the substring. | `#!sds 0` | +| `length` | [`Int?`][safeds.lang.Int] | The length of the substring. If null, the slice contains all rows starting from `start`. Must greater than or equal to 0. | `#!sds null` | + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `substring` | [`Cell`][safeds.data.tabular.containers.Cell] | The substring of the string value. 
| + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("example", ["abc", "def", "ghi"]); + val result = column.transform((cell) -> cell.str.substring(1, 2)); + // Column("example", ["bc", "ef", "hi"]) +} +``` + +??? quote "Stub code in `StringCell.sdsstub`" + + ```sds linenums="149" + @Pure + fun substring( + start: Int = 0, + length: Int? = null + ) -> substring: Cell + ``` + +## `toDate` {#safeds.data.tabular.containers.StringCell.toDate data-toc-label='[function] toDate'} + +Convert the string value in the cell to a date. Requires the string to be in the ISO 8601 format. + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `date` | [`Cell`][safeds.data.tabular.containers.Cell] | The date value. If the string cannot be converted to a date, null is returned. | + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("example", ["2021-01-01", "2021-02-01", "abc"]); + val result = column.transform((cell) -> cell.str.toDate()); +} +``` + +??? quote "Stub code in `StringCell.sdsstub`" + + ```sds linenums="166" + @Pure + @PythonName("to_date") + fun toDate() -> date: Cell + ``` + +## `toDatetime` {#safeds.data.tabular.containers.StringCell.toDatetime data-toc-label='[function] toDatetime'} + +Convert the string value in the cell to a datetime. Requires the string to be in the ISO 8601 format. + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `datetime` | [`Cell`][safeds.data.tabular.containers.Cell] | The datetime value. If the string cannot be converted to a datetime, null is returned. | + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("example", ["2021-01-01T00:00:00z", "2021-02-01T00:00:00z", "abc"]); + val result = column.transform((cell) -> cell.str.toDatetime()); +} +``` + +??? quote "Stub code in `StringCell.sdsstub`" + + ```sds linenums="181" + @Pure + @PythonName("to_datetime") + fun toDatetime() -> datetime: Cell + ``` + +## `toFloat` {#safeds.data.tabular.containers.StringCell.toFloat data-toc-label='[function] toFloat'} + +Convert the string value in the cell to a float. + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `float` | [`Cell`][safeds.data.tabular.containers.Cell] | The float value. If the string cannot be converted to a float, null is returned. | + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("example", ["1", "3.4", "5.6", "abc"]); + val result = column.transform((cell) -> cell.str.toFloat()); + // Column("example", [1.0, 3.4, 5.6, null]) +} +``` + +??? quote "Stub code in `StringCell.sdsstub`" + + ```sds linenums="197" + @Pure + @PythonName("to_float") + fun toFloat() -> float: Cell + ``` + +## `toInt` {#safeds.data.tabular.containers.StringCell.toInt data-toc-label='[function] toInt'} + +Convert the string value in the cell to an integer. + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `base` | [`Int`][safeds.lang.Int] | The base of the integer. | `#!sds 10` | + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `int` | [`Cell`][safeds.data.tabular.containers.Cell] | The integer value. If the string cannot be converted to an integer, null is returned. 
| + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("example", ["1", "2", "3", "abc"]); + val result = column.transform((cell) -> cell.str.toInt()); + // Column("example", [1, 2, 3, null]) +} +``` +```sds hl_lines="3" +pipeline example { + val column = Column("example", ["1", "10", "11", "abc"]); + val result = column.transform((cell) -> cell.str.toInt(base = 2)); + // Column("example", [1, 2, 3, null]) +} +``` + +??? quote "Stub code in `StringCell.sdsstub`" + + ```sds linenums="222" + @Pure + @PythonName("to_int") + fun toInt( + base: Int = 10 + ) -> int: Cell + ``` + +## `toLowercase` {#safeds.data.tabular.containers.StringCell.toLowercase data-toc-label='[function] toLowercase'} + +Convert the string value in the cell to lowercase. + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `lowercase` | [`Cell`][safeds.data.tabular.containers.Cell] | The string value in lowercase. | + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("example", ["AB", "BC", "CD"]); + val result = column.transform((cell) -> cell.str.toLowercase()); + // Column("example", ["ab", "bc", "cd"]) +} +``` + +??? quote "Stub code in `StringCell.sdsstub`" + + ```sds linenums="240" + @Pure + @PythonName("to_lowercase") + fun toLowercase() -> lowercase: Cell + ``` + +## `toUppercase` {#safeds.data.tabular.containers.StringCell.toUppercase data-toc-label='[function] toUppercase'} + +Convert the string value in the cell to uppercase. + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `uppercase` | [`Cell`][safeds.data.tabular.containers.Cell] | The string value in uppercase. | + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("example", ["ab", "bc", "cd"]); + val result = column.transform((cell) -> cell.str.toUppercase()); + // Column("example", ["AB", "BC", "CD"]) +} +``` + +??? quote "Stub code in `StringCell.sdsstub`" + + ```sds linenums="256" + @Pure + @PythonName("to_uppercase") + fun toUppercase() -> uppercase: Cell + ``` + +## `trim` {#safeds.data.tabular.containers.StringCell.trim data-toc-label='[function] trim'} + +Remove whitespace from the start and end of the string value in the cell. + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `trimmed` | [`Cell`][safeds.data.tabular.containers.Cell] | The string value without whitespace at the start and end. | + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("example", ["", " abc", "abc ", " abc "]); + val result = column.transform((cell) -> cell.str.trim()); + // Column("example", ["", "abc", "abc", "abc"]) +} +``` + +??? quote "Stub code in `StringCell.sdsstub`" + + ```sds linenums="272" + @Pure + fun trim() -> trimmed: Cell + ``` + +## `trimEnd` {#safeds.data.tabular.containers.StringCell.trimEnd data-toc-label='[function] trimEnd'} + +Remove whitespace from the end of the string value in the cell. + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `trimmed` | [`Cell`][safeds.data.tabular.containers.Cell] | The string value without whitespace at the end. | + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("example", ["", " abc", "abc ", " abc "]); + val result = column.transform((cell) -> cell.str.trimEnd()); + // Column("example", ["", " abc", "abc", " abc"]) +} +``` + +??? 
quote "Stub code in `StringCell.sdsstub`" + + ```sds linenums="287" + @Pure + @PythonName("trim_end") + fun trimEnd() -> trimmed: Cell + ``` + +## `trimStart` {#safeds.data.tabular.containers.StringCell.trimStart data-toc-label='[function] trimStart'} + +Remove whitespace from the start of the string value in the cell. + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `trimmed` | [`Cell`][safeds.data.tabular.containers.Cell] | The string value without whitespace at the start. | + +**Examples:** + +```sds hl_lines="3" +pipeline example { + val column = Column("example", ["", " abc", "abc ", " abc "]); + val result = column.transform((cell) -> cell.str.trimStart()); + // Column("example", ["", "abc", "abc ", "abc "]) +} +``` + +??? quote "Stub code in `StringCell.sdsstub`" + + ```sds linenums="303" + @Pure + @PythonName("trim_start") + fun trimStart() -> trimmed: Cell + ``` diff --git a/docs/api/safeds/data/tabular/containers/Table.md b/docs/api/safeds/data/tabular/containers/Table.md index 2173e2962..6a72f5920 100644 --- a/docs/api/safeds/data/tabular/containers/Table.md +++ b/docs/api/safeds/data/tabular/containers/Table.md @@ -39,13 +39,13 @@ pipeline example { /** * The number of columns in the table. */ - @PythonName("number_of_columns") attr columnCount: Int + @PythonName("column_count") attr columnCount: Int /** * The number of rows in the table. * * **Note:** This operation must fully load the data into memory, which can be expensive. */ - @PythonName("number_of_rows") attr rowCount: Int + @PythonName("row_count") attr rowCount: Int /** * The plotter for the table. */ @@ -201,8 +201,6 @@ pipeline example { /** * Get a column from the table. * - * **Note:** This operation must fully load the data into memory, which can be expensive. - * * @param name The name of the column. * * @result column The column. @@ -422,6 +420,45 @@ pipeline example { transformer: (cell: Cell) -> transformedCell: Cell ) -> newTable: Table + /** + * Return how many rows in the table satisfy the predicate. + * + * The predicate can return one of three results: + * + * - true, if the row satisfies the predicate. + * - false, if the row does not satisfy the predicate. + * - null, if the truthiness of the predicate is unknown, e.g. due to missing values. + * + * By default, cases where the truthiness of the predicate is unknown are ignored and this method returns how often + * the predicate returns true. + * + * You can instead enable Kleene logic by setting `ignore_unknown = False`. In this case, this method returns null + * if the predicate returns null at least once. Otherwise, it still returns how often the predicate returns true. + * + * @param predicate The predicate to apply to each row. + * @param ignoreUnknown Whether to ignore cases where the truthiness of the predicate is unknown. + * + * @result count The number of rows in the table that satisfy the predicate. + * + * @example + * pipeline example { + * val table = Table({"col1": [1, 2, 3], "col2": [1, 3, 3]}); + * val result = table.countRowIf((row) -> row.getValue("col1").eq(row.getValue("col2"))); // 2 + * } + * + * @example + * pipeline example { + * val table = Table({"col1": [1, 2, 3], "col2": [1, 3, 3]}); + * val result = table.countRowIf((row) -> row.getValue("col1").gt(row.getValue("col2"))); // 0 + * } + */ + @Pure + @PythonName("count_row_if") + fun countRowIf( + predicate: (cell: Row) -> satisfiesPredicate: Cell, + @PythonName("ignore_unknown") ignoreUnknown: Boolean = true, + ) -> count: Int? 
+ /** * Return a new table without duplicate rows. * @@ -743,7 +780,7 @@ pipeline example { * @example * pipeline example { * val table = Table({"a": [1, 2, 3]}); - * val transformer, val transformedTable = RangeScaler(min=0.0, max=1.0).fitAndTransform(table, ["a"]); + * val transformer, val transformedTable = RangeScaler(min=0.0, max=1.0, columnNames="a").fitAndTransform(table); * val result = transformedTable.inverseTransformTable(transformer); * // Table({"a": [1, 2, 3]}) * } @@ -769,7 +806,7 @@ pipeline example { * @example * pipeline example { * val table = Table({"a": [1, 2, 3]}); - * val transformer = RangeScaler(min=0.0, max=1.0).fit(table, ["a"]); + * val transformer = RangeScaler(min=0.0, max=1.0, columnNames="a").fit(table); * val result = table.transformTable(transformer); * // Table({"a": [0, 0.5, 1]}) * } @@ -903,7 +940,7 @@ pipeline example { * Feature columns are implicitly defined as all columns except the target and extra columns. If no extra columns * are specified, all columns except the target column are used as features. * - * @param targetName Name of the target column. + * @param targetName The name of the target column. * @param extraNames Names of the columns that are neither feature nor target. If null, no extra columns are used, i.e. all but * the target column are used as features. * @@ -933,10 +970,12 @@ pipeline example { * * The original table is not modified. * - * @param targetName Name of the target column. - * @param timeName Name of the time column. - * @param extraNames Names of the columns that are neither features nor target. If null, no extra columns are used, i.e. all but + * @param targetName The name of the target column. + * @param timeName The name of the time column. + * @param windowSize The number of consecutive sample to use as input for prediction. + * @param extraNames Names of the columns that are neither features nor target. If None, no extra columns are used, i.e. all but * the target column are used as features. + * @param forecastHorizon The number of time steps to predict into the future. * * @result dataset A new time series dataset with the given target and feature names. * @@ -949,7 +988,7 @@ pipeline example { * "amount_bought": [74, 72, 51], * } * ); - * val dataset = table.toTimeSeriesDataset(targetName="amount_bought", timeName= "day"); + * val dataset = table.toTimeSeriesDataset(targetName="amount_bought", timeName= "day", windowSize=2); * } */ @Pure @@ -957,7 +996,9 @@ pipeline example { fun toTimeSeriesDataset( @PythonName("target_name") targetName: String, @PythonName("time_name") timeName: String, - @PythonName("extra_names") extraNames: List? = null + @PythonName("window_size") windowSize: Int, + @PythonName("extra_names") extraNames: List? = null, + @PythonName("forecast_horizon") forecastHorizon: Int = 1 ) -> dataset: TimeSeriesDataset } ``` @@ -1108,7 +1149,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="699" + ```sds linenums="736" @Pure @PythonName("add_table_as_columns") fun addTableAsColumns( @@ -1150,7 +1191,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="725" + ```sds linenums="762" @Pure @PythonName("add_table_as_rows") fun addTableAsRows( @@ -1158,12 +1199,65 @@ pipeline example { ) -> newTable: Table ``` +## `countRowIf` {#safeds.data.tabular.containers.Table.countRowIf data-toc-label='[function] countRowIf'} + +Return how many rows in the table satisfy the predicate. 
+
+The predicate can return one of three results:
+
+- true, if the row satisfies the predicate.
+- false, if the row does not satisfy the predicate.
+- null, if the truthiness of the predicate is unknown, e.g. due to missing values.
+
+By default, cases where the truthiness of the predicate is unknown are ignored and this method returns how often
+the predicate returns true.
+
+You can instead enable Kleene logic by setting `ignoreUnknown = false`. In this case, this method returns null
+if the predicate returns null at least once. Otherwise, it still returns how often the predicate returns true.
+
+**Parameters:**
+
+| Name | Type | Description | Default |
+|------|------|-------------|---------|
+| `predicate` | `#!sds (cell: Row) -> (satisfiesPredicate: Cell)` | The predicate to apply to each row. | - |
+| `ignoreUnknown` | [`Boolean`][safeds.lang.Boolean] | Whether to ignore cases where the truthiness of the predicate is unknown. | `#!sds true` |
+
+**Results:**
+
+| Name | Type | Description |
+|------|------|-------------|
+| `count` | [`Int?`][safeds.lang.Int] | The number of rows in the table that satisfy the predicate. |
+
+**Examples:**
+
+```sds hl_lines="3"
+pipeline example {
+    val table = Table({"col1": [1, 2, 3], "col2": [1, 3, 3]});
+    val result = table.countRowIf((row) -> row.getValue("col1").eq(row.getValue("col2"))); // 2
+}
+```
+```sds hl_lines="3"
+pipeline example {
+    val table = Table({"col1": [1, 2, 3], "col2": [1, 3, 3]});
+    val result = table.countRowIf((row) -> row.getValue("col1").gt(row.getValue("col2"))); // 0
+}
+```
+
+??? quote "Stub code in `Table.sdsstub`"
+
+    ```sds linenums="455"
+    @Pure
+    @PythonName("count_row_if")
+    fun countRowIf(
+        predicate: (cell: Row) -> satisfiesPredicate: Cell,
+        @PythonName("ignore_unknown") ignoreUnknown: Boolean = true,
+    ) -> count: Int?
+    ```
+
 ## `getColumn` {#safeds.data.tabular.containers.Table.getColumn data-toc-label='[function] getColumn'}
 
 Get a column from the table.
 
-**Note:** This operation must fully load the data into memory, which can be expensive.
-
 **Parameters:**
 
 | Name | Type | Description | Default |
 |------|------|-------------|---------|
@@ -1188,7 +1282,7 @@ pipeline example {
 
 ??? quote "Stub code in `Table.sdsstub`"
 
-    ```sds linenums="217"
+    ```sds linenums="215"
     @Pure
     @PythonName("get_column")
     fun getColumn(
@@ -1223,7 +1317,7 @@ pipeline example {
 
 ??? quote "Stub code in `Table.sdsstub`"
 
-    ```sds linenums="236"
+    ```sds linenums="234"
     @Pure
     @PythonName("get_column_type")
     fun getColumnType(
@@ -1258,7 +1352,7 @@ pipeline example {
 
 ??? quote "Stub code in `Table.sdsstub`"
 
-    ```sds linenums="255"
+    ```sds linenums="253"
     @Pure
     @PythonName("has_column")
     fun hasColumn(
@@ -1292,7 +1386,7 @@ Return a new table inverse-transformed by a **fitted, invertible** transformer.
 ```sds hl_lines="4"
 pipeline example {
     val table = Table({"a": [1, 2, 3]});
-    val transformer, val transformedTable = RangeScaler(min=0.0, max=1.0).fitAndTransform(table, ["a"]);
+    val transformer, val transformedTable = RangeScaler(min=0.0, max=1.0, columnNames="a").fitAndTransform(table);
     val result = transformedTable.inverseTransformTable(transformer);
     // Table({"a": [1, 2, 3]})
 }
@@ -1300,7 +1394,7 @@ pipeline example {
 
 ??? quote "Stub code in `Table.sdsstub`"
 
-    ```sds linenums="751"
+    ```sds linenums="788"
     @Pure
     @PythonName("inverse_transform_table")
     fun inverseTransformTable(
@@ -1349,7 +1443,7 @@ pipeline example {
 
 ??? 
quote "Stub code in `Table.sdsstub`" - ```sds linenums="288" + ```sds linenums="286" @Pure @PythonName("remove_columns") fun removeColumns( @@ -1385,7 +1479,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="308" + ```sds linenums="306" @Pure @PythonName("remove_columns_except") fun removeColumnsExcept( @@ -1420,7 +1514,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="331" + ```sds linenums="329" @Pure @PythonName("remove_columns_with_missing_values") fun removeColumnsWithMissingValues() -> newTable: Table @@ -1450,7 +1544,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="439" + ```sds linenums="476" @Pure @PythonName("remove_duplicate_rows") fun removeDuplicateRows() -> newTable: Table @@ -1480,7 +1574,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="349" + ```sds linenums="347" @Pure @PythonName("remove_non_numeric_columns") fun removeNonNumericColumns() -> newTable: Table @@ -1516,7 +1610,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="459" + ```sds linenums="496" @Pure @PythonName("remove_rows") fun removeRows( @@ -1555,7 +1649,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="482" + ```sds linenums="519" @Pure @PythonName("remove_rows_by_column") fun removeRowsByColumn( @@ -1594,7 +1688,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="505" + ```sds linenums="542" @Pure @PythonName("remove_rows_with_missing_values") fun removeRowsWithMissingValues( @@ -1649,7 +1743,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="544" + ```sds linenums="581" @Pure @PythonName("remove_rows_with_outliers") fun removeRowsWithOutliers( @@ -1689,7 +1783,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="370" + ```sds linenums="368" @Pure @PythonName("rename_column") fun renameColumn( @@ -1729,7 +1823,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="394" + ```sds linenums="392" @Pure @PythonName("replace_column") fun replaceColumn( @@ -1762,7 +1856,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="565" + ```sds linenums="602" @Pure @PythonName("shuffle_rows") fun shuffleRows() -> newTable: Table @@ -1806,7 +1900,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="594" + ```sds linenums="631" @Pure @PythonName("slice_rows") fun sliceRows( @@ -1846,7 +1940,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="618" + ```sds linenums="655" @Pure @PythonName("sort_rows") fun sortRows( @@ -1886,7 +1980,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="642" + ```sds linenums="679" @Pure @PythonName("sort_rows_by_column") fun sortRowsByColumn( @@ -1932,7 +2026,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="672" + ```sds linenums="709" @Pure @PythonName("split_rows") fun splitRows( @@ -1962,7 +2056,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="794" + ```sds linenums="831" @Pure @PythonName("summarize_statistics") fun summarizeStatistics() -> statistics: Table @@ -1989,7 +2083,7 @@ pipeline example { ??? 
quote "Stub code in `Table.sdsstub`" - ```sds linenums="809" + ```sds linenums="846" @Pure @PythonName("to_columns") fun toColumns() -> columns: List @@ -2019,7 +2113,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="827" + ```sds linenums="864" @Impure([ImpurityReason.FileWriteToParameterizedPath("path")]) @PythonName("to_csv_file") fun toCsvFile( @@ -2054,7 +2148,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="868" + ```sds linenums="905" @Impure([ImpurityReason.FileWriteToParameterizedPath("path")]) @PythonName("to_json_file") fun toJsonFile( @@ -2085,7 +2179,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="845" + ```sds linenums="882" @Pure @PythonName("to_dict") fun toMap() -> map: Map> @@ -2115,7 +2209,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="889" + ```sds linenums="926" @Impure([ImpurityReason.FileWriteToParameterizedPath("path")]) @PythonName("to_parquet_file") fun toParquetFile( @@ -2139,7 +2233,7 @@ are specified, all columns except the target column are used as features. | Name | Type | Description | Default | |------|------|-------------|---------| -| `targetName` | [`String`][safeds.lang.String] | Name of the target column. | - | +| `targetName` | [`String`][safeds.lang.String] | The name of the target column. | - | | `extraNames` | [`List?`][safeds.lang.List] | Names of the columns that are neither feature nor target. If null, no extra columns are used, i.e. all but the target column are used as features. | `#!sds null` | **Results:** @@ -2165,7 +2259,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="924" + ```sds linenums="961" @Pure @PythonName("to_tabular_dataset") fun toTabularDataset( @@ -2184,9 +2278,11 @@ The original table is not modified. | Name | Type | Description | Default | |------|------|-------------|---------| -| `targetName` | [`String`][safeds.lang.String] | Name of the target column. | - | -| `timeName` | [`String`][safeds.lang.String] | Name of the time column. | - | -| `extraNames` | [`List?`][safeds.lang.List] | Names of the columns that are neither features nor target. If null, no extra columns are used, i.e. all but the target column are used as features. | `#!sds null` | +| `targetName` | [`String`][safeds.lang.String] | The name of the target column. | - | +| `timeName` | [`String`][safeds.lang.String] | The name of the time column. | - | +| `windowSize` | [`Int`][safeds.lang.Int] | The number of consecutive sample to use as input for prediction. | - | +| `extraNames` | [`List?`][safeds.lang.List] | Names of the columns that are neither features nor target. If None, no extra columns are used, i.e. all but the target column are used as features. | `#!sds null` | +| `forecastHorizon` | [`Int`][safeds.lang.Int] | The number of time steps to predict into the future. | `#!sds 1` | **Results:** @@ -2205,19 +2301,21 @@ pipeline example { "amount_bought": [74, 72, 51], } ); - val dataset = table.toTimeSeriesDataset(targetName="amount_bought", timeName= "day"); + val dataset = table.toTimeSeriesDataset(targetName="amount_bought", timeName= "day", windowSize=2); } ``` ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="955" + ```sds linenums="994" @Pure @PythonName("to_time_series_dataset") fun toTimeSeriesDataset( @PythonName("target_name") targetName: String, @PythonName("time_name") timeName: String, - @PythonName("extra_names") extraNames: List? 
= null + @PythonName("window_size") windowSize: Int, + @PythonName("extra_names") extraNames: List? = null, + @PythonName("forecast_horizon") forecastHorizon: Int = 1 ) -> dataset: TimeSeriesDataset ``` @@ -2252,7 +2350,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="418" + ```sds linenums="416" @Pure @PythonName("transform_column") fun transformColumn( @@ -2287,7 +2385,7 @@ Return a new table transformed by a **fitted** transformer. ```sds hl_lines="4" pipeline example { val table = Table({"a": [1, 2, 3]}); - val transformer = RangeScaler(min=0.0, max=1.0).fit(table, ["a"]); + val transformer = RangeScaler(min=0.0, max=1.0, columnNames="a").fit(table); val result = table.transformTable(transformer); // Table({"a": [0, 0.5, 1]}) } @@ -2295,7 +2393,7 @@ pipeline example { ??? quote "Stub code in `Table.sdsstub`" - ```sds linenums="777" + ```sds linenums="814" @Pure @PythonName("transform_table") fun transformTable( diff --git a/docs/api/safeds/data/tabular/plotting/ColumnPlotter.md b/docs/api/safeds/data/tabular/plotting/ColumnPlotter.md index 3b8d6a9b0..0be2d6010 100644 --- a/docs/api/safeds/data/tabular/plotting/ColumnPlotter.md +++ b/docs/api/safeds/data/tabular/plotting/ColumnPlotter.md @@ -53,7 +53,7 @@ pipeline example { */ @Pure fun histogram( - @PythonName("maximum_number_of_bins") const maxBinCount: Int = 10 + @PythonName("max_bin_count") const maxBinCount: Int = 10 ) -> plot: Image where { maxBinCount > 0 } @@ -136,7 +136,7 @@ pipeline example { ```sds linenums="47" @Pure fun histogram( - @PythonName("maximum_number_of_bins") const maxBinCount: Int = 10 + @PythonName("max_bin_count") const maxBinCount: Int = 10 ) -> plot: Image where { maxBinCount > 0 } diff --git a/docs/api/safeds/data/tabular/plotting/TablePlotter.md b/docs/api/safeds/data/tabular/plotting/TablePlotter.md index b72662e99..ede53c77b 100644 --- a/docs/api/safeds/data/tabular/plotting/TablePlotter.md +++ b/docs/api/safeds/data/tabular/plotting/TablePlotter.md @@ -68,7 +68,7 @@ pipeline example { */ @Pure fun histograms( - @PythonName("maximum_number_of_bins") const maxBinCount: Int = 10 + @PythonName("max_bin_count") const maxBinCount: Int = 10 ) -> plot: Image where { maxBinCount > 0 } @@ -211,7 +211,7 @@ pipeline example { ```sds linenums="63" @Pure fun histograms( - @PythonName("maximum_number_of_bins") const maxBinCount: Int = 10 + @PythonName("max_bin_count") const maxBinCount: Int = 10 ) -> plot: Image where { maxBinCount > 0 } diff --git a/docs/api/safeds/data/tabular/transformation/Discretizer.md b/docs/api/safeds/data/tabular/transformation/Discretizer.md index f1cd9005c..0fe1dbd2c 100644 --- a/docs/api/safeds/data/tabular/transformation/Discretizer.md +++ b/docs/api/safeds/data/tabular/transformation/Discretizer.md @@ -9,13 +9,14 @@ The Discretizer bins continuous data into intervals. | Name | Type | Description | Default | |------|------|-------------|---------| | `binCount` | [`Int`][safeds.lang.Int] | The number of bins to be created. | `#!sds 5` | +| `columnNames` | `#!sds union, String?>` | The list of columns used to fit the transformer. If `None`, all numeric columns are used. | `#!sds null` | **Examples:** ```sds hl_lines="3" pipeline example { val table = Table({"a": [1, 2, 3, 4]}); - val discretizer = Discretizer(2).fit(table, ["a"]); + val discretizer = Discretizer(2, columnNames = "a").fit(table); val transformedTable = discretizer.transform(table); // Table({"a": [0, 0, 1, 1]}) } @@ -23,16 +24,17 @@ pipeline example { ??? 
quote "Stub code in `Discretizer.sdsstub`" - ```sds linenums="19" + ```sds linenums="20" class Discretizer( - @PythonName("number_of_bins") const binCount: Int = 5 + @PythonName("bin_count") const binCount: Int = 5, + @PythonName("column_names") columnNames: union, String, Nothing?> = null ) sub TableTransformer where { binCount >= 2 } { /** * The number of bins to be created. */ - @PythonName("number_of_bins") attr binCount: Int + @PythonName("bin_count") attr binCount: Int /** * Learn a transformation for a set of columns in a table. @@ -40,14 +42,12 @@ pipeline example { * This transformer is not modified. * * @param table The table used to fit the transformer. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. */ @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: Discretizer /** @@ -56,7 +56,6 @@ pipeline example { * **Note:** Neither this transformer nor the given table are modified. * * @param table The table used to fit the transformer. The transformer is then applied to this table. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. * @result transformedTable The transformed table. @@ -64,8 +63,7 @@ pipeline example { @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: Discretizer, transformedTable: Table) } ``` @@ -93,7 +91,6 @@ This transformer is not modified. | Name | Type | Description | Default | |------|------|-------------|---------| | `table` | [`Table`][safeds.data.tabular.containers.Table] | The table used to fit the transformer. | - | -| `columnNames` | [`List?`][safeds.lang.List] | The list of columns from the table used to fit the transformer. If `null`, all columns are used. | - | **Results:** @@ -103,11 +100,10 @@ This transformer is not modified. ??? quote "Stub code in `Discretizer.sdsstub`" - ```sds linenums="39" + ```sds linenums="40" @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: Discretizer ``` @@ -122,7 +118,6 @@ Learn a transformation for a set of columns in a table and apply the learned tra | Name | Type | Description | Default | |------|------|-------------|---------| | `table` | [`Table`][safeds.data.tabular.containers.Table] | The table used to fit the transformer. The transformer is then applied to this table. | - | -| `columnNames` | [`List?`][safeds.lang.List] | The list of columns from the table used to fit the transformer. If `null`, all columns are used. | `#!sds null` | **Results:** @@ -133,12 +128,11 @@ Learn a transformation for a set of columns in a table and apply the learned tra ??? quote "Stub code in `Discretizer.sdsstub`" - ```sds linenums="56" + ```sds linenums="55" @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? 
= null + table: Table ) -> (fittedTransformer: Discretizer, transformedTable: Table) ``` diff --git a/docs/api/safeds/data/tabular/transformation/InvertibleTableTransformer.md b/docs/api/safeds/data/tabular/transformation/InvertibleTableTransformer.md index 1c0c8420c..1312569c4 100644 --- a/docs/api/safeds/data/tabular/transformation/InvertibleTableTransformer.md +++ b/docs/api/safeds/data/tabular/transformation/InvertibleTableTransformer.md @@ -21,14 +21,12 @@ A `TableTransformer` that can also undo the learned transformation after it has * **Note:** This transformer is not modified. * * @param table The table used to fit the transformer. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. */ @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: InvertibleTableTransformer /** @@ -37,7 +35,6 @@ A `TableTransformer` that can also undo the learned transformation after it has * **Note:** Neither this transformer nor the given table are modified. * * @param table The table used to fit the transformer. The transformer is then applied to this table. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. * @result transformedTable The transformed table. @@ -45,8 +42,7 @@ A `TableTransformer` that can also undo the learned transformation after it has @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: InvertibleTableTransformer, transformedTable: Table) /** @@ -85,7 +81,6 @@ Learn a transformation for a set of columns in a table. | Name | Type | Description | Default | |------|------|-------------|---------| | `table` | [`Table`][safeds.data.tabular.containers.Table] | The table used to fit the transformer. | - | -| `columnNames` | [`List?`][safeds.lang.List] | The list of columns from the table used to fit the transformer. If `null`, all columns are used. | - | **Results:** @@ -95,11 +90,10 @@ Learn a transformation for a set of columns in a table. ??? quote "Stub code in `InvertibleTableTransformer.sdsstub`" - ```sds linenums="20" + ```sds linenums="19" @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: InvertibleTableTransformer ``` @@ -114,7 +108,6 @@ Learn a transformation for a set of columns in a table and apply the learned tra | Name | Type | Description | Default | |------|------|-------------|---------| | `table` | [`Table`][safeds.data.tabular.containers.Table] | The table used to fit the transformer. The transformer is then applied to this table. | - | -| `columnNames` | [`List?`][safeds.lang.List] | The list of columns from the table used to fit the transformer. If `null`, all columns are used. | `#!sds null` | **Results:** @@ -125,12 +118,11 @@ Learn a transformation for a set of columns in a table and apply the learned tra ??? quote "Stub code in `InvertibleTableTransformer.sdsstub`" - ```sds linenums="37" + ```sds linenums="34" @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? 
= null + table: Table ) -> (fittedTransformer: InvertibleTableTransformer, transformedTable: Table) ``` @@ -156,7 +148,7 @@ Column order and types may differ from the original table. Likewise, some values ??? quote "Stub code in `InvertibleTableTransformer.sdsstub`" - ```sds linenums="55" + ```sds linenums="51" @Pure @PythonName("inverse_transform") fun inverseTransform( diff --git a/docs/api/safeds/data/tabular/transformation/LabelEncoder.md b/docs/api/safeds/data/tabular/transformation/LabelEncoder.md index ac75b8f18..14efb143d 100644 --- a/docs/api/safeds/data/tabular/transformation/LabelEncoder.md +++ b/docs/api/safeds/data/tabular/transformation/LabelEncoder.md @@ -8,14 +8,15 @@ The LabelEncoder encodes one or more given columns into labels. | Name | Type | Description | Default | |------|------|-------------|---------| -| `partialOrder` | [`List`][safeds.lang.List] | The partial order of the labels. The labels are encoded in the order of the given list. Additional values are encoded as the next integer after the last value in the list in the order they appear in the data. | `#!sds []` | +| `columnNames` | `#!sds union, String?>` | The list of columns used to fit the transformer. If `None`, all non-numeric columns are used. | `#!sds null` | +| `partialOrder` | [`List`][safeds.lang.List] | The partial order of the labels. The labels are encoded in the order of the given list. Additional values are assigned labels in the order they are encountered during fitting. | `#!sds []` | **Examples:** ```sds hl_lines="3" pipeline example { val table = Table({"a": ["z", "y"], "b": [3, 4]}); - val encoder = LabelEncoder().fit(table, ["a"]); + val encoder = LabelEncoder(columnNames = "a").fit(table); val transformedTable = encoder.transform(table); // Table({"a": [1, 0], "b": [3, 4]}) val originalTable = encoder.inverseTransform(transformedTable); @@ -25,24 +26,28 @@ pipeline example { ??? quote "Stub code in `LabelEncoder.sdsstub`" - ```sds linenums="22" + ```sds linenums="23" class LabelEncoder( + @PythonName("column_names") columnNames: union, String, Nothing?> = null, @PythonName("partial_order") partialOrder: List = [] ) sub InvertibleTableTransformer { + /** + * The partial order of the labels. + */ + @PythonName("partial_order") attr partialOrder: List + /** * Learn a transformation for a set of columns in a table. * * This transformer is not modified. * * @param table The table used to fit the transformer. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all non-numeric columns are used. * * @result fittedTransformer The fitted transformer. */ @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: LabelEncoder /** @@ -51,7 +56,6 @@ pipeline example { * **Note:** Neither this transformer nor the given table are modified. * * @param table The table used to fit the transformer. The transformer is then applied to this table. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. * @result transformedTable The transformed table. @@ -59,8 +63,7 @@ pipeline example { @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: LabelEncoder, transformedTable: Table) } ``` @@ -71,6 +74,12 @@ Whether the transformer is fitted. 
**Type:** [`Boolean`][safeds.lang.Boolean] +## `partialOrder` {#safeds.data.tabular.transformation.LabelEncoder.partialOrder data-toc-label='[attribute] partialOrder'} + +The partial order of the labels. + +**Type:** [`List`][safeds.lang.List] + ## `fit` {#safeds.data.tabular.transformation.LabelEncoder.fit data-toc-label='[function] fit'} Learn a transformation for a set of columns in a table. @@ -82,7 +91,6 @@ This transformer is not modified. | Name | Type | Description | Default | |------|------|-------------|---------| | `table` | [`Table`][safeds.data.tabular.containers.Table] | The table used to fit the transformer. | - | -| `columnNames` | [`List?`][safeds.lang.List] | The list of columns from the table used to fit the transformer. If `null`, all non-numeric columns are used. | - | **Results:** @@ -92,11 +100,10 @@ This transformer is not modified. ??? quote "Stub code in `LabelEncoder.sdsstub`" - ```sds linenums="35" + ```sds linenums="41" @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: LabelEncoder ``` @@ -111,7 +118,6 @@ Learn a transformation for a set of columns in a table and apply the learned tra | Name | Type | Description | Default | |------|------|-------------|---------| | `table` | [`Table`][safeds.data.tabular.containers.Table] | The table used to fit the transformer. The transformer is then applied to this table. | - | -| `columnNames` | [`List?`][safeds.lang.List] | The list of columns from the table used to fit the transformer. If `null`, all columns are used. | `#!sds null` | **Results:** @@ -122,12 +128,11 @@ Learn a transformation for a set of columns in a table and apply the learned tra ??? quote "Stub code in `LabelEncoder.sdsstub`" - ```sds linenums="52" + ```sds linenums="56" @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: LabelEncoder, transformedTable: Table) ``` @@ -153,7 +158,7 @@ Column order and types may differ from the original table. Likewise, some values ??? quote "Stub code in `InvertibleTableTransformer.sdsstub`" - ```sds linenums="55" + ```sds linenums="51" @Pure @PythonName("inverse_transform") fun inverseTransform( diff --git a/docs/api/safeds/data/tabular/transformation/OneHotEncoder.md b/docs/api/safeds/data/tabular/transformation/OneHotEncoder.md index 00024908d..fbf67387d 100644 --- a/docs/api/safeds/data/tabular/transformation/OneHotEncoder.md +++ b/docs/api/safeds/data/tabular/transformation/OneHotEncoder.md @@ -31,6 +31,7 @@ One-hot encoding is closely related to dummy variable / indicator variables, whi | Name | Type | Description | Default | |------|------|-------------|---------| +| `columnNames` | `#!sds union, String?>` | The list of columns used to fit the transformer. If `None`, all non-numeric columns are used. | `#!sds null` | | `separator` | [`String`][safeds.lang.String] | The separator used to separate the original column name from the value in the new column names. 
| `#!sds "__"` | **Examples:** @@ -38,7 +39,7 @@ One-hot encoding is closely related to dummy variable / indicator variables, whi ```sds hl_lines="3" pipeline example { val table = Table({"a": ["z", "y"], "b": [3, 4]}); - val encoder = OneHotEncoder().fit(table, ["a"]); + val encoder = OneHotEncoder(columnNames=["a"]).fit(table); val transformedTable = encoder.transform(table); // Table({"a__z": [1, 0], "a__y": [0, 1], "b": [3, 4]}) val originalTable = encoder.inverseTransform(transformedTable); @@ -48,24 +49,28 @@ pipeline example { ??? quote "Stub code in `OneHotEncoder.sdsstub`" - ```sds linenums="44" + ```sds linenums="45" class OneHotEncoder( + @PythonName("column_names") columnNames: union, String, Nothing?> = null, separator: String = "__" ) sub InvertibleTableTransformer { + /** + * The separator used to separate the original column name from the value in the new column names. + */ + attr separator: String + /** * Learn a transformation for a set of columns in a table. * * This transformer is not modified. * * @param table The table used to fit the transformer. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. */ @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: OneHotEncoder /** @@ -74,7 +79,6 @@ pipeline example { * **Note:** Neither this transformer nor the given table are modified. * * @param table The table used to fit the transformer. The transformer is then applied to this table. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. * @result transformedTable The transformed table. @@ -82,8 +86,7 @@ pipeline example { @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: OneHotEncoder, transformedTable: Table) } ``` @@ -94,6 +97,12 @@ Whether the transformer is fitted. **Type:** [`Boolean`][safeds.lang.Boolean] +## `separator` {#safeds.data.tabular.transformation.OneHotEncoder.separator data-toc-label='[attribute] separator'} + +The separator used to separate the original column name from the value in the new column names. + +**Type:** [`String`][safeds.lang.String] + ## `fit` {#safeds.data.tabular.transformation.OneHotEncoder.fit data-toc-label='[function] fit'} Learn a transformation for a set of columns in a table. @@ -105,7 +114,6 @@ This transformer is not modified. | Name | Type | Description | Default | |------|------|-------------|---------| | `table` | [`Table`][safeds.data.tabular.containers.Table] | The table used to fit the transformer. | - | -| `columnNames` | [`List?`][safeds.lang.List] | The list of columns from the table used to fit the transformer. If `null`, all columns are used. | - | **Results:** @@ -115,11 +123,10 @@ This transformer is not modified. ??? quote "Stub code in `OneHotEncoder.sdsstub`" - ```sds linenums="57" + ```sds linenums="63" @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? 
+ table: Table ) -> fittedTransformer: OneHotEncoder ``` @@ -134,7 +141,6 @@ Learn a transformation for a set of columns in a table and apply the learned tra | Name | Type | Description | Default | |------|------|-------------|---------| | `table` | [`Table`][safeds.data.tabular.containers.Table] | The table used to fit the transformer. The transformer is then applied to this table. | - | -| `columnNames` | [`List?`][safeds.lang.List] | The list of columns from the table used to fit the transformer. If `null`, all columns are used. | `#!sds null` | **Results:** @@ -145,12 +151,11 @@ Learn a transformation for a set of columns in a table and apply the learned tra ??? quote "Stub code in `OneHotEncoder.sdsstub`" - ```sds linenums="74" + ```sds linenums="78" @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: OneHotEncoder, transformedTable: Table) ``` @@ -176,7 +181,7 @@ Column order and types may differ from the original table. Likewise, some values ??? quote "Stub code in `InvertibleTableTransformer.sdsstub`" - ```sds linenums="55" + ```sds linenums="51" @Pure @PythonName("inverse_transform") fun inverseTransform( diff --git a/docs/api/safeds/data/tabular/transformation/RangeScaler.md b/docs/api/safeds/data/tabular/transformation/RangeScaler.md index 165ac67ea..273139825 100644 --- a/docs/api/safeds/data/tabular/transformation/RangeScaler.md +++ b/docs/api/safeds/data/tabular/transformation/RangeScaler.md @@ -10,13 +10,14 @@ The RangeScaler transforms column values by scaling each value to a given range. |------|------|-------------|---------| | `min` | [`Float`][safeds.lang.Float] | The minimum of the new range after the transformation | `#!sds 0.0` | | `max` | [`Float`][safeds.lang.Float] | The maximum of the new range after the transformation | `#!sds 1.0` | +| `columnNames` | `#!sds union, String?>` | The list of columns used to fit the transformer. If `None`, all numeric columns are used. | `#!sds null` | **Examples:** ```sds hl_lines="3" pipeline example { val table = Table({"a": [1, 2, 3]}); - val scaler = RangeScaler(0.0, 1.0).fit(table, ["a"]); + val scaler = RangeScaler(0.0, 1.0, columnNames = "a").fit(table); val transformedTable = scaler.transform(table); // transformedTable = Table({"a": [0.0, 0.5, 1.0]}); val originalTable = scaler.inverseTransform(transformedTable); @@ -26,10 +27,11 @@ pipeline example { ??? quote "Stub code in `RangeScaler.sdsstub`" - ```sds linenums="22" + ```sds linenums="23" class RangeScaler( - const min: Float = 0.0, - const max: Float = 1.0 + @PythonName("min_") const min: Float = 0.0, + @PythonName("max_") const max: Float = 1.0, + @PythonName("column_names") columnNames: union, String, Nothing?> = null ) sub InvertibleTableTransformer { /** * The minimum of the new range after the transformation. @@ -46,14 +48,12 @@ pipeline example { * This transformer is not modified. * * @param table The table used to fit the transformer. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all numeric columns are used. * * @result fittedTransformer The fitted transformer. */ @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: RangeScaler /** @@ -62,7 +62,6 @@ pipeline example { * **Note:** Neither this transformer nor the given table are modified. * * @param table The table used to fit the transformer. 
The transformer is then applied to this table. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. * @result transformedTable The transformed table. @@ -70,8 +69,7 @@ pipeline example { @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: RangeScaler, transformedTable: Table) } ``` @@ -105,7 +103,6 @@ This transformer is not modified. | Name | Type | Description | Default | |------|------|-------------|---------| | `table` | [`Table`][safeds.data.tabular.containers.Table] | The table used to fit the transformer. | - | -| `columnNames` | [`List?`][safeds.lang.List] | The list of columns from the table used to fit the transformer. If `null`, all numeric columns are used. | - | **Results:** @@ -115,11 +112,10 @@ This transformer is not modified. ??? quote "Stub code in `RangeScaler.sdsstub`" - ```sds linenums="45" + ```sds linenums="46" @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: RangeScaler ``` @@ -134,7 +130,6 @@ Learn a transformation for a set of columns in a table and apply the learned tra | Name | Type | Description | Default | |------|------|-------------|---------| | `table` | [`Table`][safeds.data.tabular.containers.Table] | The table used to fit the transformer. The transformer is then applied to this table. | - | -| `columnNames` | [`List?`][safeds.lang.List] | The list of columns from the table used to fit the transformer. If `null`, all columns are used. | `#!sds null` | **Results:** @@ -145,12 +140,11 @@ Learn a transformation for a set of columns in a table and apply the learned tra ??? quote "Stub code in `RangeScaler.sdsstub`" - ```sds linenums="62" + ```sds linenums="61" @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: RangeScaler, transformedTable: Table) ``` @@ -176,7 +170,7 @@ Column order and types may differ from the original table. Likewise, some values ??? quote "Stub code in `InvertibleTableTransformer.sdsstub`" - ```sds linenums="55" + ```sds linenums="51" @Pure @PythonName("inverse_transform") fun inverseTransform( diff --git a/docs/api/safeds/data/tabular/transformation/SimpleImputer.md b/docs/api/safeds/data/tabular/transformation/SimpleImputer.md index 2037f8e33..9391dd39e 100644 --- a/docs/api/safeds/data/tabular/transformation/SimpleImputer.md +++ b/docs/api/safeds/data/tabular/transformation/SimpleImputer.md @@ -9,14 +9,15 @@ Replace missing values with the given strategy. | Name | Type | Description | Default | |------|------|-------------|---------| | `strategy` | [`Strategy`][safeds.data.tabular.transformation.SimpleImputer.Strategy] | The strategy used to impute missing values. | - | -| `valueToReplace` | `#!sds union` | - | `#!sds null` | +| `columnNames` | `#!sds union, String?>` | The list of columns used to fit the transformer. If `None`, all columns are used. | `#!sds null` | +| `valueToReplace` | `#!sds union` | The value that should be replaced. 
| `#!sds null` | **Examples:** ```sds hl_lines="3" pipeline example { val table = Table({"a": [1, null], "b": [3, 4]}); - val imputer = SimpleImputer(SimpleImputer.Strategy.Mean).fit(table, ["a"]); + val imputer = SimpleImputer(SimpleImputer.Strategy.Mean, columnNames = "a").fit(table); val transformedTable = imputer.transform(table); // Table({"a": [1, 1], "b": [3, 4]}) } @@ -24,7 +25,7 @@ pipeline example { ```sds hl_lines="3" pipeline example { val table = Table({"a": [1, null], "b": [3, 4]}); - val imputer = SimpleImputer(SimpleImputer.Strategy.Constant(0)).fit(table, ["a"]); + val imputer = SimpleImputer(SimpleImputer.Strategy.Constant(0), columnNames = "a").fit(table); val transformedTable = imputer.transform(table); // Table({"a": [1, 0], "b": [3, 4]}) } @@ -32,9 +33,10 @@ pipeline example { ??? quote "Stub code in `SimpleImputer.sdsstub`" - ```sds linenums="27" + ```sds linenums="29" class SimpleImputer( strategy: SimpleImputer.Strategy, + @PythonName("column_names") columnNames: union, String, Nothing?> = null, @PythonName("value_to_replace") valueToReplace: union = null ) sub TableTransformer { /** @@ -83,14 +85,12 @@ pipeline example { * This transformer is not modified. * * @param table The table used to fit the transformer. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. */ @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: SimpleImputer /** @@ -99,7 +99,6 @@ pipeline example { * **Note:** Neither this transformer nor the given table are modified. * * @param table The table used to fit the transformer. The transformer is then applied to this table. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. * @result transformedTable The transformed table. @@ -107,8 +106,7 @@ pipeline example { @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: SimpleImputer, transformedTable: Table) } ``` @@ -142,7 +140,6 @@ This transformer is not modified. | Name | Type | Description | Default | |------|------|-------------|---------| | `table` | [`Table`][safeds.data.tabular.containers.Table] | The table used to fit the transformer. | - | -| `columnNames` | [`List?`][safeds.lang.List] | The list of columns from the table used to fit the transformer. If `null`, all columns are used. | - | **Results:** @@ -152,11 +149,10 @@ This transformer is not modified. ??? quote "Stub code in `SimpleImputer.sdsstub`" - ```sds linenums="81" + ```sds linenums="83" @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: SimpleImputer ``` @@ -171,7 +167,6 @@ Learn a transformation for a set of columns in a table and apply the learned tra | Name | Type | Description | Default | |------|------|-------------|---------| | `table` | [`Table`][safeds.data.tabular.containers.Table] | The table used to fit the transformer. The transformer is then applied to this table. | - | -| `columnNames` | [`List?`][safeds.lang.List] | The list of columns from the table used to fit the transformer. If `null`, all columns are used. 
| `#!sds null` | **Results:** @@ -186,8 +181,7 @@ Learn a transformation for a set of columns in a table and apply the learned tra @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: SimpleImputer, transformedTable: Table) ``` @@ -224,7 +218,7 @@ Various strategies to replace missing values. ??? quote "Stub code in `SimpleImputer.sdsstub`" - ```sds linenums="34" + ```sds linenums="37" enum Strategy { /** * Replace missing values with the given constant value. diff --git a/docs/api/safeds/data/tabular/transformation/StandardScaler.md b/docs/api/safeds/data/tabular/transformation/StandardScaler.md index 4ecd2e82c..d82c59102 100644 --- a/docs/api/safeds/data/tabular/transformation/StandardScaler.md +++ b/docs/api/safeds/data/tabular/transformation/StandardScaler.md @@ -4,12 +4,18 @@ The StandardScaler transforms column values to a range by removing the mean and **Parent type:** [`InvertibleTableTransformer`][safeds.data.tabular.transformation.InvertibleTableTransformer] +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `columnNames` | `#!sds union, String?>` | The list of columns used to fit the transformer. If `None`, all numeric columns are used. | `#!sds null` | + **Examples:** ```sds hl_lines="3" pipeline example { val table = Table({"a": [0, 1, 0]}); - val scaler = StandardScaler().fit(table, ["a"]); + val scaler = StandardScaler(columnNames = "a").fit(table); val transformedTable = scaler.transform(table); // transformedTable = Table({"a": [-0.707, 1.414, -0.707]}); val originalTable = scaler.inverseTransform(transformedTable); @@ -19,22 +25,22 @@ pipeline example { ??? quote "Stub code in `StandardScaler.sdsstub`" - ```sds linenums="19" - class StandardScaler() sub InvertibleTableTransformer { + ```sds linenums="21" + class StandardScaler( + @PythonName("column_names") columnNames: union, String, Nothing?> = null + ) sub InvertibleTableTransformer { /** * Learn a transformation for a set of columns in a table. * * This transformer is not modified. * * @param table The table used to fit the transformer. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. */ @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: StandardScaler /** @@ -43,7 +49,6 @@ pipeline example { * **Note:** Neither this transformer nor the given table are modified. * * @param table The table used to fit the transformer. The transformer is then applied to this table. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. * @result transformedTable The transformed table. @@ -51,8 +56,7 @@ pipeline example { @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: StandardScaler, transformedTable: Table) } ``` @@ -74,7 +78,6 @@ This transformer is not modified. | Name | Type | Description | Default | |------|------|-------------|---------| | `table` | [`Table`][safeds.data.tabular.containers.Table] | The table used to fit the transformer. 
| - | -| `columnNames` | [`List?`][safeds.lang.List] | The list of columns from the table used to fit the transformer. If `null`, all columns are used. | - | **Results:** @@ -84,11 +87,10 @@ This transformer is not modified. ??? quote "Stub code in `StandardScaler.sdsstub`" - ```sds linenums="30" + ```sds linenums="33" @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: StandardScaler ``` @@ -103,7 +105,6 @@ Learn a transformation for a set of columns in a table and apply the learned tra | Name | Type | Description | Default | |------|------|-------------|---------| | `table` | [`Table`][safeds.data.tabular.containers.Table] | The table used to fit the transformer. The transformer is then applied to this table. | - | -| `columnNames` | [`List?`][safeds.lang.List] | The list of columns from the table used to fit the transformer. If `null`, all columns are used. | `#!sds null` | **Results:** @@ -114,12 +115,11 @@ Learn a transformation for a set of columns in a table and apply the learned tra ??? quote "Stub code in `StandardScaler.sdsstub`" - ```sds linenums="47" + ```sds linenums="48" @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: StandardScaler, transformedTable: Table) ``` @@ -145,7 +145,7 @@ Column order and types may differ from the original table. Likewise, some values ??? quote "Stub code in `InvertibleTableTransformer.sdsstub`" - ```sds linenums="55" + ```sds linenums="51" @Pure @PythonName("inverse_transform") fun inverseTransform( diff --git a/docs/api/safeds/data/tabular/transformation/TableTransformer.md b/docs/api/safeds/data/tabular/transformation/TableTransformer.md index 2522e05ba..f7db38e30 100644 --- a/docs/api/safeds/data/tabular/transformation/TableTransformer.md +++ b/docs/api/safeds/data/tabular/transformation/TableTransformer.md @@ -10,12 +10,13 @@ Learn a transformation for a set of columns in a `Table` and transform another ` **Inheritors:** - [`Discretizer`][safeds.data.tabular.transformation.Discretizer] +- `#!sds Imputer` - [`InvertibleTableTransformer`][safeds.data.tabular.transformation.InvertibleTableTransformer] - [`SimpleImputer`][safeds.data.tabular.transformation.SimpleImputer] ??? quote "Stub code in `TableTransformer.sdsstub`" - ```sds linenums="8" + ```sds linenums="10" class TableTransformer { /** * Whether the transformer is fitted. @@ -28,14 +29,12 @@ Learn a transformation for a set of columns in a `Table` and transform another ` * **Note:** This transformer is not modified. * * @param table The table used to fit the transformer. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. */ @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: TableTransformer /** @@ -58,7 +57,6 @@ Learn a transformation for a set of columns in a `Table` and transform another ` * **Note:** Neither this transformer nor the given table are modified. * * @param table The table used to fit the transformer. The transformer is then applied to this table. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. * @result transformedTable The transformed table. 
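The hunks above and below drop the `columnNames` parameter from `fit` and `fitAndTransform`: columns are now selected when a transformer is constructed. A minimal sketch of the updated call pattern, using `SimpleImputer` as a concrete stand-in for the abstract `TableTransformer` (the table contents and the column name `"a"` are made up for illustration):

```sds
pipeline example {
    val table = Table({"a": [1, null], "b": [3, 4]});
    // Column selection moved to the constructor; fit only receives the table.
    val fittedTransformer = SimpleImputer(SimpleImputer.Strategy.Mean, columnNames = "a").fit(table);
    val transformedTable = fittedTransformer.transform(table);
}
```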
@@ -66,8 +64,7 @@ Learn a transformation for a set of columns in a `Table` and transform another ` @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: TableTransformer, transformedTable: Table) } ``` @@ -89,7 +86,6 @@ Learn a transformation for a set of columns in a table. | Name | Type | Description | Default | |------|------|-------------|---------| | `table` | [`Table`][safeds.data.tabular.containers.Table] | The table used to fit the transformer. | - | -| `columnNames` | [`List?`][safeds.lang.List] | The list of columns from the table used to fit the transformer. If `null`, all columns are used. | - | **Results:** @@ -99,11 +95,10 @@ Learn a transformation for a set of columns in a table. ??? quote "Stub code in `TableTransformer.sdsstub`" - ```sds linenums="24" + ```sds linenums="25" @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: TableTransformer ``` @@ -118,7 +113,6 @@ Learn a transformation for a set of columns in a table and apply the learned tra | Name | Type | Description | Default | |------|------|-------------|---------| | `table` | [`Table`][safeds.data.tabular.containers.Table] | The table used to fit the transformer. The transformer is then applied to this table. | - | -| `columnNames` | [`List?`][safeds.lang.List] | The list of columns from the table used to fit the transformer. If `null`, all columns are used. | `#!sds null` | **Results:** @@ -129,12 +123,11 @@ Learn a transformation for a set of columns in a table and apply the learned tra ??? quote "Stub code in `TableTransformer.sdsstub`" - ```sds linenums="55" + ```sds linenums="54" @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: TableTransformer, transformedTable: Table) ``` diff --git a/docs/api/safeds/ml/classical/classification/AdaBoostClassifier.md b/docs/api/safeds/ml/classical/classification/AdaBoostClassifier.md index 41bc86210..df3fed78b 100644 --- a/docs/api/safeds/ml/classical/classification/AdaBoostClassifier.md +++ b/docs/api/safeds/ml/classical/classification/AdaBoostClassifier.md @@ -28,22 +28,22 @@ pipeline example { ```sds linenums="24" class AdaBoostClassifier( learner: Classifier = DecisionTreeClassifier(), - @PythonName("maximum_number_of_learners") const maxLearnerCount: Int = 50, + @PythonName("max_learner_count") const maxLearnerCount: Int = 50, @PythonName("learning_rate") const learningRate: Float = 1.0 ) sub Classifier where { maxLearnerCount >= 1, learningRate > 0.0 } { /** - * Get the base learner used for training the ensemble. + * The base learner used for training the ensemble. */ attr learner: Classifier /** - * Get the maximum number of learners in the ensemble. + * The maximum number of learners in the ensemble. */ - @PythonName("maximum_number_of_learners") attr maxLearnerCount: Int + @PythonName("max_learner_count") attr maxLearnerCount: Int /** - * Get the learning rate. + * The learning rate. */ @PythonName("learning_rate") attr learningRate: Float @@ -71,19 +71,19 @@ Whether the model is fitted. ## `learner` {#safeds.ml.classical.classification.AdaBoostClassifier.learner data-toc-label='[attribute] learner'} -Get the base learner used for training the ensemble. +The base learner used for training the ensemble. 
**Type:** [`Classifier`][safeds.ml.classical.classification.Classifier] ## `learningRate` {#safeds.ml.classical.classification.AdaBoostClassifier.learningRate data-toc-label='[attribute] learningRate'} -Get the learning rate. +The learning rate. **Type:** [`Float`][safeds.lang.Float] ## `maxLearnerCount` {#safeds.ml.classical.classification.AdaBoostClassifier.maxLearnerCount data-toc-label='[attribute] maxLearnerCount'} -Get the maximum number of learners in the ensemble. +The maximum number of learners in the ensemble. **Type:** [`Int`][safeds.lang.Int] diff --git a/docs/api/safeds/ml/classical/classification/Classifier.md b/docs/api/safeds/ml/classical/classification/Classifier.md index fb360a995..e5c10d51a 100644 --- a/docs/api/safeds/ml/classical/classification/Classifier.md +++ b/docs/api/safeds/ml/classical/classification/Classifier.md @@ -16,8 +16,10 @@ A model for classification tasks. - [`GradientBoostingClassifier`][safeds.ml.classical.classification.GradientBoostingClassifier] - [`KNearestNeighborsClassifier`][safeds.ml.classical.classification.KNearestNeighborsClassifier] - [`LogisticClassifier`][safeds.ml.classical.classification.LogisticClassifier] +- `#!sds LogisticRegressionClassifier` - [`RandomForestClassifier`][safeds.ml.classical.classification.RandomForestClassifier] - [`SupportVectorClassifier`][safeds.ml.classical.classification.SupportVectorClassifier] +- `#!sds SupportVectorMachineClassifier` ??? quote "Stub code in `Classifier.sdsstub`" diff --git a/docs/api/safeds/ml/classical/classification/DecisionTreeClassifier.md b/docs/api/safeds/ml/classical/classification/DecisionTreeClassifier.md index f8a2c1c11..878aa86e5 100644 --- a/docs/api/safeds/ml/classical/classification/DecisionTreeClassifier.md +++ b/docs/api/safeds/ml/classical/classification/DecisionTreeClassifier.md @@ -26,19 +26,19 @@ pipeline example { ```sds linenums="20" class DecisionTreeClassifier( - @PythonName("maximum_depth") maxDepth: Int? = null, - @PythonName("minimum_number_of_samples_in_leaves") const minSampleCountInLeaves: Int = 1 + @PythonName("max_depth") maxDepth: Int? = null, + @PythonName("min_sample_count_in_leaves") const minSampleCountInLeaves: Int = 1 ) sub Classifier where { minSampleCountInLeaves > 0 } { /** * The maximum depth of the tree. */ - @PythonName("maximum_depth") attr maxDepth: Int? + @PythonName("max_depth") attr maxDepth: Int? /** * The minimum number of samples that must remain in the leaves of the tree. */ - @PythonName("minimum_number_of_samples_in_leaves") attr minSampleCountInLeaves: Int + @PythonName("min_sample_count_in_leaves") attr minSampleCountInLeaves: Int /** * Create a copy of this classifier and fit it with the given training data. diff --git a/docs/api/safeds/ml/classical/classification/GradientBoostingClassifier.md b/docs/api/safeds/ml/classical/classification/GradientBoostingClassifier.md index 0b0c0a05d..ec752d5ad 100644 --- a/docs/api/safeds/ml/classical/classification/GradientBoostingClassifier.md +++ b/docs/api/safeds/ml/classical/classification/GradientBoostingClassifier.md @@ -26,18 +26,18 @@ pipeline example { ```sds linenums="23" class GradientBoostingClassifier( - @PythonName("number_of_trees") const treeCount: Int = 100, + @PythonName("tree_count") const treeCount: Int = 100, @PythonName("learning_rate") const learningRate: Float = 0.1 ) sub Classifier where { treeCount >= 1, learningRate > 0.0 } { /** - * Get the number of trees (estimators) in the ensemble. + * The number of trees (estimators) in the ensemble. 
*/ - @PythonName("number_of_trees") attr treeCount: Int + @PythonName("tree_count") attr treeCount: Int /** - * Get the learning rate. + * The learning rate. */ @PythonName("learning_rate") attr learningRate: Float @@ -65,13 +65,13 @@ Whether the model is fitted. ## `learningRate` {#safeds.ml.classical.classification.GradientBoostingClassifier.learningRate data-toc-label='[attribute] learningRate'} -Get the learning rate. +The learning rate. **Type:** [`Float`][safeds.lang.Float] ## `treeCount` {#safeds.ml.classical.classification.GradientBoostingClassifier.treeCount data-toc-label='[attribute] treeCount'} -Get the number of trees (estimators) in the ensemble. +The number of trees (estimators) in the ensemble. **Type:** [`Int`][safeds.lang.Int] diff --git a/docs/api/safeds/ml/classical/classification/KNearestNeighborsClassifier.md b/docs/api/safeds/ml/classical/classification/KNearestNeighborsClassifier.md index 7c7c106fa..ad2b7692b 100644 --- a/docs/api/safeds/ml/classical/classification/KNearestNeighborsClassifier.md +++ b/docs/api/safeds/ml/classical/classification/KNearestNeighborsClassifier.md @@ -25,14 +25,14 @@ pipeline example { ```sds linenums="21" class KNearestNeighborsClassifier( - @PythonName("number_of_neighbors") const neighborCount: Int + @PythonName("neighbor_count") const neighborCount: Int ) sub Classifier where { neighborCount >= 1 } { /** - * Get the number of neighbors used for interpolation. + * The number of neighbors used for interpolation. */ - @PythonName("number_of_neighbors") attr neighborCount: Int + @PythonName("neighbor_count") attr neighborCount: Int /** * Create a copy of this classifier and fit it with the given training data. @@ -58,7 +58,7 @@ Whether the model is fitted. ## `neighborCount` {#safeds.ml.classical.classification.KNearestNeighborsClassifier.neighborCount data-toc-label='[attribute] neighborCount'} -Get the number of neighbors used for interpolation. +The number of neighbors used for interpolation. **Type:** [`Int`][safeds.lang.Int] diff --git a/docs/api/safeds/ml/classical/classification/RandomForestClassifier.md b/docs/api/safeds/ml/classical/classification/RandomForestClassifier.md index 09ee8c075..88acbc999 100644 --- a/docs/api/safeds/ml/classical/classification/RandomForestClassifier.md +++ b/docs/api/safeds/ml/classical/classification/RandomForestClassifier.md @@ -27,25 +27,25 @@ pipeline example { ```sds linenums="22" class RandomForestClassifier( - @PythonName("number_of_trees") const treeCount: Int = 100, - @PythonName("maximum_depth") maxDepth: Int? = null, - @PythonName("minimum_number_of_samples_in_leaves") const minSampleCountInLeaves: Int = 1, + @PythonName("tree_count") const treeCount: Int = 100, + @PythonName("max_depth") maxDepth: Int? = null, + @PythonName("min_sample_count_in_leaves") const minSampleCountInLeaves: Int = 1, ) sub Classifier where { treeCount > 0, minSampleCountInLeaves > 0, } { /** - * Get the number of trees used in the random forest. + * The number of trees used in the random forest. */ - @PythonName("number_of_trees") attr treeCount: Int + @PythonName("tree_count") attr treeCount: Int /** * The maximum depth of each tree. */ - @PythonName("maximum_depth") attr maxDepth: Int? + @PythonName("max_depth") attr maxDepth: Int? /** * The minimum number of samples that must remain in the leaves of each tree. 
*/ - @PythonName("minimum_number_of_samples_in_leaves") attr minSampleCountInLeaves: Int + @PythonName("min_sample_count_in_leaves") attr minSampleCountInLeaves: Int /** * Create a copy of this classifier and fit it with the given training data. @@ -83,7 +83,7 @@ The minimum number of samples that must remain in the leaves of each tree. ## `treeCount` {#safeds.ml.classical.classification.RandomForestClassifier.treeCount data-toc-label='[attribute] treeCount'} -Get the number of trees used in the random forest. +The number of trees used in the random forest. **Type:** [`Int`][safeds.lang.Int] diff --git a/docs/api/safeds/ml/classical/classification/SupportVectorClassifier.md b/docs/api/safeds/ml/classical/classification/SupportVectorClassifier.md index d714af697..3645cd3e6 100644 --- a/docs/api/safeds/ml/classical/classification/SupportVectorClassifier.md +++ b/docs/api/safeds/ml/classical/classification/SupportVectorClassifier.md @@ -67,11 +67,11 @@ pipeline example { } /** - * Get the regularization strength. + * The regularization strength. */ attr c: Float /** - * Get the type of kernel used. + * The type of kernel used. */ attr kernel: SupportVectorClassifier.Kernel @@ -93,7 +93,7 @@ pipeline example { ## `c` {#safeds.ml.classical.classification.SupportVectorClassifier.c data-toc-label='[attribute] c'} -Get the regularization strength. +The regularization strength. **Type:** [`Float`][safeds.lang.Float] @@ -105,7 +105,7 @@ Whether the model is fitted. ## `kernel` {#safeds.ml.classical.classification.SupportVectorClassifier.kernel data-toc-label='[attribute] kernel'} -Get the type of kernel used. +The type of kernel used. **Type:** [`Kernel`][safeds.ml.classical.classification.SupportVectorClassifier.Kernel] diff --git a/docs/api/safeds/ml/classical/regression/AdaBoostRegressor.md b/docs/api/safeds/ml/classical/regression/AdaBoostRegressor.md index 219de9860..d6fec6873 100644 --- a/docs/api/safeds/ml/classical/regression/AdaBoostRegressor.md +++ b/docs/api/safeds/ml/classical/regression/AdaBoostRegressor.md @@ -28,22 +28,22 @@ pipeline example { ```sds linenums="24" class AdaBoostRegressor( learner: Regressor = DecisionTreeRegressor(), - @PythonName("maximum_number_of_learners") const maxLearnerCount: Int = 50, + @PythonName("max_learner_count") const maxLearnerCount: Int = 50, @PythonName("learning_rate") const learningRate: Float = 1.0 ) sub Regressor where { maxLearnerCount >= 1, learningRate > 0.0 } { /** - * Get the base learner used for training the ensemble. + * The base learner used for training the ensemble. */ attr learner: Regressor /** - * Get the maximum number of learners in the ensemble. + * The maximum number of learners in the ensemble. */ - @PythonName("maximum_number_of_learners") attr maxLearnerCount: Int + @PythonName("max_learner_count") attr maxLearnerCount: Int /** - * Get the learning rate. + * The learning rate. */ @PythonName("learning_rate") attr learningRate: Float @@ -71,19 +71,19 @@ Whether the model is fitted. ## `learner` {#safeds.ml.classical.regression.AdaBoostRegressor.learner data-toc-label='[attribute] learner'} -Get the base learner used for training the ensemble. +The base learner used for training the ensemble. **Type:** [`Regressor`][safeds.ml.classical.regression.Regressor] ## `learningRate` {#safeds.ml.classical.regression.AdaBoostRegressor.learningRate data-toc-label='[attribute] learningRate'} -Get the learning rate. +The learning rate. 
**Type:** [`Float`][safeds.lang.Float] ## `maxLearnerCount` {#safeds.ml.classical.regression.AdaBoostRegressor.maxLearnerCount data-toc-label='[attribute] maxLearnerCount'} -Get the maximum number of learners in the ensemble. +The maximum number of learners in the ensemble. **Type:** [`Int`][safeds.lang.Int] @@ -104,7 +104,10 @@ to 1.0. You can interpret the coefficient of determination as follows: | 0.0 | The model is as good as predicting the mean of the target values. Try something else. | | (-∞, 0.0) | The model is worse than predicting the mean of the target values. Something is very wrong. | -**Note:** Some other libraries call this metric `r2_score`. +**Notes:** + +- The model must be fitted. +- Some other libraries call this metric `r2_score`. **Parameters:** @@ -120,7 +123,7 @@ to 1.0. You can interpret the coefficient of determination as follows: ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="60" + ```sds linenums="65" @Pure @PythonName("coefficient_of_determination") fun coefficientOfDetermination( @@ -243,6 +246,9 @@ The mean absolute error is the average of the absolute differences between the p values. The **lower** the mean absolute error, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -257,7 +263,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="77" + ```sds linenums="85" @Pure @PythonName("mean_absolute_error") fun meanAbsoluteError( @@ -277,6 +283,9 @@ directions. The **higher** the mean directional accuracy, the better the regress This metric is useful for time series data, where the order of the target values has a meaning. It is not useful for other types of data. Because of this, it is not included in the `summarize_metrics` method. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -291,7 +300,7 @@ for other types of data. Because of this, it is not included in the `summarize_m ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="98" + ```sds linenums="109" @Pure @PythonName("mean_directional_accuracy") fun meanDirectionalAccuracy( @@ -307,7 +316,10 @@ The mean squared error is the average of the squared differences between the pre values. The **lower** the mean squared error, the better the regressor. Results range from 0.0 to positive infinity. -**Note:** To get the root mean squared error (RMSE), take the square root of the result. +**Notes:** + +- The model must be fitted. +- To get the root mean squared error (RMSE), take the square root of the result. **Parameters:** @@ -323,7 +335,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="117" + ```sds linenums="131" @Pure @PythonName("mean_squared_error") fun meanSquaredError( @@ -339,6 +351,9 @@ The median absolute deviation is the median of the absolute differences between target values. The **lower** the median absolute deviation, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -353,7 +368,7 @@ positive infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="134" + ```sds linenums="151" @Pure @PythonName("median_absolute_deviation") fun medianAbsoluteDeviation( @@ -392,6 +407,8 @@ Predict the target values on the given dataset. Summarize the regressor's metrics on the given data. +**Note:** The model must be fitted. 
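The note above means `summarizeMetrics` can only be called after `fit`. A hedged sketch of that order, assuming a `toTabularDataset` call on `Table` and made-up column values (neither is taken from this page); the training set doubles as the evaluation set only to keep the example short:

```sds
pipeline example {
    val training = Table({"x": [1.0, 2.0, 3.0, 4.0], "y": [2.0, 4.0, 6.0, 8.0]}).toTabularDataset("y");
    // Fit first; calling summarizeMetrics on an unfitted model is an error.
    val fittedRegressor = AdaBoostRegressor().fit(training);
    val metrics = fittedRegressor.summarizeMetrics(training);
}
```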
+ **Parameters:** | Name | Type | Description | Default | @@ -406,7 +423,7 @@ Summarize the regressor's metrics on the given data. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="32" + ```sds linenums="34" @Pure @PythonName("summarize_metrics") fun summarizeMetrics( diff --git a/docs/api/safeds/ml/classical/regression/DecisionTreeRegressor.md b/docs/api/safeds/ml/classical/regression/DecisionTreeRegressor.md index 308dc050a..12b5fd7d1 100644 --- a/docs/api/safeds/ml/classical/regression/DecisionTreeRegressor.md +++ b/docs/api/safeds/ml/classical/regression/DecisionTreeRegressor.md @@ -26,19 +26,19 @@ pipeline example { ```sds linenums="21" class DecisionTreeRegressor( - @PythonName("maximum_depth") maxDepth: Int? = null, - @PythonName("minimum_number_of_samples_in_leaves") const minSampleCountInLeaves: Int = 1 + @PythonName("max_depth") maxDepth: Int? = null, + @PythonName("min_sample_count_in_leaves") const minSampleCountInLeaves: Int = 1 ) sub Regressor where { minSampleCountInLeaves > 0 } { /** * The maximum depth of the tree. */ - @PythonName("maximum_depth") attr maxDepth: Int? + @PythonName("max_depth") attr maxDepth: Int? /** * The minimum number of samples that must remain in the leaves of the tree. */ - @PythonName("minimum_number_of_samples_in_leaves") attr minSampleCountInLeaves: Int + @PythonName("min_sample_count_in_leaves") attr minSampleCountInLeaves: Int /** * Create a copy of this regressor and fit it with the given training data. @@ -91,7 +91,10 @@ to 1.0. You can interpret the coefficient of determination as follows: | 0.0 | The model is as good as predicting the mean of the target values. Try something else. | | (-∞, 0.0) | The model is worse than predicting the mean of the target values. Something is very wrong. | -**Note:** Some other libraries call this metric `r2_score`. +**Notes:** + +- The model must be fitted. +- Some other libraries call this metric `r2_score`. **Parameters:** @@ -107,7 +110,7 @@ to 1.0. You can interpret the coefficient of determination as follows: ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="60" + ```sds linenums="65" @Pure @PythonName("coefficient_of_determination") fun coefficientOfDetermination( @@ -230,6 +233,9 @@ The mean absolute error is the average of the absolute differences between the p values. The **lower** the mean absolute error, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -244,7 +250,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="77" + ```sds linenums="85" @Pure @PythonName("mean_absolute_error") fun meanAbsoluteError( @@ -264,6 +270,9 @@ directions. The **higher** the mean directional accuracy, the better the regress This metric is useful for time series data, where the order of the target values has a meaning. It is not useful for other types of data. Because of this, it is not included in the `summarize_metrics` method. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -278,7 +287,7 @@ for other types of data. Because of this, it is not included in the `summarize_m ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="98" + ```sds linenums="109" @Pure @PythonName("mean_directional_accuracy") fun meanDirectionalAccuracy( @@ -294,7 +303,10 @@ The mean squared error is the average of the squared differences between the pre values. 
The **lower** the mean squared error, the better the regressor. Results range from 0.0 to positive infinity. -**Note:** To get the root mean squared error (RMSE), take the square root of the result. +**Notes:** + +- The model must be fitted. +- To get the root mean squared error (RMSE), take the square root of the result. **Parameters:** @@ -310,7 +322,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="117" + ```sds linenums="131" @Pure @PythonName("mean_squared_error") fun meanSquaredError( @@ -326,6 +338,9 @@ The median absolute deviation is the median of the absolute differences between target values. The **lower** the median absolute deviation, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -340,7 +355,7 @@ positive infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="134" + ```sds linenums="151" @Pure @PythonName("median_absolute_deviation") fun medianAbsoluteDeviation( @@ -379,6 +394,8 @@ Predict the target values on the given dataset. Summarize the regressor's metrics on the given data. +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -393,7 +410,7 @@ Summarize the regressor's metrics on the given data. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="32" + ```sds linenums="34" @Pure @PythonName("summarize_metrics") fun summarizeMetrics( diff --git a/docs/api/safeds/ml/classical/regression/ElasticNetRegressor.md b/docs/api/safeds/ml/classical/regression/ElasticNetRegressor.md index 5bc715b2b..d28c49494 100644 --- a/docs/api/safeds/ml/classical/regression/ElasticNetRegressor.md +++ b/docs/api/safeds/ml/classical/regression/ElasticNetRegressor.md @@ -34,11 +34,11 @@ pipeline example { lassoRatio <= 1.0 } { /** - * Get the regularization of the model. + * The regularization of the model. */ attr alpha: Float /** - * Get the ratio between Lasso and Ridge regularization. + * The ratio between Lasso and Ridge regularization. */ @PythonName("lasso_ratio") attr lassoRatio: Float @@ -60,7 +60,7 @@ pipeline example { ## `alpha` {#safeds.ml.classical.regression.ElasticNetRegressor.alpha data-toc-label='[attribute] alpha'} -Get the regularization of the model. +The regularization of the model. **Type:** [`Float`][safeds.lang.Float] @@ -72,7 +72,7 @@ Whether the model is fitted. ## `lassoRatio` {#safeds.ml.classical.regression.ElasticNetRegressor.lassoRatio data-toc-label='[attribute] lassoRatio'} -Get the ratio between Lasso and Ridge regularization. +The ratio between Lasso and Ridge regularization. **Type:** [`Float`][safeds.lang.Float] @@ -93,7 +93,10 @@ to 1.0. You can interpret the coefficient of determination as follows: | 0.0 | The model is as good as predicting the mean of the target values. Try something else. | | (-∞, 0.0) | The model is worse than predicting the mean of the target values. Something is very wrong. | -**Note:** Some other libraries call this metric `r2_score`. +**Notes:** + +- The model must be fitted. +- Some other libraries call this metric `r2_score`. **Parameters:** @@ -109,7 +112,7 @@ to 1.0. You can interpret the coefficient of determination as follows: ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="60" + ```sds linenums="65" @Pure @PythonName("coefficient_of_determination") fun coefficientOfDetermination( @@ -232,6 +235,9 @@ The mean absolute error is the average of the absolute differences between the p values. 
The **lower** the mean absolute error, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -246,7 +252,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="77" + ```sds linenums="85" @Pure @PythonName("mean_absolute_error") fun meanAbsoluteError( @@ -266,6 +272,9 @@ directions. The **higher** the mean directional accuracy, the better the regress This metric is useful for time series data, where the order of the target values has a meaning. It is not useful for other types of data. Because of this, it is not included in the `summarize_metrics` method. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -280,7 +289,7 @@ for other types of data. Because of this, it is not included in the `summarize_m ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="98" + ```sds linenums="109" @Pure @PythonName("mean_directional_accuracy") fun meanDirectionalAccuracy( @@ -296,7 +305,10 @@ The mean squared error is the average of the squared differences between the pre values. The **lower** the mean squared error, the better the regressor. Results range from 0.0 to positive infinity. -**Note:** To get the root mean squared error (RMSE), take the square root of the result. +**Notes:** + +- The model must be fitted. +- To get the root mean squared error (RMSE), take the square root of the result. **Parameters:** @@ -312,7 +324,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="117" + ```sds linenums="131" @Pure @PythonName("mean_squared_error") fun meanSquaredError( @@ -328,6 +340,9 @@ The median absolute deviation is the median of the absolute differences between target values. The **lower** the median absolute deviation, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -342,7 +357,7 @@ positive infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="134" + ```sds linenums="151" @Pure @PythonName("median_absolute_deviation") fun medianAbsoluteDeviation( @@ -381,6 +396,8 @@ Predict the target values on the given dataset. Summarize the regressor's metrics on the given data. +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -395,7 +412,7 @@ Summarize the regressor's metrics on the given data. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="32" + ```sds linenums="34" @Pure @PythonName("summarize_metrics") fun summarizeMetrics( diff --git a/docs/api/safeds/ml/classical/regression/GradientBoostingRegressor.md b/docs/api/safeds/ml/classical/regression/GradientBoostingRegressor.md index cc69bd2f2..9e79d76ab 100644 --- a/docs/api/safeds/ml/classical/regression/GradientBoostingRegressor.md +++ b/docs/api/safeds/ml/classical/regression/GradientBoostingRegressor.md @@ -26,18 +26,18 @@ pipeline example { ```sds linenums="23" class GradientBoostingRegressor( - @PythonName("number_of_trees") const treeCount: Int = 100, + @PythonName("tree_count") const treeCount: Int = 100, @PythonName("learning_rate") const learningRate: Float = 0.1 ) sub Regressor where { treeCount >= 1, learningRate > 0.0 } { /** - * Get the number of trees (estimators) in the ensemble. + * The number of trees (estimators) in the ensemble. 
*/ - @PythonName("number_of_trees") attr treeCount: Int + @PythonName("tree_count") attr treeCount: Int /** - * Get the learning rate. + * The learning rate. */ @PythonName("learning_rate") attr learningRate: Float @@ -65,13 +65,13 @@ Whether the model is fitted. ## `learningRate` {#safeds.ml.classical.regression.GradientBoostingRegressor.learningRate data-toc-label='[attribute] learningRate'} -Get the learning rate. +The learning rate. **Type:** [`Float`][safeds.lang.Float] ## `treeCount` {#safeds.ml.classical.regression.GradientBoostingRegressor.treeCount data-toc-label='[attribute] treeCount'} -Get the number of trees (estimators) in the ensemble. +The number of trees (estimators) in the ensemble. **Type:** [`Int`][safeds.lang.Int] @@ -92,7 +92,10 @@ to 1.0. You can interpret the coefficient of determination as follows: | 0.0 | The model is as good as predicting the mean of the target values. Try something else. | | (-∞, 0.0) | The model is worse than predicting the mean of the target values. Something is very wrong. | -**Note:** Some other libraries call this metric `r2_score`. +**Notes:** + +- The model must be fitted. +- Some other libraries call this metric `r2_score`. **Parameters:** @@ -108,7 +111,7 @@ to 1.0. You can interpret the coefficient of determination as follows: ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="60" + ```sds linenums="65" @Pure @PythonName("coefficient_of_determination") fun coefficientOfDetermination( @@ -231,6 +234,9 @@ The mean absolute error is the average of the absolute differences between the p values. The **lower** the mean absolute error, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -245,7 +251,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="77" + ```sds linenums="85" @Pure @PythonName("mean_absolute_error") fun meanAbsoluteError( @@ -265,6 +271,9 @@ directions. The **higher** the mean directional accuracy, the better the regress This metric is useful for time series data, where the order of the target values has a meaning. It is not useful for other types of data. Because of this, it is not included in the `summarize_metrics` method. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -279,7 +288,7 @@ for other types of data. Because of this, it is not included in the `summarize_m ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="98" + ```sds linenums="109" @Pure @PythonName("mean_directional_accuracy") fun meanDirectionalAccuracy( @@ -295,7 +304,10 @@ The mean squared error is the average of the squared differences between the pre values. The **lower** the mean squared error, the better the regressor. Results range from 0.0 to positive infinity. -**Note:** To get the root mean squared error (RMSE), take the square root of the result. +**Notes:** + +- The model must be fitted. +- To get the root mean squared error (RMSE), take the square root of the result. **Parameters:** @@ -311,7 +323,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="117" + ```sds linenums="131" @Pure @PythonName("mean_squared_error") fun meanSquaredError( @@ -327,6 +339,9 @@ The median absolute deviation is the median of the absolute differences between target values. The **lower** the median absolute deviation, the better the regressor. Results range from 0.0 to positive infinity. 
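Restating the sentence above in symbols (the notation is mine, not from the stubs): for predicted values $\hat{y}_i$ and expected target values $y_i$,

$$
\text{medianAbsoluteDeviation} = \operatorname{median}_i \, \lvert \hat{y}_i - y_i \rvert
$$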
+ +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -341,7 +356,7 @@ positive infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="134" + ```sds linenums="151" @Pure @PythonName("median_absolute_deviation") fun medianAbsoluteDeviation( @@ -380,6 +395,8 @@ Predict the target values on the given dataset. Summarize the regressor's metrics on the given data. +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -394,7 +411,7 @@ Summarize the regressor's metrics on the given data. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="32" + ```sds linenums="34" @Pure @PythonName("summarize_metrics") fun summarizeMetrics( diff --git a/docs/api/safeds/ml/classical/regression/KNearestNeighborsRegressor.md b/docs/api/safeds/ml/classical/regression/KNearestNeighborsRegressor.md index 01fe4c71f..286c80ab7 100644 --- a/docs/api/safeds/ml/classical/regression/KNearestNeighborsRegressor.md +++ b/docs/api/safeds/ml/classical/regression/KNearestNeighborsRegressor.md @@ -25,14 +25,14 @@ pipeline example { ```sds linenums="21" class KNearestNeighborsRegressor( - @PythonName("number_of_neighbors") const neighborCount: Int + @PythonName("neighbor_count") const neighborCount: Int ) sub Regressor where { neighborCount >= 1 } { /** - * Get the number of neighbors used for interpolation. + * The number of neighbors used for interpolation. */ - @PythonName("number_of_neighbors") attr neighborCount: Int + @PythonName("neighbor_count") attr neighborCount: Int /** * Create a copy of this regressor and fit it with the given training data. @@ -58,7 +58,7 @@ Whether the model is fitted. ## `neighborCount` {#safeds.ml.classical.regression.KNearestNeighborsRegressor.neighborCount data-toc-label='[attribute] neighborCount'} -Get the number of neighbors used for interpolation. +The number of neighbors used for interpolation. **Type:** [`Int`][safeds.lang.Int] @@ -79,7 +79,10 @@ to 1.0. You can interpret the coefficient of determination as follows: | 0.0 | The model is as good as predicting the mean of the target values. Try something else. | | (-∞, 0.0) | The model is worse than predicting the mean of the target values. Something is very wrong. | -**Note:** Some other libraries call this metric `r2_score`. +**Notes:** + +- The model must be fitted. +- Some other libraries call this metric `r2_score`. **Parameters:** @@ -95,7 +98,7 @@ to 1.0. You can interpret the coefficient of determination as follows: ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="60" + ```sds linenums="65" @Pure @PythonName("coefficient_of_determination") fun coefficientOfDetermination( @@ -218,6 +221,9 @@ The mean absolute error is the average of the absolute differences between the p values. The **lower** the mean absolute error, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -232,7 +238,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="77" + ```sds linenums="85" @Pure @PythonName("mean_absolute_error") fun meanAbsoluteError( @@ -252,6 +258,9 @@ directions. The **higher** the mean directional accuracy, the better the regress This metric is useful for time series data, where the order of the target values has a meaning. It is not useful for other types of data. Because of this, it is not included in the `summarize_metrics` method. 
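Because the metric compares consecutive values, it is easiest to see on a small, time-ordered dataset. A hypothetical sketch (the table contents, the `toTabularDataset` call, and `neighborCount = 2` are illustrative assumptions, not taken from this page):

```sds
pipeline example {
    // Rows are assumed to be in temporal order, which is what gives the directional changes meaning.
    val data = Table({"t": [1.0, 2.0, 3.0, 4.0], "value": [1.0, 2.0, 2.0, 3.0]}).toTabularDataset("value");
    val fittedRegressor = KNearestNeighborsRegressor(neighborCount = 2).fit(data);
    val mda = fittedRegressor.meanDirectionalAccuracy(data);
}
```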
+ +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -266,7 +275,7 @@ for other types of data. Because of this, it is not included in the `summarize_m ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="98" + ```sds linenums="109" @Pure @PythonName("mean_directional_accuracy") fun meanDirectionalAccuracy( @@ -282,7 +291,10 @@ The mean squared error is the average of the squared differences between the pre values. The **lower** the mean squared error, the better the regressor. Results range from 0.0 to positive infinity. -**Note:** To get the root mean squared error (RMSE), take the square root of the result. +**Notes:** + +- The model must be fitted. +- To get the root mean squared error (RMSE), take the square root of the result. **Parameters:** @@ -298,7 +310,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="117" + ```sds linenums="131" @Pure @PythonName("mean_squared_error") fun meanSquaredError( @@ -314,6 +326,9 @@ The median absolute deviation is the median of the absolute differences between target values. The **lower** the median absolute deviation, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -328,7 +343,7 @@ positive infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="134" + ```sds linenums="151" @Pure @PythonName("median_absolute_deviation") fun medianAbsoluteDeviation( @@ -367,6 +382,8 @@ Predict the target values on the given dataset. Summarize the regressor's metrics on the given data. +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -381,7 +398,7 @@ Summarize the regressor's metrics on the given data. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="32" + ```sds linenums="34" @Pure @PythonName("summarize_metrics") fun summarizeMetrics( diff --git a/docs/api/safeds/ml/classical/regression/LassoRegressor.md b/docs/api/safeds/ml/classical/regression/LassoRegressor.md index 02a7a863d..7a6d6e6c0 100644 --- a/docs/api/safeds/ml/classical/regression/LassoRegressor.md +++ b/docs/api/safeds/ml/classical/regression/LassoRegressor.md @@ -30,7 +30,7 @@ pipeline example { alpha >= 0.0 } { /** - * Get the regularization of the model. + * The regularization of the model. */ attr alpha: Float @@ -52,7 +52,7 @@ pipeline example { ## `alpha` {#safeds.ml.classical.regression.LassoRegressor.alpha data-toc-label='[attribute] alpha'} -Get the regularization of the model. +The regularization of the model. **Type:** [`Float`][safeds.lang.Float] @@ -79,7 +79,10 @@ to 1.0. You can interpret the coefficient of determination as follows: | 0.0 | The model is as good as predicting the mean of the target values. Try something else. | | (-∞, 0.0) | The model is worse than predicting the mean of the target values. Something is very wrong. | -**Note:** Some other libraries call this metric `r2_score`. +**Notes:** + +- The model must be fitted. +- Some other libraries call this metric `r2_score`. **Parameters:** @@ -95,7 +98,7 @@ to 1.0. You can interpret the coefficient of determination as follows: ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="60" + ```sds linenums="65" @Pure @PythonName("coefficient_of_determination") fun coefficientOfDetermination( @@ -218,6 +221,9 @@ The mean absolute error is the average of the absolute differences between the p values. 
The **lower** the mean absolute error, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -232,7 +238,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="77" + ```sds linenums="85" @Pure @PythonName("mean_absolute_error") fun meanAbsoluteError( @@ -252,6 +258,9 @@ directions. The **higher** the mean directional accuracy, the better the regress This metric is useful for time series data, where the order of the target values has a meaning. It is not useful for other types of data. Because of this, it is not included in the `summarize_metrics` method. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -266,7 +275,7 @@ for other types of data. Because of this, it is not included in the `summarize_m ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="98" + ```sds linenums="109" @Pure @PythonName("mean_directional_accuracy") fun meanDirectionalAccuracy( @@ -282,7 +291,10 @@ The mean squared error is the average of the squared differences between the pre values. The **lower** the mean squared error, the better the regressor. Results range from 0.0 to positive infinity. -**Note:** To get the root mean squared error (RMSE), take the square root of the result. +**Notes:** + +- The model must be fitted. +- To get the root mean squared error (RMSE), take the square root of the result. **Parameters:** @@ -298,7 +310,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="117" + ```sds linenums="131" @Pure @PythonName("mean_squared_error") fun meanSquaredError( @@ -314,6 +326,9 @@ The median absolute deviation is the median of the absolute differences between target values. The **lower** the median absolute deviation, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -328,7 +343,7 @@ positive infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="134" + ```sds linenums="151" @Pure @PythonName("median_absolute_deviation") fun medianAbsoluteDeviation( @@ -367,6 +382,8 @@ Predict the target values on the given dataset. Summarize the regressor's metrics on the given data. +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -381,7 +398,7 @@ Summarize the regressor's metrics on the given data. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="32" + ```sds linenums="34" @Pure @PythonName("summarize_metrics") fun summarizeMetrics( diff --git a/docs/api/safeds/ml/classical/regression/LinearRegressor.md b/docs/api/safeds/ml/classical/regression/LinearRegressor.md index 9207ac4fd..36f1528a4 100644 --- a/docs/api/safeds/ml/classical/regression/LinearRegressor.md +++ b/docs/api/safeds/ml/classical/regression/LinearRegressor.md @@ -58,7 +58,10 @@ to 1.0. You can interpret the coefficient of determination as follows: | 0.0 | The model is as good as predicting the mean of the target values. Try something else. | | (-∞, 0.0) | The model is worse than predicting the mean of the target values. Something is very wrong. | -**Note:** Some other libraries call this metric `r2_score`. +**Notes:** + +- The model must be fitted. +- Some other libraries call this metric `r2_score`. **Parameters:** @@ -74,7 +77,7 @@ to 1.0. You can interpret the coefficient of determination as follows: ??? 
quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="60" + ```sds linenums="65" @Pure @PythonName("coefficient_of_determination") fun coefficientOfDetermination( @@ -197,6 +200,9 @@ The mean absolute error is the average of the absolute differences between the p values. The **lower** the mean absolute error, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -211,7 +217,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="77" + ```sds linenums="85" @Pure @PythonName("mean_absolute_error") fun meanAbsoluteError( @@ -231,6 +237,9 @@ directions. The **higher** the mean directional accuracy, the better the regress This metric is useful for time series data, where the order of the target values has a meaning. It is not useful for other types of data. Because of this, it is not included in the `summarize_metrics` method. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -245,7 +254,7 @@ for other types of data. Because of this, it is not included in the `summarize_m ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="98" + ```sds linenums="109" @Pure @PythonName("mean_directional_accuracy") fun meanDirectionalAccuracy( @@ -261,7 +270,10 @@ The mean squared error is the average of the squared differences between the pre values. The **lower** the mean squared error, the better the regressor. Results range from 0.0 to positive infinity. -**Note:** To get the root mean squared error (RMSE), take the square root of the result. +**Notes:** + +- The model must be fitted. +- To get the root mean squared error (RMSE), take the square root of the result. **Parameters:** @@ -277,7 +289,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="117" + ```sds linenums="131" @Pure @PythonName("mean_squared_error") fun meanSquaredError( @@ -293,6 +305,9 @@ The median absolute deviation is the median of the absolute differences between target values. The **lower** the median absolute deviation, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -307,7 +322,7 @@ positive infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="134" + ```sds linenums="151" @Pure @PythonName("median_absolute_deviation") fun medianAbsoluteDeviation( @@ -346,6 +361,8 @@ Predict the target values on the given dataset. Summarize the regressor's metrics on the given data. +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -360,7 +377,7 @@ Summarize the regressor's metrics on the given data. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="32" + ```sds linenums="34" @Pure @PythonName("summarize_metrics") fun summarizeMetrics( diff --git a/docs/api/safeds/ml/classical/regression/RandomForestRegressor.md b/docs/api/safeds/ml/classical/regression/RandomForestRegressor.md index 104fb5180..3c20acf34 100644 --- a/docs/api/safeds/ml/classical/regression/RandomForestRegressor.md +++ b/docs/api/safeds/ml/classical/regression/RandomForestRegressor.md @@ -27,25 +27,25 @@ pipeline example { ```sds linenums="22" class RandomForestRegressor( - @PythonName("number_of_trees") const treeCount: Int = 100, - @PythonName("maximum_depth") maxDepth: Int? 
= null, - @PythonName("minimum_number_of_samples_in_leaves") const minSampleCountInLeaves: Int = 1, + @PythonName("tree_count") const treeCount: Int = 100, + @PythonName("max_depth") maxDepth: Int? = null, + @PythonName("min_sample_count_in_leaves") const minSampleCountInLeaves: Int = 1, ) sub Regressor where { treeCount > 0, minSampleCountInLeaves > 0, } { /** - * Get the number of trees used in the random forest. + * The number of trees used in the random forest. */ - @PythonName("number_of_trees") attr treeCount: Int + @PythonName("tree_count") attr treeCount: Int /** * The maximum depth of each tree. */ - @PythonName("maximum_depth") attr maxDepth: Int? + @PythonName("max_depth") attr maxDepth: Int? /** * The minimum number of samples that must remain in the leaves of each tree. */ - @PythonName("minimum_number_of_samples_in_leaves") attr minSampleCountInLeaves: Int + @PythonName("min_sample_count_in_leaves") attr minSampleCountInLeaves: Int /** * Create a copy of this regressor and fit it with the given training data. @@ -83,7 +83,7 @@ The minimum number of samples that must remain in the leaves of each tree. ## `treeCount` {#safeds.ml.classical.regression.RandomForestRegressor.treeCount data-toc-label='[attribute] treeCount'} -Get the number of trees used in the random forest. +The number of trees used in the random forest. **Type:** [`Int`][safeds.lang.Int] @@ -104,7 +104,10 @@ to 1.0. You can interpret the coefficient of determination as follows: | 0.0 | The model is as good as predicting the mean of the target values. Try something else. | | (-∞, 0.0) | The model is worse than predicting the mean of the target values. Something is very wrong. | -**Note:** Some other libraries call this metric `r2_score`. +**Notes:** + +- The model must be fitted. +- Some other libraries call this metric `r2_score`. **Parameters:** @@ -120,7 +123,7 @@ to 1.0. You can interpret the coefficient of determination as follows: ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="60" + ```sds linenums="65" @Pure @PythonName("coefficient_of_determination") fun coefficientOfDetermination( @@ -243,6 +246,9 @@ The mean absolute error is the average of the absolute differences between the p values. The **lower** the mean absolute error, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -257,7 +263,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="77" + ```sds linenums="85" @Pure @PythonName("mean_absolute_error") fun meanAbsoluteError( @@ -277,6 +283,9 @@ directions. The **higher** the mean directional accuracy, the better the regress This metric is useful for time series data, where the order of the target values has a meaning. It is not useful for other types of data. Because of this, it is not included in the `summarize_metrics` method. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -291,7 +300,7 @@ for other types of data. Because of this, it is not included in the `summarize_m ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="98" + ```sds linenums="109" @Pure @PythonName("mean_directional_accuracy") fun meanDirectionalAccuracy( @@ -307,7 +316,10 @@ The mean squared error is the average of the squared differences between the pre values. The **lower** the mean squared error, the better the regressor. Results range from 0.0 to positive infinity. 
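A small, hypothetical sketch tying the renamed constructor parameters above (`treeCount`, `maxDepth`, `minSampleCountInLeaves`) to an evaluation with `meanSquaredError`; the data values and the `toTabularDataset` call are assumptions for illustration:

```sds
pipeline example {
    val training = Table({"x": [1.0, 2.0, 3.0, 4.0], "y": [1.0, 2.0, 3.0, 4.0]}).toTabularDataset("y");
    val fittedRegressor = RandomForestRegressor(
        treeCount = 50,
        maxDepth = 5,
        minSampleCountInLeaves = 1
    ).fit(training);
    // Take the square root of this value to obtain the RMSE, as the note below explains.
    val mse = fittedRegressor.meanSquaredError(training);
}
```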
-**Note:** To get the root mean squared error (RMSE), take the square root of the result. +**Notes:** + +- The model must be fitted. +- To get the root mean squared error (RMSE), take the square root of the result. **Parameters:** @@ -323,7 +335,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="117" + ```sds linenums="131" @Pure @PythonName("mean_squared_error") fun meanSquaredError( @@ -339,6 +351,9 @@ The median absolute deviation is the median of the absolute differences between target values. The **lower** the median absolute deviation, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -353,7 +368,7 @@ positive infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="134" + ```sds linenums="151" @Pure @PythonName("median_absolute_deviation") fun medianAbsoluteDeviation( @@ -392,6 +407,8 @@ Predict the target values on the given dataset. Summarize the regressor's metrics on the given data. +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -406,7 +423,7 @@ Summarize the regressor's metrics on the given data. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="32" + ```sds linenums="34" @Pure @PythonName("summarize_metrics") fun summarizeMetrics( diff --git a/docs/api/safeds/ml/classical/regression/Regressor.md b/docs/api/safeds/ml/classical/regression/Regressor.md index 98e44172f..4548b0f9e 100644 --- a/docs/api/safeds/ml/classical/regression/Regressor.md +++ b/docs/api/safeds/ml/classical/regression/Regressor.md @@ -17,9 +17,11 @@ A model for regression tasks. - [`GradientBoostingRegressor`][safeds.ml.classical.regression.GradientBoostingRegressor] - [`KNearestNeighborsRegressor`][safeds.ml.classical.regression.KNearestNeighborsRegressor] - [`LassoRegressor`][safeds.ml.classical.regression.LassoRegressor] +- `#!sds LinearRegressionRegressor` - [`LinearRegressor`][safeds.ml.classical.regression.LinearRegressor] - [`RandomForestRegressor`][safeds.ml.classical.regression.RandomForestRegressor] - [`RidgeRegressor`][safeds.ml.classical.regression.RidgeRegressor] +- `#!sds SupportVectorMachineRegressor` - [`SupportVectorRegressor`][safeds.ml.classical.regression.SupportVectorRegressor] ??? quote "Stub code in `Regressor.sdsstub`" @@ -43,6 +45,8 @@ A model for regression tasks. /** * Summarize the regressor's metrics on the given data. * + * **Note:** The model must be fitted. + * * @param validationOrTestSet The validation or test set. * * @result metrics A table containing the regressor's metrics. @@ -69,7 +73,10 @@ A model for regression tasks. * | 0.0 | The model is as good as predicting the mean of the target values. Try something else. | * | (-∞, 0.0) | The model is worse than predicting the mean of the target values. Something is very wrong. | * - * **Note:** Some other libraries call this metric `r2_score`. + * **Notes:** + * + * - The model must be fitted. + * - Some other libraries call this metric `r2_score`. * * @param validationOrTestSet The validation or test set. * @@ -88,6 +95,9 @@ A model for regression tasks. * values. The **lower** the mean absolute error, the better the regressor. Results range from 0.0 to positive * infinity. * + * + * **Note:** The model must be fitted. + * * @param validationOrTestSet The validation or test set. * * @result meanAbsoluteError The mean absolute error of the regressor. @@ -109,6 +119,9 @@ A model for regression tasks. 
* This metric is useful for time series data, where the order of the target values has a meaning. It is not useful * for other types of data. Because of this, it is not included in the `summarize_metrics` method. * + * + * **Note:** The model must be fitted. + * * @param validationOrTestSet The validation or test set. * * @result meanDirectionalAccuracy The mean directional accuracy of the regressor. @@ -126,7 +139,10 @@ A model for regression tasks. * values. The **lower** the mean squared error, the better the regressor. Results range from 0.0 to positive * infinity. * - * **Note:** To get the root mean squared error (RMSE), take the square root of the result. + * **Notes:** + * + * - The model must be fitted. + * - To get the root mean squared error (RMSE), take the square root of the result. * * @param validationOrTestSet The validation or test set. * @@ -145,6 +161,9 @@ A model for regression tasks. * target values. The **lower** the median absolute deviation, the better the regressor. Results range from 0.0 to * positive infinity. * + * + * **Note:** The model must be fitted. + * * @param validationOrTestSet The validation or test set. * * @result medianAbsoluteDeviation The median absolute deviation of the regressor. @@ -180,7 +199,10 @@ to 1.0. You can interpret the coefficient of determination as follows: | 0.0 | The model is as good as predicting the mean of the target values. Try something else. | | (-∞, 0.0) | The model is worse than predicting the mean of the target values. Something is very wrong. | -**Note:** Some other libraries call this metric `r2_score`. +**Notes:** + +- The model must be fitted. +- Some other libraries call this metric `r2_score`. **Parameters:** @@ -196,7 +218,7 @@ to 1.0. You can interpret the coefficient of determination as follows: ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="60" + ```sds linenums="65" @Pure @PythonName("coefficient_of_determination") fun coefficientOfDetermination( @@ -319,6 +341,9 @@ The mean absolute error is the average of the absolute differences between the p values. The **lower** the mean absolute error, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -333,7 +358,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="77" + ```sds linenums="85" @Pure @PythonName("mean_absolute_error") fun meanAbsoluteError( @@ -353,6 +378,9 @@ directions. The **higher** the mean directional accuracy, the better the regress This metric is useful for time series data, where the order of the target values has a meaning. It is not useful for other types of data. Because of this, it is not included in the `summarize_metrics` method. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -367,7 +395,7 @@ for other types of data. Because of this, it is not included in the `summarize_m ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="98" + ```sds linenums="109" @Pure @PythonName("mean_directional_accuracy") fun meanDirectionalAccuracy( @@ -383,7 +411,10 @@ The mean squared error is the average of the squared differences between the pre values. The **lower** the mean squared error, the better the regressor. Results range from 0.0 to positive infinity. -**Note:** To get the root mean squared error (RMSE), take the square root of the result. +**Notes:** + +- The model must be fitted. 
+- To get the root mean squared error (RMSE), take the square root of the result. **Parameters:** @@ -399,7 +430,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="117" + ```sds linenums="131" @Pure @PythonName("mean_squared_error") fun meanSquaredError( @@ -415,6 +446,9 @@ The median absolute deviation is the median of the absolute differences between target values. The **lower** the median absolute deviation, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -429,7 +463,7 @@ positive infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="134" + ```sds linenums="151" @Pure @PythonName("median_absolute_deviation") fun medianAbsoluteDeviation( @@ -468,6 +502,8 @@ Predict the target values on the given dataset. Summarize the regressor's metrics on the given data. +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -482,7 +518,7 @@ Summarize the regressor's metrics on the given data. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="32" + ```sds linenums="34" @Pure @PythonName("summarize_metrics") fun summarizeMetrics( diff --git a/docs/api/safeds/ml/classical/regression/RidgeRegressor.md b/docs/api/safeds/ml/classical/regression/RidgeRegressor.md index f901f6e33..197b9b87d 100644 --- a/docs/api/safeds/ml/classical/regression/RidgeRegressor.md +++ b/docs/api/safeds/ml/classical/regression/RidgeRegressor.md @@ -30,7 +30,7 @@ pipeline example { alpha >= 0.0 } { /** - * Get the regularization of the model. + * The regularization of the model. */ attr alpha: Float @@ -52,7 +52,7 @@ pipeline example { ## `alpha` {#safeds.ml.classical.regression.RidgeRegressor.alpha data-toc-label='[attribute] alpha'} -Get the regularization of the model. +The regularization of the model. **Type:** [`Float`][safeds.lang.Float] @@ -79,7 +79,10 @@ to 1.0. You can interpret the coefficient of determination as follows: | 0.0 | The model is as good as predicting the mean of the target values. Try something else. | | (-∞, 0.0) | The model is worse than predicting the mean of the target values. Something is very wrong. | -**Note:** Some other libraries call this metric `r2_score`. +**Notes:** + +- The model must be fitted. +- Some other libraries call this metric `r2_score`. **Parameters:** @@ -95,7 +98,7 @@ to 1.0. You can interpret the coefficient of determination as follows: ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="60" + ```sds linenums="65" @Pure @PythonName("coefficient_of_determination") fun coefficientOfDetermination( @@ -218,6 +221,9 @@ The mean absolute error is the average of the absolute differences between the p values. The **lower** the mean absolute error, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -232,7 +238,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="77" + ```sds linenums="85" @Pure @PythonName("mean_absolute_error") fun meanAbsoluteError( @@ -252,6 +258,9 @@ directions. The **higher** the mean directional accuracy, the better the regress This metric is useful for time series data, where the order of the target values has a meaning. It is not useful for other types of data. Because of this, it is not included in the `summarize_metrics` method. + +**Note:** The model must be fitted. 
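A sketch of using this metric, assuming a toy `Table` whose row order stands in for the time order and the `toTabularDataset` helper for data preparation; the regressor and metric calls follow the signatures documented on this page:

```sds
pipeline example {
    // The metric only compares the direction (up or down) of consecutive values,
    // so the rows are assumed to be sorted by time.
    val series = Table({
        "lag":    [1.0, 2.0, 3.0, 4.0, 5.0],
        "target": [1.1, 1.9, 3.2, 3.8, 5.1]
    }).toTabularDataset("target");

    // Fit first; calling a metric on an unfitted model is an error.
    val fitted = RidgeRegressor(alpha = 1.0).fit(series);
    val mda = fitted.meanDirectionalAccuracy(series);
}
```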
+ **Parameters:** | Name | Type | Description | Default | @@ -266,7 +275,7 @@ for other types of data. Because of this, it is not included in the `summarize_m ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="98" + ```sds linenums="109" @Pure @PythonName("mean_directional_accuracy") fun meanDirectionalAccuracy( @@ -282,7 +291,10 @@ The mean squared error is the average of the squared differences between the pre values. The **lower** the mean squared error, the better the regressor. Results range from 0.0 to positive infinity. -**Note:** To get the root mean squared error (RMSE), take the square root of the result. +**Notes:** + +- The model must be fitted. +- To get the root mean squared error (RMSE), take the square root of the result. **Parameters:** @@ -298,7 +310,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="117" + ```sds linenums="131" @Pure @PythonName("mean_squared_error") fun meanSquaredError( @@ -314,6 +326,9 @@ The median absolute deviation is the median of the absolute differences between target values. The **lower** the median absolute deviation, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -328,7 +343,7 @@ positive infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="134" + ```sds linenums="151" @Pure @PythonName("median_absolute_deviation") fun medianAbsoluteDeviation( @@ -367,6 +382,8 @@ Predict the target values on the given dataset. Summarize the regressor's metrics on the given data. +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -381,7 +398,7 @@ Summarize the regressor's metrics on the given data. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="32" + ```sds linenums="34" @Pure @PythonName("summarize_metrics") fun summarizeMetrics( diff --git a/docs/api/safeds/ml/classical/regression/SupportVectorRegressor.md b/docs/api/safeds/ml/classical/regression/SupportVectorRegressor.md index 0971060ab..05dfb1b00 100644 --- a/docs/api/safeds/ml/classical/regression/SupportVectorRegressor.md +++ b/docs/api/safeds/ml/classical/regression/SupportVectorRegressor.md @@ -67,11 +67,11 @@ pipeline example { } /** - * Get the regularization strength. + * The regularization strength. */ attr c: Float /** - * Get the type of kernel used. + * The type of kernel used. */ attr kernel: SupportVectorRegressor.Kernel @@ -93,7 +93,7 @@ pipeline example { ## `c` {#safeds.ml.classical.regression.SupportVectorRegressor.c data-toc-label='[attribute] c'} -Get the regularization strength. +The regularization strength. **Type:** [`Float`][safeds.lang.Float] @@ -105,7 +105,7 @@ Whether the model is fitted. ## `kernel` {#safeds.ml.classical.regression.SupportVectorRegressor.kernel data-toc-label='[attribute] kernel'} -Get the type of kernel used. +The type of kernel used. **Type:** [`Kernel`][safeds.ml.classical.regression.SupportVectorRegressor.Kernel] @@ -126,7 +126,10 @@ to 1.0. You can interpret the coefficient of determination as follows: | 0.0 | The model is as good as predicting the mean of the target values. Try something else. | | (-∞, 0.0) | The model is worse than predicting the mean of the target values. Something is very wrong. | -**Note:** Some other libraries call this metric `r2_score`. +**Notes:** + +- The model must be fitted. +- Some other libraries call this metric `r2_score`. **Parameters:** @@ -142,7 +145,7 @@ to 1.0. 
You can interpret the coefficient of determination as follows: ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="60" + ```sds linenums="65" @Pure @PythonName("coefficient_of_determination") fun coefficientOfDetermination( @@ -265,6 +268,9 @@ The mean absolute error is the average of the absolute differences between the p values. The **lower** the mean absolute error, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -279,7 +285,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="77" + ```sds linenums="85" @Pure @PythonName("mean_absolute_error") fun meanAbsoluteError( @@ -299,6 +305,9 @@ directions. The **higher** the mean directional accuracy, the better the regress This metric is useful for time series data, where the order of the target values has a meaning. It is not useful for other types of data. Because of this, it is not included in the `summarize_metrics` method. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -313,7 +322,7 @@ for other types of data. Because of this, it is not included in the `summarize_m ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="98" + ```sds linenums="109" @Pure @PythonName("mean_directional_accuracy") fun meanDirectionalAccuracy( @@ -329,7 +338,10 @@ The mean squared error is the average of the squared differences between the pre values. The **lower** the mean squared error, the better the regressor. Results range from 0.0 to positive infinity. -**Note:** To get the root mean squared error (RMSE), take the square root of the result. +**Notes:** + +- The model must be fitted. +- To get the root mean squared error (RMSE), take the square root of the result. **Parameters:** @@ -345,7 +357,7 @@ infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="117" + ```sds linenums="131" @Pure @PythonName("mean_squared_error") fun meanSquaredError( @@ -361,6 +373,9 @@ The median absolute deviation is the median of the absolute differences between target values. The **lower** the median absolute deviation, the better the regressor. Results range from 0.0 to positive infinity. + +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -375,7 +390,7 @@ positive infinity. ??? quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="134" + ```sds linenums="151" @Pure @PythonName("median_absolute_deviation") fun medianAbsoluteDeviation( @@ -414,6 +429,8 @@ Predict the target values on the given dataset. Summarize the regressor's metrics on the given data. +**Note:** The model must be fitted. + **Parameters:** | Name | Type | Description | Default | @@ -428,7 +445,7 @@ Summarize the regressor's metrics on the given data. ??? 
quote "Stub code in `Regressor.sdsstub`" - ```sds linenums="32" + ```sds linenums="34" @Pure @PythonName("summarize_metrics") fun summarizeMetrics( diff --git a/docs/api/safeds/ml/nn/NeuralNetworkClassifier.md b/docs/api/safeds/ml/nn/NeuralNetworkClassifier.md index d6f8d3b4b..114b92b69 100644 --- a/docs/api/safeds/ml/nn/NeuralNetworkClassifier.md +++ b/docs/api/safeds/ml/nn/NeuralNetworkClassifier.md @@ -6,31 +6,41 @@ A NeuralNetworkClassifier is a neural network that is used for classification ta | Name | Type | Description | Default | |------|------|-------------|---------| -| `inputConversion` | [`InputConversion`][safeds.ml.nn.converters.InputConversion] | to convert the input data for the neural network | - | +| `inputConversion` | `#!sds InputConversion` | to convert the input data for the neural network | - | | `layers` | [`List`][safeds.lang.List] | a list of layers for the neural network to learn | - | -| `outputConversion` | [`OutputConversion`][safeds.ml.nn.converters.OutputConversion] | to convert the output data of the neural network back | - | **Type parameters:** | Name | Upper Bound | Description | Default | |------|-------------|-------------|---------| -| `FitIn` | [`Any?`][safeds.lang.Any] | - | - | -| `PredictIn` | [`Any?`][safeds.lang.Any] | - | - | -| `PredictOut` | [`Any?`][safeds.lang.Any] | - | - | +| `D` | [`Any?`][safeds.lang.Any] | The type of the full dataset. It's the input to `fit` and the output of `predict`. | - | +| `F` | [`Any?`][safeds.lang.Any] | The type of the features. It's the input to `predict`. | - | ??? quote "Stub code in `NeuralNetworkClassifier.sdsstub`" - ```sds linenums="15" - class NeuralNetworkClassifier( - @PythonName("input_conversion") inputConversion: InputConversion, + ```sds linenums="17" + class NeuralNetworkClassifier( + @PythonName("input_conversion") inputConversion: InputConversion, layers: List, - @PythonName("output_conversion") outputConversion: OutputConversion ) { /** * Whether the classifier is fitted. */ @PythonName("is_fitted") attr isFitted: Boolean + /** + * Load a pretrained model from a [Huggingface repository](https://huggingface.co/models/). + * + * @param huggingfaceRepo the name of the huggingface repository + * + * @result pretrainedModel the pretrained model as a NeuralNetworkClassifier + */ + @Pure + @PythonName("load_pretrained_model") + static fun loadPretrainedModel( + @PythonName("huggingface_repo") huggingfaceRepo: String + ) -> pretrainedModel: NeuralNetworkClassifier + /** * Train the neural network with given training data. * @@ -40,8 +50,10 @@ A NeuralNetworkClassifier is a neural network that is used for classification ta * @param epochSize The number of times the training cycle should be done. * @param batchSize The size of data batches that should be loaded at one time. * @param learningRate The learning rate of the neural network. - * @param callbackOnBatchCompletion Function used to view metrics while training. Gets called after a batch is completed with the index of the last batch and the overall loss average. - * @param callbackOnEpochCompletion Function used to view metrics while training. Gets called after an epoch is completed with the index of the last epoch and the overall loss average. + * @param callbackOnBatchCompletion Function used to view metrics while training. Gets called after a batch is completed with the index of the + * last batch and the overall loss average. + * @param callbackOnEpochCompletion Function used to view metrics while training. 
Gets called after an epoch is completed with the index of the + * last epoch and the overall loss average. * * @result fittedClassifier The trained Model * @@ -52,13 +64,13 @@ A NeuralNetworkClassifier is a neural network that is used for classification ta */ @Pure fun fit( - @PythonName("train_data") trainData: FitIn, + @PythonName("train_data") trainData: D, @PythonName("epoch_size") const epochSize: Int = 25, @PythonName("batch_size") const batchSize: Int = 1, @PythonName("learning_rate") learningRate: Float = 0.001, @PythonName("callback_on_batch_completion") callbackOnBatchCompletion: (param1: Int, param2: Float) -> () = (param1, param2) {}, @PythonName("callback_on_epoch_completion") callbackOnEpochCompletion: (param1: Int, param2: Float) -> () = (param1, param2) {} - ) -> fittedClassifier: NeuralNetworkClassifier where { + ) -> fittedClassifier: NeuralNetworkClassifier where { epochSize >= 1, batchSize >= 1 } @@ -79,8 +91,8 @@ A NeuralNetworkClassifier is a neural network that is used for classification ta */ @Pure fun predict( - @PythonName("test_data") testData: PredictIn - ) -> prediction: PredictOut + @PythonName("test_data") testData: F + ) -> prediction: D } ``` @@ -100,7 +112,7 @@ The original model is not modified. | Name | Type | Description | Default | |------|------|-------------|---------| -| `trainData` | `#!sds FitIn` | The data the network should be trained on. | - | +| `trainData` | `#!sds D` | The data the network should be trained on. | - | | `epochSize` | [`Int`][safeds.lang.Int] | The number of times the training cycle should be done. | `#!sds 25` | | `batchSize` | [`Int`][safeds.lang.Int] | The size of data batches that should be loaded at one time. | `#!sds 1` | | `learningRate` | [`Float`][safeds.lang.Float] | The learning rate of the neural network. | `#!sds 0.001` | @@ -111,7 +123,7 @@ The original model is not modified. | Name | Type | Description | |------|------|-------------| -| `fittedClassifier` | [`NeuralNetworkClassifier`][safeds.ml.nn.NeuralNetworkClassifier] | The trained Model | +| `fittedClassifier` | [`NeuralNetworkClassifier`][safeds.ml.nn.NeuralNetworkClassifier] | The trained Model | **Examples:** @@ -123,16 +135,16 @@ pipeline example { ??? quote "Stub code in `NeuralNetworkClassifier.sdsstub`" - ```sds linenums="44" + ```sds linenums="60" @Pure fun fit( - @PythonName("train_data") trainData: FitIn, + @PythonName("train_data") trainData: D, @PythonName("epoch_size") const epochSize: Int = 25, @PythonName("batch_size") const batchSize: Int = 1, @PythonName("learning_rate") learningRate: Float = 0.001, @PythonName("callback_on_batch_completion") callbackOnBatchCompletion: (param1: Int, param2: Float) -> () = (param1, param2) {}, @PythonName("callback_on_epoch_completion") callbackOnEpochCompletion: (param1: Int, param2: Float) -> () = (param1, param2) {} - ) -> fittedClassifier: NeuralNetworkClassifier where { + ) -> fittedClassifier: NeuralNetworkClassifier where { epochSize >= 1, batchSize >= 1 } @@ -148,13 +160,13 @@ The original Model is not modified. | Name | Type | Description | Default | |------|------|-------------|---------| -| `testData` | `#!sds PredictIn` | The data the network should predict. | - | +| `testData` | `#!sds F` | The data the network should predict. 
| - | **Results:** | Name | Type | Description | |------|------|-------------| -| `prediction` | `#!sds PredictOut` | The given test_data with an added "prediction" column at the end | +| `prediction` | `#!sds D` | The given test_data with an added "prediction" column at the end | **Examples:** @@ -166,9 +178,35 @@ pipeline example { ??? quote "Stub code in `NeuralNetworkClassifier.sdsstub`" - ```sds linenums="71" + ```sds linenums="87" @Pure fun predict( - @PythonName("test_data") testData: PredictIn - ) -> prediction: PredictOut + @PythonName("test_data") testData: F + ) -> prediction: D + ``` + +## `loadPretrainedModel` {#safeds.ml.nn.NeuralNetworkClassifier.loadPretrainedModel data-toc-label='[static-function] loadPretrainedModel'} + +Load a pretrained model from a [Huggingface repository](https://huggingface.co/models/). + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `huggingfaceRepo` | [`String`][safeds.lang.String] | the name of the huggingface repository | - | + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `pretrainedModel` | [`NeuralNetworkClassifier`][safeds.ml.nn.NeuralNetworkClassifier] | the pretrained model as a NeuralNetworkClassifier | + +??? quote "Stub code in `NeuralNetworkClassifier.sdsstub`" + + ```sds linenums="33" + @Pure + @PythonName("load_pretrained_model") + static fun loadPretrainedModel( + @PythonName("huggingface_repo") huggingfaceRepo: String + ) -> pretrainedModel: NeuralNetworkClassifier ``` diff --git a/docs/api/safeds/ml/nn/NeuralNetworkRegressor.md b/docs/api/safeds/ml/nn/NeuralNetworkRegressor.md index 48d2da94a..87bc58a23 100644 --- a/docs/api/safeds/ml/nn/NeuralNetworkRegressor.md +++ b/docs/api/safeds/ml/nn/NeuralNetworkRegressor.md @@ -6,31 +6,41 @@ A NeuralNetworkRegressor is a neural network that is used for regression tasks. | Name | Type | Description | Default | |------|------|-------------|---------| -| `inputConversion` | [`InputConversion`][safeds.ml.nn.converters.InputConversion] | to convert the input data for the neural network | - | +| `inputConversion` | `#!sds InputConversion` | to convert the input data for the neural network | - | | `layers` | [`List`][safeds.lang.List] | a list of layers for the neural network to learn | - | -| `outputConversion` | [`OutputConversion`][safeds.ml.nn.converters.OutputConversion] | to convert the output data of the neural network back | - | **Type parameters:** | Name | Upper Bound | Description | Default | |------|-------------|-------------|---------| -| `FitIn` | [`Any?`][safeds.lang.Any] | - | - | -| `PredictIn` | [`Any?`][safeds.lang.Any] | - | - | -| `PredictOut` | [`Any?`][safeds.lang.Any] | - | - | +| `D` | [`Any?`][safeds.lang.Any] | The type of the full dataset. It's the input to `fit` and the output of `predict`. | - | +| `F` | [`Any?`][safeds.lang.Any] | The type of the features. It's the input to `predict`. | - | ??? quote "Stub code in `NeuralNetworkRegressor.sdsstub`" - ```sds linenums="15" - class NeuralNetworkRegressor( - @PythonName("input_conversion") inputConversion: InputConversion, - layers: List, - @PythonName("output_conversion") outputConversion: OutputConversion + ```sds linenums="17" + class NeuralNetworkRegressor( + @PythonName("input_conversion") inputConversion: InputConversion, + layers: List ) { /** * Whether the regressor is fitted. 
*/ @PythonName("is_fitted") attr isFitted: Boolean + /** + * Load a pretrained model from a [Huggingface repository](https://huggingface.co/models/). + * + * @param huggingfaceRepo the name of the huggingface repository + * + * @result pretrainedModel the pretrained model as a NeuralNetworkRegressor + */ + @Pure + @PythonName("load_pretrained_model") + static fun loadPretrainedModel( + @PythonName("huggingface_repo") huggingfaceRepo: String + ) -> pretrainedModel: NeuralNetworkRegressor + /** * Train the neural network with given training data. * @@ -40,8 +50,10 @@ A NeuralNetworkRegressor is a neural network that is used for regression tasks. * @param epochSize The number of times the training cycle should be done. * @param batchSize The size of data batches that should be loaded at one time. * @param learningRate The learning rate of the neural network. - * @param callbackOnBatchCompletion Function used to view metrics while training. Gets called after a batch is completed with the index of the last batch and the overall loss average. - * @param callbackOnEpochCompletion Function used to view metrics while training. Gets called after an epoch is completed with the index of the last epoch and the overall loss average. + * @param callbackOnBatchCompletion Function used to view metrics while training. Gets called after a batch is completed with the index of the + * last batch and the overall loss average. + * @param callbackOnEpochCompletion Function used to view metrics while training. Gets called after an epoch is completed with the index of the + * last epoch and the overall loss average. * * @result fittedRegressor The trained Model * @@ -52,13 +64,13 @@ A NeuralNetworkRegressor is a neural network that is used for regression tasks. */ @Pure fun fit( - @PythonName("train_data") trainData: FitIn, + @PythonName("train_data") trainData: D, @PythonName("epoch_size") const epochSize: Int = 25, @PythonName("batch_size") const batchSize: Int = 1, @PythonName("learning_rate") learningRate: Float = 0.001, @PythonName("callback_on_batch_completion") callbackOnBatchCompletion: (param1: Int, param2: Float) -> () = (param1, param2) {}, @PythonName("callback_on_epoch_completion") callbackOnEpochCompletion: (param1: Int, param2: Float) -> () = (param1, param2) {} - ) -> fittedRegressor: NeuralNetworkRegressor where { + ) -> fittedRegressor: NeuralNetworkRegressor where { epochSize >= 1, batchSize >= 1 } @@ -79,8 +91,8 @@ A NeuralNetworkRegressor is a neural network that is used for regression tasks. */ @Pure fun predict( - @PythonName("test_data") testData: PredictIn - ) -> prediction: PredictOut + @PythonName("test_data") testData: F + ) -> prediction: D } ``` @@ -100,7 +112,7 @@ The original model is not modified. | Name | Type | Description | Default | |------|------|-------------|---------| -| `trainData` | `#!sds FitIn` | The data the network should be trained on. | - | +| `trainData` | `#!sds D` | The data the network should be trained on. | - | | `epochSize` | [`Int`][safeds.lang.Int] | The number of times the training cycle should be done. | `#!sds 25` | | `batchSize` | [`Int`][safeds.lang.Int] | The size of data batches that should be loaded at one time. | `#!sds 1` | | `learningRate` | [`Float`][safeds.lang.Float] | The learning rate of the neural network. | `#!sds 0.001` | @@ -111,7 +123,7 @@ The original model is not modified. 
| Name | Type | Description | |------|------|-------------| -| `fittedRegressor` | [`NeuralNetworkRegressor`][safeds.ml.nn.NeuralNetworkRegressor] | The trained Model | +| `fittedRegressor` | [`NeuralNetworkRegressor`][safeds.ml.nn.NeuralNetworkRegressor] | The trained Model | **Examples:** @@ -123,16 +135,16 @@ pipeline example { ??? quote "Stub code in `NeuralNetworkRegressor.sdsstub`" - ```sds linenums="44" + ```sds linenums="60" @Pure fun fit( - @PythonName("train_data") trainData: FitIn, + @PythonName("train_data") trainData: D, @PythonName("epoch_size") const epochSize: Int = 25, @PythonName("batch_size") const batchSize: Int = 1, @PythonName("learning_rate") learningRate: Float = 0.001, @PythonName("callback_on_batch_completion") callbackOnBatchCompletion: (param1: Int, param2: Float) -> () = (param1, param2) {}, @PythonName("callback_on_epoch_completion") callbackOnEpochCompletion: (param1: Int, param2: Float) -> () = (param1, param2) {} - ) -> fittedRegressor: NeuralNetworkRegressor where { + ) -> fittedRegressor: NeuralNetworkRegressor where { epochSize >= 1, batchSize >= 1 } @@ -148,13 +160,13 @@ The original Model is not modified. | Name | Type | Description | Default | |------|------|-------------|---------| -| `testData` | `#!sds PredictIn` | The data the network should predict. | - | +| `testData` | `#!sds F` | The data the network should predict. | - | **Results:** | Name | Type | Description | |------|------|-------------| -| `prediction` | `#!sds PredictOut` | The given test_data with an added "prediction" column at the end | +| `prediction` | `#!sds D` | The given test_data with an added "prediction" column at the end | **Examples:** @@ -166,9 +178,35 @@ pipeline example { ??? quote "Stub code in `NeuralNetworkRegressor.sdsstub`" - ```sds linenums="71" + ```sds linenums="87" @Pure fun predict( - @PythonName("test_data") testData: PredictIn - ) -> prediction: PredictOut + @PythonName("test_data") testData: F + ) -> prediction: D + ``` + +## `loadPretrainedModel` {#safeds.ml.nn.NeuralNetworkRegressor.loadPretrainedModel data-toc-label='[static-function] loadPretrainedModel'} + +Load a pretrained model from a [Huggingface repository](https://huggingface.co/models/). + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `huggingfaceRepo` | [`String`][safeds.lang.String] | the name of the huggingface repository | - | + +**Results:** + +| Name | Type | Description | +|------|------|-------------| +| `pretrainedModel` | [`NeuralNetworkRegressor`][safeds.ml.nn.NeuralNetworkRegressor] | the pretrained model as a NeuralNetworkRegressor | + +??? quote "Stub code in `NeuralNetworkRegressor.sdsstub`" + + ```sds linenums="33" + @Pure + @PythonName("load_pretrained_model") + static fun loadPretrainedModel( + @PythonName("huggingface_repo") huggingfaceRepo: String + ) -> pretrainedModel: NeuralNetworkRegressor ``` diff --git a/docs/api/safeds/ml/nn/converters/InputConversion.md b/docs/api/safeds/ml/nn/converters/InputConversion.md index b52dc929b..da8007243 100644 --- a/docs/api/safeds/ml/nn/converters/InputConversion.md +++ b/docs/api/safeds/ml/nn/converters/InputConversion.md @@ -11,15 +11,19 @@ The input conversion for a neural network, defines the input parameters for the | Name | Upper Bound | Description | Default | |------|-------------|-------------|---------| -| `FitIn` | [`Any?`][safeds.lang.Any] | - | - | -| `PredictIn` | [`Any?`][safeds.lang.Any] | - | - | +| `D` | [`Any?`][safeds.lang.Any] | The type of the full dataset. 
It's the input to `fit` and the output of `predict`. | - | +| `F` | [`Any?`][safeds.lang.Any] | The type of the features. It's the input to `predict`. | - | **Inheritors:** +- [`InputConversionImageToColumn`][safeds.ml.nn.converters.InputConversionImageToColumn] +- [`InputConversionImageToImage`][safeds.ml.nn.converters.InputConversionImageToImage] +- [`InputConversionImageToTable`][safeds.ml.nn.converters.InputConversionImageToTable] - [`InputConversionTable`][safeds.ml.nn.converters.InputConversionTable] +- [`InputConversionTimeSeries`][safeds.ml.nn.converters.InputConversionTimeSeries] ??? quote "Stub code in `InputConversion.sdsstub`" - ```sds linenums="7" - class InputConversion + ```sds linenums="10" + class InputConversion ``` diff --git a/docs/api/safeds/ml/nn/converters/InputConversionImage.md b/docs/api/safeds/ml/nn/converters/InputConversionImage.md deleted file mode 100644 index 004a7f207..000000000 --- a/docs/api/safeds/ml/nn/converters/InputConversionImage.md +++ /dev/null @@ -1,17 +0,0 @@ -# :test_tube:{ title="Experimental" } `InputConversionImage` {#safeds.ml.nn.converters.InputConversionImage data-toc-label='[class] InputConversionImage'} - -The input conversion for a neural network, defines the input parameters for the neural network. - -**Parameters:** - -| Name | Type | Description | Default | -|------|------|-------------|---------| -| `imageSize` | [`ImageSize`][safeds.data.image.typing.ImageSize] | the size of the input images | - | - -??? quote "Stub code in `InputConversionImage.sdsstub`" - - ```sds linenums="11" - class InputConversionImage( - @PythonName("image_size") imageSize: ImageSize - ) - ``` diff --git a/docs/api/safeds/ml/nn/converters/InputConversionImageToColumn.md b/docs/api/safeds/ml/nn/converters/InputConversionImageToColumn.md new file mode 100644 index 000000000..3a8f28778 --- /dev/null +++ b/docs/api/safeds/ml/nn/converters/InputConversionImageToColumn.md @@ -0,0 +1,19 @@ +# `InputConversionImageToColumn` {#safeds.ml.nn.converters.InputConversionImageToColumn data-toc-label='[class] InputConversionImageToColumn'} + +The input conversion for a neural network, defines the input parameters for the neural network. + +**Parent type:** [`InputConversion>, ImageList>`][safeds.ml.nn.converters.InputConversion] + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `imageSize` | [`ModelImageSize`][safeds.ml.nn.typing.ModelImageSize] | the size of the input images | - | + +??? quote "Stub code in `InputConversionImageToColumn.sdsstub`" + + ```sds linenums="10" + class InputConversionImageToColumn( + @PythonName("image_size") imageSize: ModelImageSize + ) sub InputConversion, ImageList> + ``` diff --git a/docs/api/safeds/ml/nn/converters/InputConversionImageToImage.md b/docs/api/safeds/ml/nn/converters/InputConversionImageToImage.md new file mode 100644 index 000000000..de138d26f --- /dev/null +++ b/docs/api/safeds/ml/nn/converters/InputConversionImageToImage.md @@ -0,0 +1,19 @@ +# `InputConversionImageToImage` {#safeds.ml.nn.converters.InputConversionImageToImage data-toc-label='[class] InputConversionImageToImage'} + +The input conversion for a neural network, defines the input parameters for the neural network. 
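For example, a construction sketch wiring this converter into the two-parameter `NeuralNetworkRegressor`; the `ImageSize(width, height, channel)` call is an assumption (treated here as a `ConstantImageSize` and therefore a valid `ModelImageSize`), while the converter and layer signatures are taken from these pages:

```sds
pipeline example {
    // Assumed: ImageSize keeps a (width, height, channel) constructor and can be
    // passed wherever a ModelImageSize is expected.
    val inputSize = ImageSize(64, 64, 3);

    val regressor = NeuralNetworkRegressor(
        InputConversionImageToImage(inputSize),
        [
            // A 3x3 convolution shrinks the spatial size slightly ...
            Convolutional2DLayer(outputChannel = 16, kernelSize = 3),
            // ... and the transposed convolution grows it back to a 3-channel image.
            ConvolutionalTranspose2DLayer(outputChannel = 3, kernelSize = 3)
        ]
    );
}
```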
+ +**Parent type:** [`InputConversion, ImageList>`][safeds.ml.nn.converters.InputConversion] + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `imageSize` | [`ModelImageSize`][safeds.ml.nn.typing.ModelImageSize] | the size of the input images | - | + +??? quote "Stub code in `InputConversionImageToImage.sdsstub`" + + ```sds linenums="10" + class InputConversionImageToImage( + @PythonName("image_size") imageSize: ModelImageSize + ) sub InputConversion, ImageList> + ``` diff --git a/docs/api/safeds/ml/nn/converters/InputConversionImageToTable.md b/docs/api/safeds/ml/nn/converters/InputConversionImageToTable.md new file mode 100644 index 000000000..4a406dd8b --- /dev/null +++ b/docs/api/safeds/ml/nn/converters/InputConversionImageToTable.md @@ -0,0 +1,19 @@ +# `InputConversionImageToTable` {#safeds.ml.nn.converters.InputConversionImageToTable data-toc-label='[class] InputConversionImageToTable'} + +The input conversion for a neural network, defines the input parameters for the neural network. + +**Parent type:** [`InputConversion, ImageList>`][safeds.ml.nn.converters.InputConversion] + +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `imageSize` | [`ModelImageSize`][safeds.ml.nn.typing.ModelImageSize] | the size of the input images | - | + +??? quote "Stub code in `InputConversionImageToTable.sdsstub`" + + ```sds linenums="10" + class InputConversionImageToTable( + @PythonName("image_size") imageSize: ModelImageSize + ) sub InputConversion, ImageList> + ``` diff --git a/docs/api/safeds/ml/nn/converters/InputConversionTable.md b/docs/api/safeds/ml/nn/converters/InputConversionTable.md index 24668f830..2942dd2d2 100644 --- a/docs/api/safeds/ml/nn/converters/InputConversionTable.md +++ b/docs/api/safeds/ml/nn/converters/InputConversionTable.md @@ -4,8 +4,16 @@ The input conversion for a neural network defines the input parameters for the n **Parent type:** [`InputConversion`][safeds.ml.nn.converters.InputConversion] +**Parameters:** + +| Name | Type | Description | Default | +|------|------|-------------|---------| +| `predictionName` | [`String`][safeds.lang.String] | The name of the new column where the prediction will be stored. | `#!sds "prediction"` | + ??? quote "Stub code in `InputConversionTable.sdsstub`" - ```sds linenums="10" - class InputConversionTable() sub InputConversion + ```sds linenums="9" + class InputConversionTable( + @PythonName("prediction_name") predictionName: String = "prediction" + ) sub InputConversion ``` diff --git a/docs/api/safeds/ml/nn/converters/InputConversionTimeSeries.md b/docs/api/safeds/ml/nn/converters/InputConversionTimeSeries.md index f24d61980..90c3b3f48 100644 --- a/docs/api/safeds/ml/nn/converters/InputConversionTimeSeries.md +++ b/docs/api/safeds/ml/nn/converters/InputConversionTimeSeries.md @@ -2,18 +2,18 @@ The input conversion for a neural network, defines the input parameters for the neural network. +**Parent type:** [`InputConversion`][safeds.ml.nn.converters.InputConversion] + **Parameters:** | Name | Type | Description | Default | |------|------|-------------|---------| -| `windowSize` | [`Int`][safeds.lang.Int] | The size of the created windows | - | -| `forecastHorizon` | [`Int`][safeds.lang.Int] | The forecast horizon defines the future lag of the predicted values | - | +| `predictionName` | [`String`][safeds.lang.String] | The name of the new column where the prediction will be stored. | `#!sds "prediction_nn"` | ??? 
quote "Stub code in `InputConversionTimeSeries.sdsstub`" - ```sds linenums="10" + ```sds linenums="9" class InputConversionTimeSeries( - @PythonName("window_size") windowSize: Int, - @PythonName("forecast_horizon") forecastHorizon: Int - ) + @PythonName("prediction_name") predictionName: String = "prediction_nn" + ) sub InputConversion ``` diff --git a/docs/api/safeds/ml/nn/converters/OutputConversion.md b/docs/api/safeds/ml/nn/converters/OutputConversion.md deleted file mode 100644 index 9227433d6..000000000 --- a/docs/api/safeds/ml/nn/converters/OutputConversion.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -search: - boost: 0.5 ---- - -# :test_tube:{ title="Experimental" } `OutputConversion` {#safeds.ml.nn.converters.OutputConversion data-toc-label='[class] OutputConversion'} - -The output conversion for a neural network, defines the output parameters for the neural network. - -**Type parameters:** - -| Name | Upper Bound | Description | Default | -|------|-------------|-------------|---------| -| `PredictIn` | [`Any?`][safeds.lang.Any] | - | - | -| `PredictOut` | [`Any?`][safeds.lang.Any] | - | - | - -**Inheritors:** - -- [`OutputConversionImageToColumn`][safeds.ml.nn.converters.OutputConversionImageToColumn] -- [`OutputConversionImageToImage`][safeds.ml.nn.converters.OutputConversionImageToImage] -- [`OutputConversionImageToTable`][safeds.ml.nn.converters.OutputConversionImageToTable] -- [`OutputConversionTable`][safeds.ml.nn.converters.OutputConversionTable] - -??? quote "Stub code in `OutputConversion.sdsstub`" - - ```sds linenums="7" - class OutputConversion - ``` diff --git a/docs/api/safeds/ml/nn/converters/OutputConversionImageToColumn.md b/docs/api/safeds/ml/nn/converters/OutputConversionImageToColumn.md deleted file mode 100644 index 591fc8eb4..000000000 --- a/docs/api/safeds/ml/nn/converters/OutputConversionImageToColumn.md +++ /dev/null @@ -1,9 +0,0 @@ -# :test_tube:{ title="Experimental" } `OutputConversionImageToColumn` {#safeds.ml.nn.converters.OutputConversionImageToColumn data-toc-label='[class] OutputConversionImageToColumn'} - -**Parent type:** [`OutputConversion>`][safeds.ml.nn.converters.OutputConversion] - -??? quote "Stub code in `OutputConversionImageToColumn.sdsstub`" - - ```sds linenums="4" - class OutputConversionImageToColumn() sub OutputConversion - ``` diff --git a/docs/api/safeds/ml/nn/converters/OutputConversionImageToImage.md b/docs/api/safeds/ml/nn/converters/OutputConversionImageToImage.md deleted file mode 100644 index 283b0d74e..000000000 --- a/docs/api/safeds/ml/nn/converters/OutputConversionImageToImage.md +++ /dev/null @@ -1,9 +0,0 @@ -# :test_tube:{ title="Experimental" } `OutputConversionImageToImage` {#safeds.ml.nn.converters.OutputConversionImageToImage data-toc-label='[class] OutputConversionImageToImage'} - -**Parent type:** [`OutputConversion`][safeds.ml.nn.converters.OutputConversion] - -??? 
quote "Stub code in `OutputConversionImageToImage.sdsstub`" - - ```sds linenums="4" - class OutputConversionImageToImage() sub OutputConversion - ``` diff --git a/docs/api/safeds/ml/nn/converters/OutputConversionImageToTable.md b/docs/api/safeds/ml/nn/converters/OutputConversionImageToTable.md deleted file mode 100644 index 38b85f0f8..000000000 --- a/docs/api/safeds/ml/nn/converters/OutputConversionImageToTable.md +++ /dev/null @@ -1,9 +0,0 @@ -# :test_tube:{ title="Experimental" } `OutputConversionImageToTable` {#safeds.ml.nn.converters.OutputConversionImageToTable data-toc-label='[class] OutputConversionImageToTable'} - -**Parent type:** [`OutputConversion`][safeds.ml.nn.converters.OutputConversion] - -??? quote "Stub code in `OutputConversionImageToTable.sdsstub`" - - ```sds linenums="4" - class OutputConversionImageToTable() sub OutputConversion - ``` diff --git a/docs/api/safeds/ml/nn/converters/OutputConversionTable.md b/docs/api/safeds/ml/nn/converters/OutputConversionTable.md deleted file mode 100644 index 66b277fa6..000000000 --- a/docs/api/safeds/ml/nn/converters/OutputConversionTable.md +++ /dev/null @@ -1,19 +0,0 @@ -# :test_tube:{ title="Experimental" } `OutputConversionTable` {#safeds.ml.nn.converters.OutputConversionTable data-toc-label='[class] OutputConversionTable'} - -The output conversion for a neural network defines the output parameters for the neural network. - -**Parent type:** [`OutputConversion`][safeds.ml.nn.converters.OutputConversion] - -**Parameters:** - -| Name | Type | Description | Default | -|------|------|-------------|---------| -| `predictionName` | [`String`][safeds.lang.String] | The name of the new column where the prediction will be stored. | `#!sds "prediction"` | - -??? quote "Stub code in `OutputConversionTable.sdsstub`" - - ```sds linenums="9" - class OutputConversionTable( - @PythonName("prediction_name") predictionName: String = "prediction" - ) sub OutputConversion - ``` diff --git a/docs/api/safeds/ml/nn/converters/OutputConversionTimeSeries.md b/docs/api/safeds/ml/nn/converters/OutputConversionTimeSeries.md deleted file mode 100644 index 4a9374058..000000000 --- a/docs/api/safeds/ml/nn/converters/OutputConversionTimeSeries.md +++ /dev/null @@ -1,17 +0,0 @@ -# :test_tube:{ title="Experimental" } `OutputConversionTimeSeries` {#safeds.ml.nn.converters.OutputConversionTimeSeries data-toc-label='[class] OutputConversionTimeSeries'} - -The output conversion for a neural network, defines the output parameters for the neural network. - -**Parameters:** - -| Name | Type | Description | Default | -|------|------|-------------|---------| -| `predictionName` | [`String`][safeds.lang.String] | The name of the new column where the prediction will be stored. | `#!sds "prediction_nn"` | - -??? quote "Stub code in `OutputConversionTimeSeries.sdsstub`" - - ```sds linenums="9" - class OutputConversionTimeSeries( - @PythonName("prediction_name") predictionName: String = "prediction_nn" - ) - ``` diff --git a/docs/api/safeds/ml/nn/layers/AveragePooling2DLayer.md b/docs/api/safeds/ml/nn/layers/AveragePooling2DLayer.md index 1a60cb1d4..3b17f1da6 100644 --- a/docs/api/safeds/ml/nn/layers/AveragePooling2DLayer.md +++ b/docs/api/safeds/ml/nn/layers/AveragePooling2DLayer.md @@ -1,5 +1,7 @@ # :test_tube:{ title="Experimental" } `AveragePooling2DLayer` {#safeds.ml.nn.layers.AveragePooling2DLayer data-toc-label='[class] AveragePooling2DLayer'} +An average pooling 2D Layer. + **Parent type:** [`Layer`][safeds.ml.nn.layers.Layer] **Parameters:** @@ -12,7 +14,7 @@ ??? 
quote "Stub code in `AveragePooling2DLayer.sdsstub`" - ```sds linenums="11" + ```sds linenums="14" class AveragePooling2DLayer( @PythonName("kernel_size") kernelSize: Int, stride: Int = -1, @@ -21,11 +23,11 @@ /** * Get the input_size of this layer. */ - @PythonName("input_size") attr inputSize: ImageSize + @PythonName("input_size") attr inputSize: ModelImageSize /** * Get the output_size of this layer. */ - @PythonName("output_size") attr outputSize: ImageSize + @PythonName("output_size") attr outputSize: ModelImageSize } ``` @@ -33,10 +35,10 @@ Get the input_size of this layer. -**Type:** [`ImageSize`][safeds.data.image.typing.ImageSize] +**Type:** [`ModelImageSize`][safeds.ml.nn.typing.ModelImageSize] ## `outputSize` {#safeds.ml.nn.layers.AveragePooling2DLayer.outputSize data-toc-label='[attribute] outputSize'} Get the output_size of this layer. -**Type:** [`ImageSize`][safeds.data.image.typing.ImageSize] +**Type:** [`ModelImageSize`][safeds.ml.nn.typing.ModelImageSize] diff --git a/docs/api/safeds/ml/nn/layers/Convolutional2DLayer.md b/docs/api/safeds/ml/nn/layers/Convolutional2DLayer.md index 376391e2b..6c6d08bc4 100644 --- a/docs/api/safeds/ml/nn/layers/Convolutional2DLayer.md +++ b/docs/api/safeds/ml/nn/layers/Convolutional2DLayer.md @@ -1,5 +1,7 @@ # :test_tube:{ title="Experimental" } `Convolutional2DLayer` {#safeds.ml.nn.layers.Convolutional2DLayer data-toc-label='[class] Convolutional2DLayer'} +A convolutional 2D Layer. + **Parent type:** [`Layer`][safeds.ml.nn.layers.Layer] **Parameters:** @@ -17,7 +19,7 @@ ??? quote "Stub code in `Convolutional2DLayer.sdsstub`" - ```sds linenums="13" + ```sds linenums="15" class Convolutional2DLayer( @PythonName("output_channel") outputChannel: Int, @PythonName("kernel_size") kernelSize: Int, @@ -27,11 +29,11 @@ /** * Get the input_size of this layer. */ - @PythonName("input_size") attr inputSize: ImageSize + @PythonName("input_size") attr inputSize: ModelImageSize /** * Get the output_size of this layer. */ - @PythonName("output_size") attr outputSize: ImageSize + @PythonName("output_size") attr outputSize: ModelImageSize } ``` @@ -39,10 +41,10 @@ Get the input_size of this layer. -**Type:** [`ImageSize`][safeds.data.image.typing.ImageSize] +**Type:** [`ModelImageSize`][safeds.ml.nn.typing.ModelImageSize] ## `outputSize` {#safeds.ml.nn.layers.Convolutional2DLayer.outputSize data-toc-label='[attribute] outputSize'} Get the output_size of this layer. -**Type:** [`ImageSize`][safeds.data.image.typing.ImageSize] +**Type:** [`ModelImageSize`][safeds.ml.nn.typing.ModelImageSize] diff --git a/docs/api/safeds/ml/nn/layers/ConvolutionalTranspose2DLayer.md b/docs/api/safeds/ml/nn/layers/ConvolutionalTranspose2DLayer.md index 57d89cc46..f64e92152 100644 --- a/docs/api/safeds/ml/nn/layers/ConvolutionalTranspose2DLayer.md +++ b/docs/api/safeds/ml/nn/layers/ConvolutionalTranspose2DLayer.md @@ -1,5 +1,7 @@ # :test_tube:{ title="Experimental" } `ConvolutionalTranspose2DLayer` {#safeds.ml.nn.layers.ConvolutionalTranspose2DLayer data-toc-label='[class] ConvolutionalTranspose2DLayer'} +A convolutional transpose 2D Layer. + **Parent type:** [`Convolutional2DLayer`][safeds.ml.nn.layers.Convolutional2DLayer] **Parameters:** @@ -14,7 +16,7 @@ ??? quote "Stub code in `ConvolutionalTranspose2DLayer.sdsstub`" - ```sds linenums="14" + ```sds linenums="16" class ConvolutionalTranspose2DLayer( @PythonName("output_channel") outputChannel: Int, @PythonName("kernel_size") kernelSize: Int, @@ -28,10 +30,10 @@ Get the input_size of this layer. 
-**Type:** [`ImageSize`][safeds.data.image.typing.ImageSize] +**Type:** [`ModelImageSize`][safeds.ml.nn.typing.ModelImageSize] ## `outputSize` {#safeds.ml.nn.layers.ConvolutionalTranspose2DLayer.outputSize data-toc-label='[attribute] outputSize'} Get the output_size of this layer. -**Type:** [`ImageSize`][safeds.data.image.typing.ImageSize] +**Type:** [`ModelImageSize`][safeds.ml.nn.typing.ModelImageSize] diff --git a/docs/api/safeds/ml/nn/layers/FlattenLayer.md b/docs/api/safeds/ml/nn/layers/FlattenLayer.md index 1ba7deb73..74d81b673 100644 --- a/docs/api/safeds/ml/nn/layers/FlattenLayer.md +++ b/docs/api/safeds/ml/nn/layers/FlattenLayer.md @@ -1,15 +1,17 @@ # :test_tube:{ title="Experimental" } `FlattenLayer` {#safeds.ml.nn.layers.FlattenLayer data-toc-label='[class] FlattenLayer'} +A flatten layer. + **Parent type:** [`Layer`][safeds.ml.nn.layers.Layer] ??? quote "Stub code in `FlattenLayer.sdsstub`" - ```sds linenums="7" + ```sds linenums="10" class FlattenLayer() sub Layer { /** * Get the input_size of this layer. */ - @PythonName("input_size") attr inputSize: ImageSize + @PythonName("input_size") attr inputSize: ModelImageSize /** * Get the output_size of this layer. */ @@ -21,7 +23,7 @@ Get the input_size of this layer. -**Type:** [`ImageSize`][safeds.data.image.typing.ImageSize] +**Type:** [`ModelImageSize`][safeds.ml.nn.typing.ModelImageSize] ## `outputSize` {#safeds.ml.nn.layers.FlattenLayer.outputSize data-toc-label='[attribute] outputSize'} diff --git a/docs/api/safeds/ml/nn/layers/ForwardLayer.md b/docs/api/safeds/ml/nn/layers/ForwardLayer.md index aca87c32e..53b21a7da 100644 --- a/docs/api/safeds/ml/nn/layers/ForwardLayer.md +++ b/docs/api/safeds/ml/nn/layers/ForwardLayer.md @@ -1,20 +1,20 @@ # :test_tube:{ title="Experimental" } `ForwardLayer` {#safeds.ml.nn.layers.ForwardLayer data-toc-label='[class] ForwardLayer'} +A fully connected forward layer. + **Parent type:** [`Layer`][safeds.ml.nn.layers.Layer] **Parameters:** | Name | Type | Description | Default | |------|------|-------------|---------| -| `outputSize` | [`Int`][safeds.lang.Int] | The number of neurons in this layer | - | -| `inputSize` | [`Int?`][safeds.lang.Int] | The number of neurons in the previous layer | `#!sds null` | +| `neuronCount` | [`Int`][safeds.lang.Int] | The number of neurons in this layer | - | ??? quote "Stub code in `ForwardLayer.sdsstub`" - ```sds linenums="10" + ```sds linenums="11" class ForwardLayer( - @PythonName("output_size") outputSize: Int, - @PythonName("input_size") inputSize: Int? = null + @PythonName("neuron_count") neuronCount: Int ) sub Layer { /** * Get the input_size of this layer. diff --git a/docs/api/safeds/ml/nn/layers/LSTMLayer.md b/docs/api/safeds/ml/nn/layers/LSTMLayer.md index 33abe035c..597958bc5 100644 --- a/docs/api/safeds/ml/nn/layers/LSTMLayer.md +++ b/docs/api/safeds/ml/nn/layers/LSTMLayer.md @@ -1,20 +1,20 @@ # :test_tube:{ title="Experimental" } `LSTMLayer` {#safeds.ml.nn.layers.LSTMLayer data-toc-label='[class] LSTMLayer'} +A long short-term memory (LSTM) layer. + **Parent type:** [`Layer`][safeds.ml.nn.layers.Layer] **Parameters:** | Name | Type | Description | Default | |------|------|-------------|---------| -| `outputSize` | [`Int`][safeds.lang.Int] | The number of neurons in this layer | - | -| `inputSize` | [`Int?`][safeds.lang.Int] | The number of neurons in the previous layer | `#!sds null` | +| `neuronCount` | [`Int`][safeds.lang.Int] | The number of neurons in this layer | - | ??? 
quote "Stub code in `LSTMLayer.sdsstub`" - ```sds linenums="10" + ```sds linenums="11" class LSTMLayer( - @PythonName("output_size") outputSize: Int, - @PythonName("input_size") inputSize: Int? = null + @PythonName("neuron_count") neuronCount: Int ) sub Layer { /** * Get the input_size of this layer. diff --git a/docs/api/safeds/ml/nn/layers/MaxPooling2DLayer.md b/docs/api/safeds/ml/nn/layers/MaxPooling2DLayer.md index 2a1427051..cbff82645 100644 --- a/docs/api/safeds/ml/nn/layers/MaxPooling2DLayer.md +++ b/docs/api/safeds/ml/nn/layers/MaxPooling2DLayer.md @@ -1,5 +1,7 @@ # :test_tube:{ title="Experimental" } `MaxPooling2DLayer` {#safeds.ml.nn.layers.MaxPooling2DLayer data-toc-label='[class] MaxPooling2DLayer'} +A maximum Pooling 2D Layer. + **Parent type:** [`Layer`][safeds.ml.nn.layers.Layer] **Parameters:** @@ -12,7 +14,7 @@ ??? quote "Stub code in `MaxPooling2DLayer.sdsstub`" - ```sds linenums="11" + ```sds linenums="14" class MaxPooling2DLayer( @PythonName("kernel_size") kernelSize: Int, stride: Int = -1, @@ -21,11 +23,11 @@ /** * Get the input_size of this layer. */ - @PythonName("input_size") attr inputSize: ImageSize + @PythonName("input_size") attr inputSize: ModelImageSize /** * Get the output_size of this layer. */ - @PythonName("output_size") attr outputSize: ImageSize + @PythonName("output_size") attr outputSize: ModelImageSize } ``` @@ -33,10 +35,10 @@ Get the input_size of this layer. -**Type:** [`ImageSize`][safeds.data.image.typing.ImageSize] +**Type:** [`ModelImageSize`][safeds.ml.nn.typing.ModelImageSize] ## `outputSize` {#safeds.ml.nn.layers.MaxPooling2DLayer.outputSize data-toc-label='[attribute] outputSize'} Get the output_size of this layer. -**Type:** [`ImageSize`][safeds.data.image.typing.ImageSize] +**Type:** [`ModelImageSize`][safeds.ml.nn.typing.ModelImageSize] diff --git a/docs/api/safeds/ml/nn/typing/ConstantImageSize.md b/docs/api/safeds/ml/nn/typing/ConstantImageSize.md new file mode 100644 index 000000000..3da3bd2e6 --- /dev/null +++ b/docs/api/safeds/ml/nn/typing/ConstantImageSize.md @@ -0,0 +1,38 @@ +--- +search: + boost: 0.5 +--- + +# :test_tube:{ title="Experimental" } `ConstantImageSize` {#safeds.ml.nn.typing.ConstantImageSize data-toc-label='[class] ConstantImageSize'} + +A container for constant image size in neural networks. + +**Parent type:** [`ModelImageSize`][safeds.ml.nn.typing.ModelImageSize] + +**Inheritors:** + +- [`ImageSize`][safeds.data.image.typing.ImageSize] + +??? quote "Stub code in `ConstantImageSize.sdsstub`" + + ```sds linenums="7" + class ConstantImageSize sub ModelImageSize + ``` + +## `channel` {#safeds.ml.nn.typing.ConstantImageSize.channel data-toc-label='[attribute] channel'} + +Get the channel of this `ImageSize` in pixels. + +**Type:** [`Int`][safeds.lang.Int] + +## `height` {#safeds.ml.nn.typing.ConstantImageSize.height data-toc-label='[attribute] height'} + +Get the height of this `ImageSize` in pixels. + +**Type:** [`Int`][safeds.lang.Int] + +## `width` {#safeds.ml.nn.typing.ConstantImageSize.width data-toc-label='[attribute] width'} + +Get the width of this `ImageSize` in pixels. 
+ +**Type:** [`Int`][safeds.lang.Int] diff --git a/docs/api/safeds/ml/nn/typing/ModelImageSize.md b/docs/api/safeds/ml/nn/typing/ModelImageSize.md new file mode 100644 index 000000000..05c0a9ec3 --- /dev/null +++ b/docs/api/safeds/ml/nn/typing/ModelImageSize.md @@ -0,0 +1,50 @@ +--- +search: + boost: 0.5 +--- + +# :test_tube:{ title="Experimental" } `ModelImageSize` {#safeds.ml.nn.typing.ModelImageSize data-toc-label='[class] ModelImageSize'} + +A container for image size in neural networks. + +**Inheritors:** + +- [`ConstantImageSize`][safeds.ml.nn.typing.ConstantImageSize] +- [`VariableImageSize`][safeds.ml.nn.typing.VariableImageSize] + +??? quote "Stub code in `ModelImageSize.sdsstub`" + + ```sds linenums="9" + class ModelImageSize { + /** + * Get the width of this `ImageSize` in pixels. + */ + attr width: Int + /** + * Get the height of this `ImageSize` in pixels. + */ + attr height: Int + /** + * Get the channel of this `ImageSize` in pixels. + */ + attr channel: Int + } + ``` + +## `channel` {#safeds.ml.nn.typing.ModelImageSize.channel data-toc-label='[attribute] channel'} + +Get the channel of this `ImageSize` in pixels. + +**Type:** [`Int`][safeds.lang.Int] + +## `height` {#safeds.ml.nn.typing.ModelImageSize.height data-toc-label='[attribute] height'} + +Get the height of this `ImageSize` in pixels. + +**Type:** [`Int`][safeds.lang.Int] + +## `width` {#safeds.ml.nn.typing.ModelImageSize.width data-toc-label='[attribute] width'} + +Get the width of this `ImageSize` in pixels. + +**Type:** [`Int`][safeds.lang.Int] diff --git a/docs/api/safeds/ml/nn/typing/VariableImageSize.md b/docs/api/safeds/ml/nn/typing/VariableImageSize.md new file mode 100644 index 000000000..bbe381c5f --- /dev/null +++ b/docs/api/safeds/ml/nn/typing/VariableImageSize.md @@ -0,0 +1,36 @@ +--- +search: + boost: 0.5 +--- + +# :test_tube:{ title="Experimental" } `VariableImageSize` {#safeds.ml.nn.typing.VariableImageSize data-toc-label='[class] VariableImageSize'} + +A container for variable image size in neural networks. + +With a `VariableImageSize`, all image sizes that are a multiple of `width` and `height` are valid. + +**Parent type:** [`ModelImageSize`][safeds.ml.nn.typing.ModelImageSize] + +??? quote "Stub code in `VariableImageSize.sdsstub`" + + ```sds linenums="9" + class VariableImageSize sub ModelImageSize + ``` + +## `channel` {#safeds.ml.nn.typing.VariableImageSize.channel data-toc-label='[attribute] channel'} + +Get the channel of this `ImageSize` in pixels. + +**Type:** [`Int`][safeds.lang.Int] + +## `height` {#safeds.ml.nn.typing.VariableImageSize.height data-toc-label='[attribute] height'} + +Get the height of this `ImageSize` in pixels. + +**Type:** [`Int`][safeds.lang.Int] + +## `width` {#safeds.ml.nn.typing.VariableImageSize.width data-toc-label='[attribute] width'} + +Get the width of this `ImageSize` in pixels. 
+ +**Type:** [`Int`][safeds.lang.Int] diff --git a/packages/safe-ds-lang/src/language/runtime/safe-ds-python-server.ts b/packages/safe-ds-lang/src/language/runtime/safe-ds-python-server.ts index 7567b0a90..579b50ed4 100644 --- a/packages/safe-ds-lang/src/language/runtime/safe-ds-python-server.ts +++ b/packages/safe-ds-lang/src/language/runtime/safe-ds-python-server.ts @@ -16,8 +16,8 @@ import { UpdateRunnerNotification, } from '../communication/rpc.js'; -const LOWEST_SUPPORTED_RUNNER_VERSION = '0.16.0'; -const LOWEST_UNSUPPORTED_RUNNER_VERSION = '0.17.0'; +const LOWEST_SUPPORTED_RUNNER_VERSION = '0.17.0'; +const LOWEST_UNSUPPORTED_RUNNER_VERSION = '0.18.0'; const npmVersionRange = `>=${LOWEST_SUPPORTED_RUNNER_VERSION} <${LOWEST_UNSUPPORTED_RUNNER_VERSION}`; export const pipVersionRange = `>=${LOWEST_SUPPORTED_RUNNER_VERSION},<${LOWEST_UNSUPPORTED_RUNNER_VERSION}`; diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/image/containers/ImageList.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/image/containers/ImageList.sdsstub index e86c89550..694ed6d9f 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/image/containers/ImageList.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/image/containers/ImageList.sdsstub @@ -16,29 +16,29 @@ from safeds.data.image.typing import ImageSize @Experimental class ImageList { /** - * Return the number of images in this image list. + * The number of images in this image list. */ - @PythonName("number_of_images") attr imageCount: Int + @PythonName("image_count") attr imageCount: Int /** - * Return a list of all widths in this image list. + * A list of all widths in this image list. */ attr widths: List /** - * Return a list of all heights in this image list. + * A list of all heights in this image list. */ attr heights: List /** - * Return the channel of all images. + * The channel of all images. */ attr channel: Int /** - * Return the sizes of all images. + * The sizes of all images. */ attr sizes: List /** - * Return the number of different sizes of images in this image list. + * The number of different sizes of images in this image list. */ - @PythonName("number_of_sizes") attr sizeCount: Int + @PythonName("size_count") attr sizeCount: Int /** * Create an ImageList from a list of images. diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/image/typing/ImageSize.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/image/typing/ImageSize.sdsstub index 2b82a1dfa..b51e6dae0 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/image/typing/ImageSize.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/image/typing/ImageSize.sdsstub @@ -1,6 +1,8 @@ package safeds.data.image.typing from safeds.data.image.containers import Image +from safeds.ml.nn.typing import ConstantImageSize + /** * A container for image size data. @@ -9,35 +11,5 @@ from safeds.data.image.containers import Image * @param height the height of the image * @param channel the channel of the image */ -class ImageSize( - width: Int, - height: Int, - channel: Int, - @PythonName("_ignore_invalid_channel") ignoreInvalidChannel: Boolean = false -) { - /** - * Get the width of this `ImageSize` in pixels. - */ - attr width: Int - /** - * Get the height of this `ImageSize` in pixels. - */ - attr height: Int - /** - * Get the channel of this `ImageSize` in pixels. - */ - attr channel: Int - - /** - * Create a `ImageSize` of a given image. 
- * - * @param image the given image for the `ImageSize` - * - * @result imageSize the calculated `ImageSize` - */ - @Pure - @PythonName("from_image") - static fun fromImage( - image: Image - ) -> imageSize: ImageSize -} +@Experimental +class ImageSize sub ConstantImageSize diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/labeled/containers/Dataset.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/labeled/containers/Dataset.sdsstub index e88e1c8cf..8e84fa350 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/labeled/containers/Dataset.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/labeled/containers/Dataset.sdsstub @@ -2,5 +2,8 @@ package safeds.data.labeled.containers /** * A dataset is used as input to machine learning models. + * + * @typeParam I The type of the input data. + * @typeParam O The type of the output data. */ -class Dataset +class Dataset diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/labeled/containers/ImageDataset.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/labeled/containers/ImageDataset.sdsstub index 66ba1a649..75bfd746d 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/labeled/containers/ImageDataset.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/labeled/containers/ImageDataset.sdsstub @@ -14,12 +14,12 @@ from safeds.data.tabular.containers import Table * @param shuffle weather the data should be shuffled after each epoch of training */ @Experimental -class ImageDataset( +class ImageDataset( @PythonName("input_data") inputData: ImageList, - @PythonName("output_data") outputData: T, + @PythonName("output_data") outputData: O, @PythonName("batch_size") batchSize: Int = 1, shuffle: Boolean = false -) sub Dataset { +) sub Dataset { /** * Get the input `ImageSize` of this dataset. */ @@ -45,7 +45,7 @@ class ImageDataset( */ @Pure @PythonName("get_output") - fun getOutput() -> output: T + fun getOutput() -> output: O /** * Return a new `ImageDataset` with shuffled data. @@ -55,5 +55,5 @@ class ImageDataset( * @result imageDataset the shuffled `ImageDataset` */ @Pure - fun shuffle() -> imageDataset: ImageDataset + fun shuffle() -> imageDataset: ImageDataset } diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/labeled/containers/TabularDataset.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/labeled/containers/TabularDataset.sdsstub index 1528ae870..61ece804a 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/labeled/containers/TabularDataset.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/labeled/containers/TabularDataset.sdsstub @@ -17,7 +17,7 @@ from safeds.data.tabular.containers import Table * are specified, all columns except the target column are used as features. * * @param data The data. - * @param targetName Name of the target column. + * @param targetName The name of the target column. * @param extraNames Names of the columns that are neither features nor target. If null, no extra columns are used, i.e. all but * the target column are used as features. * @@ -37,7 +37,7 @@ class TabularDataset( data: union>, Table>, @PythonName("target_name") targetName: String, @PythonName("extra_names") extraNames: List = [] -) sub Dataset { +) sub Dataset { /** * The feature columns of the tabular dataset. 
*/ diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/labeled/containers/TimeSeriesDataset.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/labeled/containers/TimeSeriesDataset.sdsstub index ebe2ca1b8..bc59493cc 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/labeled/containers/TimeSeriesDataset.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/labeled/containers/TimeSeriesDataset.sdsstub @@ -3,24 +3,26 @@ package safeds.data.labeled.containers from safeds.data.tabular.containers import Table /** - * A time series dataset maps feature and time columns to a target column. Not like the TabularDataset a TimeSeries needs to contain one target and one time column, but can have empty features. + * A time series dataset maps feature and time columns to a target column. * - * Create a time series dataset from a mapping of column names to their values. + * Unlike a TabularDataset, a TimeSeries needs to contain one target and one time column, but can have empty features. * * @param data The data. - * @param targetName Name of the target column. - * @param timeName Name of the time column. - * @param extraNames Names of the columns that are neither features nor target. If null, no extra columns are used, i.e. all but + * @param targetName The name of the target column. + * @param timeName The name of the time column. + * @param windowSize The number of consecutive sample to use as input for prediction. + * @param extraNames Names of the columns that are neither features nor target. If None, no extra columns are used, i.e. all but * the target column are used as features. + * @param forecastHorizon The number of time steps to predict into the future. * * @example * pipeline example { - * // from safeds.data.labeled.containers import TabularDataset * // dataset = TimeSeriesDataset( * // {"id": [1, 2, 3], "feature": [4, 5, 6], "target": [1, 2, 3], "error":[0,0,1]}, * // target_name="target", * // time_name = "id", - * // extra_names=["error"] + * // window_size=1, + * // extra_names=["error"], * // ) * } */ @@ -29,8 +31,10 @@ class TimeSeriesDataset( data: union>, Table>, @PythonName("target_name") targetName: String, @PythonName("time_name") timeName: String, - @PythonName("extra_names") extraNames: List? = null -) { + @PythonName("window_size") windowSize: Int, + @PythonName("extra_names") extraNames: List? = null, + @PythonName("forecast_horizon") forecastHorizon: Int = 1 +) sub Dataset { /** * The feature columns of the time series dataset. */ @@ -43,6 +47,14 @@ class TimeSeriesDataset( * The time column of the time series dataset. */ attr time: Column + /** + * The number of consecutive sample to use as input for prediction. + */ + @PythonName("window_size") attr windowSize: Int + /** + * The number of time steps to predict into the future. + */ + @PythonName("forecast_horizon") attr forecastHorizon: Int /** * Additional columns of the time series dataset that are neither features, target nor time. * diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/Cell.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/Cell.sdsstub index c0ae8cb0a..fb5f9a184 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/Cell.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/Cell.sdsstub @@ -6,6 +6,11 @@ package safeds.data.tabular.containers * This class cannot be instantiated directly. 
It is only used for arguments of callbacks.
  */
 class Cell {
+    /**
+     * Namespace for operations on strings.
+     */
+    attr str: StringCell
+
     /**
      * Negate a boolean. This WILL LATER BE equivalent to the ^not operator.
      *
@@ -134,6 +139,21 @@
         other: Any
     ) -> result: Cell

+    /**
+     * Divide by a value. This WILL LATER BE equivalent to the `/` operator.
+     *
+     * @example
+     * pipeline example {
+     *     val column = Column("example", [6, 8]);
+     *     val result = column.transform((cell) -> cell.div(2));
+     *     // Column("example", [3, 4])
+     * }
+     */
+    @Pure
+    fun div(
+        other: Any
+    ) -> result: Cell
+
     /**
      * Perform a modulo operation.
      *
@@ -194,21 +214,6 @@
         other: Any
     ) -> result: Cell

-    /**
-     * Divide by a value. This WILL LATER BE equivalent to the `/` operator.
-     *
-     * @example
-     * pipeline example {
-     *     val column = Column("example", [6, 8]);
-     *     val result = column.transform((cell) -> cell.div(2));
-     *     // Column("example", [3, 4])
-     * }
-     */
-    @Pure
-    fun div(
-        other: Any
-    ) -> result: Cell
-
     /**
      * Check if equal to a value. This WILL LATER BE equivalent to the `==` operator.
      *
diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/Column.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/Column.sdsstub
index a8c1083c4..f5efe9f9b 100644
--- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/Column.sdsstub
+++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/Column.sdsstub
@@ -8,7 +8,7 @@ from safeds.data.tabular.typing import DataType
  * A named, one-dimensional collection of homogeneous values.
  *
  * @param name The name of the column.
- * @param data The data of the column. If null, an empty column is created.
+ * @param data The data of the column.
  *
  * @example
  * pipeline example {
@@ -17,7 +17,7 @@ from safeds.data.tabular.typing import DataType
  */
 class Column(
     name: String,
-    data: List? = null
+    data: List = []
 ) {
     /**
      * Whether the column is numeric.
@@ -34,7 +34,7 @@ class Column(
     /**
      * The number of rows in the column.
      */
-    @PythonName("number_of_rows") attr rowCount: Int
+    @PythonName("row_count") attr rowCount: Int
     /**
      * The plotter for the column.
      */
@@ -44,8 +44,28 @@ class Column(
      */
     attr type: DataType

+    /**
+     * Return the distinct values in the column.
+     *
+     * @param ignoreMissingValues Whether to ignore missing values.
+     *
+     * @result distinctValues The distinct values in the column.
+     *
+     * @example
+     * pipeline example {
+     *     val column = Column("test", [1, 2, 3, 2]);
+     *     val result = column.getDistinctValues();
+     *     // [1, 2, 3]
+     * }
+     */
+    @Pure
+    @PythonName("get_distinct_values")
+    fun getDistinctValues(
+        @PythonName("ignore_missing_values") ignoreMissingValues: Boolean = true,
+    ) -> distinctValues: List
+
     /**
-     * Return the column value at specified index.
+     * Return the column value at the specified index. This WILL LATER BE equivalent to the `[]` operator (indexed access).
      *
      * Nonnegative indices are counted from the beginning (starting at 0), negative indices from the end (starting at
      * -1).
@@ -66,10 +86,177 @@ class Column(
         index: Int
     ) -> value: T

+
+    /**
+     * Return whether all values in the column satisfy the predicate.
+     *
+     * The predicate can return one of three values:
+     *
+     * - true, if the value satisfies the predicate.
+     * - false, if the value does not satisfy the predicate.
+     * - null, if the truthiness of the predicate is unknown, e.g. due to missing values.
+ * + * By default, cases where the truthiness of the predicate is unknown are ignored and this method returns: + * + * - true, if the predicate always returns true or null. + * - false, if the predicate returns false at least once. + * + * You can instead enable Kleene logic by setting `ignoreUnknown = false`. In this case, this method returns: + * + * - true, if the predicate always returns true. + * - false, if the predicate returns false at least once. + * - null, if the predicate never returns false, but at least once null. + * + * @param predicate The predicate to apply to each value. + * @param ignoreUnknown Whether to ignore cases where the truthiness of the predicate is unknown. + * + * @result allSatisfyPredicate Whether all values in the column satisfy the predicate. + * + * @example + * pipeline example { + * val column = Column("test", [1, 2, 3]); + * val result = column.all((cell) -> cell.gt(0)); // true + * } + * + * @example + * pipeline example { + * val column = Column("test", [1, 2, 3]); + * val result = column.all((cell) -> cell.lt(3)); // false + * } + */ + @Pure + fun all( + predicate: (cell: Cell) -> satisfiesPredicate: Cell, + @PythonName("ignore_unknown") ignoreUnknown: Boolean = true, + ) -> allSatisfyPredicate: Boolean? + + /** + * Return whether any value in the column satisfies the predicate. + * + * The predicate can return one of three values: + * + * - true, if the value satisfies the predicate. + * - false, if the value does not satisfy the predicate. + * - null, if the truthiness of the predicate is unknown, e.g. due to missing values. + * + * By default, cases where the truthiness of the predicate is unknown are ignored and this method returns: + * + * - true, if the predicate returns true at least once. + * - false, if the predicate always returns false or null. + * + * You can instead enable Kleene logic by setting `ignoreUnknown = false`. In this case, this method returns: + * + * - true, if the predicate returns true at least once. + * - false, if the predicate always returns false. + * - null, if the predicate never returns true, but at least once null. + * + * @param predicate The predicate to apply to each value. + * @param ignoreUnknown Whether to ignore cases where the truthiness of the predicate is unknown. + * + * @result anySatisfyPredicate Whether any value in the column satisfies the predicate. + * + * @example + * pipeline example { + * val column = Column("test", [1, 2, 3]); + * val result = column.any((cell) -> cell.gt(2)); // true + * } + * + * @example + * pipeline example { + * val column = Column("test", [1, 2, 3]); + * val result = column.any((cell) -> cell.lt(0)); // false + * } + */ + @Pure + fun any( + predicate: (cell: Cell) -> satisfiesPredicate: Cell, + @PythonName("ignore_unknown") ignoreUnknown: Boolean = true, + ) -> anySatisfyPredicate: Boolean? + + /** + * Return how many values in the column satisfy the predicate. + * + * The predicate can return one of three results: + * + * - true, if the value satisfies the predicate. + * - false, if the value does not satisfy the predicate. + * - null, if the truthiness of the predicate is unknown, e.g. due to missing values. + * + * By default, cases where the truthiness of the predicate is unknown are ignored and this method returns how + * often the predicate returns true. + * + * You can instead enable Kleene logic by setting `ignore_unknown = False`. In this case, this method returns null + * if the predicate returns null at least once. 
Otherwise, it still returns how often the predicate returns true.
+     *
+     * @param predicate The predicate to apply to each value.
+     * @param ignoreUnknown Whether to ignore cases where the truthiness of the predicate is unknown.
+     *
+     * @result count The number of values in the column that satisfy the predicate.
+     *
+     * @example
+     * pipeline example {
+     *     val column = Column("test", [1, 2, 3]);
+     *     val result = column.countIf((cell) -> cell.gt(1)); // 2
+     * }
+     *
+     * @example
+     * pipeline example {
+     *     val column = Column("test", [1, 2, 3]);
+     *     val result = column.countIf((cell) -> cell.lt(0)); // 0
+     * }
+     */
+    @Pure
+    fun countIf(
+        predicate: (cell: Cell) -> satisfiesPredicate: Cell,
+        @PythonName("ignore_unknown") ignoreUnknown: Boolean = true,
+    ) -> count: Int?
+
+    /**
+     * Return whether no value in the column satisfies the predicate.
+     *
+     * The predicate can return one of three values:
+     *
+     * - true, if the value satisfies the predicate.
+     * - false, if the value does not satisfy the predicate.
+     * - null, if the truthiness of the predicate is unknown, e.g. due to missing values.
+     *
+     * By default, cases where the truthiness of the predicate is unknown are ignored and this method returns:
+     *
+     * - true, if the predicate always returns false or null.
+     * - false, if the predicate returns true at least once.
+     *
+     * You can instead enable Kleene logic by setting `ignoreUnknown = false`. In this case, this method returns:
+     *
+     * - true, if the predicate always returns false.
+     * - false, if the predicate returns true at least once.
+     * - null, if the predicate never returns true, but at least once null.
+     *
+     * @param predicate The predicate to apply to each value.
+     * @param ignoreUnknown Whether to ignore cases where the truthiness of the predicate is unknown.
+     *
+     * @result noneSatisfyPredicate Whether no value in the column satisfies the predicate.
+     *
+     * @example
+     * pipeline example {
+     *     val column = Column("test", [1, 2, 3]);
+     *     val result = column.none((cell) -> cell.lt(0)); // true
+     * }
+     *
+     * @example
+     * pipeline example {
+     *     val column = Column("test", [1, 2, 3]);
+     *     val result = column.none((cell) -> cell.gt(2)); // false
+     * }
+     */
+    @Pure
+    fun none(
+        predicate: (cell: Cell) -> satisfiesPredicate: Cell,
+        @PythonName("ignore_unknown") ignoreUnknown: Boolean = true,
+    ) -> noneSatisfyPredicate: Boolean?
+
     /**
      * Return a new column with a new name.
      *
-     * The original column is not modified.
+     * **Note:** The original column is not modified.
      *
      * @param newName The new name of the column.
      *
@@ -90,7 +277,7 @@
     /**
      * Return a new column with values transformed by the transformer.
      *
-     * The original column is not modified.
+     * **Note:** The original column is not modified.
      *
      * @param transformer The transformer to apply to each value.
      *
@@ -299,6 +486,27 @@
     @PythonName("missing_value_ratio")
     fun missingValueRatio() -> missingValueRatio: Float

+    /**
+     * Return the mode of the values in the column.
+     *
+     * The mode is the value that appears most frequently in the column. If multiple values occur equally often, all
+     * of them are returned. The values are sorted in ascending order.
+     *
+     * @param ignoreMissingValues Whether to ignore missing values.
+     *
+     * @result mode The mode of the values in the column.
+ * + * @example + * pipeline example { + * val column = Column("test", [3, 1, 2, 1, 3]); + * val result = column.mode(); // [1, 3] + * } + */ + @Pure + fun mode( + @PythonName("ignore_missing_values") ignoreMissingValues: Boolean = true, + ) -> mode: List + /** * Return the stability of the column. * diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/Row.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/Row.sdsstub index c4050fda7..ac17ab256 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/Row.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/Row.sdsstub @@ -16,18 +16,25 @@ class Row { /** * The number of columns in the row. */ - @PythonName("number_of_columns") attr columnCount: Int + @PythonName("column_count") attr columnCount: Int /** * The schema of the row. */ attr ^schema: Schema /** - * Get the value of the specified column. + * Get the value of the specified column. This WILL LATER BE equivalent to using the `[]` operator (indexed access). * * @param name The name of the column. * * @result value The value of the column. + * + * @example + * pipeline example { + * val table = Table({"col1": [1, 2], "col2": [3, 4]}); + * val result = table.removeRows((row) -> row.getValue("col1").eq(1)); + * // Table({"col1": [2], "col2": [4]}) + * } */ @Pure @PythonName("get_value") diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/StringCell.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/StringCell.sdsstub new file mode 100644 index 000000000..5425e78ba --- /dev/null +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/StringCell.sdsstub @@ -0,0 +1,306 @@ +package safeds.data.tabular.containers + +/** + * Namespace for operations on strings. + * + * This class cannot be instantiated directly. It can only be accessed using the `str` attribute of a cell. + * + * @example + * pipeline example { + * val column = Column("example", ["ab", "bc", "cd"]); + * val result = column.transform((cell) -> cell.str.toUppercase()); + * // Column("example", ["AB", "BC", "CD"]) + * } + */ +class StringCell { + /** + * Check if the string value in the cell contains the substring. + * + * @param substring The substring to search for. + * + * @result contains Whether the string value contains the substring. + * + * @example + * pipeline example { + * val column = Column("example", ["ab", "bc", "cd"]); + * val result = column.countIf((cell) -> cell.str.contains("b")); // 2 + * } + */ + @Pure + fun contains( + substring: String + ) -> contains: Cell + + /** + * Check if the string value in the cell ends with the suffix. + * + * @param suffix The suffix to search for. + * + * @result endsWith Whether the string value ends with the suffix. + * + * @example + * pipeline example { + * val column = Column("example", ["ab", "bc", "cd"]); + * val result = column.countIf((cell) -> cell.str.endsWith("c")); // 1 + * } + */ + @Pure + @PythonName("ends_with") + fun endsWith( + suffix: String + ) -> endsWith: Cell + + /** + * Get the index of the first occurrence of the substring in the string value in the cell. + * + * @param substring The substring to search for. + * + * @result indexOf The index of the first occurrence of the substring. If the substring is not found, null is returned. 
+     *
+     * @example
+     * pipeline example {
+     *     val column = Column("example", ["ab", "bc", "cd"]);
+     *     val result = column.transform((cell) -> cell.str.indexOf("b"));
+     *     // Column("example", [1, 0, null])
+     * }
+     */
+    @Pure
+    @PythonName("index_of")
+    fun indexOf(
+        substring: String
+    ) -> indexOf: Cell
+
+    /**
+     * Get the number of characters of the string value in the cell.
+     *
+     * @param optimizeForAscii Greatly speed up this operation if the string is ASCII-only. If the string contains non-ASCII characters,
+     * this option will return incorrect results, though.
+     *
+     * @result length The length of the string value.
+     *
+     * @example
+     * pipeline example {
+     *     val column = Column("example", ["", "a", "abc"]);
+     *     val result = column.transform((cell) -> cell.str.length());
+     *     // Column("example", [0, 1, 3])
+     * }
+     */
+    @Pure
+    fun length(
+        @PythonName("optimize_for_ascii") optimizeForAscii: Boolean = false
+    ) -> length: Cell
+
+    /**
+     * Replace occurrences of the old substring with the new substring in the string value in the cell.
+     *
+     * @param old The substring to replace.
+     * @param new The substring to replace with.
+     *
+     * @result replacedString The string value with the occurrences replaced.
+     *
+     * @example
+     * pipeline example {
+     *     val column = Column("example", ["ab", "bc", "cd"]);
+     *     val result = column.transform((cell) -> cell.str.replace("b", "z"));
+     *     // Column("example", ["az", "zc", "cd"])
+     * }
+     */
+    @Pure
+    fun replace(
+        old: String,
+        new: String
+    ) -> replacedString: Cell
+
+    /**
+     * Check if the string value in the cell starts with the prefix.
+     *
+     * @param prefix The prefix to search for.
+     *
+     * @result startsWith Whether the string value starts with the prefix.
+     *
+     * @example
+     * pipeline example {
+     *     val column = Column("example", ["ab", "bc", "cd"]);
+     *     val result = column.countIf((cell) -> cell.str.startsWith("a")); // 1
+     * }
+     */
+    @Pure
+    @PythonName("starts_with")
+    fun startsWith(
+        prefix: String
+    ) -> startsWith: Cell
+
+    /**
+     * Get a substring of the string value in the cell.
+     *
+     * @param start The start index of the substring.
+     * @param length The length of the substring. If null, the substring contains all characters starting from `start`. Must be greater than
+     * or equal to 0.
+     *
+     * @result substring The substring of the string value.
+     *
+     * @example
+     * pipeline example {
+     *     val column = Column("example", ["abc", "def", "ghi"]);
+     *     val result = column.transform((cell) -> cell.str.substring(1, 2));
+     *     // Column("example", ["bc", "ef", "hi"])
+     * }
+     */
+    @Pure
+    fun substring(
+        start: Int = 0,
+        length: Int? = null
+    ) -> substring: Cell
+
+    /**
+     * Convert the string value in the cell to a date. Requires the string to be in the ISO 8601 format.
+     *
+     * @result date The date value. If the string cannot be converted to a date, null is returned.
+     *
+     * @example
+     * pipeline example {
+     *     val column = Column("example", ["2021-01-01", "2021-02-01", "abc"]);
+     *     val result = column.transform((cell) -> cell.str.toDate());
+     * }
+     */
+    @Pure
+    @PythonName("to_date")
+    fun toDate() -> date: Cell // TODO: Add builtin type for date
+
+    /**
+     * Convert the string value in the cell to a datetime. Requires the string to be in the ISO 8601 format.
+     *
+     * @result datetime The datetime value. If the string cannot be converted to a datetime, null is returned.
+ * + * @example + * pipeline example { + * val column = Column("example", ["2021-01-01T00:00:00z", "2021-02-01T00:00:00z", "abc"]); + * val result = column.transform((cell) -> cell.str.toDatetime()); + * } + */ + @Pure + @PythonName("to_datetime") + fun toDatetime() -> datetime: Cell // TODO: Add builtin type for datetime + + /** + * Convert the string value in the cell to a float. + * + * @result float The float value. If the string cannot be converted to a float, null is returned. + * + * @example + * pipeline example { + * val column = Column("example", ["1", "3.4", "5.6", "abc"]); + * val result = column.transform((cell) -> cell.str.toFloat()); + * // Column("example", [1.0, 3.4, 5.6, null]) + * } + */ + @Pure + @PythonName("to_float") + fun toFloat() -> float: Cell + + /** + * Convert the string value in the cell to an integer. + * + * @param base The base of the integer. + * + * @result int The integer value. If the string cannot be converted to an integer, null is returned. + * + * @example + * pipeline example { + * val column = Column("example", ["1", "2", "3", "abc"]); + * val result = column.transform((cell) -> cell.str.toInt()); + * // Column("example", [1, 2, 3, null]) + * } + * + * @example + * pipeline example { + * val column = Column("example", ["1", "10", "11", "abc"]); + * val result = column.transform((cell) -> cell.str.toInt(base = 2)); + * // Column("example", [1, 2, 3, null]) + * } + */ + @Pure + @PythonName("to_int") + fun toInt( + base: Int = 10 + ) -> int: Cell + + /** + * Convert the string value in the cell to lowercase. + * + * @result lowercase The string value in lowercase. + * + * @example + * pipeline example { + * val column = Column("example", ["AB", "BC", "CD"]); + * val result = column.transform((cell) -> cell.str.toLowercase()); + * // Column("example", ["ab", "bc", "cd"]) + * } + */ + @Pure + @PythonName("to_lowercase") + fun toLowercase() -> lowercase: Cell + + /** + * Convert the string value in the cell to uppercase. + * + * @result uppercase The string value in uppercase. + * + * @example + * pipeline example { + * val column = Column("example", ["ab", "bc", "cd"]); + * val result = column.transform((cell) -> cell.str.toUppercase()); + * // Column("example", ["AB", "BC", "CD"]) + * } + */ + @Pure + @PythonName("to_uppercase") + fun toUppercase() -> uppercase: Cell + + /** + * Remove whitespace from the start and end of the string value in the cell. + * + * @result trimmed The string value without whitespace at the start and end. + * + * @example + * pipeline example { + * val column = Column("example", ["", " abc", "abc ", " abc "]); + * val result = column.transform((cell) -> cell.str.trim()); + * // Column("example", ["", "abc", "abc", "abc"]) + * } + */ + @Pure + fun trim() -> trimmed: Cell + + /** + * Remove whitespace from the end of the string value in the cell. + * + * @result trimmed The string value without whitespace at the end. + * + * @example + * pipeline example { + * val column = Column("example", ["", " abc", "abc ", " abc "]); + * val result = column.transform((cell) -> cell.str.trimEnd()); + * // Column("example", ["", " abc", "abc", " abc"]) + * } + */ + @Pure + @PythonName("trim_end") + fun trimEnd() -> trimmed: Cell + + /** + * Remove whitespace from the start of the string value in the cell. + * + * @result trimmed The string value without whitespace at the start. 
+ * + * @example + * pipeline example { + * val column = Column("example", ["", " abc", "abc ", " abc "]); + * val result = column.transform((cell) -> cell.str.trimStart()); + * // Column("example", ["", "abc", "abc ", "abc "]) + * } + */ + @Pure + @PythonName("trim_start") + fun trimStart() -> trimmed: Cell +} diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/Table.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/Table.sdsstub index cd76cd797..f71ac6c1a 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/Table.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/containers/Table.sdsstub @@ -39,13 +39,13 @@ class Table( /** * The number of columns in the table. */ - @PythonName("number_of_columns") attr columnCount: Int + @PythonName("column_count") attr columnCount: Int /** * The number of rows in the table. * * **Note:** This operation must fully load the data into memory, which can be expensive. */ - @PythonName("number_of_rows") attr rowCount: Int + @PythonName("row_count") attr rowCount: Int /** * The plotter for the table. */ @@ -201,8 +201,6 @@ class Table( /** * Get a column from the table. * - * **Note:** This operation must fully load the data into memory, which can be expensive. - * * @param name The name of the column. * * @result column The column. @@ -422,6 +420,45 @@ class Table( transformer: (cell: Cell) -> transformedCell: Cell ) -> newTable: Table + /** + * Return how many rows in the table satisfy the predicate. + * + * The predicate can return one of three results: + * + * - true, if the row satisfies the predicate. + * - false, if the row does not satisfy the predicate. + * - null, if the truthiness of the predicate is unknown, e.g. due to missing values. + * + * By default, cases where the truthiness of the predicate is unknown are ignored and this method returns how often + * the predicate returns true. + * + * You can instead enable Kleene logic by setting `ignore_unknown = False`. In this case, this method returns null + * if the predicate returns null at least once. Otherwise, it still returns how often the predicate returns true. + * + * @param predicate The predicate to apply to each row. + * @param ignoreUnknown Whether to ignore cases where the truthiness of the predicate is unknown. + * + * @result count The number of rows in the table that satisfy the predicate. + * + * @example + * pipeline example { + * val table = Table({"col1": [1, 2, 3], "col2": [1, 3, 3]}); + * val result = table.countRowIf((row) -> row.getValue("col1").eq(row.getValue("col2"))); // 2 + * } + * + * @example + * pipeline example { + * val table = Table({"col1": [1, 2, 3], "col2": [1, 3, 3]}); + * val result = table.countRowIf((row) -> row.getValue("col1").gt(row.getValue("col2"))); // 0 + * } + */ + @Pure + @PythonName("count_row_if") + fun countRowIf( + predicate: (cell: Row) -> satisfiesPredicate: Cell, + @PythonName("ignore_unknown") ignoreUnknown: Boolean = true, + ) -> count: Int? + /** * Return a new table without duplicate rows. 
* @@ -743,7 +780,7 @@ class Table( * @example * pipeline example { * val table = Table({"a": [1, 2, 3]}); - * val transformer, val transformedTable = RangeScaler(min=0.0, max=1.0).fitAndTransform(table, ["a"]); + * val transformer, val transformedTable = RangeScaler(min=0.0, max=1.0, columnNames="a").fitAndTransform(table); * val result = transformedTable.inverseTransformTable(transformer); * // Table({"a": [1, 2, 3]}) * } @@ -769,7 +806,7 @@ class Table( * @example * pipeline example { * val table = Table({"a": [1, 2, 3]}); - * val transformer = RangeScaler(min=0.0, max=1.0).fit(table, ["a"]); + * val transformer = RangeScaler(min=0.0, max=1.0, columnNames="a").fit(table); * val result = table.transformTable(transformer); * // Table({"a": [0, 0.5, 1]}) * } @@ -903,7 +940,7 @@ class Table( * Feature columns are implicitly defined as all columns except the target and extra columns. If no extra columns * are specified, all columns except the target column are used as features. * - * @param targetName Name of the target column. + * @param targetName The name of the target column. * @param extraNames Names of the columns that are neither feature nor target. If null, no extra columns are used, i.e. all but * the target column are used as features. * @@ -933,10 +970,12 @@ class Table( * * The original table is not modified. * - * @param targetName Name of the target column. - * @param timeName Name of the time column. - * @param extraNames Names of the columns that are neither features nor target. If null, no extra columns are used, i.e. all but + * @param targetName The name of the target column. + * @param timeName The name of the time column. + * @param windowSize The number of consecutive sample to use as input for prediction. + * @param extraNames Names of the columns that are neither features nor target. If None, no extra columns are used, i.e. all but * the target column are used as features. + * @param forecastHorizon The number of time steps to predict into the future. * * @result dataset A new time series dataset with the given target and feature names. * @@ -949,7 +988,7 @@ class Table( * "amount_bought": [74, 72, 51], * } * ); - * val dataset = table.toTimeSeriesDataset(targetName="amount_bought", timeName= "day"); + * val dataset = table.toTimeSeriesDataset(targetName="amount_bought", timeName= "day", windowSize=2); * } */ @Pure @@ -957,6 +996,8 @@ class Table( fun toTimeSeriesDataset( @PythonName("target_name") targetName: String, @PythonName("time_name") timeName: String, - @PythonName("extra_names") extraNames: List? = null + @PythonName("window_size") windowSize: Int, + @PythonName("extra_names") extraNames: List? 
= null, + @PythonName("forecast_horizon") forecastHorizon: Int = 1 ) -> dataset: TimeSeriesDataset } diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/plotting/ColumnPlotter.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/plotting/ColumnPlotter.sdsstub index 3730aaa79..323625efa 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/plotting/ColumnPlotter.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/plotting/ColumnPlotter.sdsstub @@ -46,7 +46,7 @@ class ColumnPlotter( */ @Pure fun histogram( - @PythonName("maximum_number_of_bins") const maxBinCount: Int = 10 + @PythonName("max_bin_count") const maxBinCount: Int = 10 ) -> plot: Image where { maxBinCount > 0 } diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/plotting/TablePlotter.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/plotting/TablePlotter.sdsstub index 1b205aafd..134263152 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/plotting/TablePlotter.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/plotting/TablePlotter.sdsstub @@ -62,7 +62,7 @@ class TablePlotter( */ @Pure fun histograms( - @PythonName("maximum_number_of_bins") const maxBinCount: Int = 10 + @PythonName("max_bin_count") const maxBinCount: Int = 10 ) -> plot: Image where { maxBinCount > 0 } diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/Discretizer.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/Discretizer.sdsstub index 407cd9e36..e81babbf0 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/Discretizer.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/Discretizer.sdsstub @@ -7,24 +7,26 @@ from safeds.data.tabular.transformation import TableTransformer * The Discretizer bins continuous data into intervals. * * @param binCount The number of bins to be created. + * @param columnNames The list of columns used to fit the transformer. If `None`, all numeric columns are used. * * @example * pipeline example { * val table = Table({"a": [1, 2, 3, 4]}); - * val discretizer = Discretizer(2).fit(table, ["a"]); + * val discretizer = Discretizer(2, columnNames = "a").fit(table); * val transformedTable = discretizer.transform(table); * // Table({"a": [0, 0, 1, 1]}) * } */ class Discretizer( - @PythonName("number_of_bins") const binCount: Int = 5 + @PythonName("bin_count") const binCount: Int = 5, + @PythonName("column_names") columnNames: union, String, Nothing?> = null ) sub TableTransformer where { binCount >= 2 } { /** * The number of bins to be created. */ - @PythonName("number_of_bins") attr binCount: Int + @PythonName("bin_count") attr binCount: Int /** * Learn a transformation for a set of columns in a table. @@ -32,14 +34,12 @@ class Discretizer( * This transformer is not modified. * * @param table The table used to fit the transformer. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. */ @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: Discretizer /** @@ -48,7 +48,6 @@ class Discretizer( * **Note:** Neither this transformer nor the given table are modified. 
* * @param table The table used to fit the transformer. The transformer is then applied to this table. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. * @result transformedTable The transformed table. @@ -56,7 +55,6 @@ class Discretizer( @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: Discretizer, transformedTable: Table) } diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/InvertibleTableTransformer.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/InvertibleTableTransformer.sdsstub index 98855d797..b1da833b1 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/InvertibleTableTransformer.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/InvertibleTableTransformer.sdsstub @@ -13,14 +13,12 @@ class InvertibleTableTransformer() sub TableTransformer { * **Note:** This transformer is not modified. * * @param table The table used to fit the transformer. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. */ @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: InvertibleTableTransformer /** @@ -29,7 +27,6 @@ class InvertibleTableTransformer() sub TableTransformer { * **Note:** Neither this transformer nor the given table are modified. * * @param table The table used to fit the transformer. The transformer is then applied to this table. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. * @result transformedTable The transformed table. @@ -37,8 +34,7 @@ class InvertibleTableTransformer() sub TableTransformer { @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: InvertibleTableTransformer, transformedTable: Table) /** diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/LabelEncoder.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/LabelEncoder.sdsstub index b7be4b73c..f6d7ffcff 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/LabelEncoder.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/LabelEncoder.sdsstub @@ -6,13 +6,14 @@ from safeds.data.tabular.transformation import InvertibleTableTransformer /** * The LabelEncoder encodes one or more given columns into labels. * + * @param columnNames The list of columns used to fit the transformer. If `None`, all non-numeric columns are used. * @param partialOrder The partial order of the labels. The labels are encoded in the order of the given list. Additional values are - * encoded as the next integer after the last value in the list in the order they appear in the data. + * assigned labels in the order they are encountered during fitting. 
* * @example * pipeline example { * val table = Table({"a": ["z", "y"], "b": [3, 4]}); - * val encoder = LabelEncoder().fit(table, ["a"]); + * val encoder = LabelEncoder(columnNames = "a").fit(table); * val transformedTable = encoder.transform(table); * // Table({"a": [1, 0], "b": [3, 4]}) * val originalTable = encoder.inverseTransform(transformedTable); @@ -20,22 +21,26 @@ from safeds.data.tabular.transformation import InvertibleTableTransformer * } */ class LabelEncoder( + @PythonName("column_names") columnNames: union, String, Nothing?> = null, @PythonName("partial_order") partialOrder: List = [] ) sub InvertibleTableTransformer { + /** + * The partial order of the labels. + */ + @PythonName("partial_order") attr partialOrder: List + /** * Learn a transformation for a set of columns in a table. * * This transformer is not modified. * * @param table The table used to fit the transformer. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all non-numeric columns are used. * * @result fittedTransformer The fitted transformer. */ @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: LabelEncoder /** @@ -44,7 +49,6 @@ class LabelEncoder( * **Note:** Neither this transformer nor the given table are modified. * * @param table The table used to fit the transformer. The transformer is then applied to this table. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. * @result transformedTable The transformed table. @@ -52,7 +56,6 @@ class LabelEncoder( @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: LabelEncoder, transformedTable: Table) } diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/OneHotEncoder.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/OneHotEncoder.sdsstub index b391b3d6d..329aef5f9 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/OneHotEncoder.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/OneHotEncoder.sdsstub @@ -29,12 +29,13 @@ from safeds.data.tabular.transformation import InvertibleTableTransformer * The name "one-hot" comes from the fact that each row has exactly one 1 in it, and the rest of the values are 0s. * One-hot encoding is closely related to dummy variable / indicator variables, which are used in statistics. * + * @param columnNames The list of columns used to fit the transformer. If `None`, all non-numeric columns are used. * @param separator The separator used to separate the original column name from the value in the new column names. 
* * @example * pipeline example { * val table = Table({"a": ["z", "y"], "b": [3, 4]}); - * val encoder = OneHotEncoder().fit(table, ["a"]); + * val encoder = OneHotEncoder(columnNames=["a"]).fit(table); * val transformedTable = encoder.transform(table); * // Table({"a__z": [1, 0], "a__y": [0, 1], "b": [3, 4]}) * val originalTable = encoder.inverseTransform(transformedTable); @@ -42,22 +43,26 @@ from safeds.data.tabular.transformation import InvertibleTableTransformer * } */ class OneHotEncoder( + @PythonName("column_names") columnNames: union, String, Nothing?> = null, separator: String = "__" ) sub InvertibleTableTransformer { + /** + * The separator used to separate the original column name from the value in the new column names. + */ + attr separator: String + /** * Learn a transformation for a set of columns in a table. * * This transformer is not modified. * * @param table The table used to fit the transformer. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. */ @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: OneHotEncoder /** @@ -66,7 +71,6 @@ class OneHotEncoder( * **Note:** Neither this transformer nor the given table are modified. * * @param table The table used to fit the transformer. The transformer is then applied to this table. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. * @result transformedTable The transformed table. @@ -74,7 +78,6 @@ class OneHotEncoder( @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: OneHotEncoder, transformedTable: Table) } diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/RangeScaler.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/RangeScaler.sdsstub index 1605535a6..d8b5b9cfc 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/RangeScaler.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/RangeScaler.sdsstub @@ -8,11 +8,12 @@ from safeds.data.tabular.transformation import InvertibleTableTransformer * * @param min The minimum of the new range after the transformation * @param max The maximum of the new range after the transformation + * @param columnNames The list of columns used to fit the transformer. If `None`, all numeric columns are used. * * @example * pipeline example { * val table = Table({"a": [1, 2, 3]}); - * val scaler = RangeScaler(0.0, 1.0).fit(table, ["a"]); + * val scaler = RangeScaler(0.0, 1.0, columnNames = "a").fit(table); * val transformedTable = scaler.transform(table); * // transformedTable = Table({"a": [0.0, 0.5, 1.0]}); * val originalTable = scaler.inverseTransform(transformedTable); @@ -20,8 +21,9 @@ from safeds.data.tabular.transformation import InvertibleTableTransformer * } */ class RangeScaler( - const min: Float = 0.0, - const max: Float = 1.0 + @PythonName("min_") const min: Float = 0.0, + @PythonName("max_") const max: Float = 1.0, + @PythonName("column_names") columnNames: union, String, Nothing?> = null ) sub InvertibleTableTransformer { /** * The minimum of the new range after the transformation. 
@@ -38,14 +40,12 @@ class RangeScaler( * This transformer is not modified. * * @param table The table used to fit the transformer. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all numeric columns are used. * * @result fittedTransformer The fitted transformer. */ @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: RangeScaler /** @@ -54,7 +54,6 @@ class RangeScaler( * **Note:** Neither this transformer nor the given table are modified. * * @param table The table used to fit the transformer. The transformer is then applied to this table. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. * @result transformedTable The transformed table. @@ -62,7 +61,6 @@ class RangeScaler( @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: RangeScaler, transformedTable: Table) } diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/SimpleImputer.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/SimpleImputer.sdsstub index 717162347..3ead67ec6 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/SimpleImputer.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/SimpleImputer.sdsstub @@ -7,11 +7,13 @@ from safeds.data.tabular.transformation import TableTransformer * Replace missing values with the given strategy. * * @param strategy The strategy used to impute missing values. + * @param columnNames The list of columns used to fit the transformer. If `None`, all columns are used. + * @param valueToReplace The value that should be replaced. * * @example * pipeline example { * val table = Table({"a": [1, null], "b": [3, 4]}); - * val imputer = SimpleImputer(SimpleImputer.Strategy.Mean).fit(table, ["a"]); + * val imputer = SimpleImputer(SimpleImputer.Strategy.Mean, columnNames = "a").fit(table); * val transformedTable = imputer.transform(table); * // Table({"a": [1, 1], "b": [3, 4]}) * } @@ -19,13 +21,14 @@ from safeds.data.tabular.transformation import TableTransformer * @example * pipeline example { * val table = Table({"a": [1, null], "b": [3, 4]}); - * val imputer = SimpleImputer(SimpleImputer.Strategy.Constant(0)).fit(table, ["a"]); + * val imputer = SimpleImputer(SimpleImputer.Strategy.Constant(0), columnNames = "a").fit(table); * val transformedTable = imputer.transform(table); * // Table({"a": [1, 0], "b": [3, 4]}) * } */ class SimpleImputer( strategy: SimpleImputer.Strategy, + @PythonName("column_names") columnNames: union, String, Nothing?> = null, @PythonName("value_to_replace") valueToReplace: union = null ) sub TableTransformer { /** @@ -74,14 +77,12 @@ class SimpleImputer( * This transformer is not modified. * * @param table The table used to fit the transformer. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. */ @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: SimpleImputer /** @@ -90,7 +91,6 @@ class SimpleImputer( * **Note:** Neither this transformer nor the given table are modified. 
* * @param table The table used to fit the transformer. The transformer is then applied to this table. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. * @result transformedTable The transformed table. @@ -98,7 +98,6 @@ class SimpleImputer( @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: SimpleImputer, transformedTable: Table) } diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/StandardScaler.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/StandardScaler.sdsstub index 4d0c09127..3c2ace5bb 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/StandardScaler.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/StandardScaler.sdsstub @@ -6,31 +6,33 @@ from safeds.data.tabular.transformation import InvertibleTableTransformer /** * The StandardScaler transforms column values to a range by removing the mean and scaling to unit variance. * + * @param columnNames The list of columns used to fit the transformer. If `None`, all numeric columns are used. + * * @example * pipeline example { * val table = Table({"a": [0, 1, 0]}); - * val scaler = StandardScaler().fit(table, ["a"]); + * val scaler = StandardScaler(columnNames = "a").fit(table); * val transformedTable = scaler.transform(table); * // transformedTable = Table({"a": [-0.707, 1.414, -0.707]}); * val originalTable = scaler.inverseTransform(transformedTable); * // originalTable = Table({"a": [1, 2, 3]}); * } */ -class StandardScaler() sub InvertibleTableTransformer { +class StandardScaler( + @PythonName("column_names") columnNames: union, String, Nothing?> = null +) sub InvertibleTableTransformer { /** * Learn a transformation for a set of columns in a table. * * This transformer is not modified. * * @param table The table used to fit the transformer. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. */ @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: StandardScaler /** @@ -39,7 +41,6 @@ class StandardScaler() sub InvertibleTableTransformer { * **Note:** Neither this transformer nor the given table are modified. * * @param table The table used to fit the transformer. The transformer is then applied to this table. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. * @result transformedTable The transformed table. @@ -47,7 +48,6 @@ class StandardScaler() sub InvertibleTableTransformer { @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? 
= null + table: Table ) -> (fittedTransformer: StandardScaler, transformedTable: Table) } diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/TableTransformer.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/TableTransformer.sdsstub index 016b9e42c..4994104ae 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/TableTransformer.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/data/tabular/transformation/TableTransformer.sdsstub @@ -4,6 +4,8 @@ from safeds.data.tabular.containers import Table /** * Learn a transformation for a set of columns in a `Table` and transform another `Table` with the same columns. + * + * @param columnNames The list of columns used to fit the transformer. If `None`, all suitable columns are used. */ class TableTransformer { /** @@ -17,14 +19,12 @@ class TableTransformer { * **Note:** This transformer is not modified. * * @param table The table used to fit the transformer. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. */ @Pure fun fit( - table: Table, - @PythonName("column_names") columnNames: List? + table: Table ) -> fittedTransformer: TableTransformer /** @@ -47,7 +47,6 @@ class TableTransformer { * **Note:** Neither this transformer nor the given table are modified. * * @param table The table used to fit the transformer. The transformer is then applied to this table. - * @param columnNames The list of columns from the table used to fit the transformer. If `null`, all columns are used. * * @result fittedTransformer The fitted transformer. * @result transformedTable The transformed table. @@ -55,7 +54,6 @@ class TableTransformer { @Pure @PythonName("fit_and_transform") fun fitAndTransform( - table: Table, - @PythonName("column_names") columnNames: List? = null + table: Table ) -> (fittedTransformer: TableTransformer, transformedTable: Table) } diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/AdaBoostClassifier.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/AdaBoostClassifier.sdsstub index 563e8c37d..723b19fca 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/AdaBoostClassifier.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/AdaBoostClassifier.sdsstub @@ -23,22 +23,22 @@ from safeds.ml.classical.classification import Classifier */ class AdaBoostClassifier( learner: Classifier = DecisionTreeClassifier(), - @PythonName("maximum_number_of_learners") const maxLearnerCount: Int = 50, + @PythonName("max_learner_count") const maxLearnerCount: Int = 50, @PythonName("learning_rate") const learningRate: Float = 1.0 ) sub Classifier where { maxLearnerCount >= 1, learningRate > 0.0 } { /** - * Get the base learner used for training the ensemble. + * The base learner used for training the ensemble. */ attr learner: Classifier /** - * Get the maximum number of learners in the ensemble. + * The maximum number of learners in the ensemble. */ - @PythonName("maximum_number_of_learners") attr maxLearnerCount: Int + @PythonName("max_learner_count") attr maxLearnerCount: Int /** - * Get the learning rate. + * The learning rate. 
*/ @PythonName("learning_rate") attr learningRate: Float diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/DecisionTreeClassifier.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/DecisionTreeClassifier.sdsstub index 8a28ae788..d1e814ca2 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/DecisionTreeClassifier.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/DecisionTreeClassifier.sdsstub @@ -18,19 +18,19 @@ from safeds.ml.classical.classification import Classifier * } */ class DecisionTreeClassifier( - @PythonName("maximum_depth") maxDepth: Int? = null, - @PythonName("minimum_number_of_samples_in_leaves") const minSampleCountInLeaves: Int = 1 + @PythonName("max_depth") maxDepth: Int? = null, + @PythonName("min_sample_count_in_leaves") const minSampleCountInLeaves: Int = 1 ) sub Classifier where { minSampleCountInLeaves > 0 } { /** * The maximum depth of the tree. */ - @PythonName("maximum_depth") attr maxDepth: Int? + @PythonName("max_depth") attr maxDepth: Int? /** * The minimum number of samples that must remain in the leaves of the tree. */ - @PythonName("minimum_number_of_samples_in_leaves") attr minSampleCountInLeaves: Int + @PythonName("min_sample_count_in_leaves") attr minSampleCountInLeaves: Int /** * Create a copy of this classifier and fit it with the given training data. diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/GradientBoostingClassifier.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/GradientBoostingClassifier.sdsstub index 488aa073a..cdddca560 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/GradientBoostingClassifier.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/GradientBoostingClassifier.sdsstub @@ -21,18 +21,18 @@ from safeds.ml.classical.classification import Classifier * } */ class GradientBoostingClassifier( - @PythonName("number_of_trees") const treeCount: Int = 100, + @PythonName("tree_count") const treeCount: Int = 100, @PythonName("learning_rate") const learningRate: Float = 0.1 ) sub Classifier where { treeCount >= 1, learningRate > 0.0 } { /** - * Get the number of trees (estimators) in the ensemble. + * The number of trees (estimators) in the ensemble. */ - @PythonName("number_of_trees") attr treeCount: Int + @PythonName("tree_count") attr treeCount: Int /** - * Get the learning rate. + * The learning rate. 
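A short sketch of the renamed classifier hyperparameters from the hunks above (`maxLearnerCount`, `maxDepth`, `minSampleCountInLeaves`, `learningRate`); the concrete values are only illustrative.

```sds
pipeline renamedClassifierParameters {
    val base = DecisionTreeClassifier(maxDepth = 5, minSampleCountInLeaves = 2);
    val ensemble = AdaBoostClassifier(learner = base, maxLearnerCount = 100, learningRate = 0.5);
    // The matching attributes were renamed as well.
    val learnerCount = ensemble.maxLearnerCount; // 100
}
```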
*/ @PythonName("learning_rate") attr learningRate: Float diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/KNearestNeighborsClassifier.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/KNearestNeighborsClassifier.sdsstub index d1e30afa8..28e4d396f 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/KNearestNeighborsClassifier.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/KNearestNeighborsClassifier.sdsstub @@ -19,14 +19,14 @@ from safeds.ml.classical.classification import Classifier * } */ class KNearestNeighborsClassifier( - @PythonName("number_of_neighbors") const neighborCount: Int + @PythonName("neighbor_count") const neighborCount: Int ) sub Classifier where { neighborCount >= 1 } { /** - * Get the number of neighbors used for interpolation. + * The number of neighbors used for interpolation. */ - @PythonName("number_of_neighbors") attr neighborCount: Int + @PythonName("neighbor_count") attr neighborCount: Int /** * Create a copy of this classifier and fit it with the given training data. diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/RandomForestClassifier.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/RandomForestClassifier.sdsstub index 3cbf54484..86223cd0c 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/RandomForestClassifier.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/RandomForestClassifier.sdsstub @@ -20,25 +20,25 @@ from safeds.ml.classical.classification import Classifier * } */ class RandomForestClassifier( - @PythonName("number_of_trees") const treeCount: Int = 100, - @PythonName("maximum_depth") maxDepth: Int? = null, - @PythonName("minimum_number_of_samples_in_leaves") const minSampleCountInLeaves: Int = 1, + @PythonName("tree_count") const treeCount: Int = 100, + @PythonName("max_depth") maxDepth: Int? = null, + @PythonName("min_sample_count_in_leaves") const minSampleCountInLeaves: Int = 1, ) sub Classifier where { treeCount > 0, minSampleCountInLeaves > 0, } { /** - * Get the number of trees used in the random forest. + * The number of trees used in the random forest. */ - @PythonName("number_of_trees") attr treeCount: Int + @PythonName("tree_count") attr treeCount: Int /** * The maximum depth of each tree. */ - @PythonName("maximum_depth") attr maxDepth: Int? + @PythonName("max_depth") attr maxDepth: Int? /** * The minimum number of samples that must remain in the leaves of each tree. */ - @PythonName("minimum_number_of_samples_in_leaves") attr minSampleCountInLeaves: Int + @PythonName("min_sample_count_in_leaves") attr minSampleCountInLeaves: Int /** * Create a copy of this classifier and fit it with the given training data. diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/SupportVectorClassifier.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/SupportVectorClassifier.sdsstub index e73a14f34..e10673760 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/SupportVectorClassifier.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/classification/SupportVectorClassifier.sdsstub @@ -60,11 +60,11 @@ class SupportVectorClassifier( } /** - * Get the regularization strength. 
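Likewise for the renamed `RandomForestClassifier` and `KNearestNeighborsClassifier` parameters; a minimal sketch with illustrative values.

```sds
pipeline renamedEnsembleParameters {
    val forest = RandomForestClassifier(treeCount = 200, maxDepth = 10, minSampleCountInLeaves = 5);
    val knn = KNearestNeighborsClassifier(neighborCount = 3);
    val trees = forest.treeCount;      // 200
    val neighbors = knn.neighborCount; // 3
}
```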
+ * The regularization strength. */ attr c: Float /** - * Get the type of kernel used. + * The type of kernel used. */ attr kernel: SupportVectorClassifier.Kernel diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/AdaBoostRegressor.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/AdaBoostRegressor.sdsstub index 733ff04da..43ab8dfee 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/AdaBoostRegressor.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/AdaBoostRegressor.sdsstub @@ -23,22 +23,22 @@ from safeds.ml.classical.regression import Regressor */ class AdaBoostRegressor( learner: Regressor = DecisionTreeRegressor(), - @PythonName("maximum_number_of_learners") const maxLearnerCount: Int = 50, + @PythonName("max_learner_count") const maxLearnerCount: Int = 50, @PythonName("learning_rate") const learningRate: Float = 1.0 ) sub Regressor where { maxLearnerCount >= 1, learningRate > 0.0 } { /** - * Get the base learner used for training the ensemble. + * The base learner used for training the ensemble. */ attr learner: Regressor /** - * Get the maximum number of learners in the ensemble. + * The maximum number of learners in the ensemble. */ - @PythonName("maximum_number_of_learners") attr maxLearnerCount: Int + @PythonName("max_learner_count") attr maxLearnerCount: Int /** - * Get the learning rate. + * The learning rate. */ @PythonName("learning_rate") attr learningRate: Float diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/DecisionTreeRegressor.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/DecisionTreeRegressor.sdsstub index 65c1009b7..e60d8d637 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/DecisionTreeRegressor.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/DecisionTreeRegressor.sdsstub @@ -19,19 +19,19 @@ from safeds.ml.classical.regression import Regressor * } */ class DecisionTreeRegressor( - @PythonName("maximum_depth") maxDepth: Int? = null, - @PythonName("minimum_number_of_samples_in_leaves") const minSampleCountInLeaves: Int = 1 + @PythonName("max_depth") maxDepth: Int? = null, + @PythonName("min_sample_count_in_leaves") const minSampleCountInLeaves: Int = 1 ) sub Regressor where { minSampleCountInLeaves > 0 } { /** * The maximum depth of the tree. */ - @PythonName("maximum_depth") attr maxDepth: Int? + @PythonName("max_depth") attr maxDepth: Int? /** * The minimum number of samples that must remain in the leaves of the tree. */ - @PythonName("minimum_number_of_samples_in_leaves") attr minSampleCountInLeaves: Int + @PythonName("min_sample_count_in_leaves") attr minSampleCountInLeaves: Int /** * Create a copy of this regressor and fit it with the given training data. diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/ElasticNetRegressor.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/ElasticNetRegressor.sdsstub index b92bad02f..b44dd19ad 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/ElasticNetRegressor.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/ElasticNetRegressor.sdsstub @@ -28,11 +28,11 @@ class ElasticNetRegressor( lassoRatio <= 1.0 } { /** - * Get the regularization of the model. 
+ * The regularization of the model. */ attr alpha: Float /** - * Get the ratio between Lasso and Ridge regularization. + * The ratio between Lasso and Ridge regularization. */ @PythonName("lasso_ratio") attr lassoRatio: Float diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/GradientBoostingRegressor.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/GradientBoostingRegressor.sdsstub index 458a2d390..33593c76f 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/GradientBoostingRegressor.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/GradientBoostingRegressor.sdsstub @@ -21,18 +21,18 @@ from safeds.ml.classical.regression import Regressor * } */ class GradientBoostingRegressor( - @PythonName("number_of_trees") const treeCount: Int = 100, + @PythonName("tree_count") const treeCount: Int = 100, @PythonName("learning_rate") const learningRate: Float = 0.1 ) sub Regressor where { treeCount >= 1, learningRate > 0.0 } { /** - * Get the number of trees (estimators) in the ensemble. + * The number of trees (estimators) in the ensemble. */ - @PythonName("number_of_trees") attr treeCount: Int + @PythonName("tree_count") attr treeCount: Int /** - * Get the learning rate. + * The learning rate. */ @PythonName("learning_rate") attr learningRate: Float diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/KNearestNeighborsRegressor.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/KNearestNeighborsRegressor.sdsstub index 6f9e1d4bb..120a1df0f 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/KNearestNeighborsRegressor.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/KNearestNeighborsRegressor.sdsstub @@ -19,14 +19,14 @@ from safeds.ml.classical.regression import Regressor * } */ class KNearestNeighborsRegressor( - @PythonName("number_of_neighbors") const neighborCount: Int + @PythonName("neighbor_count") const neighborCount: Int ) sub Regressor where { neighborCount >= 1 } { /** - * Get the number of neighbors used for interpolation. + * The number of neighbors used for interpolation. */ - @PythonName("number_of_neighbors") attr neighborCount: Int + @PythonName("neighbor_count") attr neighborCount: Int /** * Create a copy of this regressor and fit it with the given training data. diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/LassoRegressor.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/LassoRegressor.sdsstub index 1bbfdbcde..702900e9f 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/LassoRegressor.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/LassoRegressor.sdsstub @@ -23,7 +23,7 @@ class LassoRegressor( alpha >= 0.0 } { /** - * Get the regularization of the model. + * The regularization of the model. 
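The regressors follow the same renaming pattern as the classifiers; one compact illustration with placeholder values.

```sds
pipeline renamedRegressorParameters {
    val boosted = GradientBoostingRegressor(treeCount = 150, learningRate = 0.05);
    val trees = boosted.treeCount; // 150
}
```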
*/ attr alpha: Float diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/RandomForestRegressor.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/RandomForestRegressor.sdsstub index bf0ab2e31..13ddfc1b0 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/RandomForestRegressor.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/RandomForestRegressor.sdsstub @@ -20,25 +20,25 @@ from safeds.ml.classical.regression import Regressor * } */ class RandomForestRegressor( - @PythonName("number_of_trees") const treeCount: Int = 100, - @PythonName("maximum_depth") maxDepth: Int? = null, - @PythonName("minimum_number_of_samples_in_leaves") const minSampleCountInLeaves: Int = 1, + @PythonName("tree_count") const treeCount: Int = 100, + @PythonName("max_depth") maxDepth: Int? = null, + @PythonName("min_sample_count_in_leaves") const minSampleCountInLeaves: Int = 1, ) sub Regressor where { treeCount > 0, minSampleCountInLeaves > 0, } { /** - * Get the number of trees used in the random forest. + * The number of trees used in the random forest. */ - @PythonName("number_of_trees") attr treeCount: Int + @PythonName("tree_count") attr treeCount: Int /** * The maximum depth of each tree. */ - @PythonName("maximum_depth") attr maxDepth: Int? + @PythonName("max_depth") attr maxDepth: Int? /** * The minimum number of samples that must remain in the leaves of each tree. */ - @PythonName("minimum_number_of_samples_in_leaves") attr minSampleCountInLeaves: Int + @PythonName("min_sample_count_in_leaves") attr minSampleCountInLeaves: Int /** * Create a copy of this regressor and fit it with the given training data. diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/Regressor.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/Regressor.sdsstub index 0c18cc8a8..f449fbaf9 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/Regressor.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/Regressor.sdsstub @@ -25,6 +25,8 @@ class Regressor sub SupervisedModel { /** * Summarize the regressor's metrics on the given data. * + * **Note:** The model must be fitted. + * * @param validationOrTestSet The validation or test set. * * @result metrics A table containing the regressor's metrics. @@ -51,7 +53,10 @@ class Regressor sub SupervisedModel { * | 0.0 | The model is as good as predicting the mean of the target values. Try something else. | * | (-∞, 0.0) | The model is worse than predicting the mean of the target values. Something is very wrong. | * - * **Note:** Some other libraries call this metric `r2_score`. + * **Notes:** + * + * - The model must be fitted. + * - Some other libraries call this metric `r2_score`. * * @param validationOrTestSet The validation or test set. * @@ -70,6 +75,9 @@ class Regressor sub SupervisedModel { * values. The **lower** the mean absolute error, the better the regressor. Results range from 0.0 to positive * infinity. * + * + * **Note:** The model must be fitted. + * * @param validationOrTestSet The validation or test set. * * @result meanAbsoluteError The mean absolute error of the regressor. @@ -91,6 +99,9 @@ class Regressor sub SupervisedModel { * This metric is useful for time series data, where the order of the target values has a meaning. It is not useful * for other types of data. 
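To illustrate the repeated "the model must be fitted" notes, here is a hedged sketch of the intended call order. Only the renamed `treeCount` parameter comes from this diff; the `toTabularDataset` helper and the positional metric call are assumptions.

```sds
pipeline evaluateFittedRegressor {
    val data = Table({"x": [1, 2, 3, 4], "y": [2, 4, 6, 8]});
    // Assumption: toTabularDataset marks "y" as the target column.
    val dataset = data.toTabularDataset("y");
    // Metrics may only be computed on the fitted copy returned by fit.
    val fitted = RandomForestRegressor(treeCount = 50).fit(dataset);
    val mae = fitted.meanAbsoluteError(dataset);
}
```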
Because of this, it is not included in the `summarize_metrics` method. * + * + * **Note:** The model must be fitted. + * * @param validationOrTestSet The validation or test set. * * @result meanDirectionalAccuracy The mean directional accuracy of the regressor. @@ -108,7 +119,10 @@ class Regressor sub SupervisedModel { * values. The **lower** the mean squared error, the better the regressor. Results range from 0.0 to positive * infinity. * - * **Note:** To get the root mean squared error (RMSE), take the square root of the result. + * **Notes:** + * + * - The model must be fitted. + * - To get the root mean squared error (RMSE), take the square root of the result. * * @param validationOrTestSet The validation or test set. * @@ -127,6 +141,9 @@ class Regressor sub SupervisedModel { * target values. The **lower** the median absolute deviation, the better the regressor. Results range from 0.0 to * positive infinity. * + * + * **Note:** The model must be fitted. + * * @param validationOrTestSet The validation or test set. * * @result medianAbsoluteDeviation The median absolute deviation of the regressor. diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/RidgeRegressor.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/RidgeRegressor.sdsstub index f91095e75..5a92ca77f 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/RidgeRegressor.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/RidgeRegressor.sdsstub @@ -23,7 +23,7 @@ class RidgeRegressor( alpha >= 0.0 } { /** - * Get the regularization of the model. + * The regularization of the model. */ attr alpha: Float diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/SupportVectorRegressor.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/SupportVectorRegressor.sdsstub index ba248f647..b87c347bb 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/SupportVectorRegressor.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/classical/regression/SupportVectorRegressor.sdsstub @@ -60,11 +60,11 @@ class SupportVectorRegressor( } /** - * Get the regularization strength. + * The regularization strength. */ attr c: Float /** - * Get the type of kernel used. + * The type of kernel used. */ attr kernel: SupportVectorRegressor.Kernel diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/NeuralNetworkClassifier.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/NeuralNetworkClassifier.sdsstub index 06bd998fb..b9e56d405 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/NeuralNetworkClassifier.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/NeuralNetworkClassifier.sdsstub @@ -9,19 +9,33 @@ from safeds.ml.nn.layers import Layer * * @param inputConversion to convert the input data for the neural network * @param layers a list of layers for the neural network to learn - * @param outputConversion to convert the output data of the neural network back + * + * @typeParam D The type of the full dataset. It's the input to `fit` and the output of `predict`. + * @typeParam F The type of the features. It's the input to `predict`. 
*/ @Experimental -class NeuralNetworkClassifier( - @PythonName("input_conversion") inputConversion: InputConversion, +class NeuralNetworkClassifier( + @PythonName("input_conversion") inputConversion: InputConversion, layers: List, - @PythonName("output_conversion") outputConversion: OutputConversion ) { /** * Whether the classifier is fitted. */ @PythonName("is_fitted") attr isFitted: Boolean + /** + * Load a pretrained model from a [Huggingface repository](https://huggingface.co/models/). + * + * @param huggingfaceRepo the name of the huggingface repository + * + * @result pretrainedModel the pretrained model as a NeuralNetworkClassifier + */ + @Pure + @PythonName("load_pretrained_model") + static fun loadPretrainedModel( + @PythonName("huggingface_repo") huggingfaceRepo: String + ) -> pretrainedModel: NeuralNetworkClassifier + /** * Train the neural network with given training data. * @@ -31,8 +45,10 @@ class NeuralNetworkClassifier( * @param epochSize The number of times the training cycle should be done. * @param batchSize The size of data batches that should be loaded at one time. * @param learningRate The learning rate of the neural network. - * @param callbackOnBatchCompletion Function used to view metrics while training. Gets called after a batch is completed with the index of the last batch and the overall loss average. - * @param callbackOnEpochCompletion Function used to view metrics while training. Gets called after an epoch is completed with the index of the last epoch and the overall loss average. + * @param callbackOnBatchCompletion Function used to view metrics while training. Gets called after a batch is completed with the index of the + * last batch and the overall loss average. + * @param callbackOnEpochCompletion Function used to view metrics while training. Gets called after an epoch is completed with the index of the + * last epoch and the overall loss average. 
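The new static `loadPretrainedModel` could be exercised roughly as below; the repository name is only a placeholder, and which model types a repository may contain (and whether type arguments must be given explicitly) is not specified in this diff.

```sds
pipeline loadPretrainedClassifier {
    // "some-org/some-image-classifier" is a placeholder, not a real repository.
    val pretrained = NeuralNetworkClassifier.loadPretrainedModel("some-org/some-image-classifier");
    val ready = pretrained.isFitted;
}
```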
* * @result fittedClassifier The trained Model * @@ -43,13 +59,13 @@ class NeuralNetworkClassifier( */ @Pure fun fit( - @PythonName("train_data") trainData: FitIn, + @PythonName("train_data") trainData: D, @PythonName("epoch_size") const epochSize: Int = 25, @PythonName("batch_size") const batchSize: Int = 1, @PythonName("learning_rate") learningRate: Float = 0.001, @PythonName("callback_on_batch_completion") callbackOnBatchCompletion: (param1: Int, param2: Float) -> () = (param1, param2) {}, @PythonName("callback_on_epoch_completion") callbackOnEpochCompletion: (param1: Int, param2: Float) -> () = (param1, param2) {} - ) -> fittedClassifier: NeuralNetworkClassifier where { + ) -> fittedClassifier: NeuralNetworkClassifier where { epochSize >= 1, batchSize >= 1 } @@ -70,6 +86,6 @@ class NeuralNetworkClassifier( */ @Pure fun predict( - @PythonName("test_data") testData: PredictIn - ) -> prediction: PredictOut + @PythonName("test_data") testData: F + ) -> prediction: D } diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/NeuralNetworkRegressor.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/NeuralNetworkRegressor.sdsstub index bc75417ad..92150be97 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/NeuralNetworkRegressor.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/NeuralNetworkRegressor.sdsstub @@ -9,19 +9,33 @@ from safeds.ml.nn.layers import Layer * * @param inputConversion to convert the input data for the neural network * @param layers a list of layers for the neural network to learn - * @param outputConversion to convert the output data of the neural network back + * + * @typeParam D The type of the full dataset. It's the input to `fit` and the output of `predict`. + * @typeParam F The type of the features. It's the input to `predict`. */ @Experimental -class NeuralNetworkRegressor( - @PythonName("input_conversion") inputConversion: InputConversion, - layers: List, - @PythonName("output_conversion") outputConversion: OutputConversion +class NeuralNetworkRegressor( + @PythonName("input_conversion") inputConversion: InputConversion, + layers: List ) { /** * Whether the regressor is fitted. */ @PythonName("is_fitted") attr isFitted: Boolean + /** + * Load a pretrained model from a [Huggingface repository](https://huggingface.co/models/). + * + * @param huggingfaceRepo the name of the huggingface repository + * + * @result pretrainedModel the pretrained model as a NeuralNetworkRegressor + */ + @Pure + @PythonName("load_pretrained_model") + static fun loadPretrainedModel( + @PythonName("huggingface_repo") huggingfaceRepo: String + ) -> pretrainedModel: NeuralNetworkRegressor + /** * Train the neural network with given training data. * @@ -31,8 +45,10 @@ class NeuralNetworkRegressor( * @param epochSize The number of times the training cycle should be done. * @param batchSize The size of data batches that should be loaded at one time. * @param learningRate The learning rate of the neural network. - * @param callbackOnBatchCompletion Function used to view metrics while training. Gets called after a batch is completed with the index of the last batch and the overall loss average. - * @param callbackOnEpochCompletion Function used to view metrics while training. Gets called after an epoch is completed with the index of the last epoch and the overall loss average. + * @param callbackOnBatchCompletion Function used to view metrics while training. 
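A hedged end-to-end sketch of the reworked `NeuralNetworkClassifier`: only the input conversion and the layers are passed, and `fit`/`predict` work on the dataset and feature types of that conversion. The `toTabularDataset` helper is an assumption; `InputConversionTable` (with its new `predictionName` parameter) and the renamed `ForwardLayer` parameter appear further down in this diff.

```sds
pipeline tabularClassifier {
    val training = Table({"a": [1, 2, 3, 4], "b": [0, 1, 0, 1]});
    // No OutputConversion is passed anymore.
    val classifier = NeuralNetworkClassifier(
        InputConversionTable(),
        [ForwardLayer(neuronCount = 8), ForwardLayer(neuronCount = 1)]
    );
    // Assumption: toTabularDataset marks "b" as the target column.
    val fitted = classifier.fit(training.toTabularDataset("b"), epochSize = 10, batchSize = 2);
    // predict takes plain features (here: a Table) and returns a full dataset.
    val predictions = fitted.predict(Table({"a": [5, 6]}));
}
```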
Gets called after a batch is completed with the index of the + * last batch and the overall loss average. + * @param callbackOnEpochCompletion Function used to view metrics while training. Gets called after an epoch is completed with the index of the + * last epoch and the overall loss average. * * @result fittedRegressor The trained Model * @@ -43,13 +59,13 @@ class NeuralNetworkRegressor( */ @Pure fun fit( - @PythonName("train_data") trainData: FitIn, + @PythonName("train_data") trainData: D, @PythonName("epoch_size") const epochSize: Int = 25, @PythonName("batch_size") const batchSize: Int = 1, @PythonName("learning_rate") learningRate: Float = 0.001, @PythonName("callback_on_batch_completion") callbackOnBatchCompletion: (param1: Int, param2: Float) -> () = (param1, param2) {}, @PythonName("callback_on_epoch_completion") callbackOnEpochCompletion: (param1: Int, param2: Float) -> () = (param1, param2) {} - ) -> fittedRegressor: NeuralNetworkRegressor where { + ) -> fittedRegressor: NeuralNetworkRegressor where { epochSize >= 1, batchSize >= 1 } @@ -70,6 +86,6 @@ class NeuralNetworkRegressor( */ @Pure fun predict( - @PythonName("test_data") testData: PredictIn - ) -> prediction: PredictOut + @PythonName("test_data") testData: F + ) -> prediction: D } diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversion.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversion.sdsstub index ef32c7d98..1f416008f 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversion.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversion.sdsstub @@ -2,6 +2,9 @@ package safeds.ml.nn.converters /** * The input conversion for a neural network, defines the input parameters for the neural network. + * + * @typeParam D The type of the full dataset. It's the input to `fit` and the output of `predict`. + * @typeParam F The type of the features. It's the input to `predict`. */ @Experimental -class InputConversion +class InputConversion diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionImageToColumn.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionImageToColumn.sdsstub new file mode 100644 index 000000000..68c17a0ae --- /dev/null +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionImageToColumn.sdsstub @@ -0,0 +1,12 @@ +package safeds.ml.nn.converters + +from safeds.ml.nn.typing import ModelImageSize + +/** + * The input conversion for a neural network, defines the input parameters for the neural network. + * + * @param imageSize the size of the input images + */ +class InputConversionImageToColumn( + @PythonName("image_size") imageSize: ModelImageSize +) sub InputConversion, ImageList> diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionImageToImage.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionImageToImage.sdsstub new file mode 100644 index 000000000..6947ab146 --- /dev/null +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionImageToImage.sdsstub @@ -0,0 +1,12 @@ +package safeds.ml.nn.converters + +from safeds.ml.nn.typing import ModelImageSize + +/** + * The input conversion for a neural network, defines the input parameters for the neural network. 
+ * + * @param imageSize the size of the input images + */ +class InputConversionImageToImage( + @PythonName("image_size") imageSize: ModelImageSize +) sub InputConversion, ImageList> diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionImage.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionImageToTable.sdsstub similarity index 50% rename from packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionImage.sdsstub rename to packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionImageToTable.sdsstub index 5a6c49a87..1ea35051d 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionImage.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionImageToTable.sdsstub @@ -1,13 +1,12 @@ package safeds.ml.nn.converters -from safeds.data.image.typing import ImageSize +from safeds.ml.nn.typing import ModelImageSize /** * The input conversion for a neural network, defines the input parameters for the neural network. * * @param imageSize the size of the input images */ -@Experimental -class InputConversionImage( - @PythonName("image_size") imageSize: ImageSize -) +class InputConversionImageToTable( + @PythonName("image_size") imageSize: ModelImageSize +) sub InputConversion, ImageList> diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionTable.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionTable.sdsstub index 18ab8c40d..1dc5a4f44 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionTable.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionTable.sdsstub @@ -3,8 +3,9 @@ package safeds.ml.nn.converters /** * The input conversion for a neural network defines the input parameters for the neural network. * - * @param featureNames The names of the features for the input table, used as features for the training. - * @param targetName The name of the target for the input table, used as target for the training. + * @param predictionName The name of the new column where the prediction will be stored. */ @Experimental -class InputConversionTable() sub InputConversion +class InputConversionTable( + @PythonName("prediction_name") predictionName: String = "prediction" +) sub InputConversion diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionTimeSeries.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionTimeSeries.sdsstub index ed9430f24..41f08b247 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionTimeSeries.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/InputConversionTimeSeries.sdsstub @@ -3,11 +3,9 @@ package safeds.ml.nn.converters /** * The input conversion for a neural network, defines the input parameters for the neural network. * - * @param windowSize The size of the created windows - * @param forecastHorizon The forecast horizon defines the future lag of the predicted values + * @param predictionName The name of the new column where the prediction will be stored. 
*/ @Experimental class InputConversionTimeSeries( - @PythonName("window_size") windowSize: Int, - @PythonName("forecast_horizon") forecastHorizon: Int -) + @PythonName("prediction_name") predictionName: String = "prediction_nn" +) sub InputConversion diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/OutputConversion.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/OutputConversion.sdsstub deleted file mode 100644 index 1f0c58aa3..000000000 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/OutputConversion.sdsstub +++ /dev/null @@ -1,7 +0,0 @@ -package safeds.ml.nn.converters - -/** - * The output conversion for a neural network, defines the output parameters for the neural network. - */ -@Experimental -class OutputConversion diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/OutputConversionImageToColumn.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/OutputConversionImageToColumn.sdsstub deleted file mode 100644 index 01c61d891..000000000 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/OutputConversionImageToColumn.sdsstub +++ /dev/null @@ -1,4 +0,0 @@ -package safeds.ml.nn.converters - -@Experimental -class OutputConversionImageToColumn() sub OutputConversion diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/OutputConversionImageToImage.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/OutputConversionImageToImage.sdsstub deleted file mode 100644 index f032f2b4e..000000000 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/OutputConversionImageToImage.sdsstub +++ /dev/null @@ -1,4 +0,0 @@ -package safeds.ml.nn.converters - -@Experimental -class OutputConversionImageToImage() sub OutputConversion diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/OutputConversionImageToTable.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/OutputConversionImageToTable.sdsstub deleted file mode 100644 index 94869f212..000000000 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/OutputConversionImageToTable.sdsstub +++ /dev/null @@ -1,4 +0,0 @@ -package safeds.ml.nn.converters - -@Experimental -class OutputConversionImageToTable() sub OutputConversion diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/OutputConversionTable.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/OutputConversionTable.sdsstub deleted file mode 100644 index ba0f3f607..000000000 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/OutputConversionTable.sdsstub +++ /dev/null @@ -1,11 +0,0 @@ -package safeds.ml.nn.converters - -/** - * The output conversion for a neural network defines the output parameters for the neural network. - * - * @param predictionName The name of the new column where the prediction will be stored. 
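Since `InputConversionTimeSeries` now only configures the prediction column name (the former `windowSize`/`forecastHorizon` parameters are gone from the converter, and where they moved is not shown here), a minimal hedged sketch of wiring it into the generic `NeuralNetworkRegressor`; the renamed `neuronCount` parameter of `LSTMLayer` and `ForwardLayer` appears further down in this diff.

```sds
pipeline forecastingModel {
    val regressor = NeuralNetworkRegressor(
        InputConversionTimeSeries(predictionName = "forecast"),
        [LSTMLayer(neuronCount = 64), ForwardLayer(neuronCount = 1)]
    );
}
```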
- */ -@Experimental -class OutputConversionTable( - @PythonName("prediction_name") predictionName: String = "prediction" -) sub OutputConversion diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/OutputConversionTimeSeries.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/OutputConversionTimeSeries.sdsstub deleted file mode 100644 index 1d0f47133..000000000 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/converters/OutputConversionTimeSeries.sdsstub +++ /dev/null @@ -1,11 +0,0 @@ -package safeds.ml.nn.converters - -/** - * The output conversion for a neural network, defines the output parameters for the neural network. - * - * @param predictionName The name of the new column where the prediction will be stored. - */ -@Experimental -class OutputConversionTimeSeries( - @PythonName("prediction_name") predictionName: String = "prediction_nn" -) diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/AveragePooling2DLayer.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/AveragePooling2DLayer.sdsstub index 214f5d658..3e28720d4 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/AveragePooling2DLayer.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/AveragePooling2DLayer.sdsstub @@ -1,8 +1,11 @@ package safeds.ml.nn.layers from safeds.ml.nn.layers import Layer +from safeds.ml.nn.typing import ModelImageSize /** + * An average pooling 2D Layer. + * * @param kernelSize the size of the kernel * @param stride the stride of the pooling * @param padding the padding of the pooling @@ -16,9 +19,9 @@ class AveragePooling2DLayer( /** * Get the input_size of this layer. */ - @PythonName("input_size") attr inputSize: ImageSize + @PythonName("input_size") attr inputSize: ModelImageSize /** * Get the output_size of this layer. */ - @PythonName("output_size") attr outputSize: ImageSize + @PythonName("output_size") attr outputSize: ModelImageSize } diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/Convolutional2DLayer.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/Convolutional2DLayer.sdsstub index 331edadf3..54d88ff23 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/Convolutional2DLayer.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/Convolutional2DLayer.sdsstub @@ -1,9 +1,11 @@ package safeds.ml.nn.layers -from safeds.data.image.typing import ImageSize from safeds.ml.nn.layers import Layer +from safeds.ml.nn.typing import ModelImageSize /** + * A convolutional 2D Layer. + * * @param outputChannel the amount of output channels * @param kernelSize the size of the kernel * @param stride the stride of the convolution @@ -19,9 +21,9 @@ class Convolutional2DLayer( /** * Get the input_size of this layer. */ - @PythonName("input_size") attr inputSize: ImageSize + @PythonName("input_size") attr inputSize: ModelImageSize /** * Get the output_size of this layer. 
*/ - @PythonName("output_size") attr outputSize: ImageSize + @PythonName("output_size") attr outputSize: ModelImageSize } diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/ConvolutionalTranspose2DLayer.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/ConvolutionalTranspose2DLayer.sdsstub index c83ea7970..f45f052d3 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/ConvolutionalTranspose2DLayer.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/ConvolutionalTranspose2DLayer.sdsstub @@ -4,6 +4,8 @@ from safeds.data.image.typing import ImageSize from safeds.ml.nn.layers import Convolutional2DLayer /** + * A convolutional transpose 2D Layer. + * * @param outputChannel the amount of output channels * @param kernelSize the size of the kernel * @param stride the stride of the transposed convolution diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/FlattenLayer.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/FlattenLayer.sdsstub index 585a09de9..665ca6afd 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/FlattenLayer.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/FlattenLayer.sdsstub @@ -1,14 +1,17 @@ package safeds.ml.nn.layers -from safeds.data.image.typing import ImageSize from safeds.ml.nn.layers import Layer +from safeds.ml.nn.typing import ModelImageSize +/** + * A flatten layer. + */ @Experimental class FlattenLayer() sub Layer { /** * Get the input_size of this layer. */ - @PythonName("input_size") attr inputSize: ImageSize + @PythonName("input_size") attr inputSize: ModelImageSize /** * Get the output_size of this layer. */ diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/ForwardLayer.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/ForwardLayer.sdsstub index 7546b2e54..4ae6b3974 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/ForwardLayer.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/ForwardLayer.sdsstub @@ -3,13 +3,13 @@ package safeds.ml.nn.layers from safeds.ml.nn.layers import Layer /** - * @param outputSize The number of neurons in this layer - * @param inputSize The number of neurons in the previous layer + * A fully connected forward layer. + * + * @param neuronCount The number of neurons in this layer */ @Experimental class ForwardLayer( - @PythonName("output_size") outputSize: Int, - @PythonName("input_size") inputSize: Int? = null + @PythonName("neuron_count") neuronCount: Int ) sub Layer { /** * Get the input_size of this layer. diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/LSTMLayer.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/LSTMLayer.sdsstub index ace1f92c8..b75a82410 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/LSTMLayer.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/LSTMLayer.sdsstub @@ -3,13 +3,13 @@ package safeds.ml.nn.layers from safeds.ml.nn.layers import Layer /** - * @param outputSize The number of neurons in this layer - * @param inputSize The number of neurons in the previous layer + * A long short-term memory (LSTM) layer. 
+ * + * @param neuronCount The number of neurons in this layer */ @Experimental class LSTMLayer( - @PythonName("output_size") outputSize: Int, - @PythonName("input_size") inputSize: Int? = null + @PythonName("neuron_count") neuronCount: Int ) sub Layer { /** * Get the input_size of this layer. diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/MaxPooling2DLayer.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/MaxPooling2DLayer.sdsstub index 0d807e6df..3b995ad75 100644 --- a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/MaxPooling2DLayer.sdsstub +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/layers/MaxPooling2DLayer.sdsstub @@ -1,8 +1,11 @@ package safeds.ml.nn.layers from safeds.ml.nn.layers import Layer +from safeds.ml.nn.typing import ModelImageSize /** + * A maximum Pooling 2D Layer. + * * @param kernelSize the size of the kernel * @param stride the stride of the pooling * @param padding the padding of the pooling @@ -16,9 +19,9 @@ class MaxPooling2DLayer( /** * Get the input_size of this layer. */ - @PythonName("input_size") attr inputSize: ImageSize + @PythonName("input_size") attr inputSize: ModelImageSize /** * Get the output_size of this layer. */ - @PythonName("output_size") attr outputSize: ImageSize + @PythonName("output_size") attr outputSize: ModelImageSize } diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/typing/ConstantImageSize.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/typing/ConstantImageSize.sdsstub new file mode 100644 index 000000000..21ab17ec2 --- /dev/null +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/typing/ConstantImageSize.sdsstub @@ -0,0 +1,7 @@ +package safeds.ml.nn.typing + +/** + * A container for constant image size in neural networks. + */ +@Experimental +class ConstantImageSize sub ModelImageSize diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/typing/ModelImageSize.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/typing/ModelImageSize.sdsstub new file mode 100644 index 000000000..04252b4b2 --- /dev/null +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/typing/ModelImageSize.sdsstub @@ -0,0 +1,22 @@ +package safeds.ml.nn.typing + +from safeds.data.image.containers import Image + +/** + * A container for image size in neural networks. + */ +@Experimental +class ModelImageSize { + /** + * Get the width of this `ImageSize` in pixels. + */ + attr width: Int + /** + * Get the height of this `ImageSize` in pixels. + */ + attr height: Int + /** + * Get the channel of this `ImageSize` in pixels. + */ + attr channel: Int +} diff --git a/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/typing/VariableImageSize.sdsstub b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/typing/VariableImageSize.sdsstub new file mode 100644 index 000000000..65a9a9fa4 --- /dev/null +++ b/packages/safe-ds-lang/src/resources/builtins/safeds/ml/nn/typing/VariableImageSize.sdsstub @@ -0,0 +1,9 @@ +package safeds.ml.nn.typing + +/** + * A container for variable image size in neural networks. + * + * With a `VariableImageSize`, all image sizes that are a multiple of `width` and `height` are valid. + */ +@Experimental +class VariableImageSize sub ModelImageSize
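Finally, a hedged sketch that ties the new pieces together: an image-to-column conversion, the 2D layers now typed with `ModelImageSize`, and a concrete input size. `ImageSize` is used here as the concrete `ConstantImageSize` documented elsewhere in this change set, and the defaults for `stride` and `padding` are assumed.

```sds
pipeline imageClassifier {
    // 32x32 RGB input images.
    val conversion = InputConversionImageToColumn(imageSize = ImageSize(32, 32, 3));
    val classifier = NeuralNetworkClassifier(
        conversion,
        [
            Convolutional2DLayer(outputChannel = 16, kernelSize = 3),
            MaxPooling2DLayer(kernelSize = 2),
            FlattenLayer(),
            ForwardLayer(neuronCount = 10)
        ]
    );
}
```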