[AutoPR datafactory/resource-manager] [Datafactory] DatabricksSparkJarActivity, DatabricksSparkPythonActivity and other D… #2907

Merged
@@ -212,6 +212,8 @@
from .schedule_trigger_py3 import ScheduleTrigger
from .multiple_pipeline_trigger_py3 import MultiplePipelineTrigger
from .activity_policy_py3 import ActivityPolicy
from .databricks_spark_python_activity_py3 import DatabricksSparkPythonActivity
from .databricks_spark_jar_activity_py3 import DatabricksSparkJarActivity
from .databricks_notebook_activity_py3 import DatabricksNotebookActivity
from .data_lake_analytics_usql_activity_py3 import DataLakeAnalyticsUSQLActivity
from .azure_ml_update_resource_activity_py3 import AzureMLUpdateResourceActivity
@@ -542,6 +544,8 @@
from .schedule_trigger import ScheduleTrigger
from .multiple_pipeline_trigger import MultiplePipelineTrigger
from .activity_policy import ActivityPolicy
from .databricks_spark_python_activity import DatabricksSparkPythonActivity
from .databricks_spark_jar_activity import DatabricksSparkJarActivity
from .databricks_notebook_activity import DatabricksNotebookActivity
from .data_lake_analytics_usql_activity import DataLakeAnalyticsUSQLActivity
from .azure_ml_update_resource_activity import AzureMLUpdateResourceActivity
@@ -940,6 +944,8 @@
'ScheduleTrigger',
'MultiplePipelineTrigger',
'ActivityPolicy',
'DatabricksSparkPythonActivity',
'DatabricksSparkJarActivity',
'DatabricksNotebookActivity',
'DataLakeAnalyticsUSQLActivity',
'AzureMLUpdateResourceActivity',
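Once the models package is regenerated with the additions above, the new activity classes are exported next to the existing DatabricksNotebookActivity. A minimal import sketch, assuming the regenerated azure-mgmt-datafactory package is installed:

# These names are re-exported by the models __init__ shown above,
# so callers do not need to import the private modules directly.
from azure.mgmt.datafactory.models import (
    DatabricksNotebookActivity,
    DatabricksSparkJarActivity,
    DatabricksSparkPythonActivity,
)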
@@ -59,6 +59,8 @@ class AzureDatabricksLinkedService(LinkedService):
:param new_cluster_spark_conf: a set of optional, user-specified Spark
configuration key-value pairs.
:type new_cluster_spark_conf: dict[str, object]
:param new_cluster_custom_tags: Additional tags for cluster resources.
:type new_cluster_custom_tags: dict[str, object]
:param encrypted_credential: The encrypted credential used for
authentication. Credentials are encrypted using the integration runtime
credential manager. Type: string (or Expression with resultType string).
@@ -85,6 +87,7 @@ class AzureDatabricksLinkedService(LinkedService):
'new_cluster_num_of_worker': {'key': 'typeProperties.newClusterNumOfWorker', 'type': 'object'},
'new_cluster_node_type': {'key': 'typeProperties.newClusterNodeType', 'type': 'object'},
'new_cluster_spark_conf': {'key': 'typeProperties.newClusterSparkConf', 'type': '{object}'},
'new_cluster_custom_tags': {'key': 'typeProperties.newClusterCustomTags', 'type': '{object}'},
'encrypted_credential': {'key': 'typeProperties.encryptedCredential', 'type': 'object'},
}

@@ -97,5 +100,6 @@ def __init__(self, **kwargs):
self.new_cluster_num_of_worker = kwargs.get('new_cluster_num_of_worker', None)
self.new_cluster_node_type = kwargs.get('new_cluster_node_type', None)
self.new_cluster_spark_conf = kwargs.get('new_cluster_spark_conf', None)
self.new_cluster_custom_tags = kwargs.get('new_cluster_custom_tags', None)
self.encrypted_credential = kwargs.get('encrypted_credential', None)
self.type = 'AzureDatabricks'
@@ -59,6 +59,8 @@ class AzureDatabricksLinkedService(LinkedService):
:param new_cluster_spark_conf: a set of optional, user-specified Spark
configuration key-value pairs.
:type new_cluster_spark_conf: dict[str, object]
:param new_cluster_custom_tags: Additional tags for cluster resources.
:type new_cluster_custom_tags: dict[str, object]
:param encrypted_credential: The encrypted credential used for
authentication. Credentials are encrypted using the integration runtime
credential manager. Type: string (or Expression with resultType string).
@@ -85,10 +87,11 @@ class AzureDatabricksLinkedService(LinkedService):
'new_cluster_num_of_worker': {'key': 'typeProperties.newClusterNumOfWorker', 'type': 'object'},
'new_cluster_node_type': {'key': 'typeProperties.newClusterNodeType', 'type': 'object'},
'new_cluster_spark_conf': {'key': 'typeProperties.newClusterSparkConf', 'type': '{object}'},
'new_cluster_custom_tags': {'key': 'typeProperties.newClusterCustomTags', 'type': '{object}'},
'encrypted_credential': {'key': 'typeProperties.encryptedCredential', 'type': 'object'},
}

def __init__(self, *, domain, access_token, additional_properties=None, connect_via=None, description: str=None, parameters=None, annotations=None, existing_cluster_id=None, new_cluster_version=None, new_cluster_num_of_worker=None, new_cluster_node_type=None, new_cluster_spark_conf=None, encrypted_credential=None, **kwargs) -> None:
def __init__(self, *, domain, access_token, additional_properties=None, connect_via=None, description: str=None, parameters=None, annotations=None, existing_cluster_id=None, new_cluster_version=None, new_cluster_num_of_worker=None, new_cluster_node_type=None, new_cluster_spark_conf=None, new_cluster_custom_tags=None, encrypted_credential=None, **kwargs) -> None:
super(AzureDatabricksLinkedService, self).__init__(additional_properties=additional_properties, connect_via=connect_via, description=description, parameters=parameters, annotations=annotations, **kwargs)
self.domain = domain
self.access_token = access_token
@@ -97,5 +100,6 @@ def __init__(self, *, domain, access_token, additional_properties=None, connect_
self.new_cluster_num_of_worker = new_cluster_num_of_worker
self.new_cluster_node_type = new_cluster_node_type
self.new_cluster_spark_conf = new_cluster_spark_conf
self.new_cluster_custom_tags = new_cluster_custom_tags
self.encrypted_credential = encrypted_credential
self.type = 'AzureDatabricks'
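For illustration, a hedged sketch of how the new newClusterCustomTags property could be set when defining the linked service. The workspace URL, token placeholder, cluster sizing, and tag names below are invented for the example, not taken from this PR; SecureString and the keyword-only constructor shown above are assumed to be available in the regenerated package.

from azure.mgmt.datafactory.models import AzureDatabricksLinkedService, SecureString

# All values are placeholders; new_cluster_custom_tags carries the additional
# tags applied to the new-cluster resources, per the docstring above.
databricks_ls = AzureDatabricksLinkedService(
    domain='https://eastus.azuredatabricks.net',
    access_token=SecureString(value='<databricks-access-token>'),
    new_cluster_version='4.0.x-scala2.11',
    new_cluster_num_of_worker='2',
    new_cluster_node_type='Standard_D3_v2',
    new_cluster_spark_conf={'spark.speculation': 'true'},
    new_cluster_custom_tags={'costCenter': '1234', 'team': 'data-eng'},  # new property
)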
@@ -43,6 +43,9 @@ class DatabricksNotebookActivity(ExecutionActivity):
job. If the notebook takes a parameter that is not specified, the default
value from the notebook will be used.
:type base_parameters: dict[str, object]
:param libraries: A list of libraries to be installed on the cluster that
will execute the job.
:type libraries: list[dict[str, object]]
"""

_validation = {
@@ -62,10 +65,12 @@ class DatabricksNotebookActivity(ExecutionActivity):
'policy': {'key': 'policy', 'type': 'ActivityPolicy'},
'notebook_path': {'key': 'typeProperties.notebookPath', 'type': 'object'},
'base_parameters': {'key': 'typeProperties.baseParameters', 'type': '{object}'},
'libraries': {'key': 'typeProperties.libraries', 'type': '[{object}]'},
}

def __init__(self, **kwargs):
super(DatabricksNotebookActivity, self).__init__(**kwargs)
self.notebook_path = kwargs.get('notebook_path', None)
self.base_parameters = kwargs.get('base_parameters', None)
self.libraries = kwargs.get('libraries', None)
self.type = 'DatabricksNotebook'
@@ -43,6 +43,9 @@ class DatabricksNotebookActivity(ExecutionActivity):
job. If the notebook takes a parameter that is not specified, the default
value from the notebook will be used.
:type base_parameters: dict[str, object]
:param libraries: A list of libraries to be installed on the cluster that
will execute the job.
:type libraries: list[dict[str, object]]
"""

_validation = {
@@ -62,10 +65,12 @@ class DatabricksNotebookActivity(ExecutionActivity):
'policy': {'key': 'policy', 'type': 'ActivityPolicy'},
'notebook_path': {'key': 'typeProperties.notebookPath', 'type': 'object'},
'base_parameters': {'key': 'typeProperties.baseParameters', 'type': '{object}'},
'libraries': {'key': 'typeProperties.libraries', 'type': '[{object}]'},
}

def __init__(self, *, name: str, notebook_path, additional_properties=None, description: str=None, depends_on=None, user_properties=None, linked_service_name=None, policy=None, base_parameters=None, **kwargs) -> None:
def __init__(self, *, name: str, notebook_path, additional_properties=None, description: str=None, depends_on=None, user_properties=None, linked_service_name=None, policy=None, base_parameters=None, libraries=None, **kwargs) -> None:
super(DatabricksNotebookActivity, self).__init__(additional_properties=additional_properties, name=name, description=description, depends_on=depends_on, user_properties=user_properties, linked_service_name=linked_service_name, policy=policy, **kwargs)
self.notebook_path = notebook_path
self.base_parameters = base_parameters
self.libraries = libraries
self.type = 'DatabricksNotebook'
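A usage sketch for the new libraries parameter on the notebook activity. The library entries follow the pypi/maven/jar shape Databricks accepts for library specifications; the notebook path, package names, and linked service name are illustrative assumptions.

from azure.mgmt.datafactory.models import (
    DatabricksNotebookActivity,
    LinkedServiceReference,
)

# Placeholder names throughout; 'libraries' is the property added in this change.
notebook_activity = DatabricksNotebookActivity(
    name='RunTransformNotebook',
    notebook_path='/Shared/etl/transform',
    base_parameters={'inputPath': 'dbfs:/raw/2018-07-01'},
    libraries=[
        {'pypi': {'package': 'simplejson'}},
        {'maven': {'coordinates': 'org.jsoup:jsoup:1.7.2'}},
    ],
    linked_service_name=LinkedServiceReference(reference_name='AzureDatabricksLS'),
)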
@@ -0,0 +1,75 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .execution_activity import ExecutionActivity


class DatabricksSparkJarActivity(ExecutionActivity):
"""DatabricksSparkJar activity.

All required parameters must be populated in order to send to Azure.

:param additional_properties: Unmatched properties from the message are
deserialized to this collection
:type additional_properties: dict[str, object]
:param name: Required. Activity name.
:type name: str
:param description: Activity description.
:type description: str
:param depends_on: Activity depends on condition.
:type depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:param user_properties: Activity user properties.
:type user_properties: dict[str, str]
:param type: Required. Constant filled by server.
:type type: str
:param linked_service_name: Linked service reference.
:type linked_service_name:
~azure.mgmt.datafactory.models.LinkedServiceReference
:param policy: Activity policy.
:type policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:param main_class_name: Required. The full name of the class containing
the main method to be executed. This class must be contained in a JAR
provided as a library. Type: string (or Expression with resultType
string).
:type main_class_name: object
:param parameters: Parameters that will be passed to the main method.
:type parameters: list[object]
:param libraries: A list of libraries to be installed on the cluster that
will execute the job.
:type libraries: list[dict[str, object]]
"""

_validation = {
'name': {'required': True},
'type': {'required': True},
'main_class_name': {'required': True},
}

_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'depends_on': {'key': 'dependsOn', 'type': '[ActivityDependency]'},
'user_properties': {'key': 'userProperties', 'type': '{str}'},
'type': {'key': 'type', 'type': 'str'},
'linked_service_name': {'key': 'linkedServiceName', 'type': 'LinkedServiceReference'},
'policy': {'key': 'policy', 'type': 'ActivityPolicy'},
'main_class_name': {'key': 'typeProperties.mainClassName', 'type': 'object'},
'parameters': {'key': 'typeProperties.parameters', 'type': '[object]'},
'libraries': {'key': 'typeProperties.libraries', 'type': '[{object}]'},
}

def __init__(self, **kwargs):
super(DatabricksSparkJarActivity, self).__init__(**kwargs)
self.main_class_name = kwargs.get('main_class_name', None)
self.parameters = kwargs.get('parameters', None)
self.libraries = kwargs.get('libraries', None)
self.type = 'DatabricksSparkJar'
@@ -0,0 +1,75 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .execution_activity_py3 import ExecutionActivity


class DatabricksSparkJarActivity(ExecutionActivity):
"""DatabricksSparkJar activity.

All required parameters must be populated in order to send to Azure.

:param additional_properties: Unmatched properties from the message are
deserialized to this collection
:type additional_properties: dict[str, object]
:param name: Required. Activity name.
:type name: str
:param description: Activity description.
:type description: str
:param depends_on: Activity depends on condition.
:type depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:param user_properties: Activity user properties.
:type user_properties: dict[str, str]
:param type: Required. Constant filled by server.
:type type: str
:param linked_service_name: Linked service reference.
:type linked_service_name:
~azure.mgmt.datafactory.models.LinkedServiceReference
:param policy: Activity policy.
:type policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:param main_class_name: Required. The full name of the class containing
the main method to be executed. This class must be contained in a JAR
provided as a library. Type: string (or Expression with resultType
string).
:type main_class_name: object
:param parameters: Parameters that will be passed to the main method.
:type parameters: list[object]
:param libraries: A list of libraries to be installed on the cluster that
will execute the job.
:type libraries: list[dict[str, object]]
"""

_validation = {
'name': {'required': True},
'type': {'required': True},
'main_class_name': {'required': True},
}

_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'depends_on': {'key': 'dependsOn', 'type': '[ActivityDependency]'},
'user_properties': {'key': 'userProperties', 'type': '{str}'},
'type': {'key': 'type', 'type': 'str'},
'linked_service_name': {'key': 'linkedServiceName', 'type': 'LinkedServiceReference'},
'policy': {'key': 'policy', 'type': 'ActivityPolicy'},
'main_class_name': {'key': 'typeProperties.mainClassName', 'type': 'object'},
'parameters': {'key': 'typeProperties.parameters', 'type': '[object]'},
'libraries': {'key': 'typeProperties.libraries', 'type': '[{object}]'},
}

def __init__(self, *, name: str, main_class_name, additional_properties=None, description: str=None, depends_on=None, user_properties=None, linked_service_name=None, policy=None, parameters=None, libraries=None, **kwargs) -> None:
super(DatabricksSparkJarActivity, self).__init__(additional_properties=additional_properties, name=name, description=description, depends_on=depends_on, user_properties=user_properties, linked_service_name=linked_service_name, policy=policy, **kwargs)
self.main_class_name = main_class_name
self.parameters = parameters
self.libraries = libraries
self.type = 'DatabricksSparkJar'
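Likewise, a hedged sketch for the jar activity. The main class, DBFS jar path, and parameters are placeholders; the jar is supplied through the same libraries property so that the class named in main_class_name is available on the cluster, as the docstring requires.

from azure.mgmt.datafactory.models import (
    DatabricksSparkJarActivity,
    LinkedServiceReference,
)

# Placeholder values; the jar listed under 'libraries' must contain the main class.
jar_activity = DatabricksSparkJarActivity(
    name='RunSparkJarJob',
    main_class_name='com.example.etl.Aggregator',
    parameters=['--date', '2018-07-01'],
    libraries=[{'jar': 'dbfs:/mnt/libs/etl-aggregator.jar'}],
    linked_service_name=LinkedServiceReference(reference_name='AzureDatabricksLS'),
)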
@@ -0,0 +1,75 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .execution_activity import ExecutionActivity


class DatabricksSparkPythonActivity(ExecutionActivity):
"""DatabricksSparkPython activity.

All required parameters must be populated in order to send to Azure.

:param additional_properties: Unmatched properties from the message are
deserialized to this collection
:type additional_properties: dict[str, object]
:param name: Required. Activity name.
:type name: str
:param description: Activity description.
:type description: str
:param depends_on: Activity depends on condition.
:type depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:param user_properties: Activity user properties.
:type user_properties: dict[str, str]
:param type: Required. Constant filled by server.
:type type: str
:param linked_service_name: Linked service reference.
:type linked_service_name:
~azure.mgmt.datafactory.models.LinkedServiceReference
:param policy: Activity policy.
:type policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:param python_file: Required. The URI of the Python file to be executed.
DBFS paths are supported. Type: string (or Expression with resultType
string).
:type python_file: object
:param parameters: Command line parameters that will be passed to the
Python file.
:type parameters: list[object]
:param libraries: A list of libraries to be installed on the cluster that
will execute the job.
:type libraries: list[dict[str, object]]
"""

_validation = {
'name': {'required': True},
'type': {'required': True},
'python_file': {'required': True},
}

_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'depends_on': {'key': 'dependsOn', 'type': '[ActivityDependency]'},
'user_properties': {'key': 'userProperties', 'type': '{str}'},
'type': {'key': 'type', 'type': 'str'},
'linked_service_name': {'key': 'linkedServiceName', 'type': 'LinkedServiceReference'},
'policy': {'key': 'policy', 'type': 'ActivityPolicy'},
'python_file': {'key': 'typeProperties.pythonFile', 'type': 'object'},
'parameters': {'key': 'typeProperties.parameters', 'type': '[object]'},
'libraries': {'key': 'typeProperties.libraries', 'type': '[{object}]'},
}

def __init__(self, **kwargs):
super(DatabricksSparkPythonActivity, self).__init__(**kwargs)
self.python_file = kwargs.get('python_file', None)
self.parameters = kwargs.get('parameters', None)
self.libraries = kwargs.get('libraries', None)
self.type = 'DatabricksSparkPython'
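And a matching sketch for the Python activity; the script path, arguments, and package name are illustrative.

from azure.mgmt.datafactory.models import (
    DatabricksSparkPythonActivity,
    LinkedServiceReference,
)

# Placeholder values; DBFS paths are supported for 'python_file' per the docstring above.
python_activity = DatabricksSparkPythonActivity(
    name='RunPySparkScript',
    python_file='dbfs:/mnt/scripts/wordcount.py',
    parameters=['--input', 'dbfs:/raw', '--output', 'dbfs:/curated'],
    libraries=[{'pypi': {'package': 'azure-storage'}}],
    linked_service_name=LinkedServiceReference(reference_name='AzureDatabricksLS'),
)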