diff --git a/sdk/azure-hdinsight-job/CHANGELOG.md b/sdk/azure-hdinsight-job/CHANGELOG.md new file mode 100644 index 000000000000..578ed6acf479 --- /dev/null +++ b/sdk/azure-hdinsight-job/CHANGELOG.md @@ -0,0 +1,5 @@ +# Release History + +## 0.1.0 (1970-01-01) + +* Initial Release diff --git a/sdk/azure-hdinsight-job/MANIFEST.in b/sdk/azure-hdinsight-job/MANIFEST.in new file mode 100644 index 000000000000..94c87b0b3cba --- /dev/null +++ b/sdk/azure-hdinsight-job/MANIFEST.in @@ -0,0 +1,5 @@ +recursive-include tests *.py *.yaml +include *.md +include azure/__init__.py +include azure/hdinsight/__init__.py + diff --git a/sdk/azure-hdinsight-job/README.md b/sdk/azure-hdinsight-job/README.md new file mode 100644 index 000000000000..1c12d44aef62 --- /dev/null +++ b/sdk/azure-hdinsight-job/README.md @@ -0,0 +1,21 @@ +# Microsoft Azure SDK for Python + +This is the Microsoft Azure HDInsight Job Client Library. +This package has been tested with Python 2.7, 3.5, 3.6, 3.7 and 3.8. +For a more complete view of Azure libraries, see the [GitHub repo](https://github.com/Azure/azure-sdk-for-python/) + + +# Usage + +For code examples, see [HDInsight Job](https://docs.microsoft.com/python/api/overview/azure/) +on docs.microsoft.com. + + +# Provide Feedback + +If you encounter any bugs or have suggestions, please file an issue in the +[Issues](https://github.com/Azure/azure-sdk-for-python/issues) +section of the project. + + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-python%2Fazure-hdinsight-job%2FREADME.png) diff --git a/sdk/azure-hdinsight-job/azure/__init__.py b/sdk/azure-hdinsight-job/azure/__init__.py new file mode 100644 index 000000000000..0260537a02bb --- /dev/null +++ b/sdk/azure-hdinsight-job/azure/__init__.py @@ -0,0 +1 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) \ No newline at end of file diff --git a/sdk/azure-hdinsight-job/azure/hdinsight/__init__.py b/sdk/azure-hdinsight-job/azure/hdinsight/__init__.py new file mode 100644 index 000000000000..0260537a02bb --- /dev/null +++ b/sdk/azure-hdinsight-job/azure/hdinsight/__init__.py @@ -0,0 +1 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) \ No newline at end of file diff --git a/sdk/azure-hdinsight-job/azure/hdinsight/job/__init__.py b/sdk/azure-hdinsight-job/azure/hdinsight/job/__init__.py new file mode 100644 index 000000000000..9d85106e47ea --- /dev/null +++ b/sdk/azure-hdinsight-job/azure/hdinsight/job/__init__.py @@ -0,0 +1,19 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated.
+# -------------------------------------------------------------------------- + +from ._configuration import HDInsightJobClientConfiguration +from ._hd_insight_job_client import HDInsightJobClient +__all__ = ['HDInsightJobClient', 'HDInsightJobClientConfiguration'] + +from .version import VERSION + +__version__ = VERSION + diff --git a/sdk/azure-hdinsight-job/azure/hdinsight/job/_configuration.py b/sdk/azure-hdinsight-job/azure/hdinsight/job/_configuration.py new file mode 100644 index 000000000000..2a54d18299dc --- /dev/null +++ b/sdk/azure-hdinsight-job/azure/hdinsight/job/_configuration.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- +from msrestazure import AzureConfiguration + +from .version import VERSION + + +class HDInsightJobClientConfiguration(AzureConfiguration): + """Configuration for HDInsightJobClient + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credentials: Credentials needed for the client to connect to Azure. + :type credentials: :mod:`A msrestazure Credentials + object` + :param endpoint: The cluster endpoint, for example + https://clustername.azurehdinsight.net. + :type endpoint: str + :param username: The user name used for running job. + :type username: str + """ + + def __init__( + self, credentials, endpoint, username): + + if credentials is None: + raise ValueError("Parameter 'credentials' must not be None.") + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if username is None: + raise ValueError("Parameter 'username' must not be None.") + base_url = 'https://{endpoint}' + + super(HDInsightJobClientConfiguration, self).__init__(base_url) + + # Starting Autorest.Python 4.0.64, make connection pool activated by default + self.keep_alive = True + + self.add_user_agent('azure-hdinsight-job/{}'.format(VERSION)) + self.add_user_agent('Azure-SDK-For-Python') + + self.credentials = credentials + self.endpoint = endpoint + self.username = username diff --git a/sdk/azure-hdinsight-job/azure/hdinsight/job/_hd_insight_job_client.py b/sdk/azure-hdinsight-job/azure/hdinsight/job/_hd_insight_job_client.py new file mode 100644 index 000000000000..e8114f195c37 --- /dev/null +++ b/sdk/azure-hdinsight-job/azure/hdinsight/job/_hd_insight_job_client.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.service_client import SDKClient +from msrest import Serializer, Deserializer + +from ._configuration import HDInsightJobClientConfiguration +from .operations import JobOperations +from . import models + + +class HDInsightJobClient(SDKClient): + """The HDInsight Job Client. 
+ + :ivar config: Configuration for client. + :vartype config: HDInsightJobClientConfiguration + + :ivar job: Job operations + :vartype job: azure.hdinsight.job.operations.JobOperations + + :param credentials: Credentials needed for the client to connect to Azure. + :type credentials: :mod:`A msrestazure Credentials + object` + :param endpoint: The cluster endpoint, for example + https://clustername.azurehdinsight.net. + :type endpoint: str + :param username: The user name used for running job. + :type username: str + """ + + def __init__( + self, credentials, endpoint, username): + + self.config = HDInsightJobClientConfiguration(credentials, endpoint, username) + super(HDInsightJobClient, self).__init__(self.config.credentials, self.config) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self.api_version = '2018-11-01-preview' + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + + self.job = JobOperations( + self._client, self.config, self._serialize, self._deserialize) diff --git a/sdk/azure-hdinsight-job/azure/hdinsight/job/models/__init__.py b/sdk/azure-hdinsight-job/azure/hdinsight/job/models/__init__.py new file mode 100644 index 000000000000..41326dc7c5cc --- /dev/null +++ b/sdk/azure-hdinsight-job/azure/hdinsight/job/models/__init__.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +try: + from ._models_py3 import AppState + from ._models_py3 import JobDetailRootJsonObject + from ._models_py3 import JobID + from ._models_py3 import JobListJsonObject + from ._models_py3 import JobOperationsErrorResponse, JobOperationsErrorResponseException + from ._models_py3 import JobSubmissionJsonResponse + from ._models_py3 import Profile + from ._models_py3 import SparkBatchJob + from ._models_py3 import SparkBatchJobCollection + from ._models_py3 import SparkBatchJobRequest + from ._models_py3 import SparkJobDeletedResult + from ._models_py3 import SparkJobLog + from ._models_py3 import SparkJobState + from ._models_py3 import SparkSessionCollection + from ._models_py3 import SparkSessionJob + from ._models_py3 import SparkSessionJobRequest + from ._models_py3 import SparkStatement + from ._models_py3 import SparkStatementCancellationResult + from ._models_py3 import SparkStatementCollection + from ._models_py3 import SparkStatementOutput + from ._models_py3 import SparkStatementRequest + from ._models_py3 import Status + from ._models_py3 import Userargs +except (SyntaxError, ImportError): + from ._models import AppState + from ._models import JobDetailRootJsonObject + from ._models import JobID + from ._models import JobListJsonObject + from ._models import JobOperationsErrorResponse, JobOperationsErrorResponseException + from ._models import JobSubmissionJsonResponse + from ._models import Profile + from ._models import SparkBatchJob + from ._models import SparkBatchJobCollection + from ._models import SparkBatchJobRequest + from ._models import SparkJobDeletedResult + from ._models import SparkJobLog + from ._models import SparkJobState + from ._models import 
SparkSessionCollection + from ._models import SparkSessionJob + from ._models import SparkSessionJobRequest + from ._models import SparkStatement + from ._models import SparkStatementCancellationResult + from ._models import SparkStatementCollection + from ._models import SparkStatementOutput + from ._models import SparkStatementRequest + from ._models import Status + from ._models import Userargs +from ._hd_insight_job_client_enums import ( + ApplicationState, + JobState, + SessionJobKind, + StatementState, + StatementExecutionStatus, +) + +__all__ = [ + 'AppState', + 'JobDetailRootJsonObject', + 'JobID', + 'JobListJsonObject', + 'JobOperationsErrorResponse', 'JobOperationsErrorResponseException', + 'JobSubmissionJsonResponse', + 'Profile', + 'SparkBatchJob', + 'SparkBatchJobCollection', + 'SparkBatchJobRequest', + 'SparkJobDeletedResult', + 'SparkJobLog', + 'SparkJobState', + 'SparkSessionCollection', + 'SparkSessionJob', + 'SparkSessionJobRequest', + 'SparkStatement', + 'SparkStatementCancellationResult', + 'SparkStatementCollection', + 'SparkStatementOutput', + 'SparkStatementRequest', + 'Status', + 'Userargs', + 'ApplicationState', + 'JobState', + 'SessionJobKind', + 'StatementState', + 'StatementExecutionStatus', +] diff --git a/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_hd_insight_job_client_enums.py b/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_hd_insight_job_client_enums.py new file mode 100644 index 000000000000..ef568bb913a8 --- /dev/null +++ b/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_hd_insight_job_client_enums.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum + + +class ApplicationState(str, Enum): + + new = "NEW" + new_saving = "NEW_SAVING" + submitted = "SUBMITTED" + accepted = "ACCEPTED" + running = "RUNNING" + finished = "FINISHED" + finishing = "FINISHING" + failed = "FAILED" + killed = "KILLED" + + +class JobState(str, Enum): + + not_started = "not_started" + starting = "starting" + idle = "idle" + running = "running" + busy = "busy" + shutting_down = "shutting_down" + error = "error" + dead = "dead" + killed = "killed" + success = "success" + recovering = "recovering" + + +class SessionJobKind(str, Enum): + + spark = "spark" + pyspark = "pyspark" + sparkr = "sparkr" + sql = "sql" + + +class StatementState(str, Enum): + + waiting = "waiting" + running = "running" + available = "available" + error = "error" + cancelling = "cancelling" + cancelled = "cancelled" + + +class StatementExecutionStatus(str, Enum): + + ok = "ok" + error = "error" + abort = "abort" diff --git a/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_models.py b/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_models.py new file mode 100644 index 000000000000..9fd36835321b --- /dev/null +++ b/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_models.py @@ -0,0 +1,868 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError + + +class AppState(Model): + """The State of the application. + + :param state: The State of the application. Possible values include: + 'NEW', 'NEW_SAVING', 'SUBMITTED', 'ACCEPTED', 'RUNNING', 'FINISHED', + 'FINISHING', 'FAILED', 'KILLED' + :type state: str or ~azure.hdinsight.job.models.ApplicationState + """ + + _attribute_map = { + 'state': {'key': 'state', 'type': 'ApplicationState'}, + } + + def __init__(self, **kwargs): + super(AppState, self).__init__(**kwargs) + self.state = kwargs.get('state', None) + + +class CloudError(Model): + """CloudError. + """ + + _attribute_map = { + } + + +class JobDetailRootJsonObject(Model): + """The object containing the job details. + + :param callback: The callback URL, if any. + :type callback: object + :param completed: The string representing completed status, for example + 'done'. + :type completed: str + :param exit_value: The job's exit value. + :type exit_value: int + :param id: The job ID. + :type id: str + :param msg: The message returned. + :type msg: object + :param parent_id: The parent job ID. + :type parent_id: str + :param percent_complete: The job completion percentage, for example '75% + complete'. + :type percent_complete: str + :param profile: The object containing the job profile information. + :type profile: ~azure.hdinsight.job.models.Profile + :param status: The object containing the job status information. + :type status: ~azure.hdinsight.job.models.Status + :param user: The user name of the job creator. + :type user: str + :param userargs: The arguments passed in by the user. + :type userargs: ~azure.hdinsight.job.models.Userargs + """ + + _attribute_map = { + 'callback': {'key': 'callback', 'type': 'object'}, + 'completed': {'key': 'completed', 'type': 'str'}, + 'exit_value': {'key': 'exitValue', 'type': 'int'}, + 'id': {'key': 'id', 'type': 'str'}, + 'msg': {'key': 'msg', 'type': 'object'}, + 'parent_id': {'key': 'parentId', 'type': 'str'}, + 'percent_complete': {'key': 'percentComplete', 'type': 'str'}, + 'profile': {'key': 'profile', 'type': 'Profile'}, + 'status': {'key': 'status', 'type': 'Status'}, + 'user': {'key': 'user', 'type': 'str'}, + 'userargs': {'key': 'userargs', 'type': 'Userargs'}, + } + + def __init__(self, **kwargs): + super(JobDetailRootJsonObject, self).__init__(**kwargs) + self.callback = kwargs.get('callback', None) + self.completed = kwargs.get('completed', None) + self.exit_value = kwargs.get('exit_value', None) + self.id = kwargs.get('id', None) + self.msg = kwargs.get('msg', None) + self.parent_id = kwargs.get('parent_id', None) + self.percent_complete = kwargs.get('percent_complete', None) + self.profile = kwargs.get('profile', None) + self.status = kwargs.get('status', None) + self.user = kwargs.get('user', None) + self.userargs = kwargs.get('userargs', None) + + +class JobID(Model): + """The object with the Job ID. + + :param id: The job number. + :type id: long + :param jt_identifier: The jobTracker identifier. 
+ :type jt_identifier: str + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'long'}, + 'jt_identifier': {'key': 'jtIdentifier', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobID, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.jt_identifier = kwargs.get('jt_identifier', None) + + +class JobListJsonObject(Model): + """The List Job operation response. + + :param detail: The detail of the job. + :type detail: ~azure.hdinsight.job.models.JobDetailRootJsonObject + :param id: The Id of the job. + :type id: str + """ + + _attribute_map = { + 'detail': {'key': 'detail', 'type': 'JobDetailRootJsonObject'}, + 'id': {'key': 'id', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobListJsonObject, self).__init__(**kwargs) + self.detail = kwargs.get('detail', None) + self.id = kwargs.get('id', None) + + +class JobOperationsErrorResponse(Model): + """Describes the format of Error response. + + :param error: Error message indicating why the operation failed. + :type error: str + """ + + _attribute_map = { + 'error': {'key': 'error', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobOperationsErrorResponse, self).__init__(**kwargs) + self.error = kwargs.get('error', None) + + +class JobOperationsErrorResponseException(HttpOperationError): + """Server responsed with exception of type: 'JobOperationsErrorResponse'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, deserialize, response, *args): + + super(JobOperationsErrorResponseException, self).__init__(deserialize, response, 'JobOperationsErrorResponse', *args) + + +class JobSubmissionJsonResponse(Model): + """The job submission json response. + + :param id: The Id of the created job. + :type id: str + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobSubmissionJsonResponse, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + + +class Profile(Model): + """The object containing the job profile information. + + :param job_file: The job configuration file. + :type job_file: str + :param job_id: The full ID of the job. + :type job_id: str + :param job_id1: The ID of the job. + :type job_id1: ~azure.hdinsight.job.models.JobID + :param job_name: The user-specified job name. + :type job_name: str + :param queue_name: The name of the queue to which the job is submitted. + :type queue_name: str + :param url: The link to the web-ui for details of the job. + :type url: str + :param user: The userid of the person who submitted the job. + :type user: str + """ + + _attribute_map = { + 'job_file': {'key': 'jobFile', 'type': 'str'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'job_id1': {'key': 'jobID', 'type': 'JobID'}, + 'job_name': {'key': 'jobName', 'type': 'str'}, + 'queue_name': {'key': 'queueName', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'user': {'key': 'user', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(Profile, self).__init__(**kwargs) + self.job_file = kwargs.get('job_file', None) + self.job_id = kwargs.get('job_id', None) + self.job_id1 = kwargs.get('job_id1', None) + self.job_name = kwargs.get('job_name', None) + self.queue_name = kwargs.get('queue_name', None) + self.url = kwargs.get('url', None) + self.user = kwargs.get('user', None) + + +class SparkBatchJob(Model): + """SparkBatchJob. + + :param id: The livy id of the spark batch job. + :type id: int + :param app_id: The application id of this job. 
+ :type app_id: str + :param app_info: The detailed application info. + :type app_info: dict[str, str] + :param state: The current state of the spark batch job. Possible values + include: 'not_started', 'starting', 'idle', 'running', 'busy', + 'shutting_down', 'error', 'dead', 'killed', 'success', 'recovering' + :type state: str or ~azure.hdinsight.job.models.JobState + :param log_lines: The log lines. + :type log_lines: list[str] + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'int'}, + 'app_id': {'key': 'appId', 'type': 'str'}, + 'app_info': {'key': 'appInfo', 'type': '{str}'}, + 'state': {'key': 'state', 'type': 'str'}, + 'log_lines': {'key': 'log', 'type': '[str]'}, + } + + def __init__(self, **kwargs): + super(SparkBatchJob, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.app_id = kwargs.get('app_id', None) + self.app_info = kwargs.get('app_info', None) + self.state = kwargs.get('state', None) + self.log_lines = kwargs.get('log_lines', None) + + +class SparkBatchJobCollection(Model): + """SparkBatchJobCollection. + + :param from_property: The start index to fetch Spark Batch jobs. + :type from_property: int + :param total: Number of Spark Batch jobs to fetch. + :type total: int + :param sessions: List of spark batch jobs. + :type sessions: list[~azure.hdinsight.job.models.SparkBatchJob] + """ + + _attribute_map = { + 'from_property': {'key': 'from', 'type': 'int'}, + 'total': {'key': 'total', 'type': 'int'}, + 'sessions': {'key': 'sessions', 'type': '[SparkBatchJob]'}, + } + + def __init__(self, **kwargs): + super(SparkBatchJobCollection, self).__init__(**kwargs) + self.from_property = kwargs.get('from_property', None) + self.total = kwargs.get('total', None) + self.sessions = kwargs.get('sessions', None) + + +class SparkBatchJobRequest(Model): + """SparkBatchJobRequest. + + :param file: File containing the application to execute. + :type file: str + :param proxy_user: User to impersonate when running the job. + :type proxy_user: str + :param class_name: Application Java/Spark main class. + :type class_name: str + :param arguments: Command line arguments for the application. + :type arguments: list[str] + :param jars: Jars to be used in this batch job. + :type jars: list[str] + :param python_files: Python files to be used in this batch job. + :type python_files: list[str] + :param files: Files to be used in this batch job. + :type files: list[str] + :param driver_memory: Amount of memory to use for the driver process. + :type driver_memory: str + :param driver_cores: Number of cores to use for the driver process. + :type driver_cores: int + :param executor_memory: Amount of memory to use per executor process. + :type executor_memory: str + :param executor_cores: Number of cores to use for each executor. + :type executor_cores: int + :param executor_count: Number of executors to launch for this batch job. + :type executor_count: int + :param archives: Archives to be used in this batch job. + :type archives: list[str] + :param queue: The name of the YARN queue to which submitted. + :type queue: str + :param name: The name of this batch job. + :type name: str + :param configuration: Spark configuration properties. 
+ :type configuration: dict[str, str] + """ + + _attribute_map = { + 'file': {'key': 'file', 'type': 'str'}, + 'proxy_user': {'key': 'proxyUser', 'type': 'str'}, + 'class_name': {'key': 'className', 'type': 'str'}, + 'arguments': {'key': 'args', 'type': '[str]'}, + 'jars': {'key': 'jars', 'type': '[str]'}, + 'python_files': {'key': 'pyFiles', 'type': '[str]'}, + 'files': {'key': 'files', 'type': '[str]'}, + 'driver_memory': {'key': 'driverMemory', 'type': 'str'}, + 'driver_cores': {'key': 'driverCores', 'type': 'int'}, + 'executor_memory': {'key': 'executorMemory', 'type': 'str'}, + 'executor_cores': {'key': 'executorCores', 'type': 'int'}, + 'executor_count': {'key': 'numExecutors', 'type': 'int'}, + 'archives': {'key': 'archives', 'type': '[str]'}, + 'queue': {'key': 'queue', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'configuration': {'key': 'conf', 'type': '{str}'}, + } + + def __init__(self, **kwargs): + super(SparkBatchJobRequest, self).__init__(**kwargs) + self.file = kwargs.get('file', None) + self.proxy_user = kwargs.get('proxy_user', None) + self.class_name = kwargs.get('class_name', None) + self.arguments = kwargs.get('arguments', None) + self.jars = kwargs.get('jars', None) + self.python_files = kwargs.get('python_files', None) + self.files = kwargs.get('files', None) + self.driver_memory = kwargs.get('driver_memory', None) + self.driver_cores = kwargs.get('driver_cores', None) + self.executor_memory = kwargs.get('executor_memory', None) + self.executor_cores = kwargs.get('executor_cores', None) + self.executor_count = kwargs.get('executor_count', None) + self.archives = kwargs.get('archives', None) + self.queue = kwargs.get('queue', None) + self.name = kwargs.get('name', None) + self.configuration = kwargs.get('configuration', None) + + +class SparkJobDeletedResult(Model): + """SparkJobDeletedResult. + + :param deleted_message: + :type deleted_message: str + """ + + _attribute_map = { + 'deleted_message': {'key': 'msg', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(SparkJobDeletedResult, self).__init__(**kwargs) + self.deleted_message = kwargs.get('deleted_message', None) + + +class SparkJobLog(Model): + """SparkJobLog. + + :param id: The livy id of the spark job. + :type id: int + :param from_property: Offset from start of log. + :type from_property: int + :param size: Max number of log lines. + :type size: int + :param total: Total number of log lines. + :type total: long + :param log_lines: The log lines. + :type log_lines: list[str] + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'int'}, + 'from_property': {'key': 'from', 'type': 'int'}, + 'size': {'key': 'size', 'type': 'int'}, + 'total': {'key': 'total', 'type': 'long'}, + 'log_lines': {'key': 'log', 'type': '[str]'}, + } + + def __init__(self, **kwargs): + super(SparkJobLog, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.from_property = kwargs.get('from_property', None) + self.size = kwargs.get('size', None) + self.total = kwargs.get('total', None) + self.log_lines = kwargs.get('log_lines', None) + + +class SparkJobState(Model): + """SparkJobState. + + :param id: The livy id of the spark job. + :type id: int + :param state: The current state of the spark job. 
Possible values include: + 'not_started', 'starting', 'idle', 'running', 'busy', 'shutting_down', + 'error', 'dead', 'killed', 'success', 'recovering' + :type state: str or ~azure.hdinsight.job.models.JobState + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'int'}, + 'state': {'key': 'state', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(SparkJobState, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.state = kwargs.get('state', None) + + +class SparkSessionCollection(Model): + """SparkSessionCollection. + + :param from_property: The start index to fetch spark sessions. + :type from_property: int + :param total: Number of spark sessions to fetch. + :type total: int + :param sessions: List of spark sessions. + :type sessions: list[~azure.hdinsight.job.models.SparkSessionJob] + """ + + _attribute_map = { + 'from_property': {'key': 'from', 'type': 'int'}, + 'total': {'key': 'total', 'type': 'int'}, + 'sessions': {'key': 'sessions', 'type': '[SparkSessionJob]'}, + } + + def __init__(self, **kwargs): + super(SparkSessionCollection, self).__init__(**kwargs) + self.from_property = kwargs.get('from_property', None) + self.total = kwargs.get('total', None) + self.sessions = kwargs.get('sessions', None) + + +class SparkSessionJob(Model): + """SparkSessionJob. + + :param id: The livy id of the spark session job. + :type id: int + :param app_id: The application id of this job. + :type app_id: str + :param owner: Remote user who submitted this job. + :type owner: str + :param proxy_user: User to impersonate when running. + :type proxy_user: str + :param kind: Spark session job kind. Possible values include: 'spark', + 'pyspark', 'sparkr', 'sql' + :type kind: str or ~azure.hdinsight.job.models.SessionJobKind + :param log_lines: The log lines. + :type log_lines: list[str] + :param state: The current state of the spark session job. Possible values + include: 'not_started', 'starting', 'idle', 'running', 'busy', + 'shutting_down', 'error', 'dead', 'killed', 'success', 'recovering' + :type state: str or ~azure.hdinsight.job.models.JobState + :param app_info: The detailed application info. + :type app_info: dict[str, str] + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'int'}, + 'app_id': {'key': 'appId', 'type': 'str'}, + 'owner': {'key': 'owner', 'type': 'str'}, + 'proxy_user': {'key': 'proxyUser', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, + 'log_lines': {'key': 'log', 'type': '[str]'}, + 'state': {'key': 'state', 'type': 'str'}, + 'app_info': {'key': 'appInfo', 'type': '{str}'}, + } + + def __init__(self, **kwargs): + super(SparkSessionJob, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.app_id = kwargs.get('app_id', None) + self.owner = kwargs.get('owner', None) + self.proxy_user = kwargs.get('proxy_user', None) + self.kind = kwargs.get('kind', None) + self.log_lines = kwargs.get('log_lines', None) + self.state = kwargs.get('state', None) + self.app_info = kwargs.get('app_info', None) + + +class SparkSessionJobRequest(Model): + """SparkSessionJobRequest. + + :param kind: Spark session job kind. Possible values include: 'spark', + 'pyspark', 'sparkr', 'sql' + :type kind: str or ~azure.hdinsight.job.models.SessionJobKind + :param proxy_user: User to impersonate when starting the session. + :type proxy_user: str + :param jars: Jars to be used in this session. + :type jars: list[str] + :param python_files: Python files to be used in this session. 
+ :type python_files: list[str] + :param files: Files to be used in this session. + :type files: list[str] + :param driver_memory: Amount of memory to use for the driver process. + :type driver_memory: str + :param driver_cores: Number of cores to use for the driver process. + :type driver_cores: int + :param executor_memory: Amount of memory to use per executor process. + :type executor_memory: str + :param executor_cores: Number of cores to use for each executor. + :type executor_cores: int + :param executor_count: Number of executors to launch for this session. + :type executor_count: int + :param archives: Archives to be used in this session. + :type archives: list[str] + :param queue: The name of the YARN queue to which submitted. + :type queue: str + :param name: The name of this session. + :type name: str + :param configuration: Spark configuration properties. + :type configuration: dict[str, str] + :param heartbeat_timeout_in_second: Timeout in second to which session be + orphaned. + :type heartbeat_timeout_in_second: int + """ + + _attribute_map = { + 'kind': {'key': 'kind', 'type': 'str'}, + 'proxy_user': {'key': 'proxyUser', 'type': 'str'}, + 'jars': {'key': 'jars', 'type': '[str]'}, + 'python_files': {'key': 'pyFiles', 'type': '[str]'}, + 'files': {'key': 'files', 'type': '[str]'}, + 'driver_memory': {'key': 'driverMemory', 'type': 'str'}, + 'driver_cores': {'key': 'driverCores', 'type': 'int'}, + 'executor_memory': {'key': 'executorMemory', 'type': 'str'}, + 'executor_cores': {'key': 'executorCores', 'type': 'int'}, + 'executor_count': {'key': 'numExecutors', 'type': 'int'}, + 'archives': {'key': 'archives', 'type': '[str]'}, + 'queue': {'key': 'queue', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'configuration': {'key': 'conf', 'type': '{str}'}, + 'heartbeat_timeout_in_second': {'key': 'heartbeatTimeoutInSecond', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(SparkSessionJobRequest, self).__init__(**kwargs) + self.kind = kwargs.get('kind', None) + self.proxy_user = kwargs.get('proxy_user', None) + self.jars = kwargs.get('jars', None) + self.python_files = kwargs.get('python_files', None) + self.files = kwargs.get('files', None) + self.driver_memory = kwargs.get('driver_memory', None) + self.driver_cores = kwargs.get('driver_cores', None) + self.executor_memory = kwargs.get('executor_memory', None) + self.executor_cores = kwargs.get('executor_cores', None) + self.executor_count = kwargs.get('executor_count', None) + self.archives = kwargs.get('archives', None) + self.queue = kwargs.get('queue', None) + self.name = kwargs.get('name', None) + self.configuration = kwargs.get('configuration', None) + self.heartbeat_timeout_in_second = kwargs.get('heartbeat_timeout_in_second', None) + + +class SparkStatement(Model): + """SparkStatement. + + :param id: The livy id of the spark statement job. + :type id: int + :param code: The execution code. + :type code: str + :param state: The current state of the spark statement. Possible values + include: 'waiting', 'running', 'available', 'error', 'cancelling', + 'cancelled' + :type state: str or ~azure.hdinsight.job.models.StatementState + :param output: The execution output. + :type output: ~azure.hdinsight.job.models.SparkStatementOutput + :param progress: The execution progress. 
+ :type progress: float + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'int'}, + 'code': {'key': 'code', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'str'}, + 'output': {'key': 'output', 'type': 'SparkStatementOutput'}, + 'progress': {'key': 'progress', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(SparkStatement, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.code = kwargs.get('code', None) + self.state = kwargs.get('state', None) + self.output = kwargs.get('output', None) + self.progress = kwargs.get('progress', None) + + +class SparkStatementCancellationResult(Model): + """SparkStatementCancellationResult. + + :param cancel_message: + :type cancel_message: str + """ + + _attribute_map = { + 'cancel_message': {'key': 'msg', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(SparkStatementCancellationResult, self).__init__(**kwargs) + self.cancel_message = kwargs.get('cancel_message', None) + + +class SparkStatementCollection(Model): + """SparkStatementCollection. + + :param statements: List of spark statements. + :type statements: list[~azure.hdinsight.job.models.SparkStatement] + """ + + _attribute_map = { + 'statements': {'key': 'statements', 'type': '[SparkStatement]'}, + } + + def __init__(self, **kwargs): + super(SparkStatementCollection, self).__init__(**kwargs) + self.statements = kwargs.get('statements', None) + + +class SparkStatementOutput(Model): + """SparkStatementOutput. + + :param status: Execution status. Possible values include: 'ok', 'error', + 'abort' + :type status: str or ~azure.hdinsight.job.models.StatementExecutionStatus + :param execution_count: A monotonically increasing number. + :type execution_count: int + :param data: Statement output. + :type data: object + """ + + _attribute_map = { + 'status': {'key': 'status', 'type': 'str'}, + 'execution_count': {'key': 'execution_count', 'type': 'int'}, + 'data': {'key': 'data', 'type': 'object'}, + } + + def __init__(self, **kwargs): + super(SparkStatementOutput, self).__init__(**kwargs) + self.status = kwargs.get('status', None) + self.execution_count = kwargs.get('execution_count', None) + self.data = kwargs.get('data', None) + + +class SparkStatementRequest(Model): + """SparkStatementRequest. + + :param code: + :type code: str + :param kind: Possible values include: 'spark', 'pyspark', 'sparkr', 'sql' + :type kind: str or ~azure.hdinsight.job.models.SessionJobKind + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(SparkStatementRequest, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.kind = kwargs.get('kind', None) + + +class Status(Model): + """Gets or sets the object containing the job status information. + + :param cleanup_progress: The progress made on the cleanup. + :type cleanup_progress: float + :param failure_info: The information about any failures that have + occurred. + :type failure_info: str + :param finish_time: The time at which the job completed. It is an integer + in milliseconds, as a Unix timestamp relative to 1/1/1970 00:00:00. + :type finish_time: long + :param history_file: The history file of the job. + :type history_file: str + :param job_ac_ls: The ACLs of the job. + :type job_ac_ls: object + :param job_complete: Whether or not the job has completed. + :type job_complete: bool + :param job_file: The job configuration file. + :type job_file: str + :param job_id: The full ID of the job. 
+ :type job_id: str + :param job_id1: The ID of the job. + :type job_id1: ~azure.hdinsight.job.models.JobID + :param job_name: The user-specified job name. + :type job_name: str + :param job_priority: The priority of the job. + :type job_priority: str + :param map_progress: The progress made on the maps. + :type map_progress: float + :param needed_mem: The amount of memory needed for the job. + :type needed_mem: long + :param num_reserved_slots: The number of slots reserved. + :type num_reserved_slots: int + :param num_used_slots: The number of slots used for the job. + :type num_used_slots: int + :param priority: The priority of the job. + :type priority: str + :param queue: The job queue name. + :type queue: str + :param reduce_progress: The progress made on the reduces. + :type reduce_progress: float + :param reserved_mem: The amount of memory reserved for the job. + :type reserved_mem: long + :param retired: Whether or not the job has been retired. + :type retired: bool + :param run_state: The current state of the job. + :type run_state: int + :param scheduling_info: The information about the scheduling of the job. + :type scheduling_info: str + :param setup_progress: The progress made on the setup. + :type setup_progress: float + :param start_time: The time at which the job started. It is an integer in + milliseconds, as a Unix timestamp relative to 1/1/1970 00:00:00. + :type start_time: long + :param state: The state of the job. + :type state: str + :param tracking_url: The link to the web-ui for details of the job. + :type tracking_url: str + :param uber: Whether job running in uber mode. + :type uber: bool + :param used_mem: The amount of memory used by the job. + :type used_mem: long + :param username: The userid of the person who submitted the job. 
+ :type username: str + """ + + _attribute_map = { + 'cleanup_progress': {'key': 'cleanupProgress', 'type': 'float'}, + 'failure_info': {'key': 'failureInfo', 'type': 'str'}, + 'finish_time': {'key': 'finishTime', 'type': 'long'}, + 'history_file': {'key': 'historyFile', 'type': 'str'}, + 'job_ac_ls': {'key': 'jobACLs', 'type': 'object'}, + 'job_complete': {'key': 'jobComplete', 'type': 'bool'}, + 'job_file': {'key': 'jobFile', 'type': 'str'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'job_id1': {'key': 'jobID', 'type': 'JobID'}, + 'job_name': {'key': 'jobName', 'type': 'str'}, + 'job_priority': {'key': 'jobPriority', 'type': 'str'}, + 'map_progress': {'key': 'mapProgress', 'type': 'float'}, + 'needed_mem': {'key': 'neededMem', 'type': 'long'}, + 'num_reserved_slots': {'key': 'numReservedSlots', 'type': 'int'}, + 'num_used_slots': {'key': 'numUsedSlots', 'type': 'int'}, + 'priority': {'key': 'priority', 'type': 'str'}, + 'queue': {'key': 'queue', 'type': 'str'}, + 'reduce_progress': {'key': 'reduceProgress', 'type': 'float'}, + 'reserved_mem': {'key': 'reservedMem', 'type': 'long'}, + 'retired': {'key': 'retired', 'type': 'bool'}, + 'run_state': {'key': 'runState', 'type': 'int'}, + 'scheduling_info': {'key': 'schedulingInfo', 'type': 'str'}, + 'setup_progress': {'key': 'setupProgress', 'type': 'float'}, + 'start_time': {'key': 'startTime', 'type': 'long'}, + 'state': {'key': 'state', 'type': 'str'}, + 'tracking_url': {'key': 'trackingUrl', 'type': 'str'}, + 'uber': {'key': 'uber', 'type': 'bool'}, + 'used_mem': {'key': 'usedMem', 'type': 'long'}, + 'username': {'key': 'username', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(Status, self).__init__(**kwargs) + self.cleanup_progress = kwargs.get('cleanup_progress', None) + self.failure_info = kwargs.get('failure_info', None) + self.finish_time = kwargs.get('finish_time', None) + self.history_file = kwargs.get('history_file', None) + self.job_ac_ls = kwargs.get('job_ac_ls', None) + self.job_complete = kwargs.get('job_complete', None) + self.job_file = kwargs.get('job_file', None) + self.job_id = kwargs.get('job_id', None) + self.job_id1 = kwargs.get('job_id1', None) + self.job_name = kwargs.get('job_name', None) + self.job_priority = kwargs.get('job_priority', None) + self.map_progress = kwargs.get('map_progress', None) + self.needed_mem = kwargs.get('needed_mem', None) + self.num_reserved_slots = kwargs.get('num_reserved_slots', None) + self.num_used_slots = kwargs.get('num_used_slots', None) + self.priority = kwargs.get('priority', None) + self.queue = kwargs.get('queue', None) + self.reduce_progress = kwargs.get('reduce_progress', None) + self.reserved_mem = kwargs.get('reserved_mem', None) + self.retired = kwargs.get('retired', None) + self.run_state = kwargs.get('run_state', None) + self.scheduling_info = kwargs.get('scheduling_info', None) + self.setup_progress = kwargs.get('setup_progress', None) + self.start_time = kwargs.get('start_time', None) + self.state = kwargs.get('state', None) + self.tracking_url = kwargs.get('tracking_url', None) + self.uber = kwargs.get('uber', None) + self.used_mem = kwargs.get('used_mem', None) + self.username = kwargs.get('username', None) + + +class Userargs(Model): + """Gets or sets the object containing the user arguments. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar arg: The list of args defined by the user. + :vartype arg: list[str] + :param callback: The callback URL, if any. 
+ :type callback: object + :ivar define: The define properties defined by the user. + :vartype define: list[str] + :param enablelog: Whether or not the user enabled logs. + :type enablelog: str + :param execute: The query defined by the user. + :type execute: str + :param file: The query file provided by the user. + :type file: object + :param files: The files defined by the user. + :type files: object + :param jar: The JAR file provided by the user. + :type jar: str + :param statusdir: The status directory defined by the user. + :type statusdir: object + """ + + _validation = { + 'arg': {'readonly': True}, + 'define': {'readonly': True}, + } + + _attribute_map = { + 'arg': {'key': 'arg', 'type': '[str]'}, + 'callback': {'key': 'callback', 'type': 'object'}, + 'define': {'key': 'define', 'type': '[str]'}, + 'enablelog': {'key': 'enablelog', 'type': 'str'}, + 'execute': {'key': 'execute', 'type': 'str'}, + 'file': {'key': 'file', 'type': 'object'}, + 'files': {'key': 'files', 'type': 'object'}, + 'jar': {'key': 'jar', 'type': 'str'}, + 'statusdir': {'key': 'statusdir', 'type': 'object'}, + } + + def __init__(self, **kwargs): + super(Userargs, self).__init__(**kwargs) + self.arg = None + self.callback = kwargs.get('callback', None) + self.define = None + self.enablelog = kwargs.get('enablelog', None) + self.execute = kwargs.get('execute', None) + self.file = kwargs.get('file', None) + self.files = kwargs.get('files', None) + self.jar = kwargs.get('jar', None) + self.statusdir = kwargs.get('statusdir', None) diff --git a/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_models_py3.py b/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_models_py3.py new file mode 100644 index 000000000000..26cad1ca424c --- /dev/null +++ b/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_models_py3.py @@ -0,0 +1,868 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError + + +class AppState(Model): + """The State of the application. + + :param state: The State of the application. Possible values include: + 'NEW', 'NEW_SAVING', 'SUBMITTED', 'ACCEPTED', 'RUNNING', 'FINISHED', + 'FINISHING', 'FAILED', 'KILLED' + :type state: str or ~azure.hdinsight.job.models.ApplicationState + """ + + _attribute_map = { + 'state': {'key': 'state', 'type': 'ApplicationState'}, + } + + def __init__(self, *, state=None, **kwargs) -> None: + super(AppState, self).__init__(**kwargs) + self.state = state + + +class CloudError(Model): + """CloudError. + """ + + _attribute_map = { + } + + +class JobDetailRootJsonObject(Model): + """The object containing the job details. + + :param callback: The callback URL, if any. + :type callback: object + :param completed: The string representing completed status, for example + 'done'. + :type completed: str + :param exit_value: The job's exit value. + :type exit_value: int + :param id: The job ID. + :type id: str + :param msg: The message returned. + :type msg: object + :param parent_id: The parent job ID. 
+ :type parent_id: str + :param percent_complete: The job completion percentage, for example '75% + complete'. + :type percent_complete: str + :param profile: The object containing the job profile information. + :type profile: ~azure.hdinsight.job.models.Profile + :param status: The object containing the job status information. + :type status: ~azure.hdinsight.job.models.Status + :param user: The user name of the job creator. + :type user: str + :param userargs: The arguments passed in by the user. + :type userargs: ~azure.hdinsight.job.models.Userargs + """ + + _attribute_map = { + 'callback': {'key': 'callback', 'type': 'object'}, + 'completed': {'key': 'completed', 'type': 'str'}, + 'exit_value': {'key': 'exitValue', 'type': 'int'}, + 'id': {'key': 'id', 'type': 'str'}, + 'msg': {'key': 'msg', 'type': 'object'}, + 'parent_id': {'key': 'parentId', 'type': 'str'}, + 'percent_complete': {'key': 'percentComplete', 'type': 'str'}, + 'profile': {'key': 'profile', 'type': 'Profile'}, + 'status': {'key': 'status', 'type': 'Status'}, + 'user': {'key': 'user', 'type': 'str'}, + 'userargs': {'key': 'userargs', 'type': 'Userargs'}, + } + + def __init__(self, *, callback=None, completed: str=None, exit_value: int=None, id: str=None, msg=None, parent_id: str=None, percent_complete: str=None, profile=None, status=None, user: str=None, userargs=None, **kwargs) -> None: + super(JobDetailRootJsonObject, self).__init__(**kwargs) + self.callback = callback + self.completed = completed + self.exit_value = exit_value + self.id = id + self.msg = msg + self.parent_id = parent_id + self.percent_complete = percent_complete + self.profile = profile + self.status = status + self.user = user + self.userargs = userargs + + +class JobID(Model): + """The object with the Job ID. + + :param id: The job number. + :type id: long + :param jt_identifier: The jobTracker identifier. + :type jt_identifier: str + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'long'}, + 'jt_identifier': {'key': 'jtIdentifier', 'type': 'str'}, + } + + def __init__(self, *, id: int=None, jt_identifier: str=None, **kwargs) -> None: + super(JobID, self).__init__(**kwargs) + self.id = id + self.jt_identifier = jt_identifier + + +class JobListJsonObject(Model): + """The List Job operation response. + + :param detail: The detail of the job. + :type detail: ~azure.hdinsight.job.models.JobDetailRootJsonObject + :param id: The Id of the job. + :type id: str + """ + + _attribute_map = { + 'detail': {'key': 'detail', 'type': 'JobDetailRootJsonObject'}, + 'id': {'key': 'id', 'type': 'str'}, + } + + def __init__(self, *, detail=None, id: str=None, **kwargs) -> None: + super(JobListJsonObject, self).__init__(**kwargs) + self.detail = detail + self.id = id + + +class JobOperationsErrorResponse(Model): + """Describes the format of Error response. + + :param error: Error message indicating why the operation failed. + :type error: str + """ + + _attribute_map = { + 'error': {'key': 'error', 'type': 'str'}, + } + + def __init__(self, *, error: str=None, **kwargs) -> None: + super(JobOperationsErrorResponse, self).__init__(**kwargs) + self.error = error + + +class JobOperationsErrorResponseException(HttpOperationError): + """Server responsed with exception of type: 'JobOperationsErrorResponse'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. 
+ """ + + def __init__(self, deserialize, response, *args): + + super(JobOperationsErrorResponseException, self).__init__(deserialize, response, 'JobOperationsErrorResponse', *args) + + +class JobSubmissionJsonResponse(Model): + """The job submission json response. + + :param id: The Id of the created job. + :type id: str + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + } + + def __init__(self, *, id: str=None, **kwargs) -> None: + super(JobSubmissionJsonResponse, self).__init__(**kwargs) + self.id = id + + +class Profile(Model): + """The object containing the job profile information. + + :param job_file: The job configuration file. + :type job_file: str + :param job_id: The full ID of the job. + :type job_id: str + :param job_id1: The ID of the job. + :type job_id1: ~azure.hdinsight.job.models.JobID + :param job_name: The user-specified job name. + :type job_name: str + :param queue_name: The name of the queue to which the job is submitted. + :type queue_name: str + :param url: The link to the web-ui for details of the job. + :type url: str + :param user: The userid of the person who submitted the job. + :type user: str + """ + + _attribute_map = { + 'job_file': {'key': 'jobFile', 'type': 'str'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'job_id1': {'key': 'jobID', 'type': 'JobID'}, + 'job_name': {'key': 'jobName', 'type': 'str'}, + 'queue_name': {'key': 'queueName', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'user': {'key': 'user', 'type': 'str'}, + } + + def __init__(self, *, job_file: str=None, job_id: str=None, job_id1=None, job_name: str=None, queue_name: str=None, url: str=None, user: str=None, **kwargs) -> None: + super(Profile, self).__init__(**kwargs) + self.job_file = job_file + self.job_id = job_id + self.job_id1 = job_id1 + self.job_name = job_name + self.queue_name = queue_name + self.url = url + self.user = user + + +class SparkBatchJob(Model): + """SparkBatchJob. + + :param id: The livy id of the spark batch job. + :type id: int + :param app_id: The application id of this job. + :type app_id: str + :param app_info: The detailed application info. + :type app_info: dict[str, str] + :param state: The current state of the spark batch job. Possible values + include: 'not_started', 'starting', 'idle', 'running', 'busy', + 'shutting_down', 'error', 'dead', 'killed', 'success', 'recovering' + :type state: str or ~azure.hdinsight.job.models.JobState + :param log_lines: The log lines. + :type log_lines: list[str] + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'int'}, + 'app_id': {'key': 'appId', 'type': 'str'}, + 'app_info': {'key': 'appInfo', 'type': '{str}'}, + 'state': {'key': 'state', 'type': 'str'}, + 'log_lines': {'key': 'log', 'type': '[str]'}, + } + + def __init__(self, *, id: int=None, app_id: str=None, app_info=None, state=None, log_lines=None, **kwargs) -> None: + super(SparkBatchJob, self).__init__(**kwargs) + self.id = id + self.app_id = app_id + self.app_info = app_info + self.state = state + self.log_lines = log_lines + + +class SparkBatchJobCollection(Model): + """SparkBatchJobCollection. + + :param from_property: The start index to fetch Spark Batch jobs. + :type from_property: int + :param total: Number of Spark Batch jobs to fetch. + :type total: int + :param sessions: List of spark batch jobs. 
+ :type sessions: list[~azure.hdinsight.job.models.SparkBatchJob] + """ + + _attribute_map = { + 'from_property': {'key': 'from', 'type': 'int'}, + 'total': {'key': 'total', 'type': 'int'}, + 'sessions': {'key': 'sessions', 'type': '[SparkBatchJob]'}, + } + + def __init__(self, *, from_property: int=None, total: int=None, sessions=None, **kwargs) -> None: + super(SparkBatchJobCollection, self).__init__(**kwargs) + self.from_property = from_property + self.total = total + self.sessions = sessions + + +class SparkBatchJobRequest(Model): + """SparkBatchJobRequest. + + :param file: File containing the application to execute. + :type file: str + :param proxy_user: User to impersonate when running the job. + :type proxy_user: str + :param class_name: Application Java/Spark main class. + :type class_name: str + :param arguments: Command line arguments for the application. + :type arguments: list[str] + :param jars: Jars to be used in this batch job. + :type jars: list[str] + :param python_files: Python files to be used in this batch job. + :type python_files: list[str] + :param files: Files to be used in this batch job. + :type files: list[str] + :param driver_memory: Amount of memory to use for the driver process. + :type driver_memory: str + :param driver_cores: Number of cores to use for the driver process. + :type driver_cores: int + :param executor_memory: Amount of memory to use per executor process. + :type executor_memory: str + :param executor_cores: Number of cores to use for each executor. + :type executor_cores: int + :param executor_count: Number of executors to launch for this batch job. + :type executor_count: int + :param archives: Archives to be used in this batch job. + :type archives: list[str] + :param queue: The name of the YARN queue to which submitted. + :type queue: str + :param name: The name of this batch job. + :type name: str + :param configuration: Spark configuration properties. 
+ :type configuration: dict[str, str] + """ + + _attribute_map = { + 'file': {'key': 'file', 'type': 'str'}, + 'proxy_user': {'key': 'proxyUser', 'type': 'str'}, + 'class_name': {'key': 'className', 'type': 'str'}, + 'arguments': {'key': 'args', 'type': '[str]'}, + 'jars': {'key': 'jars', 'type': '[str]'}, + 'python_files': {'key': 'pyFiles', 'type': '[str]'}, + 'files': {'key': 'files', 'type': '[str]'}, + 'driver_memory': {'key': 'driverMemory', 'type': 'str'}, + 'driver_cores': {'key': 'driverCores', 'type': 'int'}, + 'executor_memory': {'key': 'executorMemory', 'type': 'str'}, + 'executor_cores': {'key': 'executorCores', 'type': 'int'}, + 'executor_count': {'key': 'numExecutors', 'type': 'int'}, + 'archives': {'key': 'archives', 'type': '[str]'}, + 'queue': {'key': 'queue', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'configuration': {'key': 'conf', 'type': '{str}'}, + } + + def __init__(self, *, file: str=None, proxy_user: str=None, class_name: str=None, arguments=None, jars=None, python_files=None, files=None, driver_memory: str=None, driver_cores: int=None, executor_memory: str=None, executor_cores: int=None, executor_count: int=None, archives=None, queue: str=None, name: str=None, configuration=None, **kwargs) -> None: + super(SparkBatchJobRequest, self).__init__(**kwargs) + self.file = file + self.proxy_user = proxy_user + self.class_name = class_name + self.arguments = arguments + self.jars = jars + self.python_files = python_files + self.files = files + self.driver_memory = driver_memory + self.driver_cores = driver_cores + self.executor_memory = executor_memory + self.executor_cores = executor_cores + self.executor_count = executor_count + self.archives = archives + self.queue = queue + self.name = name + self.configuration = configuration + + +class SparkJobDeletedResult(Model): + """SparkJobDeletedResult. + + :param deleted_message: + :type deleted_message: str + """ + + _attribute_map = { + 'deleted_message': {'key': 'msg', 'type': 'str'}, + } + + def __init__(self, *, deleted_message: str=None, **kwargs) -> None: + super(SparkJobDeletedResult, self).__init__(**kwargs) + self.deleted_message = deleted_message + + +class SparkJobLog(Model): + """SparkJobLog. + + :param id: The livy id of the spark job. + :type id: int + :param from_property: Offset from start of log. + :type from_property: int + :param size: Max number of log lines. + :type size: int + :param total: Total number of log lines. + :type total: long + :param log_lines: The log lines. + :type log_lines: list[str] + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'int'}, + 'from_property': {'key': 'from', 'type': 'int'}, + 'size': {'key': 'size', 'type': 'int'}, + 'total': {'key': 'total', 'type': 'long'}, + 'log_lines': {'key': 'log', 'type': '[str]'}, + } + + def __init__(self, *, id: int=None, from_property: int=None, size: int=None, total: int=None, log_lines=None, **kwargs) -> None: + super(SparkJobLog, self).__init__(**kwargs) + self.id = id + self.from_property = from_property + self.size = size + self.total = total + self.log_lines = log_lines + + +class SparkJobState(Model): + """SparkJobState. + + :param id: The livy id of the spark job. + :type id: int + :param state: The current state of the spark job. 
Possible values include: + 'not_started', 'starting', 'idle', 'running', 'busy', 'shutting_down', + 'error', 'dead', 'killed', 'success', 'recovering' + :type state: str or ~azure.hdinsight.job.models.JobState + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'int'}, + 'state': {'key': 'state', 'type': 'str'}, + } + + def __init__(self, *, id: int=None, state=None, **kwargs) -> None: + super(SparkJobState, self).__init__(**kwargs) + self.id = id + self.state = state + + +class SparkSessionCollection(Model): + """SparkSessionCollection. + + :param from_property: The start index to fetch spark sessions. + :type from_property: int + :param total: Number of spark sessions to fetch. + :type total: int + :param sessions: List of spark sessions. + :type sessions: list[~azure.hdinsight.job.models.SparkSessionJob] + """ + + _attribute_map = { + 'from_property': {'key': 'from', 'type': 'int'}, + 'total': {'key': 'total', 'type': 'int'}, + 'sessions': {'key': 'sessions', 'type': '[SparkSessionJob]'}, + } + + def __init__(self, *, from_property: int=None, total: int=None, sessions=None, **kwargs) -> None: + super(SparkSessionCollection, self).__init__(**kwargs) + self.from_property = from_property + self.total = total + self.sessions = sessions + + +class SparkSessionJob(Model): + """SparkSessionJob. + + :param id: The livy id of the spark session job. + :type id: int + :param app_id: The application id of this job. + :type app_id: str + :param owner: Remote user who submitted this job. + :type owner: str + :param proxy_user: User to impersonate when running. + :type proxy_user: str + :param kind: Spark session job kind. Possible values include: 'spark', + 'pyspark', 'sparkr', 'sql' + :type kind: str or ~azure.hdinsight.job.models.SessionJobKind + :param log_lines: The log lines. + :type log_lines: list[str] + :param state: The current state of the spark session job. Possible values + include: 'not_started', 'starting', 'idle', 'running', 'busy', + 'shutting_down', 'error', 'dead', 'killed', 'success', 'recovering' + :type state: str or ~azure.hdinsight.job.models.JobState + :param app_info: The detailed application info. + :type app_info: dict[str, str] + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'int'}, + 'app_id': {'key': 'appId', 'type': 'str'}, + 'owner': {'key': 'owner', 'type': 'str'}, + 'proxy_user': {'key': 'proxyUser', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, + 'log_lines': {'key': 'log', 'type': '[str]'}, + 'state': {'key': 'state', 'type': 'str'}, + 'app_info': {'key': 'appInfo', 'type': '{str}'}, + } + + def __init__(self, *, id: int=None, app_id: str=None, owner: str=None, proxy_user: str=None, kind=None, log_lines=None, state=None, app_info=None, **kwargs) -> None: + super(SparkSessionJob, self).__init__(**kwargs) + self.id = id + self.app_id = app_id + self.owner = owner + self.proxy_user = proxy_user + self.kind = kind + self.log_lines = log_lines + self.state = state + self.app_info = app_info + + +class SparkSessionJobRequest(Model): + """SparkSessionJobRequest. + + :param kind: Spark session job kind. Possible values include: 'spark', + 'pyspark', 'sparkr', 'sql' + :type kind: str or ~azure.hdinsight.job.models.SessionJobKind + :param proxy_user: User to impersonate when starting the session. + :type proxy_user: str + :param jars: Jars to be used in this session. + :type jars: list[str] + :param python_files: Python files to be used in this session. + :type python_files: list[str] + :param files: Files to be used in this session. 
+ :type files: list[str] + :param driver_memory: Amount of memory to use for the driver process. + :type driver_memory: str + :param driver_cores: Number of cores to use for the driver process. + :type driver_cores: int + :param executor_memory: Amount of memory to use per executor process. + :type executor_memory: str + :param executor_cores: Number of cores to use for each executor. + :type executor_cores: int + :param executor_count: Number of executors to launch for this session. + :type executor_count: int + :param archives: Archives to be used in this session. + :type archives: list[str] + :param queue: The name of the YARN queue to which submitted. + :type queue: str + :param name: The name of this session. + :type name: str + :param configuration: Spark configuration properties. + :type configuration: dict[str, str] + :param heartbeat_timeout_in_second: Timeout in second to which session be + orphaned. + :type heartbeat_timeout_in_second: int + """ + + _attribute_map = { + 'kind': {'key': 'kind', 'type': 'str'}, + 'proxy_user': {'key': 'proxyUser', 'type': 'str'}, + 'jars': {'key': 'jars', 'type': '[str]'}, + 'python_files': {'key': 'pyFiles', 'type': '[str]'}, + 'files': {'key': 'files', 'type': '[str]'}, + 'driver_memory': {'key': 'driverMemory', 'type': 'str'}, + 'driver_cores': {'key': 'driverCores', 'type': 'int'}, + 'executor_memory': {'key': 'executorMemory', 'type': 'str'}, + 'executor_cores': {'key': 'executorCores', 'type': 'int'}, + 'executor_count': {'key': 'numExecutors', 'type': 'int'}, + 'archives': {'key': 'archives', 'type': '[str]'}, + 'queue': {'key': 'queue', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'configuration': {'key': 'conf', 'type': '{str}'}, + 'heartbeat_timeout_in_second': {'key': 'heartbeatTimeoutInSecond', 'type': 'int'}, + } + + def __init__(self, *, kind=None, proxy_user: str=None, jars=None, python_files=None, files=None, driver_memory: str=None, driver_cores: int=None, executor_memory: str=None, executor_cores: int=None, executor_count: int=None, archives=None, queue: str=None, name: str=None, configuration=None, heartbeat_timeout_in_second: int=None, **kwargs) -> None: + super(SparkSessionJobRequest, self).__init__(**kwargs) + self.kind = kind + self.proxy_user = proxy_user + self.jars = jars + self.python_files = python_files + self.files = files + self.driver_memory = driver_memory + self.driver_cores = driver_cores + self.executor_memory = executor_memory + self.executor_cores = executor_cores + self.executor_count = executor_count + self.archives = archives + self.queue = queue + self.name = name + self.configuration = configuration + self.heartbeat_timeout_in_second = heartbeat_timeout_in_second + + +class SparkStatement(Model): + """SparkStatement. + + :param id: The livy id of the spark statement job. + :type id: int + :param code: The execution code. + :type code: str + :param state: The current state of the spark statement. Possible values + include: 'waiting', 'running', 'available', 'error', 'cancelling', + 'cancelled' + :type state: str or ~azure.hdinsight.job.models.StatementState + :param output: The execution output. + :type output: ~azure.hdinsight.job.models.SparkStatementOutput + :param progress: The execution progress. 
+ :type progress: float + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'int'}, + 'code': {'key': 'code', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'str'}, + 'output': {'key': 'output', 'type': 'SparkStatementOutput'}, + 'progress': {'key': 'progress', 'type': 'float'}, + } + + def __init__(self, *, id: int=None, code: str=None, state=None, output=None, progress: float=None, **kwargs) -> None: + super(SparkStatement, self).__init__(**kwargs) + self.id = id + self.code = code + self.state = state + self.output = output + self.progress = progress + + +class SparkStatementCancellationResult(Model): + """SparkStatementCancellationResult. + + :param cancel_message: + :type cancel_message: str + """ + + _attribute_map = { + 'cancel_message': {'key': 'msg', 'type': 'str'}, + } + + def __init__(self, *, cancel_message: str=None, **kwargs) -> None: + super(SparkStatementCancellationResult, self).__init__(**kwargs) + self.cancel_message = cancel_message + + +class SparkStatementCollection(Model): + """SparkStatementCollection. + + :param statements: List of spark statements. + :type statements: list[~azure.hdinsight.job.models.SparkStatement] + """ + + _attribute_map = { + 'statements': {'key': 'statements', 'type': '[SparkStatement]'}, + } + + def __init__(self, *, statements=None, **kwargs) -> None: + super(SparkStatementCollection, self).__init__(**kwargs) + self.statements = statements + + +class SparkStatementOutput(Model): + """SparkStatementOutput. + + :param status: Execution status. Possible values include: 'ok', 'error', + 'abort' + :type status: str or ~azure.hdinsight.job.models.StatementExecutionStatus + :param execution_count: A monotonically increasing number. + :type execution_count: int + :param data: Statement output. + :type data: object + """ + + _attribute_map = { + 'status': {'key': 'status', 'type': 'str'}, + 'execution_count': {'key': 'execution_count', 'type': 'int'}, + 'data': {'key': 'data', 'type': 'object'}, + } + + def __init__(self, *, status=None, execution_count: int=None, data=None, **kwargs) -> None: + super(SparkStatementOutput, self).__init__(**kwargs) + self.status = status + self.execution_count = execution_count + self.data = data + + +class SparkStatementRequest(Model): + """SparkStatementRequest. + + :param code: + :type code: str + :param kind: Possible values include: 'spark', 'pyspark', 'sparkr', 'sql' + :type kind: str or ~azure.hdinsight.job.models.SessionJobKind + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, + } + + def __init__(self, *, code: str=None, kind=None, **kwargs) -> None: + super(SparkStatementRequest, self).__init__(**kwargs) + self.code = code + self.kind = kind + + +class Status(Model): + """Gets or sets the object containing the job status information. + + :param cleanup_progress: The progress made on the cleanup. + :type cleanup_progress: float + :param failure_info: The information about any failures that have + occurred. + :type failure_info: str + :param finish_time: The time at which the job completed. It is an integer + in milliseconds, as a Unix timestamp relative to 1/1/1970 00:00:00. + :type finish_time: long + :param history_file: The history file of the job. + :type history_file: str + :param job_ac_ls: The ACLs of the job. + :type job_ac_ls: object + :param job_complete: Whether or not the job has completed. + :type job_complete: bool + :param job_file: The job configuration file. 
+ :type job_file: str + :param job_id: The full ID of the job. + :type job_id: str + :param job_id1: The ID of the job. + :type job_id1: ~azure.hdinsight.job.models.JobID + :param job_name: The user-specified job name. + :type job_name: str + :param job_priority: The priority of the job. + :type job_priority: str + :param map_progress: The progress made on the maps. + :type map_progress: float + :param needed_mem: The amount of memory needed for the job. + :type needed_mem: long + :param num_reserved_slots: The number of slots reserved. + :type num_reserved_slots: int + :param num_used_slots: The number of slots used for the job. + :type num_used_slots: int + :param priority: The priority of the job. + :type priority: str + :param queue: The job queue name. + :type queue: str + :param reduce_progress: The progress made on the reduces. + :type reduce_progress: float + :param reserved_mem: The amount of memory reserved for the job. + :type reserved_mem: long + :param retired: Whether or not the job has been retired. + :type retired: bool + :param run_state: The current state of the job. + :type run_state: int + :param scheduling_info: The information about the scheduling of the job. + :type scheduling_info: str + :param setup_progress: The progress made on the setup. + :type setup_progress: float + :param start_time: The time at which the job started. It is an integer in + milliseconds, as a Unix timestamp relative to 1/1/1970 00:00:00. + :type start_time: long + :param state: The state of the job. + :type state: str + :param tracking_url: The link to the web-ui for details of the job. + :type tracking_url: str + :param uber: Whether job running in uber mode. + :type uber: bool + :param used_mem: The amount of memory used by the job. + :type used_mem: long + :param username: The userid of the person who submitted the job. 
+ :type username: str + """ + + _attribute_map = { + 'cleanup_progress': {'key': 'cleanupProgress', 'type': 'float'}, + 'failure_info': {'key': 'failureInfo', 'type': 'str'}, + 'finish_time': {'key': 'finishTime', 'type': 'long'}, + 'history_file': {'key': 'historyFile', 'type': 'str'}, + 'job_ac_ls': {'key': 'jobACLs', 'type': 'object'}, + 'job_complete': {'key': 'jobComplete', 'type': 'bool'}, + 'job_file': {'key': 'jobFile', 'type': 'str'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'job_id1': {'key': 'jobID', 'type': 'JobID'}, + 'job_name': {'key': 'jobName', 'type': 'str'}, + 'job_priority': {'key': 'jobPriority', 'type': 'str'}, + 'map_progress': {'key': 'mapProgress', 'type': 'float'}, + 'needed_mem': {'key': 'neededMem', 'type': 'long'}, + 'num_reserved_slots': {'key': 'numReservedSlots', 'type': 'int'}, + 'num_used_slots': {'key': 'numUsedSlots', 'type': 'int'}, + 'priority': {'key': 'priority', 'type': 'str'}, + 'queue': {'key': 'queue', 'type': 'str'}, + 'reduce_progress': {'key': 'reduceProgress', 'type': 'float'}, + 'reserved_mem': {'key': 'reservedMem', 'type': 'long'}, + 'retired': {'key': 'retired', 'type': 'bool'}, + 'run_state': {'key': 'runState', 'type': 'int'}, + 'scheduling_info': {'key': 'schedulingInfo', 'type': 'str'}, + 'setup_progress': {'key': 'setupProgress', 'type': 'float'}, + 'start_time': {'key': 'startTime', 'type': 'long'}, + 'state': {'key': 'state', 'type': 'str'}, + 'tracking_url': {'key': 'trackingUrl', 'type': 'str'}, + 'uber': {'key': 'uber', 'type': 'bool'}, + 'used_mem': {'key': 'usedMem', 'type': 'long'}, + 'username': {'key': 'username', 'type': 'str'}, + } + + def __init__(self, *, cleanup_progress: float=None, failure_info: str=None, finish_time: int=None, history_file: str=None, job_ac_ls=None, job_complete: bool=None, job_file: str=None, job_id: str=None, job_id1=None, job_name: str=None, job_priority: str=None, map_progress: float=None, needed_mem: int=None, num_reserved_slots: int=None, num_used_slots: int=None, priority: str=None, queue: str=None, reduce_progress: float=None, reserved_mem: int=None, retired: bool=None, run_state: int=None, scheduling_info: str=None, setup_progress: float=None, start_time: int=None, state: str=None, tracking_url: str=None, uber: bool=None, used_mem: int=None, username: str=None, **kwargs) -> None: + super(Status, self).__init__(**kwargs) + self.cleanup_progress = cleanup_progress + self.failure_info = failure_info + self.finish_time = finish_time + self.history_file = history_file + self.job_ac_ls = job_ac_ls + self.job_complete = job_complete + self.job_file = job_file + self.job_id = job_id + self.job_id1 = job_id1 + self.job_name = job_name + self.job_priority = job_priority + self.map_progress = map_progress + self.needed_mem = needed_mem + self.num_reserved_slots = num_reserved_slots + self.num_used_slots = num_used_slots + self.priority = priority + self.queue = queue + self.reduce_progress = reduce_progress + self.reserved_mem = reserved_mem + self.retired = retired + self.run_state = run_state + self.scheduling_info = scheduling_info + self.setup_progress = setup_progress + self.start_time = start_time + self.state = state + self.tracking_url = tracking_url + self.uber = uber + self.used_mem = used_mem + self.username = username + + +class Userargs(Model): + """Gets or sets the object containing the user arguments. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar arg: The list of args defined by the user. 
+ :vartype arg: list[str] + :param callback: The callback URL, if any. + :type callback: object + :ivar define: The define properties defined by the user. + :vartype define: list[str] + :param enablelog: Whether or not the user enabled logs. + :type enablelog: str + :param execute: The query defined by the user. + :type execute: str + :param file: The query file provided by the user. + :type file: object + :param files: The files defined by the user. + :type files: object + :param jar: The JAR file provided by the user. + :type jar: str + :param statusdir: The status directory defined by the user. + :type statusdir: object + """ + + _validation = { + 'arg': {'readonly': True}, + 'define': {'readonly': True}, + } + + _attribute_map = { + 'arg': {'key': 'arg', 'type': '[str]'}, + 'callback': {'key': 'callback', 'type': 'object'}, + 'define': {'key': 'define', 'type': '[str]'}, + 'enablelog': {'key': 'enablelog', 'type': 'str'}, + 'execute': {'key': 'execute', 'type': 'str'}, + 'file': {'key': 'file', 'type': 'object'}, + 'files': {'key': 'files', 'type': 'object'}, + 'jar': {'key': 'jar', 'type': 'str'}, + 'statusdir': {'key': 'statusdir', 'type': 'object'}, + } + + def __init__(self, *, callback=None, enablelog: str=None, execute: str=None, file=None, files=None, jar: str=None, statusdir=None, **kwargs) -> None: + super(Userargs, self).__init__(**kwargs) + self.arg = None + self.callback = callback + self.define = None + self.enablelog = enablelog + self.execute = execute + self.file = file + self.files = files + self.jar = jar + self.statusdir = statusdir diff --git a/sdk/azure-hdinsight-job/azure/hdinsight/job/operations/__init__.py b/sdk/azure-hdinsight-job/azure/hdinsight/job/operations/__init__.py new file mode 100644 index 000000000000..2c6ffba81cf5 --- /dev/null +++ b/sdk/azure-hdinsight-job/azure/hdinsight/job/operations/__init__.py @@ -0,0 +1,16 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from ._job_operations import JobOperations + +__all__ = [ + 'JobOperations', +] diff --git a/sdk/azure-hdinsight-job/azure/hdinsight/job/operations/_job_operations.py b/sdk/azure-hdinsight-job/azure/hdinsight/job/operations/_job_operations.py new file mode 100644 index 000000000000..b84650c25e52 --- /dev/null +++ b/sdk/azure-hdinsight-job/azure/hdinsight/job/operations/_job_operations.py @@ -0,0 +1,1657 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse +from msrestazure.azure_exceptions import CloudError + +from .. import models + + +class JobOperations(object): + """JobOperations operations. 
+
+    You should not instantiate this class directly; instead, create a Client
+    instance, which will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    :ivar fields: If fields is set to '*', the request will return full details of the job. Currently the value can only be '*'. Constant value: "*".
+    :ivar showall: If showall is set to 'true', the request will return all jobs the user has permission to view, not only the jobs belonging to the user. Constant value: "true".
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer):
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+        self.fields = "*"
+        self.showall = "true"
+
+        self.config = config
+
+    def get(
+            self, job_id, custom_headers=None, raw=False, **operation_config):
+        """Gets job details from the specified HDInsight cluster.
+
+        :param job_id: The id of the job.
+        :type job_id: str
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides`.
+        :return: JobDetailRootJsonObject or ClientRawResponse if raw=true
+        :rtype: ~azure.hdinsight.job.models.JobDetailRootJsonObject or
+         ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`JobOperationsErrorResponseException`
+        """
+        # Construct URL
+        url = self.get.metadata['url']
+        path_format_arguments = {
+            'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
+            'jobId': self._serialize.url("job_id", job_id, 'str')
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        query_parameters['user.name'] = self._serialize.query("self.config.username", self.config.username, 'str')
+        query_parameters['fields'] = self._serialize.query("self.fields", self.fields, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        if self.config.generate_client_request_id:
+            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
+        if custom_headers:
+            header_parameters.update(custom_headers)
+        if self.config.accept_language is not None:
+            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
+
+        # Construct and send request
+        request = self._client.get(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [200]:
+            raise models.JobOperationsErrorResponseException(self._deserialize, response)
+
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('JobDetailRootJsonObject', response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    get.metadata = {'url': '/templeton/v1/jobs/{jobId}'}
+
+    def kill(
+            self, job_id, custom_headers=None, raw=False, **operation_config):
+        """Initiates a cancel operation on the given running job in the
+        specified HDInsight cluster.
+
+        :param job_id: The id of the job.
+ :type job_id: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: JobDetailRootJsonObject or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.JobDetailRootJsonObject or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`JobOperationsErrorResponseException` + """ + # Construct URL + url = self.kill.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str', min_length=1) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['user.name'] = self._serialize.query("self.config.username", self.config.username, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.JobOperationsErrorResponseException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('JobDetailRootJsonObject', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + kill.metadata = {'url': '/templeton/v1/jobs/{jobId}'} + + def list( + self, custom_headers=None, raw=False, **operation_config): + """Gets the list of jobs from the specified HDInsight cluster. + + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
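+
+        Example: a minimal usage sketch; it is illustrative only and is not
+        part of the generated client. It assumes the client constructor
+        mirrors ``HDInsightJobClientConfiguration(credentials, endpoint,
+        username)``, that the operations group is exposed as ``client.job``,
+        and that the endpoint, user name and password below are placeholders::
+
+            from msrest.authentication import BasicAuthentication
+            from azure.hdinsight.job import HDInsightJobClient
+
+            # Basic authentication against the cluster gateway (placeholder values).
+            credentials = BasicAuthentication('admin', 'cluster-password')
+            client = HDInsightJobClient(credentials, 'clustername.azurehdinsight.net', 'admin')
+            jobs = client.job.list()
+            print(len(jobs))
+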
+ :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.hdinsight.job.models.JobListJsonObject] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`JobOperationsErrorResponseException` + """ + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['user.name'] = self._serialize.query("self.config.username", self.config.username, 'str') + query_parameters['showall'] = self._serialize.query("self.showall", self.showall, 'str') + query_parameters['fields'] = self._serialize.query("self.fields", self.fields, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.JobOperationsErrorResponseException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[JobListJsonObject]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + list.metadata = {'url': '/templeton/v1/jobs'} + + def list_after_job_id( + self, jobid=None, numrecords=None, custom_headers=None, raw=False, **operation_config): + """Gets numrecords Of Jobs after jobid from the specified HDInsight + cluster. + + :param jobid: JobId from where to list jobs. + :type jobid: str + :param numrecords: Number of jobs to fetch. + :type numrecords: int + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.hdinsight.job.models.JobListJsonObject] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`JobOperationsErrorResponseException` + """ + # Construct URL + url = self.list_after_job_id.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['user.name'] = self._serialize.query("self.config.username", self.config.username, 'str') + if jobid is not None: + query_parameters['jobid'] = self._serialize.query("jobid", jobid, 'str') + if numrecords is not None: + query_parameters['numrecords'] = self._serialize.query("numrecords", numrecords, 'int', minimum=1) + query_parameters['showall'] = self._serialize.query("self.showall", self.showall, 'str') + query_parameters['fields'] = self._serialize.query("self.fields", self.fields, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.JobOperationsErrorResponseException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[JobListJsonObject]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + list_after_job_id.metadata = {'url': '/templeton/v1/jobs'} + + def submit_hive_job( + self, content, custom_headers=None, raw=False, callback=None, **operation_config): + """Submits a Hive job to an HDInsight cluster. + + :param content: The content of the Hive job request. + :type content: Generator + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param callback: When specified, will be called with each chunk of + data that is streamed. The callback should take two arguments, the + bytes of the current chunk of data and the response object. If the + data is uploading, response will be None. + :type callback: Callable[Bytes, response=None] + :param operation_config: :ref:`Operation configuration + overrides`. 
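+
+        Example: an illustrative sketch only; the form-style payload below
+        follows the WebHCat (Templeton) ``/templeton/v1/hive`` convention and
+        its field names are assumptions, as is the ``client`` object created
+        in the ``list`` example above::
+
+            # Wrap the encoded payload in a generator, matching the ``content`` parameter.
+            payload = b"execute=show+tables;&statusdir=example-status"
+            result = client.job.submit_hive_job(iter([payload]))
+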
+ :return: JobSubmissionJsonResponse or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.JobSubmissionJsonResponse or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`JobOperationsErrorResponseException` + """ + # Construct URL + url = self.submit_hive_job.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['user.name'] = self._serialize.query("self.config.username", self.config.username, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/text' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = self._client.stream_upload(content, callback) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.JobOperationsErrorResponseException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('JobSubmissionJsonResponse', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + submit_hive_job.metadata = {'url': '/templeton/v1/hive'} + + def submit_map_reduce_job( + self, content, custom_headers=None, raw=False, callback=None, **operation_config): + """Submits a MapReduce job to an HDInsight cluster. + + :param content: The content of the MapReduce job request. + :type content: Generator + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param callback: When specified, will be called with each chunk of + data that is streamed. The callback should take two arguments, the + bytes of the current chunk of data and the response object. If the + data is uploading, response will be None. + :type callback: Callable[Bytes, response=None] + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: JobSubmissionJsonResponse or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.JobSubmissionJsonResponse or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`JobOperationsErrorResponseException` + """ + # Construct URL + url = self.submit_map_reduce_job.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['user.name'] = self._serialize.query("self.config.username", self.config.username, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'text/plain' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = self._client.stream_upload(content, callback) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.JobOperationsErrorResponseException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('JobSubmissionJsonResponse', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + submit_map_reduce_job.metadata = {'url': '/templeton/v1/mapreduce/jar'} + + def submit_map_reduce_streaming_job( + self, content, custom_headers=None, raw=False, callback=None, **operation_config): + """Submits a MapReduce streaming job to an HDInsight cluster. + + :param content: The content of the MapReduce job request. + :type content: Generator + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param callback: When specified, will be called with each chunk of + data that is streamed. The callback should take two arguments, the + bytes of the current chunk of data and the response object. If the + data is uploading, response will be None. + :type callback: Callable[Bytes, response=None] + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: JobSubmissionJsonResponse or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.JobSubmissionJsonResponse or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`JobOperationsErrorResponseException` + """ + # Construct URL + url = self.submit_map_reduce_streaming_job.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['user.name'] = self._serialize.query("self.config.username", self.config.username, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'text/plain' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = self._client.stream_upload(content, callback) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.JobOperationsErrorResponseException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('JobSubmissionJsonResponse', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + submit_map_reduce_streaming_job.metadata = {'url': '/templeton/v1/mapreduce/streaming'} + + def submit_pig_job( + self, content, custom_headers=None, raw=False, callback=None, **operation_config): + """Submits a Pig job to an HDInsight cluster. + + :param content: The content of the Pig job request. + :type content: Generator + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param callback: When specified, will be called with each chunk of + data that is streamed. The callback should take two arguments, the + bytes of the current chunk of data and the response object. If the + data is uploading, response will be None. + :type callback: Callable[Bytes, response=None] + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: JobSubmissionJsonResponse or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.JobSubmissionJsonResponse or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`JobOperationsErrorResponseException` + """ + # Construct URL + url = self.submit_pig_job.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['user.name'] = self._serialize.query("self.config.username", self.config.username, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'text/plain' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = self._client.stream_upload(content, callback) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.JobOperationsErrorResponseException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('JobSubmissionJsonResponse', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + submit_pig_job.metadata = {'url': '/templeton/v1/pig'} + + def submit_sqoop_job( + self, content, custom_headers=None, raw=False, callback=None, **operation_config): + """Submits a Sqoop job to an HDInsight cluster. + + :param content: The content of the Sqoop job request. + :type content: Generator + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param callback: When specified, will be called with each chunk of + data that is streamed. The callback should take two arguments, the + bytes of the current chunk of data and the response object. If the + data is uploading, response will be None. + :type callback: Callable[Bytes, response=None] + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: JobSubmissionJsonResponse or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.JobSubmissionJsonResponse or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`JobOperationsErrorResponseException` + """ + # Construct URL + url = self.submit_sqoop_job.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['user.name'] = self._serialize.query("self.config.username", self.config.username, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'text/plain' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = self._client.stream_upload(content, callback) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.JobOperationsErrorResponseException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('JobSubmissionJsonResponse', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + submit_sqoop_job.metadata = {'url': '/templeton/v1/sqoop'} + + def get_app_state( + self, app_id, custom_headers=None, raw=False, **operation_config): + """Gets application state from the specified HDInsight cluster. + + :param app_id: The id of the job. + :type app_id: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
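+
+        Example: illustrative sketch, assuming ``client`` from the ``list``
+        example above, a placeholder YARN application id, and that the
+        ``AppState`` model exposes a ``state`` attribute::
+
+            app_state = client.job.get_app_state('application_1585002538000_0001')
+            print(app_state.state)
+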
+ :return: AppState or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.AppState or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`JobOperationsErrorResponseException` + """ + # Construct URL + url = self.get_app_state.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), + 'appId': self._serialize.url("app_id", app_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.JobOperationsErrorResponseException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('AppState', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_app_state.metadata = {'url': '/ws/v1/cluster/apps/{appId}/state'} + + def list_spark_batch_job( + self, from_parameter=None, size=None, custom_headers=None, raw=False, **operation_config): + """List all spark batch jobs. + + :param from_parameter: Optional param specifying which index the list + should begin from. + :type from_parameter: int + :param size: Optional param specifying the size of the returned list. + By default it is 20 and that is the maximum. + :type size: int + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
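+
+        Example: illustrative sketch, assuming ``client`` from the ``list``
+        example above::
+
+            # Fetch the first ten batch jobs and report the total count.
+            collection = client.job.list_spark_batch_job(from_parameter=0, size=10)
+            print(collection.total)
+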
+ :return: SparkBatchJobCollection or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.SparkBatchJobCollection or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.list_spark_batch_job.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if from_parameter is not None: + query_parameters['from'] = self._serialize.query("from_parameter", from_parameter, 'int') + if size is not None: + query_parameters['size'] = self._serialize.query("size", size, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SparkBatchJobCollection', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + list_spark_batch_job.metadata = {'url': '/livy/batches'} + + def submit_spark_batch_job( + self, spark_batch_job_request, requested_by="ambari", custom_headers=None, raw=False, **operation_config): + """Create a new spark batch job. + + :param spark_batch_job_request: Livy compatible batch job request + payload. + :type spark_batch_job_request: + ~azure.hdinsight.job.models.SparkBatchJobRequest + :param requested_by: Add default value for X-Requested-By in header. + :type requested_by: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
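+
+        Example: illustrative sketch, assuming ``client`` from the ``list``
+        example above; the jar path, class name and job name are placeholders::
+
+            from azure.hdinsight.job import models
+
+            # Build a Livy-compatible batch request and submit it.
+            request = models.SparkBatchJobRequest(
+                file='wasbs:///example/jars/sparkapp.jar',
+                class_name='com.example.SparkApp',
+                name='example-spark-batch',
+                executor_count=2)
+            batch = client.job.submit_spark_batch_job(request)
+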
+ :return: SparkBatchJob or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.SparkBatchJob or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.submit_spark_batch_job.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if requested_by is not None: + header_parameters['X-Requested-By'] = self._serialize.header("requested_by", requested_by, 'str') + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = self._serialize.body(spark_batch_job_request, 'SparkBatchJobRequest') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 201: + deserialized = self._deserialize('SparkBatchJob', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + submit_spark_batch_job.metadata = {'url': '/livy/batches'} + + def get_spark_batch_job( + self, batch_id, custom_headers=None, raw=False, **operation_config): + """Gets a single spark batch job. + + :param batch_id: Identifier for the batch job. + :type batch_id: int + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: SparkBatchJob or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.SparkBatchJob or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.get_spark_batch_job.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), + 'batchId': self._serialize.url("batch_id", batch_id, 'int') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SparkBatchJob', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_spark_batch_job.metadata = {'url': '/livy/batches/{batchId}'} + + def delete_spark_batch( + self, batch_id, requested_by="ambari", custom_headers=None, raw=False, **operation_config): + """Cancels a running spark batch job. + + :param batch_id: Identifier for the batch job. + :type batch_id: int + :param requested_by: Add default value for X-Requested-By in header. + :type requested_by: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
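+
+        Example: illustrative sketch, assuming ``client`` from the ``list``
+        example above and a placeholder batch id::
+
+            result = client.job.delete_spark_batch(123)
+            print(result.deleted_message)
+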
+ :return: SparkJobDeletedResult or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.SparkJobDeletedResult or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.delete_spark_batch.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), + 'batchId': self._serialize.url("batch_id", batch_id, 'int') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if requested_by is not None: + header_parameters['X-Requested-By'] = self._serialize.header("requested_by", requested_by, 'str') + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SparkJobDeletedResult', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + delete_spark_batch.metadata = {'url': '/livy/batches/{batchId}'} + + def get_spark_batch_log( + self, batch_id, from_parameter=None, size=None, custom_headers=None, raw=False, **operation_config): + """Gets a single spark batch job logs. + + :param batch_id: Identifier for the batch job. + :type batch_id: int + :param from_parameter: Optional param specifying which index the list + should begin from. + :type from_parameter: int + :param size: Optional param specifying the size of the returned list. + By default it is 20 and that is the maximum. + :type size: int + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: SparkJobLog or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.SparkJobLog or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.get_spark_batch_log.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), + 'batchId': self._serialize.url("batch_id", batch_id, 'int') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if from_parameter is not None: + query_parameters['from'] = self._serialize.query("from_parameter", from_parameter, 'int') + if size is not None: + query_parameters['size'] = self._serialize.query("size", size, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SparkJobLog', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_spark_batch_log.metadata = {'url': '/livy/batches/{batchId}/log'} + + def get_spark_batch_state( + self, batch_id, custom_headers=None, raw=False, **operation_config): + """Gets a single spark batch state. + + :param batch_id: Identifier for the batch job. + :type batch_id: int + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
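+
+        Example: illustrative sketch of polling until the batch reaches a
+        terminal state, assuming ``client`` from the ``list`` example above
+        and a placeholder batch id; the state names come from the JobState
+        values listed in the models above::
+
+            import time
+
+            state = client.job.get_spark_batch_state(123)
+            while state.state not in ('success', 'dead', 'killed', 'error'):
+                time.sleep(10)
+                state = client.job.get_spark_batch_state(123)
+            print(state.state)
+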
+ :return: SparkJobState or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.SparkJobState or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.get_spark_batch_state.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), + 'batchId': self._serialize.url("batch_id", batch_id, 'int') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SparkJobState', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_spark_batch_state.metadata = {'url': '/livy/batches/{batchId}/state'} + + def list_spark_session_job( + self, from_parameter=None, size=None, custom_headers=None, raw=False, **operation_config): + """List all spark sessions. + + :param from_parameter: Optional param specifying which index the list + should begin from. + :type from_parameter: int + :param size: Optional param specifying the size of the returned list. + By default it is 20 and that is the maximum. + :type size: int + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: SparkSessionCollection or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.SparkSessionCollection or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.list_spark_session_job.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if from_parameter is not None: + query_parameters['from'] = self._serialize.query("from_parameter", from_parameter, 'int') + if size is not None: + query_parameters['size'] = self._serialize.query("size", size, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SparkSessionCollection', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + list_spark_session_job.metadata = {'url': '/livy/sessions'} + + def submit_spark_session_job( + self, spark_session_job_request, requested_by="ambari", custom_headers=None, raw=False, **operation_config): + """Create a new spark session. + + :param spark_session_job_request: Livy compatible session job request + payload. + :type spark_session_job_request: + ~azure.hdinsight.job.models.SparkSessionJobRequest + :param requested_by: Add default value for X-Requested-By in header. + :type requested_by: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: SparkSessionJob or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.SparkSessionJob or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.submit_spark_session_job.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if requested_by is not None: + header_parameters['X-Requested-By'] = self._serialize.header("requested_by", requested_by, 'str') + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = self._serialize.body(spark_session_job_request, 'SparkSessionJobRequest') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 201: + deserialized = self._deserialize('SparkSessionJob', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + submit_spark_session_job.metadata = {'url': '/livy/sessions'} + + def get_spark_session_job( + self, session_id, custom_headers=None, raw=False, **operation_config): + """Gets a single spark session. + + :param session_id: Identifier for the session. + :type session_id: int + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: SparkSessionJob or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.SparkSessionJob or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.get_spark_session_job.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), + 'sessionId': self._serialize.url("session_id", session_id, 'int') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SparkSessionJob', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_spark_session_job.metadata = {'url': '/livy/sessions/{sessionId}'} + + def delete_spark_session_job( + self, session_id, requested_by="ambari", custom_headers=None, raw=False, **operation_config): + """Cancels a running spark session. + + :param session_id: Identifier for the session. + :type session_id: int + :param requested_by: Add default value for X-Requested-By in header. + :type requested_by: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: SparkJobDeletedResult or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.SparkJobDeletedResult or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.delete_spark_session_job.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), + 'sessionId': self._serialize.url("session_id", session_id, 'int') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if requested_by is not None: + header_parameters['X-Requested-By'] = self._serialize.header("requested_by", requested_by, 'str') + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SparkJobDeletedResult', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + delete_spark_session_job.metadata = {'url': '/livy/sessions/{sessionId}'} + + def get_spark_session_log( + self, session_id, from_parameter=None, size=None, custom_headers=None, raw=False, **operation_config): + """Gets a single spark session job logs. + + :param session_id: Identifier for the session job. + :type session_id: int + :param from_parameter: Optional param specifying which index the list + should begin from. + :type from_parameter: int + :param size: Optional param specifying the size of the returned list. + By default it is 20 and that is the maximum. + :type size: int + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: SparkJobLog or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.SparkJobLog or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.get_spark_session_log.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), + 'sessionId': self._serialize.url("session_id", session_id, 'int') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if from_parameter is not None: + query_parameters['from'] = self._serialize.query("from_parameter", from_parameter, 'int') + if size is not None: + query_parameters['size'] = self._serialize.query("size", size, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SparkJobLog', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_spark_session_log.metadata = {'url': '/livy/sessions/{sessionId}/log'} + + def get_spark_session_state( + self, session_id, custom_headers=None, raw=False, **operation_config): + """Gets a single spark session job state. + + :param session_id: Identifier for the session job. + :type session_id: int + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: SparkJobState or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.SparkJobState or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.get_spark_session_state.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), + 'sessionId': self._serialize.url("session_id", session_id, 'int') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SparkJobState', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_spark_session_state.metadata = {'url': '/livy/sessions/{sessionId}/state'} + + def list_spark_statement_job( + self, session_id, custom_headers=None, raw=False, **operation_config): + """Gets a list of statements within a spark session. + + :param session_id: Identifier for the session. + :type session_id: int + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: SparkStatementCollection or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.SparkStatementCollection or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.list_spark_statement_job.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), + 'sessionId': self._serialize.url("session_id", session_id, 'int') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SparkStatementCollection', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + list_spark_statement_job.metadata = {'url': '/livy/sessions/{sessionId}/statements'} + + def submit_spark_statement_job( + self, session_id, requested_by="ambari", code=None, kind=None, custom_headers=None, raw=False, **operation_config): + """Create a statement within a spark session. + + :param session_id: Identifier for the session. + :type session_id: int + :param requested_by: Add default value for X-Requested-By in header. + :type requested_by: str + :param code: + :type code: str + :param kind: Possible values include: 'spark', 'pyspark', 'sparkr', + 'sql' + :type kind: str or ~azure.hdinsight.job.models.SessionJobKind + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: SparkStatement or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.SparkStatement or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + spark_statement_request = models.SparkStatementRequest(code=code, kind=kind) + + # Construct URL + url = self.submit_spark_statement_job.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), + 'sessionId': self._serialize.url("session_id", session_id, 'int') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if requested_by is not None: + header_parameters['X-Requested-By'] = self._serialize.header("requested_by", requested_by, 'str') + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = self._serialize.body(spark_statement_request, 'SparkStatementRequest') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 201: + deserialized = self._deserialize('SparkStatement', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + submit_spark_statement_job.metadata = {'url': '/livy/sessions/{sessionId}/statements'} + + def get_spark_statement_job( + self, session_id, statement_id, custom_headers=None, raw=False, **operation_config): + """Gets a single statement within a spark session. + + :param session_id: Identifier for the session. + :type session_id: int + :param statement_id: Identifier for the statement. + :type statement_id: int + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: SparkStatement or ClientRawResponse if raw=true + :rtype: ~azure.hdinsight.job.models.SparkStatement or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.get_spark_statement_job.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), + 'sessionId': self._serialize.url("session_id", session_id, 'int'), + 'statementId': self._serialize.url("statement_id", statement_id, 'int') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SparkStatement', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_spark_statement_job.metadata = {'url': '/livy/sessions/{sessionId}/statements/{statementId}'} + + def delete_spark_statement_job( + self, session_id, statement_id, requested_by="ambari", custom_headers=None, raw=False, **operation_config): + """Kill a statement within a session. + + :param session_id: Identifier for the session. + :type session_id: int + :param statement_id: Identifier for the statement. + :type statement_id: int + :param requested_by: Add default value for X-Requested-By in header. + :type requested_by: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: SparkStatementCancellationResult or ClientRawResponse if + raw=true + :rtype: ~azure.hdinsight.job.models.SparkStatementCancellationResult + or ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.delete_spark_statement_job.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), + 'sessionId': self._serialize.url("session_id", session_id, 'int'), + 'statementId': self._serialize.url("statement_id", statement_id, 'int') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if requested_by is not None: + header_parameters['X-Requested-By'] = self._serialize.header("requested_by", requested_by, 'str') + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SparkStatementCancellationResult', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + delete_spark_statement_job.metadata = {'url': '/livy/sessions/{sessionId}/statements/{statementId}/cancel'} diff --git a/sdk/azure-hdinsight-job/azure/hdinsight/job/version.py b/sdk/azure-hdinsight-job/azure/hdinsight/job/version.py new file mode 100644 index 000000000000..97fb20f656a5 --- /dev/null +++ b/sdk/azure-hdinsight-job/azure/hdinsight/job/version.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# --------------------------------------------------------------------------
+
+VERSION = "0.1.0"
+
diff --git a/sdk/azure-hdinsight-job/sdk_packaging.toml b/sdk/azure-hdinsight-job/sdk_packaging.toml
new file mode 100644
index 000000000000..70d913765714
--- /dev/null
+++ b/sdk/azure-hdinsight-job/sdk_packaging.toml
@@ -0,0 +1,8 @@
+[packaging]
+package_name = "azure-hdinsight-job"
+package_nspkg = "azure-hdinsight-nspkg"
+package_pprint_name = "HDInsight Job"
+package_doc_id = ""
+is_stable = false
+is_arm = true
+need_msrestazure = true
diff --git a/sdk/azure-hdinsight-job/setup.cfg b/sdk/azure-hdinsight-job/setup.cfg
new file mode 100644
index 000000000000..3c6e79cf31da
--- /dev/null
+++ b/sdk/azure-hdinsight-job/setup.cfg
@@ -0,0 +1,2 @@
+[bdist_wheel]
+universal=1
diff --git a/sdk/azure-hdinsight-job/setup.py b/sdk/azure-hdinsight-job/setup.py
new file mode 100644
index 000000000000..f24fbf4dcc54
--- /dev/null
+++ b/sdk/azure-hdinsight-job/setup.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#--------------------------------------------------------------------------
+
+import re
+import os.path
+from io import open
+from setuptools import find_packages, setup
+
+# Change the PACKAGE_NAME only to change folder and different name
+PACKAGE_NAME = "azure-hdinsight-job"
+PACKAGE_PPRINT_NAME = "HDInsight Job"
+
+# a-b-c => a/b/c
+package_folder_path = PACKAGE_NAME.replace('-', '/')
+# a-b-c => a.b.c
+namespace_name = PACKAGE_NAME.replace('-', '.')
+
+# azure v0.x is not compatible with this package
+# azure v0.x used to have a __version__ attribute (newer versions don't)
+try:
+    import azure
+    try:
+        ver = azure.__version__
+        raise Exception(
+            'This package is incompatible with azure=={}. '.format(ver) +
+            'Uninstall it with "pip uninstall azure".'
+        )
+    except AttributeError:
+        pass
+except ImportError:
+    pass
+
+# Version extraction inspired from 'requests'
+with open(os.path.join(package_folder_path, 'version.py')
+          if os.path.exists(os.path.join(package_folder_path, 'version.py'))
+          else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
+    version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
+                        fd.read(), re.MULTILINE).group(1)
+
+if not version:
+    raise RuntimeError('Cannot find version information')
+
+with open('README.md', encoding='utf-8') as f:
+    readme = f.read()
+with open('CHANGELOG.md', encoding='utf-8') as f:
+    changelog = f.read()
+
+setup(
+    name=PACKAGE_NAME,
+    version=version,
+    description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
+    long_description=readme + '\n\n' + changelog,
+    long_description_content_type='text/markdown',
+    license='MIT License',
+    author='Microsoft Corporation',
+    author_email='azpysdkhelp@microsoft.com',
+    url='https://github.com/Azure/azure-sdk-for-python',
+    classifiers=[
+        'Development Status :: 4 - Beta',
+        'Programming Language :: Python',
+        'Programming Language :: Python :: 2',
+        'Programming Language :: Python :: 2.7',
+        'Programming Language :: Python :: 3',
+        'Programming Language :: Python :: 3.5',
+        'Programming Language :: Python :: 3.6',
+        'Programming Language :: Python :: 3.7',
+        'Programming Language :: Python :: 3.8',
+        'License :: OSI Approved :: MIT License',
+    ],
+    zip_safe=False,
+    packages=find_packages(exclude=[
+        'tests',
+        # Exclude packages that will be covered by PEP420 or nspkg
+        'azure',
+        'azure.hdinsight',
+    ]),
+    install_requires=[
+        'msrest>=0.5.0',
+        'msrestazure>=0.4.32,<2.0.0',
+        'azure-common~=1.1',
+    ],
+    extras_require={
+        ":python_version<'3.0'": ['azure-hdinsight-nspkg'],
+    }
+)
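
Usage sketch (reviewer note, not part of the generated sources in this diff): a minimal, hedged example of driving the Livy session and statement operations defined above. Several details are assumptions rather than facts confirmed by the diff: that HDInsightJobClient's constructor mirrors HDInsightJobClientConfiguration(credentials, endpoint, username), that the Spark operations are exposed through a `job` operation group (per the JobOperations import), that msrest's BasicAuthentication is an acceptable credential for the cluster HTTP user, and that SparkStatement exposes an `id` attribute. The cluster name, user, password and session id are placeholders.

    from msrest.authentication import BasicAuthentication
    from azure.hdinsight.job import HDInsightJobClient

    # Placeholder cluster HTTP credentials and endpoint -- replace with real values.
    credentials = BasicAuthentication('admin', '<cluster-http-password>')
    # Assumed constructor shape, mirroring HDInsightJobClientConfiguration(credentials, endpoint, username).
    client = HDInsightJobClient(credentials, 'mycluster.azurehdinsight.net', 'admin')

    # List the Livy sessions currently known to the cluster (GET /livy/sessions),
    # then work against one of them; 0 is a placeholder session id.
    sessions = client.job.list_spark_session_job()
    session_id = 0

    # Run a statement in the session (POST /livy/sessions/{sessionId}/statements).
    # Allowed kinds per the docstring: 'spark', 'pyspark', 'sparkr', 'sql'.
    statement = client.job.submit_spark_statement_job(session_id, code='1 + 1', kind='spark')

    # Check the session state and fetch its log (GET .../state and .../log).
    print(client.job.get_spark_session_state(session_id))
    print(client.job.get_spark_session_log(session_id, from_parameter=0, size=20))

    # Cancel the statement (assumes SparkStatement has an `id` attribute),
    # then delete the session when it is no longer needed.
    client.job.delete_spark_statement_job(session_id, statement.id)
    client.job.delete_spark_session_job(session_id)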