From 4195b504b3b276ed0a96120ff0f232e0897eee66 Mon Sep 17 00:00:00 2001
From: SDK Automation
Date: Mon, 27 Jul 2020 11:11:20 +0000
Subject: [PATCH] Generated from 46877dc1dc2842b0c076a03ea147a195f2e600d5

Fix CI error
---
 .../azure/hdinsight/job/models/__init__.py    |   6 +
 .../models/_hd_insight_job_client_enums.py    |  32 ++++
 .../azure/hdinsight/job/models/_models.py     | 160 ++++++++--------
 .../azure/hdinsight/job/models/_models_py3.py | 172 ++++++++++--------
 .../job/operations/_job_operations.py         |   5 +-
 5 files changed, 219 insertions(+), 156 deletions(-)

diff --git a/sdk/azure-hdinsight-job/azure/hdinsight/job/models/__init__.py b/sdk/azure-hdinsight-job/azure/hdinsight/job/models/__init__.py
index c0bbb333b459..41326dc7c5cc 100644
--- a/sdk/azure-hdinsight-job/azure/hdinsight/job/models/__init__.py
+++ b/sdk/azure-hdinsight-job/azure/hdinsight/job/models/__init__.py
@@ -59,7 +59,10 @@ from ._models import Userargs
 from ._hd_insight_job_client_enums import (
     ApplicationState,
+    JobState,
     SessionJobKind,
+    StatementState,
+    StatementExecutionStatus,
 )

 __all__ = [
@@ -87,5 +90,8 @@
     'Status',
     'Userargs',
     'ApplicationState',
+    'JobState',
     'SessionJobKind',
+    'StatementState',
+    'StatementExecutionStatus',
 ]
diff --git a/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_hd_insight_job_client_enums.py b/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_hd_insight_job_client_enums.py
index 51e97b1c6d98..ef568bb913a8 100644
--- a/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_hd_insight_job_client_enums.py
+++ b/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_hd_insight_job_client_enums.py
@@ -25,9 +25,41 @@ class ApplicationState(str, Enum):
     killed = "KILLED"


+class JobState(str, Enum):
+
+    not_started = "not_started"
+    starting = "starting"
+    idle = "idle"
+    running = "running"
+    busy = "busy"
+    shutting_down = "shutting_down"
+    error = "error"
+    dead = "dead"
+    killed = "killed"
+    success = "success"
+    recovering = "recovering"
+
+
 class SessionJobKind(str, Enum):

     spark = "spark"
     pyspark = "pyspark"
     sparkr = "sparkr"
     sql = "sql"
+
+
+class StatementState(str, Enum):
+
+    waiting = "waiting"
+    running = "running"
+    available = "available"
+    error = "error"
+    cancelling = "cancelling"
+    cancelled = "cancelled"
+
+
+class StatementExecutionStatus(str, Enum):
+
+    ok = "ok"
+    error = "error"
+    abort = "abort"
diff --git a/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_models.py b/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_models.py
index 59ba1b9c60a6..9fd36835321b 100644
--- a/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_models.py
+++ b/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_models.py
@@ -224,15 +224,17 @@ def __init__(self, **kwargs):
 class SparkBatchJob(Model):
     """SparkBatchJob.

-    :param id:
+    :param id: The Livy ID of the Spark batch job.
     :type id: int
-    :param app_id:
+    :param app_id: The application ID of this job.
     :type app_id: str
-    :param app_info:
+    :param app_info: The detailed application info.
     :type app_info: dict[str, str]
-    :param state:
-    :type state: str
-    :param log_lines:
+    :param state: The current state of the Spark batch job. Possible values
+     include: 'not_started', 'starting', 'idle', 'running', 'busy',
+     'shutting_down', 'error', 'dead', 'killed', 'success', 'recovering'
+    :type state: str or ~azure.hdinsight.job.models.JobState
+    :param log_lines: The log lines.
     :type log_lines: list[str]
     """

@@ -256,11 +258,11 @@ def __init__(self, **kwargs):
 class SparkBatchJobCollection(Model):
     """SparkBatchJobCollection.

-    :param from_property:
+    :param from_property: The start index to fetch Spark batch jobs.
     :type from_property: int
-    :param total:
+    :param total: Number of Spark batch jobs to fetch.
     :type total: int
-    :param sessions:
+    :param sessions: List of Spark batch jobs.
     :type sessions: list[~azure.hdinsight.job.models.SparkBatchJob]
     """

@@ -280,37 +282,37 @@ def __init__(self, **kwargs):
 class SparkBatchJobRequest(Model):
     """SparkBatchJobRequest.

-    :param file:
+    :param file: File containing the application to execute.
     :type file: str
-    :param proxy_user:
+    :param proxy_user: User to impersonate when running the job.
     :type proxy_user: str
-    :param class_name:
+    :param class_name: Application Java/Spark main class.
     :type class_name: str
-    :param arguments:
+    :param arguments: Command line arguments for the application.
     :type arguments: list[str]
-    :param jars:
+    :param jars: Jars to be used in this batch job.
     :type jars: list[str]
-    :param python_files:
+    :param python_files: Python files to be used in this batch job.
     :type python_files: list[str]
-    :param files:
+    :param files: Files to be used in this batch job.
     :type files: list[str]
-    :param driver_memory:
+    :param driver_memory: Amount of memory to use for the driver process.
     :type driver_memory: str
-    :param driver_cores:
+    :param driver_cores: Number of cores to use for the driver process.
     :type driver_cores: int
-    :param executor_memory:
+    :param executor_memory: Amount of memory to use per executor process.
     :type executor_memory: str
-    :param executor_cores:
+    :param executor_cores: Number of cores to use for each executor.
     :type executor_cores: int
-    :param executor_count:
+    :param executor_count: Number of executors to launch for this batch job.
     :type executor_count: int
-    :param archives:
+    :param archives: Archives to be used in this batch job.
     :type archives: list[str]
-    :param queue:
+    :param queue: The name of the YARN queue to which the job is submitted.
     :type queue: str
-    :param name:
+    :param name: The name of this batch job.
     :type name: str
-    :param configuration:
+    :param configuration: Spark configuration properties.
     :type configuration: dict[str, str]
     """

@@ -372,15 +374,15 @@ def __init__(self, **kwargs):
 class SparkJobLog(Model):
     """SparkJobLog.

-    :param id:
+    :param id: The Livy ID of the Spark job.
     :type id: int
-    :param from_property:
+    :param from_property: Offset from the start of the log.
     :type from_property: int
-    :param size:
+    :param size: Maximum number of log lines.
     :type size: int
-    :param total:
+    :param total: Total number of log lines.
     :type total: long
-    :param log_lines:
+    :param log_lines: The log lines.
     :type log_lines: list[str]
     """

@@ -404,10 +406,12 @@ def __init__(self, **kwargs):
 class SparkJobState(Model):
     """SparkJobState.

-    :param id:
+    :param id: The Livy ID of the Spark job.
     :type id: int
-    :param state:
-    :type state: str
+    :param state: The current state of the Spark job. Possible values include:
+     'not_started', 'starting', 'idle', 'running', 'busy', 'shutting_down',
+     'error', 'dead', 'killed', 'success', 'recovering'
+    :type state: str or ~azure.hdinsight.job.models.JobState
     """

     _attribute_map = {
@@ -424,11 +428,11 @@ def __init__(self, **kwargs):
 class SparkSessionCollection(Model):
     """SparkSessionCollection.

-    :param from_property:
+    :param from_property: The start index to fetch Spark sessions.
     :type from_property: int
-    :param total:
+    :param total: Number of Spark sessions to fetch.
     :type total: int
-    :param sessions:
+    :param sessions: List of Spark sessions.
     :type sessions: list[~azure.hdinsight.job.models.SparkSessionJob]
     """

@@ -448,21 +452,24 @@ def __init__(self, **kwargs):
 class SparkSessionJob(Model):
     """SparkSessionJob.

-    :param id:
+    :param id: The Livy ID of the Spark session job.
     :type id: int
-    :param app_id:
+    :param app_id: The application ID of this job.
     :type app_id: str
-    :param owner:
+    :param owner: Remote user who submitted this job.
     :type owner: str
-    :param proxy_user:
+    :param proxy_user: User to impersonate when running the session.
     :type proxy_user: str
-    :param kind:
-    :type kind: str
-    :param log_lines:
+    :param kind: Spark session job kind. Possible values include: 'spark',
+     'pyspark', 'sparkr', 'sql'
+    :type kind: str or ~azure.hdinsight.job.models.SessionJobKind
+    :param log_lines: The log lines.
     :type log_lines: list[str]
-    :param state:
-    :type state: str
-    :param app_info:
+    :param state: The current state of the Spark session job. Possible values
+     include: 'not_started', 'starting', 'idle', 'running', 'busy',
+     'shutting_down', 'error', 'dead', 'killed', 'success', 'recovering'
+    :type state: str or ~azure.hdinsight.job.models.JobState
+    :param app_info: The detailed application info.
     :type app_info: dict[str, str]
     """

@@ -492,35 +499,37 @@ def __init__(self, **kwargs):
 class SparkSessionJobRequest(Model):
     """SparkSessionJobRequest.

-    :param kind: Possible values include: 'spark', 'pyspark', 'sparkr', 'sql'
+    :param kind: Spark session job kind. Possible values include: 'spark',
+     'pyspark', 'sparkr', 'sql'
     :type kind: str or ~azure.hdinsight.job.models.SessionJobKind
-    :param proxy_user:
+    :param proxy_user: User to impersonate when starting the session.
     :type proxy_user: str
-    :param jars:
+    :param jars: Jars to be used in this session.
     :type jars: list[str]
-    :param python_files:
+    :param python_files: Python files to be used in this session.
     :type python_files: list[str]
-    :param files:
+    :param files: Files to be used in this session.
     :type files: list[str]
-    :param driver_memory:
+    :param driver_memory: Amount of memory to use for the driver process.
     :type driver_memory: str
-    :param driver_cores:
+    :param driver_cores: Number of cores to use for the driver process.
     :type driver_cores: int
-    :param executor_memory:
+    :param executor_memory: Amount of memory to use per executor process.
     :type executor_memory: str
-    :param executor_cores:
+    :param executor_cores: Number of cores to use for each executor.
     :type executor_cores: int
-    :param executor_count:
+    :param executor_count: Number of executors to launch for this session.
     :type executor_count: int
-    :param archives:
+    :param archives: Archives to be used in this session.
     :type archives: list[str]
-    :param queue:
+    :param queue: The name of the YARN queue to which the session is submitted.
     :type queue: str
-    :param name:
+    :param name: The name of this session.
     :type name: str
-    :param configuration:
+    :param configuration: Spark configuration properties.
     :type configuration: dict[str, str]
-    :param heartbeat_timeout_in_second:
+    :param heartbeat_timeout_in_second: Timeout in seconds after which the
+     session is orphaned.
     :type heartbeat_timeout_in_second: int
     """

@@ -564,15 +573,17 @@ def __init__(self, **kwargs):
 class SparkStatement(Model):
     """SparkStatement.

-    :param id:
+    :param id: The Livy ID of the Spark statement job.
     :type id: int
-    :param code:
+    :param code: The execution code.
     :type code: str
-    :param state:
-    :type state: str
-    :param output:
+    :param state: The current state of the Spark statement. Possible values
+     include: 'waiting', 'running', 'available', 'error', 'cancelling',
+     'cancelled'
+    :type state: str or ~azure.hdinsight.job.models.StatementState
+    :param output: The execution output.
     :type output: ~azure.hdinsight.job.models.SparkStatementOutput
-    :param progress:
+    :param progress: The execution progress.
     :type progress: float
     """

@@ -612,7 +623,7 @@ def __init__(self, **kwargs):
 class SparkStatementCollection(Model):
     """SparkStatementCollection.

-    :param statements:
+    :param statements: List of Spark statements.
     :type statements: list[~azure.hdinsight.job.models.SparkStatement]
     """

@@ -628,11 +639,12 @@ def __init__(self, **kwargs):
 class SparkStatementOutput(Model):
     """SparkStatementOutput.

-    :param status:
-    :type status: str
-    :param execution_count:
+    :param status: Execution status. Possible values include: 'ok', 'error',
+     'abort'
+    :type status: str or ~azure.hdinsight.job.models.StatementExecutionStatus
+    :param execution_count: A monotonically increasing number.
     :type execution_count: int
-    :param data:
+    :param data: Statement output.
     :type data: object
     """

@@ -654,8 +666,8 @@ class SparkStatementRequest(Model):

     :param code:
     :type code: str
-    :param kind:
-    :type kind: str
+    :param kind: Possible values include: 'spark', 'pyspark', 'sparkr', 'sql'
+    :type kind: str or ~azure.hdinsight.job.models.SessionJobKind
     """

     _attribute_map = {
diff --git a/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_models_py3.py b/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_models_py3.py
index 6165e84a3a46..26cad1ca424c 100644
--- a/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_models_py3.py
+++ b/sdk/azure-hdinsight-job/azure/hdinsight/job/models/_models_py3.py
@@ -224,15 +224,17 @@ def __init__(self, *, job_file: str=None, job_id: str=None, job_id1=None, job_na
 class SparkBatchJob(Model):
     """SparkBatchJob.

-    :param id:
+    :param id: The Livy ID of the Spark batch job.
     :type id: int
-    :param app_id:
+    :param app_id: The application ID of this job.
     :type app_id: str
-    :param app_info:
+    :param app_info: The detailed application info.
     :type app_info: dict[str, str]
-    :param state:
-    :type state: str
-    :param log_lines:
+    :param state: The current state of the Spark batch job. Possible values
+     include: 'not_started', 'starting', 'idle', 'running', 'busy',
+     'shutting_down', 'error', 'dead', 'killed', 'success', 'recovering'
+    :type state: str or ~azure.hdinsight.job.models.JobState
+    :param log_lines: The log lines.
     :type log_lines: list[str]
     """

@@ -244,7 +246,7 @@ class SparkBatchJob(Model):
         'log_lines': {'key': 'log', 'type': '[str]'},
     }

-    def __init__(self, *, id: int=None, app_id: str=None, app_info=None, state: str=None, log_lines=None, **kwargs) -> None:
+    def __init__(self, *, id: int=None, app_id: str=None, app_info=None, state=None, log_lines=None, **kwargs) -> None:
         super(SparkBatchJob, self).__init__(**kwargs)
         self.id = id
         self.app_id = app_id
@@ -256,11 +258,11 @@ def __init__(self, *, id: int=None, app_id: str=None, app_info=None, state: str=
 class SparkBatchJobCollection(Model):
     """SparkBatchJobCollection.

-    :param from_property:
+    :param from_property: The start index to fetch Spark batch jobs.
     :type from_property: int
-    :param total:
+    :param total: Number of Spark batch jobs to fetch.
     :type total: int
-    :param sessions:
+    :param sessions: List of Spark batch jobs.
     :type sessions: list[~azure.hdinsight.job.models.SparkBatchJob]
     """

@@ -280,37 +282,37 @@ def __init__(self, *, from_property: int=None, total: int=None, sessions=None, *
 class SparkBatchJobRequest(Model):
     """SparkBatchJobRequest.

-    :param file:
+    :param file: File containing the application to execute.
     :type file: str
-    :param proxy_user:
+    :param proxy_user: User to impersonate when running the job.
     :type proxy_user: str
-    :param class_name:
+    :param class_name: Application Java/Spark main class.
     :type class_name: str
-    :param arguments:
+    :param arguments: Command line arguments for the application.
     :type arguments: list[str]
-    :param jars:
+    :param jars: Jars to be used in this batch job.
     :type jars: list[str]
-    :param python_files:
+    :param python_files: Python files to be used in this batch job.
     :type python_files: list[str]
-    :param files:
+    :param files: Files to be used in this batch job.
     :type files: list[str]
-    :param driver_memory:
+    :param driver_memory: Amount of memory to use for the driver process.
     :type driver_memory: str
-    :param driver_cores:
+    :param driver_cores: Number of cores to use for the driver process.
     :type driver_cores: int
-    :param executor_memory:
+    :param executor_memory: Amount of memory to use per executor process.
     :type executor_memory: str
-    :param executor_cores:
+    :param executor_cores: Number of cores to use for each executor.
     :type executor_cores: int
-    :param executor_count:
+    :param executor_count: Number of executors to launch for this batch job.
     :type executor_count: int
-    :param archives:
+    :param archives: Archives to be used in this batch job.
     :type archives: list[str]
-    :param queue:
+    :param queue: The name of the YARN queue to which the job is submitted.
     :type queue: str
-    :param name:
+    :param name: The name of this batch job.
     :type name: str
-    :param configuration:
+    :param configuration: Spark configuration properties.
     :type configuration: dict[str, str]
     """

@@ -372,15 +374,15 @@ def __init__(self, *, deleted_message: str=None, **kwargs) -> None:
 class SparkJobLog(Model):
     """SparkJobLog.

-    :param id:
+    :param id: The Livy ID of the Spark job.
     :type id: int
-    :param from_property:
+    :param from_property: Offset from the start of the log.
     :type from_property: int
-    :param size:
+    :param size: Maximum number of log lines.
     :type size: int
-    :param total:
+    :param total: Total number of log lines.
     :type total: long
-    :param log_lines:
+    :param log_lines: The log lines.
     :type log_lines: list[str]
     """

@@ -404,10 +406,12 @@ def __init__(self, *, id: int=None, from_property: int=None, size: int=None, tot
 class SparkJobState(Model):
     """SparkJobState.

-    :param id:
+    :param id: The Livy ID of the Spark job.
     :type id: int
-    :param state:
-    :type state: str
+    :param state: The current state of the Spark job. Possible values include:
+     'not_started', 'starting', 'idle', 'running', 'busy', 'shutting_down',
+     'error', 'dead', 'killed', 'success', 'recovering'
+    :type state: str or ~azure.hdinsight.job.models.JobState
     """

     _attribute_map = {
@@ -415,7 +419,7 @@ class SparkJobState(Model):
         'state': {'key': 'state', 'type': 'str'},
     }

-    def __init__(self, *, id: int=None, state: str=None, **kwargs) -> None:
+    def __init__(self, *, id: int=None, state=None, **kwargs) -> None:
         super(SparkJobState, self).__init__(**kwargs)
         self.id = id
         self.state = state
@@ -424,11 +428,11 @@ def __init__(self, *, id: int=None, state: str=None, **kwargs) -> None:
 class SparkSessionCollection(Model):
     """SparkSessionCollection.

-    :param from_property:
+    :param from_property: The start index to fetch Spark sessions.
     :type from_property: int
-    :param total:
+    :param total: Number of Spark sessions to fetch.
     :type total: int
-    :param sessions:
+    :param sessions: List of Spark sessions.
     :type sessions: list[~azure.hdinsight.job.models.SparkSessionJob]
     """

@@ -448,21 +452,24 @@ def __init__(self, *, from_property: int=None, total: int=None, sessions=None, *
 class SparkSessionJob(Model):
     """SparkSessionJob.

-    :param id:
+    :param id: The Livy ID of the Spark session job.
     :type id: int
-    :param app_id:
+    :param app_id: The application ID of this job.
     :type app_id: str
-    :param owner:
+    :param owner: Remote user who submitted this job.
     :type owner: str
-    :param proxy_user:
+    :param proxy_user: User to impersonate when running the session.
     :type proxy_user: str
-    :param kind:
-    :type kind: str
-    :param log_lines:
+    :param kind: Spark session job kind. Possible values include: 'spark',
+     'pyspark', 'sparkr', 'sql'
+    :type kind: str or ~azure.hdinsight.job.models.SessionJobKind
+    :param log_lines: The log lines.
     :type log_lines: list[str]
-    :param state:
-    :type state: str
-    :param app_info:
+    :param state: The current state of the Spark session job. Possible values
+     include: 'not_started', 'starting', 'idle', 'running', 'busy',
+     'shutting_down', 'error', 'dead', 'killed', 'success', 'recovering'
+    :type state: str or ~azure.hdinsight.job.models.JobState
+    :param app_info: The detailed application info.
     :type app_info: dict[str, str]
     """

@@ -477,7 +484,7 @@ class SparkSessionJob(Model):
         'app_info': {'key': 'appInfo', 'type': '{str}'},
     }

-    def __init__(self, *, id: int=None, app_id: str=None, owner: str=None, proxy_user: str=None, kind: str=None, log_lines=None, state: str=None, app_info=None, **kwargs) -> None:
+    def __init__(self, *, id: int=None, app_id: str=None, owner: str=None, proxy_user: str=None, kind=None, log_lines=None, state=None, app_info=None, **kwargs) -> None:
         super(SparkSessionJob, self).__init__(**kwargs)
         self.id = id
         self.app_id = app_id
@@ -492,35 +499,37 @@ def __init__(self, *, id: int=None, app_id: str=None, owner: str=None, proxy_use
 class SparkSessionJobRequest(Model):
     """SparkSessionJobRequest.

-    :param kind: Possible values include: 'spark', 'pyspark', 'sparkr', 'sql'
+    :param kind: Spark session job kind. Possible values include: 'spark',
+     'pyspark', 'sparkr', 'sql'
     :type kind: str or ~azure.hdinsight.job.models.SessionJobKind
-    :param proxy_user:
+    :param proxy_user: User to impersonate when starting the session.
     :type proxy_user: str
-    :param jars:
+    :param jars: Jars to be used in this session.
     :type jars: list[str]
-    :param python_files:
+    :param python_files: Python files to be used in this session.
     :type python_files: list[str]
-    :param files:
+    :param files: Files to be used in this session.
     :type files: list[str]
-    :param driver_memory:
+    :param driver_memory: Amount of memory to use for the driver process.
     :type driver_memory: str
-    :param driver_cores:
+    :param driver_cores: Number of cores to use for the driver process.
     :type driver_cores: int
-    :param executor_memory:
+    :param executor_memory: Amount of memory to use per executor process.
     :type executor_memory: str
-    :param executor_cores:
+    :param executor_cores: Number of cores to use for each executor.
     :type executor_cores: int
-    :param executor_count:
+    :param executor_count: Number of executors to launch for this session.
     :type executor_count: int
-    :param archives:
+    :param archives: Archives to be used in this session.
     :type archives: list[str]
-    :param queue:
+    :param queue: The name of the YARN queue to which the session is submitted.
     :type queue: str
-    :param name:
+    :param name: The name of this session.
     :type name: str
-    :param configuration:
+    :param configuration: Spark configuration properties.
     :type configuration: dict[str, str]
-    :param heartbeat_timeout_in_second:
+    :param heartbeat_timeout_in_second: Timeout in seconds after which the
+     session is orphaned.
     :type heartbeat_timeout_in_second: int
     """

@@ -564,15 +573,17 @@ def __init__(self, *, kind=None, proxy_user: str=None, jars=None, python_files=N
 class SparkStatement(Model):
     """SparkStatement.

-    :param id:
+    :param id: The Livy ID of the Spark statement job.
     :type id: int
-    :param code:
+    :param code: The execution code.
     :type code: str
-    :param state:
-    :type state: str
-    :param output:
+    :param state: The current state of the Spark statement. Possible values
+     include: 'waiting', 'running', 'available', 'error', 'cancelling',
+     'cancelled'
+    :type state: str or ~azure.hdinsight.job.models.StatementState
+    :param output: The execution output.
     :type output: ~azure.hdinsight.job.models.SparkStatementOutput
-    :param progress:
+    :param progress: The execution progress.
     :type progress: float
     """

@@ -584,7 +595,7 @@ class SparkStatement(Model):
         'progress': {'key': 'progress', 'type': 'float'},
     }

-    def __init__(self, *, id: int=None, code: str=None, state: str=None, output=None, progress: float=None, **kwargs) -> None:
+    def __init__(self, *, id: int=None, code: str=None, state=None, output=None, progress: float=None, **kwargs) -> None:
         super(SparkStatement, self).__init__(**kwargs)
         self.id = id
         self.code = code
@@ -612,7 +623,7 @@ def __init__(self, *, cancel_message: str=None, **kwargs) -> None:
 class SparkStatementCollection(Model):
     """SparkStatementCollection.

-    :param statements:
+    :param statements: List of Spark statements.
     :type statements: list[~azure.hdinsight.job.models.SparkStatement]
     """

@@ -628,11 +639,12 @@ def __init__(self, *, statements=None, **kwargs) -> None:
 class SparkStatementOutput(Model):
     """SparkStatementOutput.

-    :param status:
-    :type status: str
-    :param execution_count:
+    :param status: Execution status. Possible values include: 'ok', 'error',
+     'abort'
+    :type status: str or ~azure.hdinsight.job.models.StatementExecutionStatus
+    :param execution_count: A monotonically increasing number.
     :type execution_count: int
-    :param data:
+    :param data: Statement output.
     :type data: object
     """

@@ -642,7 +654,7 @@ class SparkStatementOutput(Model):
         'data': {'key': 'data', 'type': 'object'},
     }

-    def __init__(self, *, status: str=None, execution_count: int=None, data=None, **kwargs) -> None:
+    def __init__(self, *, status=None, execution_count: int=None, data=None, **kwargs) -> None:
         super(SparkStatementOutput, self).__init__(**kwargs)
         self.status = status
         self.execution_count = execution_count
@@ -654,8 +666,8 @@ class SparkStatementRequest(Model):

     :param code:
     :type code: str
-    :param kind:
-    :type kind: str
+    :param kind: Possible values include: 'spark', 'pyspark', 'sparkr', 'sql'
+    :type kind: str or ~azure.hdinsight.job.models.SessionJobKind
     """

     _attribute_map = {
@@ -663,7 +675,7 @@ class SparkStatementRequest(Model):
         'kind': {'key': 'kind', 'type': 'str'},
     }

-    def __init__(self, *, code: str=None, kind: str=None, **kwargs) -> None:
+    def __init__(self, *, code: str=None, kind=None, **kwargs) -> None:
         super(SparkStatementRequest, self).__init__(**kwargs)
         self.code = code
         self.kind = kind
diff --git a/sdk/azure-hdinsight-job/azure/hdinsight/job/operations/_job_operations.py b/sdk/azure-hdinsight-job/azure/hdinsight/job/operations/_job_operations.py
index 5ca8b7eac532..b84650c25e52 100644
--- a/sdk/azure-hdinsight-job/azure/hdinsight/job/operations/_job_operations.py
+++ b/sdk/azure-hdinsight-job/azure/hdinsight/job/operations/_job_operations.py
@@ -1469,8 +1469,9 @@ def submit_spark_statement_job(
         :type requested_by: str
         :param code:
         :type code: str
-        :param kind:
-        :type kind: str
+        :param kind: Possible values include: 'spark', 'pyspark', 'sparkr',
+         'sql'
+        :type kind: str or ~azure.hdinsight.job.models.SessionJobKind
         :param dict custom_headers: headers that will be added to the request
         :param bool raw: returns the direct response alongside the
         deserialized response
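
Reviewer note: the snippet below is a minimal usage sketch, not part of the patch. It shows how the new str-based enums are expected to surface to callers of the generated models. The class names and keyword arguments match the _models_py3.py signatures in this diff, but the values (IDs, statement data) are invented for illustration.

    from azure.hdinsight.job.models import (
        JobState,
        SparkBatchJob,
        SparkStatementOutput,
        StatementExecutionStatus,
    )

    # Because the enums derive from (str, Enum), members compare equal to the
    # raw Livy state strings, so existing string-based callers keep working.
    job = SparkBatchJob(id=1, state=JobState.running)
    assert job.state == "running"

    output = SparkStatementOutput(
        status=StatementExecutionStatus.ok,
        execution_count=1,
        data={"text/plain": "4"},
    )
    if output.status == StatementExecutionStatus.ok:
        print(output.data)

Since the previous annotations typed these fields as plain str, widening them to "str or enum" is backward compatible at the wire level; only the documented type changes.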