From 302777999ab14f51d19d99a0d36f89608fbc0938 Mon Sep 17 00:00:00 2001 From: giakas Date: Sun, 28 Mar 2021 10:53:45 -0700 Subject: [PATCH 01/19] New Readme Config File --- .../videoanalyzer/data-plane/readme.md | 83 +++++++++++++++++++ 1 file changed, 83 insertions(+) create mode 100644 specification/videoanalyzer/data-plane/readme.md diff --git a/specification/videoanalyzer/data-plane/readme.md b/specification/videoanalyzer/data-plane/readme.md new file mode 100644 index 000000000000..76366e45def1 --- /dev/null +++ b/specification/videoanalyzer/data-plane/readme.md @@ -0,0 +1,83 @@ +# videoanalyzer + +> see https://aka.ms/autorest + +This is the AutoRest configuration file for videoanalyzer. + +## Getting Started + +To build the SDKs for My API, simply install AutoRest via `npm` (`npm install -g autorest`) and then run: + +> `autorest readme.md` + +To see additional help and options, run: + +> `autorest --help` + +For other options on installation see [Installing AutoRest](https://aka.ms/autorest/install) on the AutoRest github page. + +--- + +## Configuration + +### Basic Information + +These are the global settings for the videoanalyzer. + +```yaml +openapi-type: data-plane +tag: package-1.0.0 +``` + +### Tag: package-1.0.0 + +These settings apply only when `--tag=package-1.0.0` is specified on the command line. + +```yaml $(tag) == 'package-1.0.0' +input-file: + - Microsoft.Media/preview/1.0.0/videoanalyzer.json +``` + +--- + +# Code Generation + +## Swagger to SDK + +This section describes what SDK should be generated by the automatic system. +This is not used by Autorest itself. + +```yaml $(swagger-to-sdk) +swagger-to-sdk: + - repo: azure-sdk-for-python-track2 + - repo: azure-sdk-for-java + - repo: azure-sdk-for-go + - repo: azure-sdk-for-js + - repo: azure-resource-manager-schemas + after_scripts: + - node sdkauto_afterscript.js videoanalyzer/resource-manager + - repo: azure-cli-extensions +``` +## Az + +See configuration in [readme.az.md](./readme.az.md) + +## Go + +See configuration in [readme.go.md](./readme.go.md) + +## Python + +See configuration in [readme.python.md](./readme.python.md) + +## TypeScript + +See configuration in [readme.typescript.md](./readme.typescript.md) + +## CSharp + +See configuration in [readme.csharp.md](./readme.csharp.md) + +## AzureResourceSchema + +See configuration in [readme.azureresourceschema.md](./readme.azureresourceschema.md) From 0a55737b3b6fc285926ab35cc35bfc17f7a32123 Mon Sep 17 00:00:00 2001 From: giakas Date: Sun, 28 Mar 2021 10:53:45 -0700 Subject: [PATCH 02/19] New Go Language Readme Config File --- .../videoanalyzer/data-plane/readme.go.md | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 specification/videoanalyzer/data-plane/readme.go.md diff --git a/specification/videoanalyzer/data-plane/readme.go.md b/specification/videoanalyzer/data-plane/readme.go.md new file mode 100644 index 000000000000..8a4e3dde2494 --- /dev/null +++ b/specification/videoanalyzer/data-plane/readme.go.md @@ -0,0 +1,26 @@ +## Go + +These settings apply only when `--go` is specified on the command line. + +```yaml $(go) +go: + license-header: MICROSOFT_APACHE_NO_VERSION + namespace: videoanalyzer + clear-output-folder: true +``` + +### Go multi-api + +``` yaml $(go) && $(multiapi) +batch: + - tag: package-1.0.0-preview +``` + +### Tag: package-1.0.0-preview and go + +These settings apply only when `--tag=package-1.0.0-preview --go` is specified on the command line. +Please also specify `--go-sdk-folder=`. 
+ +```yaml $(tag) == 'package-1.0.0-preview' && $(go) +output-folder: $(go-sdk-folder)/services/preview/$(namespace)/mgmt/1.0.0/$(namespace) +``` From ccbc05dd5889d0621d2f60c9ea8e73ae4e547a71 Mon Sep 17 00:00:00 2001 From: giakas Date: Sun, 28 Mar 2021 10:53:46 -0700 Subject: [PATCH 03/19] New Azure AZ Readme Config File --- .../videoanalyzer/data-plane/readme.az.md | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 specification/videoanalyzer/data-plane/readme.az.md diff --git a/specification/videoanalyzer/data-plane/readme.az.md b/specification/videoanalyzer/data-plane/readme.az.md new file mode 100644 index 000000000000..c323db48f50f --- /dev/null +++ b/specification/videoanalyzer/data-plane/readme.az.md @@ -0,0 +1,28 @@ +## AZ + +These settings apply only when `--az` is specified on the command line. + +For new Resource Provider. It is highly recommended to onboard Azure CLI extensions. There's no differences in terms of customer usage. + +``` yaml $(az) && $(target-mode) != 'core' +az: + extensions: videoanalyzer + namespace: azure.mgmt.videoanalyzer + package-name: azure-mgmt-videoanalyzer +az-output-folder: $(azure-cli-extension-folder)/src/videoanalyzer +python-sdk-output-folder: "$(az-output-folder)/azext_videoanalyzer/vendored_sdks/videoanalyzer" +# add additinal configuration here specific for Azure CLI +# refer to the faq.md for more details +``` + + + +This is for command modules that already in azure cli main repo. +``` yaml $(az) && $(target-mode) == 'core' +az: + extensions: videoanalyzer + namespace: azure.mgmt.videoanalyzer + package-name: azure-mgmt-videoanalyzer +az-output-folder: $(azure-cli-folder)/src/azure-cli/azure/cli/command_modules/videoanalyzer +python-sdk-output-folder: "$(az-output-folder)/vendored_sdks/videoanalyzer" +``` \ No newline at end of file From 5d3f5fa92edc0abca1d93c697359c87356e7988a Mon Sep 17 00:00:00 2001 From: giakas Date: Sun, 28 Mar 2021 10:53:46 -0700 Subject: [PATCH 04/19] New Azure CLI Readme Config File --- specification/videoanalyzer/data-plane/readme.cli.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 specification/videoanalyzer/data-plane/readme.cli.md diff --git a/specification/videoanalyzer/data-plane/readme.cli.md b/specification/videoanalyzer/data-plane/readme.cli.md new file mode 100644 index 000000000000..c6cf6ad37ea4 --- /dev/null +++ b/specification/videoanalyzer/data-plane/readme.cli.md @@ -0,0 +1 @@ +## CLI Common Settings for all the command line tools \ No newline at end of file From cd2527a529a521ebf161e98572218328b18254e8 Mon Sep 17 00:00:00 2001 From: giakas Date: Sun, 28 Mar 2021 10:53:46 -0700 Subject: [PATCH 05/19] New Typescript Language Readme Config File --- .../videoanalyzer/data-plane/readme.typescript.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 specification/videoanalyzer/data-plane/readme.typescript.md diff --git a/specification/videoanalyzer/data-plane/readme.typescript.md b/specification/videoanalyzer/data-plane/readme.typescript.md new file mode 100644 index 000000000000..222aca9a09bc --- /dev/null +++ b/specification/videoanalyzer/data-plane/readme.typescript.md @@ -0,0 +1,14 @@ +## TypeScript + +These settings apply only when `--typescript` is specified on the command line. +Please also specify `--typescript-sdks-folder=`. 
+ +``` yaml $(typescript) +typescript: + azure-arm: true + package-name: "@azure/arm-videoanalyzer" + output-folder: "$(typescript-sdks-folder)/sdk/videoanalyzer/arm-videoanalyzer" + payload-flattening-threshold: 1 + clear-output-folder: true + generate-metadata: true +``` From 7aae4cd02b4d885276056d1172a72ff2b7f69756 Mon Sep 17 00:00:00 2001 From: giakas Date: Sun, 28 Mar 2021 10:53:47 -0700 Subject: [PATCH 06/19] New Python Language Readme Config File --- .../videoanalyzer/data-plane/readme.python.md | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 specification/videoanalyzer/data-plane/readme.python.md diff --git a/specification/videoanalyzer/data-plane/readme.python.md b/specification/videoanalyzer/data-plane/readme.python.md new file mode 100644 index 000000000000..dda81cc659a9 --- /dev/null +++ b/specification/videoanalyzer/data-plane/readme.python.md @@ -0,0 +1,22 @@ +## Python + +These settings apply only when `--python` is specified on the command line. +Please also specify `--python-sdks-folder=`. + +``` yaml $(track2) +azure-arm: true +license-header: MICROSOFT_MIT_NO_VERSION +package-name: azure-mgmt-videoanalyzer +no-namespace-folders: true +package-version: 1.0.0b1 +``` + +``` yaml $(python-mode) == 'update' && $(track2) +no-namespace-folders: true +output-folder: $(python-sdks-folder)/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer +``` + +``` yaml $(python-mode) == 'create' && $(track2) +basic-setup-py: true +output-folder: $(python-sdks-folder)/videoanalyzer/azure-mgmt-videoanalyzer +``` From 7f565819b31c58f4927978ef491a8d5dc4e2f477 Mon Sep 17 00:00:00 2001 From: giakas Date: Sun, 28 Mar 2021 10:53:47 -0700 Subject: [PATCH 07/19] New C# Language Readme Config File --- .../videoanalyzer/data-plane/readme.csharp.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 specification/videoanalyzer/data-plane/readme.csharp.md diff --git a/specification/videoanalyzer/data-plane/readme.csharp.md b/specification/videoanalyzer/data-plane/readme.csharp.md new file mode 100644 index 000000000000..1fc91c5e2c0b --- /dev/null +++ b/specification/videoanalyzer/data-plane/readme.csharp.md @@ -0,0 +1,15 @@ +## C# + +These settings apply only when `--csharp` is specified on the command line. +Please also specify `--csharp-sdks-folder=`. + +```yaml $(csharp) +csharp: + azure-arm: true + license-header: MICROSOFT_MIT_NO_VERSION + payload-flattening-threshold: 1 + clear-output-folder: true + client-side-validation: false + namespace: Microsoft.Media + output-folder: $(csharp-sdks-folder)/videoanalyzer/management/Microsoft.Media/GeneratedProtocol +``` From be1f41f01161bcc5d6d72946f56ab232561cdcb1 Mon Sep 17 00:00:00 2001 From: giakas Date: Sun, 28 Mar 2021 10:53:48 -0700 Subject: [PATCH 08/19] New AzureResourceSchema Readme Config File --- .../data-plane/readme.azureresourceschema.md | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 specification/videoanalyzer/data-plane/readme.azureresourceschema.md diff --git a/specification/videoanalyzer/data-plane/readme.azureresourceschema.md b/specification/videoanalyzer/data-plane/readme.azureresourceschema.md new file mode 100644 index 000000000000..7be4630e4489 --- /dev/null +++ b/specification/videoanalyzer/data-plane/readme.azureresourceschema.md @@ -0,0 +1,23 @@ +## AzureResourceSchema + +These settings apply only when `--azureresourceschema` is specified on the command line. 
+ +### AzureResourceSchema multi-api + +``` yaml $(azureresourceschema) && $(multiapi) +batch: + - tag: schema-videoanalyzer-1.0.0 + +``` + +Please also specify `--azureresourceschema-folder=`. + +### Tag: schema-videoanalyzer-1.0.0 and azureresourceschema + +``` yaml $(tag) == 'schema-videoanalyzer-1.0.0' && $(azureresourceschema) +output-folder: $(azureresourceschema-folder)/schemas + +# all the input files in this apiVersion +input-file: + - Microsoft.Media/preview/1.0.0/videoanalyzer.json +``` From f18c73df5e45a9648afd1d77ebfbae302751b5b1 Mon Sep 17 00:00:00 2001 From: giakas Date: Sun, 28 Mar 2021 10:53:48 -0700 Subject: [PATCH 09/19] New Swagger Spec File --- .../preview/1.0.0/videoanalyzer.json | 132 ++++++++++++++++++ 1 file changed, 132 insertions(+) create mode 100644 specification/videoanalyzer/data-plane/Microsoft.Media/preview/1.0.0/videoanalyzer.json diff --git a/specification/videoanalyzer/data-plane/Microsoft.Media/preview/1.0.0/videoanalyzer.json b/specification/videoanalyzer/data-plane/Microsoft.Media/preview/1.0.0/videoanalyzer.json new file mode 100644 index 000000000000..f658da659ba7 --- /dev/null +++ b/specification/videoanalyzer/data-plane/Microsoft.Media/preview/1.0.0/videoanalyzer.json @@ -0,0 +1,132 @@ +{ + "swagger": "2.0", + "info": { + "version": "1.0.0", + "title": "videoanalyzer", + "description": "Description of the new service", + "x-ms-code-generation-settings": { + "name": "videoanalyzerClient" + } + }, + "host": "management.azure.com", + "schemes": ["https"], + "consumes": ["application/json"], + "produces": ["application/json"], + "security": [ + { + "azure_auth": ["user_impersonation"] + } + ], + "securityDefinitions": { + "azure_auth": { + "type": "oauth2", + "authorizationUrl": "https://login.microsoftonline.com/common/oauth2/authorize", + "flow": "implicit", + "description": "Azure Active Directory OAuth2 Flow", + "scopes": { + "user_impersonation": "impersonate your user account" + } + } + }, + "paths": { + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/operations": { + "get": { + "tags": ["Tag1"], + "operationId": "OperationGroup_Get", + "x-ms-examples": { + "BatchAccountDelete": { "$ref": "./examples/OperationGroupGet.json" } + }, + "description": "This is a sample get operation, please see guidelines in azure-rest-api-specs repository for more info", + "parameters": [ + { + "$ref": "#/parameters/SubscriptionIdParameter" + }, + { + "$ref": "#/parameters/ResourceGroupNameParameter" + } + ], + "responses": { + "200": { + "description": "Describe the result of a successful operation.", + "schema": { + "$ref": "#/definitions/Result" + } + }, + "default": { + "description": "Error response describing why the operation failed.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + } + }, + "definitions": { + "Result": { + "description": "Sample result definition", + "properties": { + "sampleProperty": { + "type": "string", + "description": "Sample property of type string" + } + } + }, + "ErrorResponse": { + "description": "Error response.", + "properties": { + "error": { + "$ref": "#/definitions/ErrorDefinition", + "description": "The error details." 
+ } + } + }, + "ErrorDefinition": { + "description": "Error definition.", + "properties": { + "code": { + "description": "Service specific error code which serves as the substatus for the HTTP error code.", + "type": "string", + "readOnly": true + }, + "message": { + "description": "Description of the error.", + "type": "string", + "readOnly": true + }, + "details": { + "description": "Internal error details.", + "type": "array", + "items": { + "$ref": "#/definitions/ErrorDefinition" + }, + "readOnly": true + } + } + } + }, + "parameters": { + "SubscriptionIdParameter": { + "name": "subscriptionId", + "in": "path", + "required": true, + "type": "string", + "description": "The Azure subscription ID. This is a GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000)" + }, + "ResourceGroupNameParameter": { + "name": "resourceGroupName", + "in": "path", + "required": true, + "type": "string", + "description": "The name of the resource group.", + "x-ms-parameter-location": "method" + }, + "ApiVersionParameter": { + "name": "api-version", + "in": "query", + "required": true, + "type": "string", + "description": "The API version to be used with the HTTP request." + } + } +} From ab4e504e4c5a01dad19b1b4bd641ba93872a4430 Mon Sep 17 00:00:00 2001 From: giakas Date: Sun, 28 Mar 2021 10:53:48 -0700 Subject: [PATCH 10/19] New Swagger Example Spec File --- .../preview/1.0.0/examples/OperationGroupGet.json | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 specification/videoanalyzer/data-plane/Microsoft.Media/preview/1.0.0/examples/OperationGroupGet.json diff --git a/specification/videoanalyzer/data-plane/Microsoft.Media/preview/1.0.0/examples/OperationGroupGet.json b/specification/videoanalyzer/data-plane/Microsoft.Media/preview/1.0.0/examples/OperationGroupGet.json new file mode 100644 index 000000000000..65752e3c0295 --- /dev/null +++ b/specification/videoanalyzer/data-plane/Microsoft.Media/preview/1.0.0/examples/OperationGroupGet.json @@ -0,0 +1,15 @@ +{ + "parameters": { + "accountName": "sampleacct", + "resourceGroupName": "videoanalyzerClient", + "api-version": "1.0.0", + "subscriptionId": "subid" + }, + "responses": { + "200": { + "body": { + "sampleProperty": "sampleProperty" + } + } + } +} From c856ca46094a7c3d953ff5a76d8006aea48b5dd8 Mon Sep 17 00:00:00 2001 From: giakas Date: Sun, 28 Mar 2021 11:08:57 -0700 Subject: [PATCH 11/19] Copy old LVA swaggers --- .../1.0.0/examples/OperationGroupGet.json | 15 - .../preview/1.0.0/videoanalyzer.json | 132 -- .../preview/1.0.0/LiveVideoAnalytics.json | 1124 +++++++++++++++++ .../LiveVideoAnalyticsSdkDefinitions.json | 209 +++ .../videoanalyzer/data-plane/readme.az.md | 28 - .../data-plane/readme.azureresourceschema.md | 23 - .../videoanalyzer/data-plane/readme.cli.md | 1 - .../videoanalyzer/data-plane/readme.csharp.md | 15 - .../videoanalyzer/data-plane/readme.go.md | 26 - .../videoanalyzer/data-plane/readme.md | 94 +- .../videoanalyzer/data-plane/readme.python.md | 22 - .../data-plane/readme.typescript.md | 14 - 12 files changed, 1391 insertions(+), 312 deletions(-) delete mode 100644 specification/videoanalyzer/data-plane/Microsoft.Media/preview/1.0.0/examples/OperationGroupGet.json delete mode 100644 specification/videoanalyzer/data-plane/Microsoft.Media/preview/1.0.0/videoanalyzer.json create mode 100644 specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/LiveVideoAnalytics.json create mode 100644 
specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/LiveVideoAnalyticsSdkDefinitions.json delete mode 100644 specification/videoanalyzer/data-plane/readme.az.md delete mode 100644 specification/videoanalyzer/data-plane/readme.azureresourceschema.md delete mode 100644 specification/videoanalyzer/data-plane/readme.cli.md delete mode 100644 specification/videoanalyzer/data-plane/readme.csharp.md delete mode 100644 specification/videoanalyzer/data-plane/readme.go.md delete mode 100644 specification/videoanalyzer/data-plane/readme.python.md delete mode 100644 specification/videoanalyzer/data-plane/readme.typescript.md diff --git a/specification/videoanalyzer/data-plane/Microsoft.Media/preview/1.0.0/examples/OperationGroupGet.json b/specification/videoanalyzer/data-plane/Microsoft.Media/preview/1.0.0/examples/OperationGroupGet.json deleted file mode 100644 index 65752e3c0295..000000000000 --- a/specification/videoanalyzer/data-plane/Microsoft.Media/preview/1.0.0/examples/OperationGroupGet.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "parameters": { - "accountName": "sampleacct", - "resourceGroupName": "videoanalyzerClient", - "api-version": "1.0.0", - "subscriptionId": "subid" - }, - "responses": { - "200": { - "body": { - "sampleProperty": "sampleProperty" - } - } - } -} diff --git a/specification/videoanalyzer/data-plane/Microsoft.Media/preview/1.0.0/videoanalyzer.json b/specification/videoanalyzer/data-plane/Microsoft.Media/preview/1.0.0/videoanalyzer.json deleted file mode 100644 index f658da659ba7..000000000000 --- a/specification/videoanalyzer/data-plane/Microsoft.Media/preview/1.0.0/videoanalyzer.json +++ /dev/null @@ -1,132 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "version": "1.0.0", - "title": "videoanalyzer", - "description": "Description of the new service", - "x-ms-code-generation-settings": { - "name": "videoanalyzerClient" - } - }, - "host": "management.azure.com", - "schemes": ["https"], - "consumes": ["application/json"], - "produces": ["application/json"], - "security": [ - { - "azure_auth": ["user_impersonation"] - } - ], - "securityDefinitions": { - "azure_auth": { - "type": "oauth2", - "authorizationUrl": "https://login.microsoftonline.com/common/oauth2/authorize", - "flow": "implicit", - "description": "Azure Active Directory OAuth2 Flow", - "scopes": { - "user_impersonation": "impersonate your user account" - } - } - }, - "paths": { - "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/operations": { - "get": { - "tags": ["Tag1"], - "operationId": "OperationGroup_Get", - "x-ms-examples": { - "BatchAccountDelete": { "$ref": "./examples/OperationGroupGet.json" } - }, - "description": "This is a sample get operation, please see guidelines in azure-rest-api-specs repository for more info", - "parameters": [ - { - "$ref": "#/parameters/SubscriptionIdParameter" - }, - { - "$ref": "#/parameters/ResourceGroupNameParameter" - } - ], - "responses": { - "200": { - "description": "Describe the result of a successful operation.", - "schema": { - "$ref": "#/definitions/Result" - } - }, - "default": { - "description": "Error response describing why the operation failed.", - "schema": { - "$ref": "#/definitions/ErrorResponse" - } - } - } - } - } - }, - "definitions": { - "Result": { - "description": "Sample result definition", - "properties": { - "sampleProperty": { - "type": "string", - "description": "Sample property of type string" - } - } - }, - "ErrorResponse": { - "description": "Error response.", - "properties": { - 
"error": { - "$ref": "#/definitions/ErrorDefinition", - "description": "The error details." - } - } - }, - "ErrorDefinition": { - "description": "Error definition.", - "properties": { - "code": { - "description": "Service specific error code which serves as the substatus for the HTTP error code.", - "type": "string", - "readOnly": true - }, - "message": { - "description": "Description of the error.", - "type": "string", - "readOnly": true - }, - "details": { - "description": "Internal error details.", - "type": "array", - "items": { - "$ref": "#/definitions/ErrorDefinition" - }, - "readOnly": true - } - } - } - }, - "parameters": { - "SubscriptionIdParameter": { - "name": "subscriptionId", - "in": "path", - "required": true, - "type": "string", - "description": "The Azure subscription ID. This is a GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000)" - }, - "ResourceGroupNameParameter": { - "name": "resourceGroupName", - "in": "path", - "required": true, - "type": "string", - "description": "The name of the resource group.", - "x-ms-parameter-location": "method" - }, - "ApiVersionParameter": { - "name": "api-version", - "in": "query", - "required": true, - "type": "string", - "description": "The API version to be used with the HTTP request." - } - } -} diff --git a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/LiveVideoAnalytics.json b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/LiveVideoAnalytics.json new file mode 100644 index 000000000000..e1135e5d617f --- /dev/null +++ b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/LiveVideoAnalytics.json @@ -0,0 +1,1124 @@ +{ + "swagger": "2.0", + "info": { + "description": "Direct Methods for Live Video Analytics on IoT Edge.", + "version": "2.0.0", + "title": "Direct Methods for Live Video Analytics on IoT Edge", + "contact": { + "email": "amshelp@microsoft.com" + } + }, + "security": [ + { + "sharedAccessSignature": [] + } + ], + "paths": {}, + "securityDefinitions": { + "sharedAccessSignature": { + "type": "apiKey", + "name": "Authorization", + "in": "header" + } + }, + "definitions": { + "MediaGraphInstance": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "The identifier for the media graph instance." + }, + "systemData": { + "$ref": "#/definitions/MediaGraphSystemData" + }, + "properties": { + "$ref": "#/definitions/MediaGraphInstanceProperties" + } + }, + "description": "Represents an instance of a media graph." + }, + "MediaGraphInstanceProperties": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "An optional description for the instance." + }, + "topologyName": { + "type": "string", + "description": "The name of the media graph topology that this instance will run. A topology with this name should already have been set in the Edge module." + }, + "parameters": { + "type": "array", + "description": "List of one or more graph instance parameters.", + "items": { + "$ref": "#/definitions/MediaGraphParameterDefinition" + } + }, + "state": { + "type": "string", + "description": "Allowed states for a graph instance.", + "enum": [ + "Inactive", + "Activating", + "Active", + "Deactivating" + ], + "x-ms-enum": { + "name": "MediaGraphInstanceState", + "values": [ + { + "value": "Inactive", + "description": "The media graph instance is idle and not processing media." 
+ }, + { + "value": "Activating", + "description": "The media graph instance is transitioning into the active state." + }, + { + "value": "Active", + "description": "The media graph instance is active and processing media." + }, + { + "value": "Deactivating", + "description": "The media graph instance is transitioning into the inactive state." + } + ], + "modelAsString": true + } + } + }, + "description": "Properties of a media graph instance." + }, + "MediaGraphParameterDefinition": { + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "type": "string", + "description": "The name of the parameter defined in the media graph topology." + }, + "value": { + "type": "string", + "description": "The value to supply for the named parameter defined in the media graph topology." + } + }, + "description": "A key-value pair. A media graph topology allows certain values to be parameterized. When an instance is created, the parameters are supplied with arguments specific to that instance. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters." + }, + "MediaGraphInstanceCollection": { + "type": "object", + "properties": { + "value": { + "type": "array", + "description": "A collection of media graph instances.", + "items": { + "$ref": "#/definitions/MediaGraphInstance" + } + }, + "@continuationToken": { + "type": "string", + "description": "A continuation token to use in subsequent calls to enumerate through the graph instance collection. This is used when the collection contains too many results to return in one response." + } + }, + "description": "A collection of media graph instances." + }, + "MediaGraphTopologyCollection": { + "type": "object", + "properties": { + "value": { + "type": "array", + "description": "A collection of media graph topologies.", + "items": { + "$ref": "#/definitions/MediaGraphTopology" + } + }, + "@continuationToken": { + "type": "string", + "description": "A continuation token to use in subsequent calls to enumerate through the graph topologies collection. This is used when the collection contains too many results to return in one response." + } + }, + "description": "A collection of media graph topologies." + }, + "MediaGraphTopology": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "The identifier for the media graph topology." + }, + "systemData": { + "$ref": "#/definitions/MediaGraphSystemData" + }, + "properties": { + "$ref": "#/definitions/MediaGraphTopologyProperties" + } + }, + "description": "The definition of a media graph topology." + }, + "MediaGraphTopologyProperties": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "A description of a media graph topology. It is recommended to use this to describe the expected use of the topology." + }, + "parameters": { + "type": "array", + "items": { + "$ref": "#/definitions/MediaGraphParameterDeclaration" + }, + "description": "The list of parameters defined in the topology. The value for these parameters are supplied by instances of this topology." + }, + "sources": { + "type": "array", + "items": { + "$ref": "#/definitions/MediaGraphSource" + }, + "description": "The list of source nodes in this topology." + }, + "processors": { + "type": "array", + "items": { + "$ref": "#/definitions/MediaGraphProcessor" + }, + "description": "The list of processor nodes in this topology." 
+ }, + "sinks": { + "type": "array", + "items": { + "$ref": "#/definitions/MediaGraphSink" + }, + "description": "The list of sink nodes in this topology." + } + }, + "description": "A description of the properties of a media graph topology." + }, + "MediaGraphSystemData": { + "type": "object", + "properties": { + "createdAt": { + "type": "string", + "format": "date-time", + "description": "The timestamp of resource creation (UTC)." + }, + "lastModifiedAt": { + "type": "string", + "format": "date-time", + "description": "The timestamp of resource last modification (UTC)." + } + }, + "description": "The system data for a resource. This is used by both topologies and instances." + }, + "MediaGraphParameterDeclaration": { + "type": "object", + "required": [ + "name", + "type" + ], + "properties": { + "name": { + "type": "string", + "description": "The name of the parameter.", + "maxLength": 64 + }, + "type": { + "type": "string", + "description": "The type of the parameter.", + "enum": [ + "String", + "SecretString", + "Int", + "Double", + "Bool" + ], + "x-ms-enum": { + "name": "MediaGraphParameterType", + "values": [ + { + "value": "String", + "description": "A string parameter value." + }, + { + "value": "SecretString", + "description": "A string to hold sensitive information as parameter value." + }, + { + "value": "Int", + "description": "A 32-bit signed integer as parameter value." + }, + { + "value": "Double", + "description": "A 64-bit double-precision floating point type as parameter value." + }, + { + "value": "Bool", + "description": "A boolean value that is either true or false." + } + ], + "modelAsString": true + } + }, + "description": { + "type": "string", + "description": "Description of the parameter." + }, + "default": { + "type": "string", + "description": "The default value for the parameter to be used if the media graph instance does not specify a value." + } + }, + "description": "The declaration of a parameter in the media graph topology. A media graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters." + }, + "MediaGraphSource": { + "type": "object", + "required": [ + "@type", + "name" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The type of the source node. The discriminator for derived types." + }, + "name": { + "type": "string", + "description": "The name to be used for this source node." + } + }, + "description": "A source node in a media graph." + }, + "MediaGraphRtspSource": { + "type": "object", + "properties": { + "transport": { + "type": "string", + "description": "Underlying RTSP transport. This is used to enable or disable HTTP tunneling.", + "enum": [ + "Http", + "Tcp" + ], + "x-ms-enum": { + "name": "MediaGraphRtspTransport", + "values": [ + { + "value": "Http", + "description": "HTTP/HTTPS transport. This should be used when HTTP tunneling is desired." + }, + { + "value": "Tcp", + "description": "TCP transport. This should be used when HTTP tunneling is NOT desired." 
+ } + ], + "modelAsString": true + } + }, + "endpoint": { + "description": "RTSP endpoint of the stream that is being connected to.", + "$ref": "#/definitions/MediaGraphEndpoint" + } + }, + "required": [ + "endpoint" + ], + "allOf": [ + { + "$ref": "#/definitions/MediaGraphSource" + } + ], + "description": "Enables a media graph to capture media from a RTSP server.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphRtspSource" + }, + "MediaGraphIoTHubMessageSource": { + "type": "object", + "properties": { + "hubInputName": { + "type": "string", + "description": "Name of the input path where messages can be routed to (via routes declared in the IoT Edge deployment manifest)." + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphSource" + } + ], + "description": "Enables a media graph to receive messages via routes declared in the IoT Edge deployment manifest.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphIoTHubMessageSource" + }, + "MediaGraphIoTHubMessageSink": { + "type": "object", + "properties": { + "hubOutputName": { + "type": "string", + "description": "Name of the output path to which the media graph will publish message. These messages can then be delivered to desired destinations by declaring routes referencing the output path in the IoT Edge deployment manifest." + } + }, + "required": [ + "@type", + "hubOutputName" + ], + "allOf": [ + { + "$ref": "#/definitions/MediaGraphSink" + } + ], + "description": "Enables a media graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphIoTHubMessageSink" + }, + "MediaGraphEndpoint": { + "type": "object", + "required": [ + "@type", + "url" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The discriminator for derived types." + }, + "credentials": { + "description": "Polymorphic credentials to be presented to the endpoint.", + "$ref": "#/definitions/MediaGraphCredentials" + }, + "url": { + "type": "string", + "description": "Url for the endpoint." + } + }, + "description": "Base class for endpoints." + }, + "MediaGraphCredentials": { + "type": "object", + "required": [ + "@type" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The discriminator for derived types." + } + }, + "description": "Credentials to present during authentication." + }, + "MediaGraphUsernamePasswordCredentials": { + "type": "object", + "properties": { + "username": { + "type": "string", + "description": "Username for a username/password pair." + }, + "password": { + "type": "string", + "description": "Password for a username/password pair. Please use a parameter so that the actual value is not returned on PUT or GET requests." + } + }, + "required": [ + "username", + "password" + ], + "allOf": [ + { + "$ref": "#/definitions/MediaGraphCredentials" + } + ], + "description": "Username/password credential pair.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphUsernamePasswordCredentials" + }, + "MediaGraphHttpHeaderCredentials": { + "type": "object", + "properties": { + "headerName": { + "type": "string", + "description": "HTTP header name." + }, + "headerValue": { + "type": "string", + "description": "HTTP header value. Please use a parameter so that the actual value is not returned on PUT or GET requests." 
+ } + }, + "required": [ + "headerName", + "headerValue" + ], + "allOf": [ + { + "$ref": "#/definitions/MediaGraphCredentials" + } + ], + "description": "Http header service credentials.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphHttpHeaderCredentials" + }, + "MediaGraphUnsecuredEndpoint": { + "type": "object", + "allOf": [ + { + "$ref": "#/definitions/MediaGraphEndpoint" + } + ], + "description": "An endpoint that the media graph can connect to, with no encryption in transit.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphUnsecuredEndpoint" + }, + "MediaGraphTlsEndpoint": { + "type": "object", + "properties": { + "trustedCertificates": { + "description": "Trusted certificates when authenticating a TLS connection. Null designates that Azure Media Service's source of trust should be used.", + "$ref": "#/definitions/MediaGraphCertificateSource" + }, + "validationOptions": { + "description": "Validation options to use when authenticating a TLS connection. By default, strict validation is used.", + "$ref": "#/definitions/MediaGraphTlsValidationOptions" + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphEndpoint" + } + ], + "description": "A TLS endpoint for media graph external connections.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphTlsEndpoint" + }, + "MediaGraphCertificateSource": { + "type": "object", + "required": [ + "@type" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The discriminator for derived types." + } + }, + "description": "Base class for certificate sources." + }, + "MediaGraphTlsValidationOptions": { + "type": "object", + "properties": { + "ignoreHostname": { + "type": "string", + "description": "Boolean value ignoring the host name (common name) during validation." + }, + "ignoreSignature": { + "type": "string", + "description": "Boolean value ignoring the integrity of the certificate chain at the current time." + } + }, + "description": "Options for controlling the authentication of TLS endpoints." + }, + "MediaGraphPemCertificateList": { + "type": "object", + "properties": { + "certificates": { + "type": "array", + "description": "PEM formatted public certificates one per entry.", + "items": { + "type": "string" + } + } + }, + "required": [ + "certificates" + ], + "allOf": [ + { + "$ref": "#/definitions/MediaGraphCertificateSource" + } + ], + "description": "A list of PEM formatted certificates.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphPemCertificateList" + }, + "MediaGraphSink": { + "type": "object", + "required": [ + "@type", + "inputs", + "name" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The discriminator for derived types." + }, + "name": { + "type": "string", + "description": "The name to be used for the media graph sink." + }, + "inputs": { + "type": "array", + "description": "An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node.", + "items": { + "$ref": "#/definitions/MediaGraphNodeInput" + } + } + }, + "description": "Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module." + }, + "MediaGraphNodeInput": { + "type": "object", + "required": [ + "nodeName" + ], + "properties": { + "nodeName": { + "type": "string", + "description": "The name of another node in the media graph, the output of which is used as input to this node." 
+ }, + "outputSelectors": { + "type": "array", + "description": "Allows for the selection of particular streams from another node.", + "items": { + "$ref": "#/definitions/MediaGraphOutputSelector" + } + } + }, + "description": "Represents the input to any node in a media graph." + }, + "MediaGraphOutputSelector": { + "type": "object", + "properties": { + "property": { + "type": "string", + "description": "The stream property to compare with.", + "enum": [ + "mediaType" + ], + "x-ms-enum": { + "name": "MediaGraphOutputSelectorProperty", + "values": [ + { + "value": "mediaType", + "description": "The stream's MIME type or subtype." + } + ], + "modelAsString": true + } + }, + "operator": { + "type": "string", + "description": "The operator to compare streams by.", + "enum": [ + "is", + "isNot" + ], + "x-ms-enum": { + "name": "MediaGraphOutputSelectorOperator", + "values": [ + { + "value": "is", + "description": "A media type is the same type or a subtype." + }, + { + "value": "isNot", + "description": "A media type is not the same type or a subtype." + } + ], + "modelAsString": true + } + }, + "value": { + "type": "string", + "description": "Value to compare against." + } + }, + "description": "Allows for the selection of particular streams from another node." + }, + "MediaGraphFileSink": { + "type": "object", + "properties": { + "baseDirectoryPath": { + "type": "string", + "description": "Absolute directory for all outputs to the Edge device from this sink.", + "example": "/var/media/output/" + }, + "fileNamePattern": { + "type": "string", + "description": "File name pattern for creating new files on the Edge device. The pattern must include at least one system variable. See the documentation for available variables and additional examples.", + "example": "mySampleFile-${System.GraphTopologyName}-${System.GraphInstanceName}-${System.DateTime}" + }, + "maximumSizeMiB": { + "type": "string", + "description": "Maximum amount of disk space that can be used for storing files from this sink." + } + }, + "required": [ + "fileNamePattern", + "baseDirectoryPath", + "maximumSizeMiB" + ], + "allOf": [ + { + "$ref": "#/definitions/MediaGraphSink" + } + ], + "description": "Enables a media graph to write/store media (video and audio) to a file on the Edge device.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphFileSink" + }, + "MediaGraphAssetSink": { + "type": "object", + "properties": { + "assetNamePattern": { + "type": "string", + "description": "A name pattern when creating new assets. The pattern must include at least one system variable. See the documentation for available variables and additional examples.", + "example": "MySampleAsset-${System.GraphTopologyName}-${System.GraphInstanceName}-${System.DateTime}" + }, + "segmentLength": { + "type": "string", + "example": "PT30S", + "description": "When writing media to an asset, wait until at least this duration of media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum of 30 seconds and a recommended maximum of 5 minutes." + }, + "localMediaCachePath": { + "type": "string", + "description": "Path to a local file system directory for temporary caching of media before writing to an Asset. Used when the Edge device is temporarily disconnected from Azure.", + "example": "/var/lib/lva/tmp/" + }, + "localMediaCacheMaximumSizeMiB": { + "type": "string", + "description": "Maximum amount of disk space that can be used for temporary caching of media." 
+ } + }, + "required": [ + "@type", + "assetNamePattern", + "localMediaCachePath", + "localMediaCacheMaximumSizeMiB" + ], + "allOf": [ + { + "$ref": "#/definitions/MediaGraphSink" + } + ], + "description": "Enables a media graph to record media to an Azure Media Services asset for subsequent playback.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphAssetSink" + }, + "MediaGraphProcessor": { + "type": "object", + "required": [ + "@type", + "inputs", + "name" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The discriminator for derived types." + }, + "name": { + "type": "string", + "description": "The name for this processor node." + }, + "inputs": { + "type": "array", + "description": "An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node.", + "items": { + "$ref": "#/definitions/MediaGraphNodeInput" + } + } + }, + "description": "A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output." + }, + "MediaGraphMotionDetectionProcessor": { + "type": "object", + "properties": { + "sensitivity": { + "type": "string", + "description": "Enumeration that specifies the sensitivity of the motion detection processor.", + "enum": [ + "Low", + "Medium", + "High" + ], + "x-ms-enum": { + "name": "MediaGraphMotionDetectionSensitivity", + "values": [ + { + "value": "Low", + "description": "Low Sensitivity." + }, + { + "value": "Medium", + "description": "Medium Sensitivity." + }, + { + "value": "High", + "description": "High Sensitivity." + } + ], + "modelAsString": true + } + }, + "outputMotionRegion": { + "type": "boolean", + "description": "Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. Default is true." + }, + "eventAggregationWindow": { + "type": "string", + "description": "Event aggregation window duration, or 0 for no aggregation." + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphProcessor" + } + ], + "description": "A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphMotionDetectionProcessor" + }, + "MediaGraphExtensionProcessorBase": { + "type": "object", + "required": [ + "endpoint", + "image" + ], + "properties": { + "endpoint": { + "description": "Endpoint to which this processor should connect.", + "$ref": "#/definitions/MediaGraphEndpoint" + }, + "image": { + "description": "Describes the parameters of the image that is sent as input to the endpoint.", + "$ref": "#/definitions/MediaGraphImage" + }, + "samplingOptions": { + "description": "Describes the sampling options to be applied when forwarding samples to the extension.", + "$ref": "#/definitions/MediaGraphSamplingOptions" + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphProcessor" + } + ], + "description": "Processor that allows for extensions outside of the Live Video Analytics Edge module to be integrated into the graph. 
It is the base class for various different kinds of extension processor types.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphExtensionProcessorBase" + }, + "MediaGraphCognitiveServicesVisionExtension": { + "type": "object", + "properties": {}, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphExtensionProcessorBase" + } + ], + "description": "A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. Inference results are relayed to downstream nodes.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension" + }, + "MediaGraphGrpcExtension": { + "type": "object", + "required": [ + "dataTransfer" + ], + "properties": { + "dataTransfer": { + "description": "How media should be transferred to the inference engine.", + "$ref": "#/definitions/MediaGraphGrpcExtensionDataTransfer" + }, + "extensionConfiguration": { + "type": "string", + "description": "Optional configuration to pass to the gRPC extension." + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphExtensionProcessorBase" + } + ], + "description": "A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphGrpcExtension" + }, + "MediaGraphGrpcExtensionDataTransfer": { + "type": "object", + "required": [ + "mode" + ], + "properties": { + "sharedMemorySizeMiB": { + "type": "string", + "description": "The size of the buffer for all in-flight frames in mebibytes if mode is SharedMemory. Should not be specified otherwise." + }, + "mode": { + "type": "string", + "description": "How frame data should be transmitted to the inference engine.", + "enum": [ + "Embedded", + "SharedMemory" + ], + "x-ms-enum": { + "name": "MediaGraphGrpcExtensionDataTransferMode", + "values": [ + { + "value": "Embedded", + "description": "Frames are transferred embedded into the gRPC messages." + }, + { + "value": "SharedMemory", + "description": "Frames are transferred through shared memory." + } + ], + "modelAsString": true + } + } + }, + "description": "Describes how media should be transferred to the inference engine.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphGrpcExtensionDataTransfer" + }, + "MediaGraphHttpExtension": { + "type": "object", + "allOf": [ + { + "$ref": "#/definitions/MediaGraphExtensionProcessorBase" + } + ], + "description": "A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphHttpExtension" + }, + "MediaGraphImage": { + "type": "object", + "properties": { + "scale": { + "$ref": "#/definitions/MediaGraphImageScale" + }, + "format": { + "$ref": "#/definitions/MediaGraphImageFormat" + } + }, + "description": "Describes the properties of an image frame." 
+ }, + "MediaGraphSamplingOptions": { + "type": "object", + "properties": { + "skipSamplesWithoutAnnotation": { + "type": "string", + "description": "If true, limits the samples submitted to the extension to only samples which have associated inference(s)" + }, + "maximumSamplesPerSecond": { + "type": "string", + "description": "Maximum rate of samples submitted to the extension" + } + }, + "description": "Describes the properties of a sample." + }, + "MediaGraphImageScale": { + "type": "object", + "properties": { + "mode": { + "type": "string", + "description": "Describes the modes for scaling an input video frame into an image, before it is sent to an inference engine.", + "enum": [ + "PreserveAspectRatio", + "Pad", + "Stretch" + ], + "x-ms-enum": { + "name": "MediaGraphImageScaleMode", + "values": [ + { + "value": "PreserveAspectRatio", + "description": "Use the same aspect ratio as the input frame." + }, + { + "value": "Pad", + "description": "Center pad the input frame to match the given dimensions." + }, + { + "value": "Stretch", + "description": "Stretch input frame to match given dimensions." + } + ], + "modelAsString": true + } + }, + "width": { + "type": "string", + "description": "The desired output width of the image." + }, + "height": { + "type": "string", + "description": "The desired output height of the image." + } + }, + "description": "The scaling mode for the image." + }, + "MediaGraphImageFormat": { + "type": "object", + "required": [ + "@type" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The discriminator for derived types." + } + }, + "description": "Encoding settings for an image.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormat" + }, + "MediaGraphImageFormatRaw": { + "type": "object", + "required": [ + "pixelFormat" + ], + "properties": { + "pixelFormat": { + "type": "string", + "description": "The pixel format that will be used to encode images.", + "enum": [ + "Yuv420p", + "Rgb565be", + "Rgb565le", + "Rgb555be", + "Rgb555le", + "Rgb24", + "Bgr24", + "Argb", + "Rgba", + "Abgr", + "Bgra" + ], + "x-ms-enum": { + "name": "MediaGraphImageFormatRawPixelFormat", + "values": [ + { + "value": "Yuv420p", + "description": "Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples)." + }, + { + "value": "Rgb565be", + "description": "Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian." + }, + { + "value": "Rgb565le", + "description": "Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian." + }, + { + "value": "Rgb555be", + "description": "Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined." + }, + { + "value": "Rgb555le", + "description": "Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined." + }, + { + "value": "Rgb24", + "description": "Packed RGB 8:8:8, 24bpp, RGBRGB." + }, + { + "value": "Bgr24", + "description": "Packed RGB 8:8:8, 24bpp, BGRBGR." + }, + { + "value": "Argb", + "description": "Packed ARGB 8:8:8:8, 32bpp, ARGBARGB." + }, + { + "value": "Rgba", + "description": "Packed RGBA 8:8:8:8, 32bpp, RGBARGBA." + }, + { + "value": "Abgr", + "description": "Packed ABGR 8:8:8:8, 32bpp, ABGRABGR." + }, + { + "value": "Bgra", + "description": "Packed BGRA 8:8:8:8, 32bpp, BGRABGRA." 
+ } + ], + "modelAsString": true + } + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphImageFormat" + } + ], + "description": "Encoding settings for raw images.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormatRaw" + }, + "MediaGraphImageFormatJpeg": { + "type": "object", + "properties": { + "quality": { + "type": "string", + "description": "The image quality. Value must be between 0 to 100 (best quality)." + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphImageFormat" + } + ], + "description": "Encoding settings for Jpeg images.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormatJpeg" + }, + "MediaGraphImageFormatBmp": { + "type": "object", + "properties": {}, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphImageFormat" + } + ], + "description": "Encoding settings for Bmp images.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormatBmp" + }, + "MediaGraphImageFormatPng": { + "type": "object", + "properties": {}, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphImageFormat" + } + ], + "description": "Encoding settings for Png images.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormatPng" + }, + "MediaGraphSignalGateProcessor": { + "type": "object", + "properties": { + "activationEvaluationWindow": { + "type": "string", + "example": "PT1.0S", + "description": "The period of time over which the gate gathers input events before evaluating them." + }, + "activationSignalOffset": { + "type": "string", + "example": "-PT1.0S", + "description": "Signal offset once the gate is activated (can be negative). It is an offset between the time the event is received, and the timestamp of the first media sample (eg. video frame) that is allowed through by the gate." + }, + "minimumActivationTime": { + "type": "string", + "example": "PT1S", + "description": "The minimum period for which the gate remains open in the absence of subsequent triggers (events)." + }, + "maximumActivationTime": { + "type": "string", + "example": "PT2S", + "description": "The maximum period for which the gate remains open in the presence of subsequent events." + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphProcessor" + } + ], + "description": "A signal gate determines when to block (gate) incoming media, and when to allow it through. 
It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphSignalGateProcessor" + } + } +} diff --git a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/LiveVideoAnalyticsSdkDefinitions.json b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/LiveVideoAnalyticsSdkDefinitions.json new file mode 100644 index 000000000000..147d26469752 --- /dev/null +++ b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/LiveVideoAnalyticsSdkDefinitions.json @@ -0,0 +1,209 @@ +{ + "swagger": "2.0", + "info": { + "description": "Direct Methods for Live Video Analytics on IoT Edge.", + "version": "2.0.0", + "title": "Direct Methods for Live Video Analytics on IoT Edge", + "contact": { + "email": "amshelp@microsoft.com" + } + }, + "security": [ + { + "sharedAccessSignature": [] + } + ], + "paths": {}, + "securityDefinitions": { + "sharedAccessSignature": { + "type": "apiKey", + "name": "Authorization", + "in": "header" + } + }, + "definitions": { + "MethodRequest": { + "type": "object", + "required": [ + "methodName" + ], + "properties": { + "methodName": { + "type": "string", + "description": "method name", + "readOnly": true + }, + "@apiVersion": { + "type": "string", + "description": "api version", + "enum": [ + "2.0" + ], + "x-ms-enum": { + "name": "ApiVersionEnum", + "modelAsString": false + } + } + }, + "discriminator": "methodName", + "description": "Base Class for Method Requests." + }, + "MediaGraphTopologySetRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphTopologySet", + "allOf": [ + { + "$ref": "#/definitions/MethodRequest" + } + ], + "required": [ + "graph" + ], + "properties": { + "graph": { + "$ref": "./LiveVideoAnalytics.json#/definitions/MediaGraphTopology" + } + }, + "description": "Represents the MediaGraphTopologySetRequest." + }, + "MediaGraphTopologySetRequestBody": { + "type": "object", + "allOf": [ + { + "$ref": "#/definitions/MethodRequest" + }, + { + "$ref": "./LiveVideoAnalytics.json#/definitions/MediaGraphTopology" + } + ], + "description": "Represents the MediaGraphTopologySetRequest body." + }, + "MediaGraphInstanceSetRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphInstanceSet", + "allOf": [ + { + "$ref": "#/definitions/MethodRequest" + } + ], + "required": [ + "instance" + ], + "properties": { + "instance": { + "$ref": "./LiveVideoAnalytics.json#/definitions/MediaGraphInstance" + } + }, + "description": "Represents the MediaGraphInstanceSetRequest." + }, + "MediaGraphInstanceSetRequestBody": { + "type": "object", + "allOf": [ + { + "$ref": "#/definitions/MethodRequest" + }, + { + "$ref": "./LiveVideoAnalytics.json#/definitions/MediaGraphInstance" + } + ], + "description": "Represents the MediaGraphInstanceSetRequest body." + }, + "ItemNonSetRequestBase": { + "type": "object", + "allOf": [ + { + "$ref": "#/definitions/MethodRequest" + } + ], + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "method name" + } + } + }, + "MediaGraphTopologyListRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphTopologyList", + "allOf": [ + { + "$ref": "#/definitions/MethodRequest" + } + ], + "description": "Represents the MediaGraphTopologyListRequest." 
+ }, + "MediaGraphTopologyGetRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphTopologyGet", + "allOf": [ + { + "$ref": "#/definitions/ItemNonSetRequestBase" + } + ], + "description": "Represents the MediaGraphTopologyGetRequest." + }, + "MediaGraphTopologyDeleteRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphTopologyDelete", + "allOf": [ + { + "$ref": "#/definitions/ItemNonSetRequestBase" + } + ], + "description": "Represents the MediaGraphTopologyDeleteRequest." + }, + "MediaGraphInstanceListRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphInstanceList", + "allOf": [ + { + "$ref": "#/definitions/MethodRequest" + } + ], + "description": "Represents the MediaGraphInstanceListRequest." + }, + "MediaGraphInstanceGetRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphInstanceGet", + "allOf": [ + { + "$ref": "#/definitions/ItemNonSetRequestBase" + } + ], + "description": "Represents the MediaGraphInstanceGetRequest." + }, + "MediaGraphInstanceActivateRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphInstanceActivate", + "allOf": [ + { + "$ref": "#/definitions/ItemNonSetRequestBase" + } + ], + "description": "Represents the MediaGraphInstanceActivateRequest." + }, + "MediaGraphInstanceDeActivateRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphInstanceDeactivate", + "allOf": [ + { + "$ref": "#/definitions/ItemNonSetRequestBase" + } + ], + "description": "Represents the MediaGraphInstanceDeactivateRequest." + }, + "MediaGraphInstanceDeleteRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphInstanceDelete", + "allOf": [ + { + "$ref": "#/definitions/ItemNonSetRequestBase" + } + ], + "description": "Represents the MediaGraphInstanceDeleteRequest." + } + } +} diff --git a/specification/videoanalyzer/data-plane/readme.az.md b/specification/videoanalyzer/data-plane/readme.az.md deleted file mode 100644 index c323db48f50f..000000000000 --- a/specification/videoanalyzer/data-plane/readme.az.md +++ /dev/null @@ -1,28 +0,0 @@ -## AZ - -These settings apply only when `--az` is specified on the command line. - -For new Resource Provider. It is highly recommended to onboard Azure CLI extensions. There's no differences in terms of customer usage. - -``` yaml $(az) && $(target-mode) != 'core' -az: - extensions: videoanalyzer - namespace: azure.mgmt.videoanalyzer - package-name: azure-mgmt-videoanalyzer -az-output-folder: $(azure-cli-extension-folder)/src/videoanalyzer -python-sdk-output-folder: "$(az-output-folder)/azext_videoanalyzer/vendored_sdks/videoanalyzer" -# add additinal configuration here specific for Azure CLI -# refer to the faq.md for more details -``` - - - -This is for command modules that already in azure cli main repo. 
-``` yaml $(az) && $(target-mode) == 'core' -az: - extensions: videoanalyzer - namespace: azure.mgmt.videoanalyzer - package-name: azure-mgmt-videoanalyzer -az-output-folder: $(azure-cli-folder)/src/azure-cli/azure/cli/command_modules/videoanalyzer -python-sdk-output-folder: "$(az-output-folder)/vendored_sdks/videoanalyzer" -``` \ No newline at end of file diff --git a/specification/videoanalyzer/data-plane/readme.azureresourceschema.md b/specification/videoanalyzer/data-plane/readme.azureresourceschema.md deleted file mode 100644 index 7be4630e4489..000000000000 --- a/specification/videoanalyzer/data-plane/readme.azureresourceschema.md +++ /dev/null @@ -1,23 +0,0 @@ -## AzureResourceSchema - -These settings apply only when `--azureresourceschema` is specified on the command line. - -### AzureResourceSchema multi-api - -``` yaml $(azureresourceschema) && $(multiapi) -batch: - - tag: schema-videoanalyzer-1.0.0 - -``` - -Please also specify `--azureresourceschema-folder=`. - -### Tag: schema-videoanalyzer-1.0.0 and azureresourceschema - -``` yaml $(tag) == 'schema-videoanalyzer-1.0.0' && $(azureresourceschema) -output-folder: $(azureresourceschema-folder)/schemas - -# all the input files in this apiVersion -input-file: - - Microsoft.Media/preview/1.0.0/videoanalyzer.json -``` diff --git a/specification/videoanalyzer/data-plane/readme.cli.md b/specification/videoanalyzer/data-plane/readme.cli.md deleted file mode 100644 index c6cf6ad37ea4..000000000000 --- a/specification/videoanalyzer/data-plane/readme.cli.md +++ /dev/null @@ -1 +0,0 @@ -## CLI Common Settings for all the command line tools \ No newline at end of file diff --git a/specification/videoanalyzer/data-plane/readme.csharp.md b/specification/videoanalyzer/data-plane/readme.csharp.md deleted file mode 100644 index 1fc91c5e2c0b..000000000000 --- a/specification/videoanalyzer/data-plane/readme.csharp.md +++ /dev/null @@ -1,15 +0,0 @@ -## C# - -These settings apply only when `--csharp` is specified on the command line. -Please also specify `--csharp-sdks-folder=`. - -```yaml $(csharp) -csharp: - azure-arm: true - license-header: MICROSOFT_MIT_NO_VERSION - payload-flattening-threshold: 1 - clear-output-folder: true - client-side-validation: false - namespace: Microsoft.Media - output-folder: $(csharp-sdks-folder)/videoanalyzer/management/Microsoft.Media/GeneratedProtocol -``` diff --git a/specification/videoanalyzer/data-plane/readme.go.md b/specification/videoanalyzer/data-plane/readme.go.md deleted file mode 100644 index 8a4e3dde2494..000000000000 --- a/specification/videoanalyzer/data-plane/readme.go.md +++ /dev/null @@ -1,26 +0,0 @@ -## Go - -These settings apply only when `--go` is specified on the command line. - -```yaml $(go) -go: - license-header: MICROSOFT_APACHE_NO_VERSION - namespace: videoanalyzer - clear-output-folder: true -``` - -### Go multi-api - -``` yaml $(go) && $(multiapi) -batch: - - tag: package-1.0.0-preview -``` - -### Tag: package-1.0.0-preview and go - -These settings apply only when `--tag=package-1.0.0-preview --go` is specified on the command line. -Please also specify `--go-sdk-folder=`. 
- -```yaml $(tag) == 'package-1.0.0-preview' && $(go) -output-folder: $(go-sdk-folder)/services/preview/$(namespace)/mgmt/1.0.0/$(namespace) -``` diff --git a/specification/videoanalyzer/data-plane/readme.md b/specification/videoanalyzer/data-plane/readme.md index 76366e45def1..ad5b26baaa51 100644 --- a/specification/videoanalyzer/data-plane/readme.md +++ b/specification/videoanalyzer/data-plane/readme.md @@ -1,41 +1,50 @@ -# videoanalyzer +# MediaServices - Live Video Analytics Edge > see https://aka.ms/autorest -This is the AutoRest configuration file for videoanalyzer. +This is the AutoRest configuration file for Live video analytics edge. + +These swaggers are used to generate the SDKs for Live Video Analytics. These SDKs are models only (no client) and customer would need to use IoT SDK to send direct method calls to IoT hub. These SDKs are not ARM based and doesn't do any REST calls. all operations are sent as direct methods on IoT hub. + +--- ## Getting Started -To build the SDKs for My API, simply install AutoRest via `npm` (`npm install -g autorest`) and then run: +To build the SDK for Live video analytics edge, simply [Install AutoRest](https://aka.ms/autorest/install) and in this folder, run: -> `autorest readme.md` +> `autorest` To see additional help and options, run: > `autorest --help` -For other options on installation see [Installing AutoRest](https://aka.ms/autorest/install) on the AutoRest github page. - --- ## Configuration ### Basic Information -These are the global settings for the videoanalyzer. +These are the global settings for the Live video analytics API. -```yaml +``` yaml openapi-type: data-plane -tag: package-1.0.0 +tag: package-lva-2-0-0-preview + +directive: + - where: + - $.definitions.MethodRequest.properties.methodName + suppress: + - RequiredReadOnlyProperties ``` -### Tag: package-1.0.0 +### Tag: package-lva-1-0-4-preview -These settings apply only when `--tag=package-1.0.0` is specified on the command line. +These settings apply only when `--tag=package-lva-2-0-0-preview` is specified on the command line. -```yaml $(tag) == 'package-1.0.0' +``` yaml $(tag) == 'package-lva-2-0-0-preview' input-file: - - Microsoft.Media/preview/1.0.0/videoanalyzer.json + - LiveVideoAnalytics.Edge/preview/2.0.0/LiveVideoAnalytics.json + - LiveVideoAnalytics.Edge/preview/2.0.0/LiveVideoAnalyticsSdkDefinitions.json ``` --- @@ -47,37 +56,50 @@ input-file: This section describes what SDK should be generated by the automatic system. This is not used by Autorest itself. -```yaml $(swagger-to-sdk) +``` yaml $(swagger-to-sdk) swagger-to-sdk: - - repo: azure-sdk-for-python-track2 - - repo: azure-sdk-for-java - - repo: azure-sdk-for-go - - repo: azure-sdk-for-js - - repo: azure-resource-manager-schemas + - repo: azure-sdk-for-net after_scripts: - - node sdkauto_afterscript.js videoanalyzer/resource-manager - - repo: azure-cli-extensions + - bundle install && rake arm:regen_all_profiles['azure_media_lva_edge'] ``` -## Az - -See configuration in [readme.az.md](./readme.az.md) -## Go - -See configuration in [readme.go.md](./readme.go.md) - -## Python +## C# + +These settings apply only when `--csharp` is specified on the command line. +Please also specify `--csharp-sdks-folder=`. 
+ +``` yaml $(csharp) +csharp: + azure-arm: false + payload-flattening-threshold: 2 + license-header: MICROSOFT_MIT_NO_VERSION + namespace: Microsoft.Azure.Media.LiveVideoAnalytics.Edge + output-folder: $(csharp-sdks-folder)/mediaservices/Microsoft.Azure.Media.LiveVideoAnalytics.Edge/src/Generated + clear-output-folder: true + use-internal-constructors: true + override-client-name: LiveVideoAnalyticsEdgeClient + use-datetimeoffset: true +``` +## Multi-API/Profile support for AutoRest v3 generators -See configuration in [readme.python.md](./readme.python.md) +AutoRest V3 generators require the use of `--tag=all-api-versions` to select api files. -## TypeScript +This block is updated by an automatic script. Edits may be lost! -See configuration in [readme.typescript.md](./readme.typescript.md) +``` yaml $(tag) == 'all-api-versions' /* autogenerated */ +# include the azure profile definitions from the standard location +require: $(this-folder)/../../../profiles/readme.md -## CSharp +# all the input files across all versions +input-file: + - $(this-folder)/LiveVideoAnalytics.Edge/preview/2.0.0/LiveVideoAnalytics.json -See configuration in [readme.csharp.md](./readme.csharp.md) +``` -## AzureResourceSchema +If there are files that should not be in the `all-api-versions` set, +uncomment the `exclude-file` section below and add the file paths. -See configuration in [readme.azureresourceschema.md](./readme.azureresourceschema.md) +``` yaml $(tag) == 'all-api-versions' +#exclude-file: +# - $(this-folder)/Microsoft.Example/stable/2010-01-01/somefile.json +``` diff --git a/specification/videoanalyzer/data-plane/readme.python.md b/specification/videoanalyzer/data-plane/readme.python.md deleted file mode 100644 index dda81cc659a9..000000000000 --- a/specification/videoanalyzer/data-plane/readme.python.md +++ /dev/null @@ -1,22 +0,0 @@ -## Python - -These settings apply only when `--python` is specified on the command line. -Please also specify `--python-sdks-folder=`. - -``` yaml $(track2) -azure-arm: true -license-header: MICROSOFT_MIT_NO_VERSION -package-name: azure-mgmt-videoanalyzer -no-namespace-folders: true -package-version: 1.0.0b1 -``` - -``` yaml $(python-mode) == 'update' && $(track2) -no-namespace-folders: true -output-folder: $(python-sdks-folder)/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer -``` - -``` yaml $(python-mode) == 'create' && $(track2) -basic-setup-py: true -output-folder: $(python-sdks-folder)/videoanalyzer/azure-mgmt-videoanalyzer -``` diff --git a/specification/videoanalyzer/data-plane/readme.typescript.md b/specification/videoanalyzer/data-plane/readme.typescript.md deleted file mode 100644 index 222aca9a09bc..000000000000 --- a/specification/videoanalyzer/data-plane/readme.typescript.md +++ /dev/null @@ -1,14 +0,0 @@ -## TypeScript - -These settings apply only when `--typescript` is specified on the command line. -Please also specify `--typescript-sdks-folder=`. 
- -``` yaml $(typescript) -typescript: - azure-arm: true - package-name: "@azure/arm-videoanalyzer" - output-folder: "$(typescript-sdks-folder)/sdk/videoanalyzer/arm-videoanalyzer" - payload-flattening-threshold: 1 - clear-output-folder: true - generate-metadata: true -``` From 58655bc1b0f63e37fab5d2e6620bedaa958d5517 Mon Sep 17 00:00:00 2001 From: giakas Date: Sun, 28 Mar 2021 11:10:45 -0700 Subject: [PATCH 12/19] Rename old swaggers to new file names --- .../1.0.0/{LiveVideoAnalytics.json => AzureVideoAnalyzer.json} | 0 ...sSdkDefinitions.json => AzureVideoAnalyzerSdkDefinitions.json} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/{LiveVideoAnalytics.json => AzureVideoAnalyzer.json} (100%) rename specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/{LiveVideoAnalyticsSdkDefinitions.json => AzureVideoAnalyzerSdkDefinitions.json} (100%) diff --git a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/LiveVideoAnalytics.json b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json similarity index 100% rename from specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/LiveVideoAnalytics.json rename to specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json diff --git a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/LiveVideoAnalyticsSdkDefinitions.json b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json similarity index 100% rename from specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/LiveVideoAnalyticsSdkDefinitions.json rename to specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json From a39cf324eb3a9a0040d2f85b7a4fc5568530ee07 Mon Sep 17 00:00:00 2001 From: giakas Date: Sun, 28 Mar 2021 11:21:24 -0700 Subject: [PATCH 13/19] Update LVA swaggers with new AVA swagger --- .../preview/1.0.0/AzureVideoAnalyzer.json | 682 +++++++++++------- .../AzureVideoAnalyzerSdkDefinitions.json | 96 +-- .../videoanalyzer/data-plane/readme.md | 32 +- 3 files changed, 504 insertions(+), 306 deletions(-) diff --git a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json index e1135e5d617f..596487ef7667 100644 --- a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json +++ b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json @@ -1,9 +1,9 @@ { "swagger": "2.0", "info": { - "description": "Direct Methods for Live Video Analytics on IoT Edge.", - "version": "2.0.0", - "title": "Direct Methods for Live Video Analytics on IoT Edge", + "description": "Direct Methods for Azure Video Analyzer on IoT Edge.", + "version": "1.0.0", + "title": "Direct Methods for Azure Video Analyzer on IoT Edge", "contact": { "email": "amshelp@microsoft.com" } @@ -22,7 +22,7 @@ } }, "definitions": { - "MediaGraphInstance": { + "LivePipeline": { "type": "object", "required": [ "name" @@ -30,123 +30,124 @@ "properties": { "name": { "type": "string", - "description": "The identifier for the media graph instance." + "description": "The identifier for the live pipeline." 
}, "systemData": { - "$ref": "#/definitions/MediaGraphSystemData" + "$ref": "#/definitions/SystemData", + "description": "The system data for a resource." }, "properties": { - "$ref": "#/definitions/MediaGraphInstanceProperties" + "$ref": "#/definitions/LivePipelineProperties", + "description": "The properties of the live pipeline." } }, - "description": "Represents an instance of a media graph." + "description": "Represents a unique live pipeline." }, - "MediaGraphInstanceProperties": { + "LivePipelineProperties": { "type": "object", "properties": { "description": { "type": "string", - "description": "An optional description for the instance." + "description": "An optional description for the live pipeline." }, "topologyName": { "type": "string", - "description": "The name of the media graph topology that this instance will run. A topology with this name should already have been set in the Edge module." + "description": "The name of the pipeline topology that this live pipeline will run. A pipeline topology with this name should already have been set in the Edge module." }, "parameters": { "type": "array", - "description": "List of one or more graph instance parameters.", + "description": "List of one or more live pipeline parameters.", "items": { - "$ref": "#/definitions/MediaGraphParameterDefinition" + "$ref": "#/definitions/ParameterDefinition" } }, "state": { "type": "string", - "description": "Allowed states for a graph instance.", + "description": "Allowed states for a live pipeline.", "enum": [ - "Inactive", - "Activating", - "Active", - "Deactivating" + "inactive", + "activating", + "active", + "deactivating" ], "x-ms-enum": { - "name": "MediaGraphInstanceState", + "name": "livePipelineState", "values": [ { - "value": "Inactive", - "description": "The media graph instance is idle and not processing media." + "value": "inactive", + "description": "The live pipeline is idle and not processing media." }, { - "value": "Activating", - "description": "The media graph instance is transitioning into the active state." + "value": "activating", + "description": "The live pipeline is transitioning into the active state." }, { - "value": "Active", - "description": "The media graph instance is active and processing media." + "value": "active", + "description": "The live pipeline is active and processing media." }, { - "value": "Deactivating", - "description": "The media graph instance is transitioning into the inactive state." + "value": "deactivating", + "description": "The live pipeline is transitioning into the inactive state." } ], "modelAsString": true } } }, - "description": "Properties of a media graph instance." + "description": "Properties of a live pipeline." }, - "MediaGraphParameterDefinition": { + "ParameterDefinition": { "type": "object", "required": [ - "name", - "value" + "name" ], "properties": { "name": { "type": "string", - "description": "The name of the parameter defined in the media graph topology." + "description": "The name of the parameter defined in the pipeline topology." }, "value": { "type": "string", - "description": "The value to supply for the named parameter defined in the media graph topology." + "description": "The value to supply for the named parameter defined in the pipeline topology." } }, - "description": "A key-value pair. A media graph topology allows certain values to be parameterized. When an instance is created, the parameters are supplied with arguments specific to that instance. 
This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters." + "description": "A key-value pair. A pipeline topology allows certain values to be parameterized. When a live pipeline is created, the parameters are supplied with arguments specific to that instance. This allows the same pipeline topology to be used as a blueprint for multiple streams with different values for the parameters." }, - "MediaGraphInstanceCollection": { + "LivePipelineCollection": { "type": "object", "properties": { "value": { "type": "array", - "description": "A collection of media graph instances.", + "description": "A collection of live pipelines.", "items": { - "$ref": "#/definitions/MediaGraphInstance" + "$ref": "#/definitions/LivePipeline" } }, "@continuationToken": { "type": "string", - "description": "A continuation token to use in subsequent calls to enumerate through the graph instance collection. This is used when the collection contains too many results to return in one response." + "description": "A continuation token to use in subsequent calls to enumerate through the live pipeline collection. This is used when the collection contains too many results to return in one response." } }, - "description": "A collection of media graph instances." + "description": "A collection of streams." }, - "MediaGraphTopologyCollection": { + "PipelineTopologyCollection": { "type": "object", "properties": { "value": { "type": "array", - "description": "A collection of media graph topologies.", + "description": "A collection of pipeline topologies.", "items": { - "$ref": "#/definitions/MediaGraphTopology" + "$ref": "#/definitions/PipelineTopology" } }, "@continuationToken": { "type": "string", - "description": "A continuation token to use in subsequent calls to enumerate through the graph topologies collection. This is used when the collection contains too many results to return in one response." + "description": "A continuation token to use in subsequent calls to enumerate through the pipeline topology collection. This is used when the collection contains too many results to return in one response." } }, - "description": "A collection of media graph topologies." + "description": "A collection of pipeline topologies." }, - "MediaGraphTopology": { + "PipelineTopology": { "type": "object", "required": [ "name" @@ -154,56 +155,58 @@ "properties": { "name": { "type": "string", - "description": "The identifier for the media graph topology." + "description": "The identifier for the pipeline topology." }, "systemData": { - "$ref": "#/definitions/MediaGraphSystemData" + "$ref": "#/definitions/SystemData", + "description": "The system data for a resource." }, "properties": { - "$ref": "#/definitions/MediaGraphTopologyProperties" + "$ref": "#/definitions/PipelineTopologyProperties", + "description": "The properties of the pipeline topology." } }, - "description": "The definition of a media graph topology." + "description": "The definition of a pipeline topology." }, - "MediaGraphTopologyProperties": { + "PipelineTopologyProperties": { "type": "object", "properties": { "description": { "type": "string", - "description": "A description of a media graph topology. It is recommended to use this to describe the expected use of the topology." + "description": "A description of a pipeline topology. It is recommended to use this to describe the expected use of the pipeline topology." 
}, "parameters": { "type": "array", "items": { - "$ref": "#/definitions/MediaGraphParameterDeclaration" + "$ref": "#/definitions/ParameterDeclaration" }, - "description": "The list of parameters defined in the topology. The value for these parameters are supplied by instances of this topology." + "description": "The list of parameters defined in the pipeline topology. The value for these parameters are supplied by streams of this pipeline topology." }, "sources": { "type": "array", "items": { - "$ref": "#/definitions/MediaGraphSource" + "$ref": "#/definitions/Source" }, - "description": "The list of source nodes in this topology." + "description": "The list of source nodes in this pipeline topology." }, "processors": { "type": "array", "items": { - "$ref": "#/definitions/MediaGraphProcessor" + "$ref": "#/definitions/Processor" }, - "description": "The list of processor nodes in this topology." + "description": "The list of processor nodes in this pipeline topology." }, "sinks": { "type": "array", "items": { - "$ref": "#/definitions/MediaGraphSink" + "$ref": "#/definitions/Sink" }, - "description": "The list of sink nodes in this topology." + "description": "The list of sink nodes in this pipeline topology." } }, - "description": "A description of the properties of a media graph topology." + "description": "A description of the properties of a pipeline topology." }, - "MediaGraphSystemData": { + "SystemData": { "type": "object", "properties": { "createdAt": { @@ -217,9 +220,9 @@ "description": "The timestamp of resource last modification (UTC)." } }, - "description": "The system data for a resource. This is used by both topologies and instances." + "description": "The system data for a resource. This is used by both pipeline topologies and live pipelines." }, - "MediaGraphParameterDeclaration": { + "ParameterDeclaration": { "type": "object", "required": [ "name", @@ -235,33 +238,33 @@ "type": "string", "description": "The type of the parameter.", "enum": [ - "String", - "SecretString", - "Int", - "Double", - "Bool" + "string", + "secretString", + "int", + "double", + "bool" ], "x-ms-enum": { - "name": "MediaGraphParameterType", + "name": "parameterType", "values": [ { - "value": "String", + "value": "string", "description": "A string parameter value." }, { - "value": "SecretString", + "value": "secretString", "description": "A string to hold sensitive information as parameter value." }, { - "value": "Int", + "value": "int", "description": "A 32-bit signed integer as parameter value." }, { - "value": "Double", + "value": "double", "description": "A 64-bit double-precision floating point type as parameter value." }, { - "value": "Bool", + "value": "bool", "description": "A boolean value that is either true or false." } ], @@ -274,12 +277,12 @@ }, "default": { "type": "string", - "description": "The default value for the parameter to be used if the media graph instance does not specify a value." + "description": "The default value for the parameter to be used if the live pipeline does not specify a value." } }, - "description": "The declaration of a parameter in the media graph topology. A media graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters." + "description": "The declaration of a parameter in the pipeline topology. A topology can be authored with parameters. 
Then, during live pipeline creation, the value for those parameters can be specified. This allows the same pipeline topology to be used as a blueprint for multiple live pipelines with different values for the parameters." }, - "MediaGraphSource": { + "Source": { "type": "object", "required": [ "@type", @@ -296,27 +299,27 @@ "description": "The name to be used for this source node." } }, - "description": "A source node in a media graph." + "description": "A source node in a pipeline topology." }, - "MediaGraphRtspSource": { + "RtspSource": { "type": "object", "properties": { "transport": { "type": "string", "description": "Underlying RTSP transport. This is used to enable or disable HTTP tunneling.", "enum": [ - "Http", - "Tcp" + "http", + "tcp" ], "x-ms-enum": { - "name": "MediaGraphRtspTransport", + "name": "rtspTransport", "values": [ { - "value": "Http", + "value": "http", "description": "HTTP/HTTPS transport. This should be used when HTTP tunneling is desired." }, { - "value": "Tcp", + "value": "tcp", "description": "TCP transport. This should be used when HTTP tunneling is NOT desired." } ], @@ -325,7 +328,7 @@ }, "endpoint": { "description": "RTSP endpoint of the stream that is being connected to.", - "$ref": "#/definitions/MediaGraphEndpoint" + "$ref": "#/definitions/Endpoint" } }, "required": [ @@ -333,13 +336,13 @@ ], "allOf": [ { - "$ref": "#/definitions/MediaGraphSource" + "$ref": "#/definitions/Source" } ], - "description": "Enables a media graph to capture media from a RTSP server.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphRtspSource" + "description": "Enables a pipeline topology to capture media from a RTSP server.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.RtspSource" }, - "MediaGraphIoTHubMessageSource": { + "IotHubMessageSource": { "type": "object", "properties": { "hubInputName": { @@ -349,18 +352,18 @@ }, "allOf": [ { - "$ref": "#/definitions/MediaGraphSource" + "$ref": "#/definitions/Source" } ], - "description": "Enables a media graph to receive messages via routes declared in the IoT Edge deployment manifest.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphIoTHubMessageSource" + "description": "Enables a pipeline topology to receive messages via routes declared in the IoT Edge deployment manifest.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.IotHubMessageSource" }, - "MediaGraphIoTHubMessageSink": { + "IotHubMessageSink": { "type": "object", "properties": { "hubOutputName": { "type": "string", - "description": "Name of the output path to which the media graph will publish message. These messages can then be delivered to desired destinations by declaring routes referencing the output path in the IoT Edge deployment manifest." + "description": "Name of the output path to which the pipeline topology will publish message. These messages can then be delivered to desired destinations by declaring routes referencing the output path in the IoT Edge deployment manifest." 
} }, "required": [ @@ -369,13 +372,13 @@ ], "allOf": [ { - "$ref": "#/definitions/MediaGraphSink" + "$ref": "#/definitions/Sink" } ], - "description": "Enables a media graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphIoTHubMessageSink" + "description": "Enables a pipeline topology to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.IotHubMessageSink" }, - "MediaGraphEndpoint": { + "Endpoint": { "type": "object", "required": [ "@type", @@ -389,7 +392,7 @@ }, "credentials": { "description": "Polymorphic credentials to be presented to the endpoint.", - "$ref": "#/definitions/MediaGraphCredentials" + "$ref": "#/definitions/Credentials" }, "url": { "type": "string", @@ -398,7 +401,7 @@ }, "description": "Base class for endpoints." }, - "MediaGraphCredentials": { + "Credentials": { "type": "object", "required": [ "@type" @@ -412,7 +415,7 @@ }, "description": "Credentials to present during authentication." }, - "MediaGraphUsernamePasswordCredentials": { + "UsernamePasswordCredentials": { "type": "object", "properties": { "username": { @@ -430,13 +433,13 @@ ], "allOf": [ { - "$ref": "#/definitions/MediaGraphCredentials" + "$ref": "#/definitions/Credentials" } ], "description": "Username/password credential pair.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphUsernamePasswordCredentials" + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.UsernamePasswordCredentials" }, - "MediaGraphHttpHeaderCredentials": { + "HttpHeaderCredentials": { "type": "object", "properties": { "headerName": { @@ -454,43 +457,62 @@ ], "allOf": [ { - "$ref": "#/definitions/MediaGraphCredentials" + "$ref": "#/definitions/Credentials" } ], "description": "Http header service credentials.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphHttpHeaderCredentials" + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.HttpHeaderCredentials" }, - "MediaGraphUnsecuredEndpoint": { + "UnsecuredEndpoint": { "type": "object", "allOf": [ { - "$ref": "#/definitions/MediaGraphEndpoint" + "$ref": "#/definitions/Endpoint" } ], - "description": "An endpoint that the media graph can connect to, with no encryption in transit.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphUnsecuredEndpoint" + "description": "An endpoint that the pipeline topology can connect to, with no encryption in transit.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.UnsecuredEndpoint" }, - "MediaGraphTlsEndpoint": { + "TlsEndpoint": { "type": "object", "properties": { "trustedCertificates": { "description": "Trusted certificates when authenticating a TLS connection. Null designates that Azure Media Service's source of trust should be used.", - "$ref": "#/definitions/MediaGraphCertificateSource" + "$ref": "#/definitions/CertificateSource" }, "validationOptions": { "description": "Validation options to use when authenticating a TLS connection. 
By default, strict validation is used.", - "$ref": "#/definitions/MediaGraphTlsValidationOptions" + "$ref": "#/definitions/TlsValidationOptions" + } + }, + "allOf": [ + { + "$ref": "#/definitions/Endpoint" + } + ], + "description": "A TLS endpoint for pipeline topology external connections.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.TlsEndpoint" + }, + "SymmetricKeyCredentials": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "Symmetric key credential." } }, + "required": [ + "key" + ], "allOf": [ { - "$ref": "#/definitions/MediaGraphEndpoint" + "$ref": "#/definitions/Credentials" } ], - "description": "A TLS endpoint for media graph external connections.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphTlsEndpoint" + "description": "Symmetric key credential.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.SymmetricKeyCredentials" }, - "MediaGraphCertificateSource": { + "CertificateSource": { "type": "object", "required": [ "@type" @@ -504,7 +526,7 @@ }, "description": "Base class for certificate sources." }, - "MediaGraphTlsValidationOptions": { + "TlsValidationOptions": { "type": "object", "properties": { "ignoreHostname": { @@ -518,7 +540,7 @@ }, "description": "Options for controlling the authentication of TLS endpoints." }, - "MediaGraphPemCertificateList": { + "PemCertificateList": { "type": "object", "properties": { "certificates": { @@ -534,13 +556,13 @@ ], "allOf": [ { - "$ref": "#/definitions/MediaGraphCertificateSource" + "$ref": "#/definitions/CertificateSource" } ], "description": "A list of PEM formatted certificates.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphPemCertificateList" + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.PemCertificateList" }, - "MediaGraphSink": { + "Sink": { "type": "object", "required": [ "@type", @@ -555,19 +577,19 @@ }, "name": { "type": "string", - "description": "The name to be used for the media graph sink." + "description": "The name to be used for the topology sink." }, "inputs": { "type": "array", - "description": "An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node.", + "description": "An array of the names of the other nodes in the pipeline topology, the outputs of which are used as input for this sink node.", "items": { - "$ref": "#/definitions/MediaGraphNodeInput" + "$ref": "#/definitions/NodeInput" } } }, - "description": "Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module." + "description": "Enables a pipeline topology to write media data to a destination outside of the Azure Video Analyzer IoT Edge module." }, - "MediaGraphNodeInput": { + "NodeInput": { "type": "object", "required": [ "nodeName" @@ -575,19 +597,19 @@ "properties": { "nodeName": { "type": "string", - "description": "The name of another node in the media graph, the output of which is used as input to this node." + "description": "The name of another node in the pipeline topology, the output of which is used as input to this node." }, "outputSelectors": { "type": "array", "description": "Allows for the selection of particular streams from another node.", "items": { - "$ref": "#/definitions/MediaGraphOutputSelector" + "$ref": "#/definitions/OutputSelector" } } }, - "description": "Represents the input to any node in a media graph." + "description": "Represents the input to any node in a topology." 
}, - "MediaGraphOutputSelector": { + "OutputSelector": { "type": "object", "properties": { "property": { @@ -597,7 +619,7 @@ "mediaType" ], "x-ms-enum": { - "name": "MediaGraphOutputSelectorProperty", + "name": "outputSelectorProperty", "values": [ { "value": "mediaType", @@ -615,7 +637,7 @@ "isNot" ], "x-ms-enum": { - "name": "MediaGraphOutputSelectorOperator", + "name": "outputSelectorOperator", "values": [ { "value": "is", @@ -636,7 +658,7 @@ }, "description": "Allows for the selection of particular streams from another node." }, - "MediaGraphFileSink": { + "FileSink": { "type": "object", "properties": { "baseDirectoryPath": { @@ -647,7 +669,7 @@ "fileNamePattern": { "type": "string", "description": "File name pattern for creating new files on the Edge device. The pattern must include at least one system variable. See the documentation for available variables and additional examples.", - "example": "mySampleFile-${System.GraphTopologyName}-${System.GraphInstanceName}-${System.DateTime}" + "example": "mySampleFile-${System.PipelineTopologyName}-${System.LivePipelineName}-${System.DateTime}" }, "maximumSizeMiB": { "type": "string", @@ -661,19 +683,19 @@ ], "allOf": [ { - "$ref": "#/definitions/MediaGraphSink" + "$ref": "#/definitions/Sink" } ], - "description": "Enables a media graph to write/store media (video and audio) to a file on the Edge device.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphFileSink" + "description": "Enables a topology to write/store media (video and audio) to a file on the Edge device.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.FileSink" }, - "MediaGraphAssetSink": { + "AssetSink": { "type": "object", "properties": { - "assetNamePattern": { + "assetContainerSasUrl": { "type": "string", - "description": "A name pattern when creating new assets. The pattern must include at least one system variable. See the documentation for available variables and additional examples.", - "example": "MySampleAsset-${System.GraphTopologyName}-${System.GraphInstanceName}-${System.DateTime}" + "description": "An Azure Storage SAS Url which points to container, such as the one created for an Azure Media Services asset.", + "example": "https://azurestorage.com/mycontainer?sig=mysecretkey" }, "segmentLength": { "type": "string", @@ -683,7 +705,63 @@ "localMediaCachePath": { "type": "string", "description": "Path to a local file system directory for temporary caching of media before writing to an Asset. Used when the Edge device is temporarily disconnected from Azure.", - "example": "/var/lib/lva/tmp/" + "example": "/var/lib/tmp/" + }, + "localMediaCacheMaximumSizeMiB": { + "type": "string", + "description": "Maximum amount of disk space that can be used for temporary caching of media." + } + }, + "required": [ + "@type", + "assetContainerSasUrl", + "localMediaCachePath", + "localMediaCacheMaximumSizeMiB" + ], + "allOf": [ + { + "$ref": "#/definitions/Sink" + } + ], + "description": "Enables a pipeline topology to record media to an Azure Media Services asset for subsequent playback.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.AssetSink" + }, + "VideoCreationProperties": { + "type": "object", + "properties": { + "title": { + "type": "string", + "description": "An optional title for the video." + }, + "description": { + "type": "string", + "description": "An optional description for the video." 
+ }, + "segmentLength": { + "type": "string", + "example": "PT30S", + "description": "When writing media to video, wait until at least this duration of media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum of 30 seconds and a recommended maximum of 5 minutes." + } + }, + "description": "Properties which will be used only if a video is being created." + }, + "VideoSink": { + "type": "object", + "properties": { + "videoName": { + "type": "string", + "description": "Name of a new or existing Video Analyzer video entity to use as media output.", + "example": "myVideo001" + }, + "videoCreationProperties": { + "type": "string", + "description": "Optional properties which will be used only if a video is being created.", + "$ref": "#/definitions/VideoCreationProperties" + }, + "localMediaCachePath": { + "type": "string", + "description": "Path to a local file system directory for temporary caching of media before writing to a video. This local cache will grow if the connection to Azure is not stable.", + "example": "/var/lib/tmp/" }, "localMediaCacheMaximumSizeMiB": { "type": "string", @@ -692,19 +770,19 @@ }, "required": [ "@type", - "assetNamePattern", + "videoName", "localMediaCachePath", "localMediaCacheMaximumSizeMiB" ], "allOf": [ { - "$ref": "#/definitions/MediaGraphSink" + "$ref": "#/definitions/Sink" } ], - "description": "Enables a media graph to record media to an Azure Media Services asset for subsequent playback.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphAssetSink" + "description": "Enables a pipeline topology to record media to an Azure Video Analyzer video for subsequent playback.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.VideoSink" }, - "MediaGraphProcessor": { + "Processor": { "type": "object", "required": [ "@type", @@ -723,38 +801,38 @@ }, "inputs": { "type": "array", - "description": "An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node.", + "description": "An array of the names of the other nodes in the topology, the outputs of which are used as input for this processor node.", "items": { - "$ref": "#/definitions/MediaGraphNodeInput" + "$ref": "#/definitions/NodeInput" } } }, - "description": "A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output." + "description": "A node that represents the desired processing of media in a topology. Takes media and/or events as inputs, and emits media and/or event as output." }, - "MediaGraphMotionDetectionProcessor": { + "MotionDetectionProcessor": { "type": "object", "properties": { "sensitivity": { "type": "string", "description": "Enumeration that specifies the sensitivity of the motion detection processor.", "enum": [ - "Low", - "Medium", - "High" + "low", + "medium", + "high" ], "x-ms-enum": { - "name": "MediaGraphMotionDetectionSensitivity", + "name": "motionDetectionSensitivity", "values": [ { - "value": "Low", + "value": "low", "description": "Low Sensitivity." }, { - "value": "Medium", + "value": "medium", "description": "Medium Sensitivity." }, { - "value": "High", + "value": "high", "description": "High Sensitivity." } ], @@ -772,13 +850,74 @@ }, "allOf": [ { - "$ref": "#/definitions/MediaGraphProcessor" + "$ref": "#/definitions/Processor" } ], "description": "A node that accepts raw video as input, and detects if there are moving objects present. 
If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphMotionDetectionProcessor" + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.MotionDetectionProcessor" }, - "MediaGraphExtensionProcessorBase": { + "ObjectTrackingProcessor": { + "type": "object", + "properties": { + "accuracy": { + "type": "string", + "description": "Enumeration that controls the accuracy of the tracker.", + "enum": [ + "low", + "medium", + "high" + ], + "x-ms-enum": { + "name": "objectTrackingAccuracy", + "values": [ + { + "value": "low", + "description": "Low Accuracy." + }, + { + "value": "medium", + "description": "Medium Accuracy." + }, + { + "value": "high", + "description": "High Accuracy." + } + ], + "modelAsString": true + } + } + }, + "allOf": [ + { + "$ref": "#/definitions/Processor" + } + ], + "description": "A node that accepts raw video as input, and detects objects.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.ObjectTrackingProcessor" + }, + "LineCrossingProcessor": { + "type": "object", + "required": [ + "lines" + ], + "properties": { + "lines": { + "type": "array", + "description": "An array of lines used to compute line crossing events.", + "items": { + "$ref": "#/definitions/Line" + } + } + }, + "allOf": [ + { + "$ref": "#/definitions/Processor" + } + ], + "description": "A node that accepts raw video as input, and detects when an object crosses a line.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.LineCrossingProcessor" + }, + "ExtensionProcessorBase": { "type": "object", "required": [ "endpoint", @@ -787,37 +926,42 @@ "properties": { "endpoint": { "description": "Endpoint to which this processor should connect.", - "$ref": "#/definitions/MediaGraphEndpoint" + "$ref": "#/definitions/Endpoint" }, "image": { "description": "Describes the parameters of the image that is sent as input to the endpoint.", - "$ref": "#/definitions/MediaGraphImage" + "$ref": "#/definitions/Image" }, "samplingOptions": { "description": "Describes the sampling options to be applied when forwarding samples to the extension.", - "$ref": "#/definitions/MediaGraphSamplingOptions" + "$ref": "#/definitions/SamplingOptions" } }, "allOf": [ { - "$ref": "#/definitions/MediaGraphProcessor" + "$ref": "#/definitions/Processor" } ], - "description": "Processor that allows for extensions outside of the Live Video Analytics Edge module to be integrated into the graph. It is the base class for various different kinds of extension processor types.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphExtensionProcessorBase" + "description": "Processor that allows for extensions outside of the Azure Video Analyzer Edge module to be integrated into the pipeline topology. It is the base class for various different kinds of extension processor types.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.ExtensionProcessorBase" }, - "MediaGraphCognitiveServicesVisionExtension": { + "CognitiveServicesVisionExtension": { "type": "object", - "properties": {}, + "properties": { + "extensionConfiguration": { + "type": "string", + "description": "Optional configuration to pass to the CognitiveServicesVision extension." + } + }, "allOf": [ { - "$ref": "#/definitions/MediaGraphExtensionProcessorBase" + "$ref": "#/definitions/ExtensionProcessorBase" } ], - "description": "A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. 
Inference results are relayed to downstream nodes.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension" + "description": "A processor that allows the pipeline topology to send video frames to a Cognitive Services Vision extension. Inference results are relayed to downstream nodes.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.CognitiveServicesVisionExtension" }, - "MediaGraphGrpcExtension": { + "GrpcExtension": { "type": "object", "required": [ "dataTransfer" @@ -825,7 +969,7 @@ "properties": { "dataTransfer": { "description": "How media should be transferred to the inference engine.", - "$ref": "#/definitions/MediaGraphGrpcExtensionDataTransfer" + "$ref": "#/definitions/GrpcExtensionDataTransfer" }, "extensionConfiguration": { "type": "string", @@ -834,13 +978,13 @@ }, "allOf": [ { - "$ref": "#/definitions/MediaGraphExtensionProcessorBase" + "$ref": "#/definitions/ExtensionProcessorBase" } ], - "description": "A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphGrpcExtension" + "description": "A processor that allows the pipeline topology to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.GrpcExtension" }, - "MediaGraphGrpcExtensionDataTransfer": { + "GrpcExtensionDataTransfer": { "type": "object", "required": [ "mode" @@ -854,18 +998,18 @@ "type": "string", "description": "How frame data should be transmitted to the inference engine.", "enum": [ - "Embedded", - "SharedMemory" + "embedded", + "sharedMemory" ], "x-ms-enum": { - "name": "MediaGraphGrpcExtensionDataTransferMode", + "name": "grpcExtensionDataTransferMode", "values": [ { - "value": "Embedded", + "value": "embedded", "description": "Frames are transferred embedded into the gRPC messages." }, { - "value": "SharedMemory", + "value": "sharedMemory", "description": "Frames are transferred through shared memory." } ], @@ -874,31 +1018,31 @@ } }, "description": "Describes how media should be transferred to the inference engine.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphGrpcExtensionDataTransfer" + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.GrpcExtensionDataTransfer" }, - "MediaGraphHttpExtension": { + "HttpExtension": { "type": "object", "allOf": [ { - "$ref": "#/definitions/MediaGraphExtensionProcessorBase" + "$ref": "#/definitions/ExtensionProcessorBase" } ], - "description": "A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphHttpExtension" + "description": "A processor that allows the pipeline topology to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. 
Inference results are relayed to downstream nodes.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.HttpExtension" }, - "MediaGraphImage": { + "Image": { "type": "object", "properties": { "scale": { - "$ref": "#/definitions/MediaGraphImageScale" + "$ref": "#/definitions/ImageScale" }, "format": { - "$ref": "#/definitions/MediaGraphImageFormat" + "$ref": "#/definitions/ImageFormat" } }, "description": "Describes the properties of an image frame." }, - "MediaGraphSamplingOptions": { + "SamplingOptions": { "type": "object", "properties": { "skipSamplesWithoutAnnotation": { @@ -912,30 +1056,30 @@ }, "description": "Describes the properties of a sample." }, - "MediaGraphImageScale": { + "ImageScale": { "type": "object", "properties": { "mode": { "type": "string", "description": "Describes the modes for scaling an input video frame into an image, before it is sent to an inference engine.", "enum": [ - "PreserveAspectRatio", - "Pad", - "Stretch" + "preserveAspectRatio", + "pad", + "stretch" ], "x-ms-enum": { - "name": "MediaGraphImageScaleMode", + "name": "imageScaleMode", "values": [ { - "value": "PreserveAspectRatio", + "value": "preserveAspectRatio", "description": "Use the same aspect ratio as the input frame." }, { - "value": "Pad", + "value": "pad", "description": "Center pad the input frame to match the given dimensions." }, { - "value": "Stretch", + "value": "stretch", "description": "Stretch input frame to match given dimensions." } ], @@ -953,7 +1097,7 @@ }, "description": "The scaling mode for the image." }, - "MediaGraphImageFormat": { + "ImageFormat": { "type": "object", "required": [ "@type" @@ -966,9 +1110,9 @@ } }, "description": "Encoding settings for an image.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormat" + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.ImageFormat" }, - "MediaGraphImageFormatRaw": { + "ImageFormatRaw": { "type": "object", "required": [ "pixelFormat" @@ -978,63 +1122,63 @@ "type": "string", "description": "The pixel format that will be used to encode images.", "enum": [ - "Yuv420p", - "Rgb565be", - "Rgb565le", - "Rgb555be", - "Rgb555le", - "Rgb24", - "Bgr24", - "Argb", - "Rgba", - "Abgr", - "Bgra" + "yuv420p", + "rgb565be", + "rgb565le", + "rgb555be", + "rgb555le", + "rgb24", + "bgr24", + "argb", + "rgba", + "abgr", + "bgra" ], "x-ms-enum": { - "name": "MediaGraphImageFormatRawPixelFormat", + "name": "imageFormatRawPixelFormat", "values": [ { - "value": "Yuv420p", + "value": "yuv420p", "description": "Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples)." }, { - "value": "Rgb565be", + "value": "rgb565be", "description": "Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian." }, { - "value": "Rgb565le", + "value": "rgb565le", "description": "Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian." }, { - "value": "Rgb555be", + "value": "rgb555be", "description": "Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined." }, { - "value": "Rgb555le", + "value": "rgb555le", "description": "Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined." }, { - "value": "Rgb24", + "value": "rgb24", "description": "Packed RGB 8:8:8, 24bpp, RGBRGB." }, { - "value": "Bgr24", + "value": "bgr24", "description": "Packed RGB 8:8:8, 24bpp, BGRBGR." }, { - "value": "Argb", + "value": "argb", "description": "Packed ARGB 8:8:8:8, 32bpp, ARGBARGB." }, { - "value": "Rgba", + "value": "rgba", "description": "Packed RGBA 8:8:8:8, 32bpp, RGBARGBA." 
}, { - "value": "Abgr", + "value": "abgr", "description": "Packed ABGR 8:8:8:8, 32bpp, ABGRABGR." }, { - "value": "Bgra", + "value": "bgra", "description": "Packed BGRA 8:8:8:8, 32bpp, BGRABGRA." } ], @@ -1044,13 +1188,13 @@ }, "allOf": [ { - "$ref": "#/definitions/MediaGraphImageFormat" + "$ref": "#/definitions/ImageFormat" } ], "description": "Encoding settings for raw images.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormatRaw" + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.ImageFormatRaw" }, - "MediaGraphImageFormatJpeg": { + "ImageFormatJpeg": { "type": "object", "properties": { "quality": { @@ -1060,35 +1204,89 @@ }, "allOf": [ { - "$ref": "#/definitions/MediaGraphImageFormat" + "$ref": "#/definitions/ImageFormat" } ], "description": "Encoding settings for Jpeg images.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormatJpeg" + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.ImageFormatJpeg" }, - "MediaGraphImageFormatBmp": { + "ImageFormatBmp": { "type": "object", "properties": {}, "allOf": [ { - "$ref": "#/definitions/MediaGraphImageFormat" + "$ref": "#/definitions/ImageFormat" } ], "description": "Encoding settings for Bmp images.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormatBmp" + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.ImageFormatBmp" }, - "MediaGraphImageFormatPng": { + "ImageFormatPng": { "type": "object", "properties": {}, "allOf": [ { - "$ref": "#/definitions/MediaGraphImageFormat" + "$ref": "#/definitions/ImageFormat" } ], "description": "Encoding settings for Png images.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormatPng" + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.ImageFormatPng" + }, + "Line": { + "type": "object", + "required": [ + "name", + "line" + ], + "properties": { + "line": { + "$ref": "#/definitions/LineCoordinates", + "description": "Sets the properties of the line." + }, + "name": { + "type": "string", + "description": "The name of the line." + } + }, + "description": "Describes the properties of a line." + }, + "LineCoordinates": { + "type": "object", + "required": [ + "start", + "end" + ], + "properties": { + "start": { + "$ref": "#/definitions/Point", + "description": "Sets the coordinates of the starting point for the line." + }, + "end": { + "$ref": "#/definitions/Point", + "description": "Sets the coordinates of the ending point for the line." + } + }, + "description": "Describes the start point and end point of a line in the frame." + }, + "Point": { + "type": "object", + "required": [ + "x", + "y" + ], + "properties": { + "x": { + "type": "string", + "description": "The X value of the point ranging from 0 to 1 starting from the left side of the frame." + }, + "y": { + "type": "string", + "description": "The Y value of the point ranging from 0 to 1 starting from the upper side of the frame." + } + }, + "description": "Describes the x and y value of a point in the frame." }, - "MediaGraphSignalGateProcessor": { + "SignalGateProcessor": { "type": "object", "properties": { "activationEvaluationWindow": { @@ -1114,11 +1312,11 @@ }, "allOf": [ { - "$ref": "#/definitions/MediaGraphProcessor" + "$ref": "#/definitions/Processor" } ], "description": "A signal gate determines when to block (gate) incoming media, and when to allow it through. 
It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphSignalGateProcessor" + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.SignalGateProcessor" } } -} +} \ No newline at end of file diff --git a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json index 147d26469752..e38342084801 100644 --- a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json +++ b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json @@ -1,9 +1,9 @@ { "swagger": "2.0", "info": { - "description": "Direct Methods for Live Video Analytics on IoT Edge.", - "version": "2.0.0", - "title": "Direct Methods for Live Video Analytics on IoT Edge", + "description": "Direct Methods for Azure Video Analyzer on IoT Edge.", + "version": "1.0.0", + "title": "Direct Methods for Azure Video Analyzer on IoT Edge", "contact": { "email": "amshelp@microsoft.com" } @@ -37,10 +37,10 @@ "type": "string", "description": "api version", "enum": [ - "2.0" + "1.0" ], "x-ms-enum": { - "name": "ApiVersionEnum", + "name": "apiVersionEnum", "modelAsString": false } } @@ -48,65 +48,65 @@ "discriminator": "methodName", "description": "Base Class for Method Requests." }, - "MediaGraphTopologySetRequest": { + "PipelineTopologySetRequest": { "type": "object", - "x-ms-discriminator-value": "GraphTopologySet", + "x-ms-discriminator-value": "pipelineTopologySet", "allOf": [ { "$ref": "#/definitions/MethodRequest" } ], "required": [ - "graph" + "pipelineTopology" ], "properties": { - "graph": { - "$ref": "./LiveVideoAnalytics.json#/definitions/MediaGraphTopology" + "pipelineTopology": { + "$ref": "./AzureVideoAnalyzer.json#/definitions/PipelineTopology" } }, - "description": "Represents the MediaGraphTopologySetRequest." + "description": "Represents the pipelineTopologySet request." }, - "MediaGraphTopologySetRequestBody": { + "PipelineTopologySetRequestBody": { "type": "object", "allOf": [ { "$ref": "#/definitions/MethodRequest" }, { - "$ref": "./LiveVideoAnalytics.json#/definitions/MediaGraphTopology" + "$ref": "./AzureVideoAnalyzer.json#/definitions/PipelineTopology" } ], - "description": "Represents the MediaGraphTopologySetRequest body." + "description": "Represents the pipelineTopologySet request body." }, - "MediaGraphInstanceSetRequest": { + "LivePipelineSetRequest": { "type": "object", - "x-ms-discriminator-value": "GraphInstanceSet", + "x-ms-discriminator-value": "livePipelineSet", "allOf": [ { "$ref": "#/definitions/MethodRequest" } ], "required": [ - "instance" + "livePipeline" ], "properties": { - "instance": { - "$ref": "./LiveVideoAnalytics.json#/definitions/MediaGraphInstance" + "livePipeline": { + "$ref": "./AzureVideoAnalyzer.json#/definitions/LivePipeline" } }, - "description": "Represents the MediaGraphInstanceSetRequest." + "description": "Represents the livePipelineSet request." }, - "MediaGraphInstanceSetRequestBody": { + "livePipelineSetRequestBody": { "type": "object", "allOf": [ { "$ref": "#/definitions/MethodRequest" }, { - "$ref": "./LiveVideoAnalytics.json#/definitions/MediaGraphInstance" + "$ref": "./AzureVideoAnalyzer.json#/definitions/LivePipeline" } ], - "description": "Represents the MediaGraphInstanceSetRequest body." 
+ "description": "Represents the livePipelineSet request body." }, "ItemNonSetRequestBase": { "type": "object", @@ -125,85 +125,85 @@ } } }, - "MediaGraphTopologyListRequest": { + "PipelineTopologyListRequest": { "type": "object", - "x-ms-discriminator-value": "GraphTopologyList", + "x-ms-discriminator-value": "pipelineTopologyList", "allOf": [ { "$ref": "#/definitions/MethodRequest" } ], - "description": "Represents the MediaGraphTopologyListRequest." + "description": "Represents the pipelineTopologyList request." }, - "MediaGraphTopologyGetRequest": { + "PipelineTopologyGetRequest": { "type": "object", - "x-ms-discriminator-value": "GraphTopologyGet", + "x-ms-discriminator-value": "pipelineTopologyGet", "allOf": [ { "$ref": "#/definitions/ItemNonSetRequestBase" } ], - "description": "Represents the MediaGraphTopologyGetRequest." + "description": "Represents the pipelineTopologyGet request." }, - "MediaGraphTopologyDeleteRequest": { + "PipelineTopologyDeleteRequest": { "type": "object", - "x-ms-discriminator-value": "GraphTopologyDelete", + "x-ms-discriminator-value": "pipelineTopologyDelete", "allOf": [ { "$ref": "#/definitions/ItemNonSetRequestBase" } ], - "description": "Represents the MediaGraphTopologyDeleteRequest." + "description": "Represents the pipelineTopologyDelete request." }, - "MediaGraphInstanceListRequest": { + "LivePipelineListRequest": { "type": "object", - "x-ms-discriminator-value": "GraphInstanceList", + "x-ms-discriminator-value": "livePipelineList", "allOf": [ { "$ref": "#/definitions/MethodRequest" } ], - "description": "Represents the MediaGraphInstanceListRequest." + "description": "Represents the livePipelineList request." }, - "MediaGraphInstanceGetRequest": { + "LivePipelineGetRequest": { "type": "object", - "x-ms-discriminator-value": "GraphInstanceGet", + "x-ms-discriminator-value": "livePipelineGet", "allOf": [ { "$ref": "#/definitions/ItemNonSetRequestBase" } ], - "description": "Represents the MediaGraphInstanceGetRequest." + "description": "Represents the livePipelineGet request." }, - "MediaGraphInstanceActivateRequest": { + "LivePipelineActivateRequest": { "type": "object", - "x-ms-discriminator-value": "GraphInstanceActivate", + "x-ms-discriminator-value": "livePipelineActivate", "allOf": [ { "$ref": "#/definitions/ItemNonSetRequestBase" } ], - "description": "Represents the MediaGraphInstanceActivateRequest." + "description": "Represents the livePipelineActivate request." }, - "MediaGraphInstanceDeActivateRequest": { + "LivePipelineDeactivateRequest": { "type": "object", - "x-ms-discriminator-value": "GraphInstanceDeactivate", + "x-ms-discriminator-value": "livePipelineDeactivate", "allOf": [ { "$ref": "#/definitions/ItemNonSetRequestBase" } ], - "description": "Represents the MediaGraphInstanceDeactivateRequest." + "description": "Represents the livePipelineDeactivate request." }, - "MediaGraphInstanceDeleteRequest": { + "LivePipelineDeleteRequest": { "type": "object", - "x-ms-discriminator-value": "GraphInstanceDelete", + "x-ms-discriminator-value": "livePipelineDelete", "allOf": [ { "$ref": "#/definitions/ItemNonSetRequestBase" } ], - "description": "Represents the MediaGraphInstanceDeleteRequest." + "description": "Represents the livePipelineDelete request." 
} } -} +} \ No newline at end of file diff --git a/specification/videoanalyzer/data-plane/readme.md b/specification/videoanalyzer/data-plane/readme.md index ad5b26baaa51..efe24e6b46fc 100644 --- a/specification/videoanalyzer/data-plane/readme.md +++ b/specification/videoanalyzer/data-plane/readme.md @@ -1,16 +1,16 @@ -# MediaServices - Live Video Analytics Edge +# Azure Video Analyzer Edge > see https://aka.ms/autorest -This is the AutoRest configuration file for Live video analytics edge. +This is the AutoRest configuration file for Azure video analyzer edge. -These swaggers are used to generate the SDKs for Live Video Analytics. These SDKs are models only (no client) and customer would need to use IoT SDK to send direct method calls to IoT hub. These SDKs are not ARM based and doesn't do any REST calls. all operations are sent as direct methods on IoT hub. +These swaggers are used to generate the SDKs for Azure Video Analyzer. These SDKs are models only (no client) and customer would need to use IoT SDK to send direct method calls to IoT hub. These SDKs are not ARM based and doesn't do any REST calls. all operations are sent as direct methods on IoT hub. --- ## Getting Started -To build the SDK for Live video analytics edge, simply [Install AutoRest](https://aka.ms/autorest/install) and in this folder, run: +To build the SDK for Azure video analyzer edge, simply [Install AutoRest](https://aka.ms/autorest/install) and in this folder, run: > `autorest` @@ -24,11 +24,11 @@ To see additional help and options, run: ### Basic Information -These are the global settings for the Live video analytics API. +These are the global settings for the Azure video analyzer edge API. ``` yaml openapi-type: data-plane -tag: package-lva-2-0-0-preview +tag: package-ava-edge-1-0-0-preview directive: - where: @@ -37,14 +37,14 @@ directive: - RequiredReadOnlyProperties ``` -### Tag: package-lva-1-0-4-preview +### Tag: package-ava-edge-1-0-0-preview -These settings apply only when `--tag=package-lva-2-0-0-preview` is specified on the command line. +These settings apply only when `--tag=package-ava-edge-1-0-0-preview` is specified on the command line. -``` yaml $(tag) == 'package-lva-2-0-0-preview' +``` yaml $(tag) == 'package-ava-edge-1-0-0-preview' input-file: - - LiveVideoAnalytics.Edge/preview/2.0.0/LiveVideoAnalytics.json - - LiveVideoAnalytics.Edge/preview/2.0.0/LiveVideoAnalyticsSdkDefinitions.json + - VideoAnalyzer.Edge/preview/2.0.0/AzureVideoAnalyzer.json + - VideoAnalyzer.Edge/preview/2.0.0/AzureVideoAnalyzerSdkDefinitions.json ``` --- @@ -60,7 +60,7 @@ This is not used by Autorest itself. 
swagger-to-sdk: - repo: azure-sdk-for-net after_scripts: - - bundle install && rake arm:regen_all_profiles['azure_media_lva_edge'] + - bundle install && rake arm:regen_all_profiles['azure_media_ava_edge'] ``` ## C# @@ -73,11 +73,11 @@ csharp: azure-arm: false payload-flattening-threshold: 2 license-header: MICROSOFT_MIT_NO_VERSION - namespace: Microsoft.Azure.Media.LiveVideoAnalytics.Edge - output-folder: $(csharp-sdks-folder)/mediaservices/Microsoft.Azure.Media.LiveVideoAnalytics.Edge/src/Generated + namespace: Microsoft.Azure.Media.AzureVideoAnalyzer.Edge + output-folder: $(csharp-sdks-folder)/mediaservices/Microsoft.Azure.Media.AzureVideoAnalyzer.Edge/src/Generated clear-output-folder: true use-internal-constructors: true - override-client-name: LiveVideoAnalyticsEdgeClient + override-client-name: AzureVideoAnalyzerEdgeClient use-datetimeoffset: true ``` ## Multi-API/Profile support for AutoRest v3 generators @@ -92,7 +92,7 @@ require: $(this-folder)/../../../profiles/readme.md # all the input files across all versions input-file: - - $(this-folder)/LiveVideoAnalytics.Edge/preview/2.0.0/LiveVideoAnalytics.json + - $(this-folder)/VideoAnalyzer.Edge/preview/2.0.0/AzureVideoAnalyzer.json ``` From c944967d8c01cdc473fd3e1508663fccbfa81677 Mon Sep 17 00:00:00 2001 From: giakas Date: Mon, 29 Mar 2021 10:18:30 -0700 Subject: [PATCH 14/19] fixed some paths and capitalization --- specification/videoanalyzer/data-plane/readme.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/specification/videoanalyzer/data-plane/readme.md b/specification/videoanalyzer/data-plane/readme.md index efe24e6b46fc..5956fee000d8 100644 --- a/specification/videoanalyzer/data-plane/readme.md +++ b/specification/videoanalyzer/data-plane/readme.md @@ -2,7 +2,7 @@ > see https://aka.ms/autorest -This is the AutoRest configuration file for Azure video analyzer edge. +This is the AutoRest configuration file for Azure Video Analyzer edge. These swaggers are used to generate the SDKs for Azure Video Analyzer. These SDKs are models only (no client) and customer would need to use IoT SDK to send direct method calls to IoT hub. These SDKs are not ARM based and doesn't do any REST calls. all operations are sent as direct methods on IoT hub. @@ -10,7 +10,7 @@ These swaggers are used to generate the SDKs for Azure Video Analyzer. These SDK ## Getting Started -To build the SDK for Azure video analyzer edge, simply [Install AutoRest](https://aka.ms/autorest/install) and in this folder, run: +To build the SDK for Azure Video Analyzer edge, simply [Install AutoRest](https://aka.ms/autorest/install) and in this folder, run: > `autorest` @@ -24,7 +24,7 @@ To see additional help and options, run: ### Basic Information -These are the global settings for the Azure video analyzer edge API. +These are the global settings for the Azure Video Analyzer edge API. 
``` yaml openapi-type: data-plane @@ -43,8 +43,8 @@ These settings apply only when `--tag=package-ava-edge-1-0-0-preview` is specifi ``` yaml $(tag) == 'package-ava-edge-1-0-0-preview' input-file: - - VideoAnalyzer.Edge/preview/2.0.0/AzureVideoAnalyzer.json - - VideoAnalyzer.Edge/preview/2.0.0/AzureVideoAnalyzerSdkDefinitions.json + - VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json + - VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json ``` --- @@ -92,7 +92,7 @@ require: $(this-folder)/../../../profiles/readme.md # all the input files across all versions input-file: - - $(this-folder)/VideoAnalyzer.Edge/preview/2.0.0/AzureVideoAnalyzer.json + - $(this-folder)/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json ``` From ee2e0e7f9aa0fd21bbec8d3c6173302a7089cc95 Mon Sep 17 00:00:00 2001 From: giakas Date: Mon, 29 Mar 2021 10:37:05 -0700 Subject: [PATCH 15/19] fix prettier check issues --- .../VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json | 2 +- .../preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json index 596487ef7667..3c6e175bc758 100644 --- a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json +++ b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json @@ -1319,4 +1319,4 @@ "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.SignalGateProcessor" } } -} \ No newline at end of file +} diff --git a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json index e38342084801..7e1400ecacbc 100644 --- a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json +++ b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json @@ -206,4 +206,4 @@ "description": "Represents the livePipelineDelete request." } } -} \ No newline at end of file +} From 2ae729c36605de7e63b9b4ef59275d2e32d69667 Mon Sep 17 00:00:00 2001 From: giakas Date: Tue, 30 Mar 2021 09:05:04 -0700 Subject: [PATCH 16/19] fixing a Pascal casing --- .../preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json index 7e1400ecacbc..3d6aa2098cf0 100644 --- a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json +++ b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json @@ -96,7 +96,7 @@ }, "description": "Represents the livePipelineSet request." 
}, - "livePipelineSetRequestBody": { + "LivePipelineSetRequestBody": { "type": "object", "allOf": [ { From d56fda1d66ab83f2330a35835b55e31de5c7aeb5 Mon Sep 17 00:00:00 2001 From: giakas Date: Wed, 28 Apr 2021 10:45:37 -0700 Subject: [PATCH 17/19] Updating swaggers to latest --- .../preview/1.0.0/AzureVideoAnalyzer.json | 921 +++++++++++++----- .../AzureVideoAnalyzerSdkDefinitions.json | 53 +- 2 files changed, 680 insertions(+), 294 deletions(-) diff --git a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json index 3c6e175bc758..842cc8c39c1b 100644 --- a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json +++ b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json @@ -1,11 +1,11 @@ { "swagger": "2.0", "info": { - "description": "Direct Methods for Azure Video Analyzer on IoT Edge.", "version": "1.0.0", - "title": "Direct Methods for Azure Video Analyzer on IoT Edge", + "title": "Azure Video Analyzer for Edge", + "description": "Azure Video Analyzer resources which can be utilized when performing direct method calls through Azure IoT Edge.", "contact": { - "email": "amshelp@microsoft.com" + "email": "videoanalyzerhelp@microsoft.com" } }, "security": [ @@ -30,40 +30,40 @@ "properties": { "name": { "type": "string", - "description": "The identifier for the live pipeline." + "description": "Live pipeline unique identifier." }, "systemData": { "$ref": "#/definitions/SystemData", - "description": "The system data for a resource." + "description": "Read-only system metadata associated with this object." }, "properties": { "$ref": "#/definitions/LivePipelineProperties", - "description": "The properties of the live pipeline." + "description": "Live pipeline properties." } }, - "description": "Represents a unique live pipeline." + "description": "Live Pipeline represents an unique instance of a pipeline topology which is used for real-time content ingestion and analysis." }, "LivePipelineProperties": { "type": "object", "properties": { "description": { "type": "string", - "description": "An optional description for the live pipeline." + "description": "An optional description of the live pipeline." }, "topologyName": { "type": "string", - "description": "The name of the pipeline topology that this live pipeline will run. A pipeline topology with this name should already have been set in the Edge module." + "description": "The reference to an existing pipeline topology defined for real-time content processing. When activated, this live pipeline will process content according to the pipeline topology definition." }, "parameters": { "type": "array", - "description": "List of one or more live pipeline parameters.", + "description": "List of the instance level parameter values for the user-defined topology parameters. A pipeline can only define or override parameters values for parameters which have been declared in the referenced topology. Topology parameters without a default value must be defined. 
Topology parameters with a default value can be optionally be overridden.", "items": { "$ref": "#/definitions/ParameterDefinition" } }, "state": { "type": "string", - "description": "Allowed states for a live pipeline.", + "description": "Current pipeline state (read-only).", "enum": [ "inactive", "activating", @@ -83,7 +83,7 @@ }, { "value": "active", - "description": "The live pipeline is active and processing media." + "description": "The live pipeline is active and able to process media. If your data source is not available, for instance, if your RTSP camera is powered off or unreachable, the pipeline will still be active and periodically retrying the connection. Your Azure subscription will be billed for the duration in which the live pipeline is in the active state." }, { "value": "deactivating", @@ -94,7 +94,7 @@ } } }, - "description": "Properties of a live pipeline." + "description": "Live pipeline properties." }, "ParameterDefinition": { "type": "object", @@ -104,45 +104,45 @@ "properties": { "name": { "type": "string", - "description": "The name of the parameter defined in the pipeline topology." + "description": "Name of the parameter declared in the pipeline topology." }, "value": { "type": "string", - "description": "The value to supply for the named parameter defined in the pipeline topology." + "description": "Parameter value to be applied on this specific live pipeline." } }, - "description": "A key-value pair. A pipeline topology allows certain values to be parameterized. When a live pipeline is created, the parameters are supplied with arguments specific to that instance. This allows the same pipeline topology to be used as a blueprint for multiple streams with different values for the parameters." + "description": "Defines the parameter value of an specific pipeline topology parameter. See pipeline topology parameters for more information." }, "LivePipelineCollection": { "type": "object", "properties": { "value": { "type": "array", - "description": "A collection of live pipelines.", + "description": "List of live pipelines.", "items": { "$ref": "#/definitions/LivePipeline" } }, "@continuationToken": { "type": "string", - "description": "A continuation token to use in subsequent calls to enumerate through the live pipeline collection. This is used when the collection contains too many results to return in one response." + "description": "A continuation token to be used in subsequent calls when enumerating through the collection. This is returned when the collection results won't fit in a single response." } }, - "description": "A collection of streams." + "description": "A collection of live pipelines." }, "PipelineTopologyCollection": { "type": "object", "properties": { "value": { "type": "array", - "description": "A collection of pipeline topologies.", + "description": "List of pipeline topologies.", "items": { "$ref": "#/definitions/PipelineTopology" } }, "@continuationToken": { "type": "string", - "description": "A continuation token to use in subsequent calls to enumerate through the pipeline topology collection. This is used when the collection contains too many results to return in one response." + "description": "A continuation token to be used in subsequent calls when enumerating through the collection. This is returned when the collection results won't fit in a single response." } }, "description": "A collection of pipeline topologies." @@ -155,56 +155,56 @@ "properties": { "name": { "type": "string", - "description": "The identifier for the pipeline topology." 
+ "description": "Pipeline topology unique identifier." }, "systemData": { "$ref": "#/definitions/SystemData", - "description": "The system data for a resource." + "description": "Read-only system metadata associated with this object." }, "properties": { "$ref": "#/definitions/PipelineTopologyProperties", - "description": "The properties of the pipeline topology." + "description": "Pipeline topology properties." } }, - "description": "The definition of a pipeline topology." + "description": "Pipeline topology describes the processing steps to be applied when processing media for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which acquires data from a RTSP camera, process it with an specific AI model and stored the data on the cloud can be reused across many different cameras, as long as the same processing should be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized, thus allowing individual pipelines to refer to different values, such as individual cameras RTSP endpoints and credentials. Overall a topology is composed of the following:\r\n\r\n - Parameters: list of user defined parameters that can be references across the topology nodes.\r\n - Sources: list of one or more data sources nodes such as an RTSP source which allows for media to be ingested from cameras.\r\n - Processors: list of nodes which perform data analysis or transformations.\r\n -Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations." }, "PipelineTopologyProperties": { "type": "object", "properties": { "description": { "type": "string", - "description": "A description of a pipeline topology. It is recommended to use this to describe the expected use of the pipeline topology." + "description": "An optional description of the pipeline topology. It is recommended that the expected use of the topology to be described here." }, "parameters": { "type": "array", "items": { "$ref": "#/definitions/ParameterDeclaration" }, - "description": "The list of parameters defined in the pipeline topology. The value for these parameters are supplied by streams of this pipeline topology." + "description": "List of the topology parameter declarations. Parameters declared here can be referenced throughout the topology nodes through the use of \"${PARAMETER_NAME}\" string pattern. Parameters can have optional default values and can later be defined in individual instances of the pipeline." }, "sources": { "type": "array", "items": { - "$ref": "#/definitions/Source" + "$ref": "#/definitions/SourceNodeBase" }, - "description": "The list of source nodes in this pipeline topology." + "description": "List of the topology source nodes. Source nodes enable external data to be ingested by the pipeline." }, "processors": { "type": "array", "items": { - "$ref": "#/definitions/Processor" + "$ref": "#/definitions/ProcessorNodeBase" }, - "description": "The list of processor nodes in this pipeline topology." + "description": "List of the topology processor nodes. Processor nodes enable pipeline data to be analyzed, processed or transformed." 
}, "sinks": { "type": "array", "items": { - "$ref": "#/definitions/Sink" + "$ref": "#/definitions/SinkNodeBase" }, - "description": "The list of sink nodes in this pipeline topology." + "description": "List of the topology sink nodes. Sink nodes allow pipeline data to be stored or exported." } }, - "description": "A description of the properties of a pipeline topology." + "description": "Pipeline topology properties." }, "SystemData": { "type": "object", @@ -212,15 +212,15 @@ "createdAt": { "type": "string", "format": "date-time", - "description": "The timestamp of resource creation (UTC)." + "description": "Date and time when this resource was first created. Value is represented in UTC according to the ISO8601 date format." }, "lastModifiedAt": { "type": "string", "format": "date-time", - "description": "The timestamp of resource last modification (UTC)." + "description": "Date and time when this resource was last modified. Value is represented in UTC according to the ISO8601 date format." } }, - "description": "The system data for a resource. This is used by both pipeline topologies and live pipelines." + "description": "Read-only system metadata associated with a resource." }, "ParameterDeclaration": { "type": "object", @@ -231,12 +231,12 @@ "properties": { "name": { "type": "string", - "description": "The name of the parameter.", + "description": "Name of the parameter.", "maxLength": 64 }, "type": { "type": "string", - "description": "The type of the parameter.", + "description": "Type of the parameter.", "enum": [ "string", "secretString", @@ -249,23 +249,23 @@ "values": [ { "value": "string", - "description": "A string parameter value." + "description": "The parameter's value is a string." }, { "value": "secretString", - "description": "A string to hold sensitive information as parameter value." + "description": "The parameter's value is a string that holds sensitive information." }, { "value": "int", - "description": "A 32-bit signed integer as parameter value." + "description": "The parameter's value is a 32-bit signed integer." }, { "value": "double", - "description": "A 64-bit double-precision floating point type as parameter value." + "description": "The parameter's value is a 64-bit double-precision floating point." }, { "value": "bool", - "description": "A boolean value that is either true or false." + "description": "The parameter's value is a boolean value that is either true or false." } ], "modelAsString": true @@ -280,9 +280,9 @@ "description": "The default value for the parameter to be used if the live pipeline does not specify a value." } }, - "description": "The declaration of a parameter in the pipeline topology. A topology can be authored with parameters. Then, during live pipeline creation, the value for those parameters can be specified. This allows the same pipeline topology to be used as a blueprint for multiple live pipelines with different values for the parameters." + "description": "Single topology parameter declaration. Declared parameters can and must be referenced throughout the topology and can optionally have default values to be used when they are not defined in the pipeline instances." }, - "Source": { + "SourceNodeBase": { "type": "object", "required": [ "@type", @@ -292,21 +292,21 @@ "properties": { "@type": { "type": "string", - "description": "The type of the source node. The discriminator for derived types." + "description": "Type discriminator for the derived types." 
}, "name": { "type": "string", - "description": "The name to be used for this source node." + "description": "Node name. Must be unique within the topology." } }, - "description": "A source node in a pipeline topology." + "description": "Base class for topology source nodes." }, "RtspSource": { "type": "object", "properties": { "transport": { "type": "string", - "description": "Underlying RTSP transport. This is used to enable or disable HTTP tunneling.", + "description": "Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When using TCP, the RTP packets are interleaved on the TCP RTSP connection. When using HTTP, the RTSP messages are exchanged through long lived HTTP connections, and the RTP packages are interleaved in the HTTP connections alongside the RTSP messages.", "enum": [ "http", "tcp" @@ -316,19 +316,19 @@ "values": [ { "value": "http", - "description": "HTTP/HTTPS transport. This should be used when HTTP tunneling is desired." + "description": "HTTP transport. RTSP messages are exchanged over long running HTTP requests and RTP packets are interleaved within the HTTP channel." }, { "value": "tcp", - "description": "TCP transport. This should be used when HTTP tunneling is NOT desired." + "description": "TCP transport. RTSP is used directly over TCP and RTP packets are interleaved within the TCP channel." } ], "modelAsString": true } }, "endpoint": { - "description": "RTSP endpoint of the stream that is being connected to.", - "$ref": "#/definitions/Endpoint" + "description": "RTSP endpoint information for Video Analyzer to connect to. This contains the required information for Video Analyzer to connect to RTSP cameras and/or generic RTSP servers.", + "$ref": "#/definitions/EndpointBase" } }, "required": [ @@ -336,10 +336,10 @@ ], "allOf": [ { - "$ref": "#/definitions/Source" + "$ref": "#/definitions/SourceNodeBase" } ], - "description": "Enables a pipeline topology to capture media from a RTSP server.", + "description": "RTSP source allows for media from an RTSP camera or generic RTSP server to be ingested into a live pipeline.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.RtspSource" }, "IotHubMessageSource": { @@ -347,15 +347,15 @@ "properties": { "hubInputName": { "type": "string", - "description": "Name of the input path where messages can be routed to (via routes declared in the IoT Edge deployment manifest)." + "description": "Name of the IoT Edge Hub input from which messages will be consumed." } }, "allOf": [ { - "$ref": "#/definitions/Source" + "$ref": "#/definitions/SourceNodeBase" } ], - "description": "Enables a pipeline topology to receive messages via routes declared in the IoT Edge deployment manifest.", + "description": "IoT Hub Message source allows for the pipeline to consume messages from the IoT Edge Hub. Messages can be routed from other IoT modules via routes declared in the IoT Edge deployment manifest.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.IotHubMessageSource" }, "IotHubMessageSink": { @@ -363,7 +363,7 @@ "properties": { "hubOutputName": { "type": "string", - "description": "Name of the output path to which the pipeline topology will publish message. These messages can then be delivered to desired destinations by declaring routes referencing the output path in the IoT Edge deployment manifest." + "description": "Name of the Iot Edge Hub output to which the messages will be published." 
} }, "required": [ @@ -372,13 +372,13 @@ ], "allOf": [ { - "$ref": "#/definitions/Sink" + "$ref": "#/definitions/SinkNodeBase" } ], - "description": "Enables a pipeline topology to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest.", + "description": "IoT Hub Message sink allows for pipeline messages to published into the IoT Edge Hub. Published messages can then be delivered to the cloud and other modules via routes declared in the IoT Edge deployment manifest.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.IotHubMessageSink" }, - "Endpoint": { + "EndpointBase": { "type": "object", "required": [ "@type", @@ -388,20 +388,20 @@ "properties": { "@type": { "type": "string", - "description": "The discriminator for derived types." + "description": "Type discriminator for the derived types." }, "credentials": { - "description": "Polymorphic credentials to be presented to the endpoint.", - "$ref": "#/definitions/Credentials" + "description": "Credentials to be presented to the endpoint.", + "$ref": "#/definitions/CredentialsBase" }, "url": { "type": "string", - "description": "Url for the endpoint." + "description": "The endpoint URL for Video Analyzer to connect to." } }, "description": "Base class for endpoints." }, - "Credentials": { + "CredentialsBase": { "type": "object", "required": [ "@type" @@ -410,21 +410,21 @@ "properties": { "@type": { "type": "string", - "description": "The discriminator for derived types." + "description": "Type discriminator for the derived types." } }, - "description": "Credentials to present during authentication." + "description": "Base class for credential objects." }, "UsernamePasswordCredentials": { "type": "object", "properties": { "username": { "type": "string", - "description": "Username for a username/password pair." + "description": "Username to be presented as part of the credentials." }, "password": { "type": "string", - "description": "Password for a username/password pair. Please use a parameter so that the actual value is not returned on PUT or GET requests." + "description": "Password to be presented as part of the credentials. It is recommended that this value is parameterized as a secret string in order to prevent this value to be returned as part of the resource on API requests." } }, "required": [ @@ -433,10 +433,10 @@ ], "allOf": [ { - "$ref": "#/definitions/Credentials" + "$ref": "#/definitions/CredentialsBase" } ], - "description": "Username/password credential pair.", + "description": "Username and password credentials.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.UsernamePasswordCredentials" }, "HttpHeaderCredentials": { @@ -448,7 +448,7 @@ }, "headerValue": { "type": "string", - "description": "HTTP header value. Please use a parameter so that the actual value is not returned on PUT or GET requests." + "description": "HTTP header value. It is recommended that this value is parameterized as a secret string in order to prevent this value to be returned as part of the resource on API requests." 
} }, "required": [ @@ -457,27 +457,27 @@ ], "allOf": [ { - "$ref": "#/definitions/Credentials" + "$ref": "#/definitions/CredentialsBase" } ], - "description": "Http header service credentials.", + "description": "HTTP header credentials.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.HttpHeaderCredentials" }, "UnsecuredEndpoint": { "type": "object", "allOf": [ { - "$ref": "#/definitions/Endpoint" + "$ref": "#/definitions/EndpointBase" } ], - "description": "An endpoint that the pipeline topology can connect to, with no encryption in transit.", + "description": "Unsecured endpoint describes an endpoint that the pipeline can connect to over clear transport (no encryption in transit).", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.UnsecuredEndpoint" }, "TlsEndpoint": { "type": "object", "properties": { "trustedCertificates": { - "description": "Trusted certificates when authenticating a TLS connection. Null designates that Azure Media Service's source of trust should be used.", + "description": "List of trusted certificate authorities when authenticating a TLS connection. A null list designates that Azure Video Analyzer's list of trusted authorities should be used.", "$ref": "#/definitions/CertificateSource" }, "validationOptions": { @@ -487,10 +487,10 @@ }, "allOf": [ { - "$ref": "#/definitions/Endpoint" + "$ref": "#/definitions/EndpointBase" } ], - "description": "A TLS endpoint for pipeline topology external connections.", + "description": "TLS endpoint describes an endpoint that the pipeline can connect to over TLS transport (data is encrypted in transit).", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.TlsEndpoint" }, "SymmetricKeyCredentials": { @@ -506,7 +506,7 @@ ], "allOf": [ { - "$ref": "#/definitions/Credentials" + "$ref": "#/definitions/CredentialsBase" } ], "description": "Symmetric key credential.", @@ -521,7 +521,7 @@ "properties": { "@type": { "type": "string", - "description": "The discriminator for derived types." + "description": "Type discriminator for the derived types." } }, "description": "Base class for certificate sources." @@ -531,21 +531,21 @@ "properties": { "ignoreHostname": { "type": "string", - "description": "Boolean value ignoring the host name (common name) during validation." + "description": "When set to 'true' causes the certificate subject name validation to be skipped. Default is 'false'." }, "ignoreSignature": { "type": "string", - "description": "Boolean value ignoring the integrity of the certificate chain at the current time." + "description": "When set to 'true' causes the certificate chain trust validation to be skipped. Default is 'false'." } }, - "description": "Options for controlling the authentication of TLS endpoints." + "description": "Options for controlling the validation of TLS endpoints." }, "PemCertificateList": { "type": "object", "properties": { "certificates": { "type": "array", - "description": "PEM formatted public certificates one per entry.", + "description": "PEM formatted public certificates. One certificate per entry.", "items": { "type": "string" } @@ -562,7 +562,7 @@ "description": "A list of PEM formatted certificates.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.PemCertificateList" }, - "Sink": { + "SinkNodeBase": { "type": "object", "required": [ "@type", @@ -573,21 +573,21 @@ "properties": { "@type": { "type": "string", - "description": "The discriminator for derived types." + "description": "Type discriminator for the derived types." 
}, "name": { "type": "string", - "description": "The name to be used for the topology sink." + "description": "Node name. Must be unique within the topology." }, "inputs": { "type": "array", - "description": "An array of the names of the other nodes in the pipeline topology, the outputs of which are used as input for this sink node.", + "description": "An array of upstream node references within the topology to be used as inputs for this node.", "items": { "$ref": "#/definitions/NodeInput" } } }, - "description": "Enables a pipeline topology to write media data to a destination outside of the Azure Video Analyzer IoT Edge module." + "description": "Base class for topology sink nodes." }, "NodeInput": { "type": "object", @@ -597,24 +597,24 @@ "properties": { "nodeName": { "type": "string", - "description": "The name of another node in the pipeline topology, the output of which is used as input to this node." + "description": "The name of the upstream node in the pipeline which output is used as input of the current node." }, "outputSelectors": { "type": "array", - "description": "Allows for the selection of particular streams from another node.", + "description": "Allows for the selection of specific data streams (eg. video only) from another node.", "items": { "$ref": "#/definitions/OutputSelector" } } }, - "description": "Represents the input to any node in a topology." + "description": "Describes an input signal to be used on a pipeline node." }, "OutputSelector": { "type": "object", "properties": { "property": { "type": "string", - "description": "The stream property to compare with.", + "description": "The property of the data stream to be used as the selection criteria.", "enum": [ "mediaType" ], @@ -623,7 +623,7 @@ "values": [ { "value": "mediaType", - "description": "The stream's MIME type or subtype." + "description": "The stream's MIME type or subtype: audio, video or application" } ], "modelAsString": true @@ -631,7 +631,7 @@ }, "operator": { "type": "string", - "description": "The operator to compare streams by.", + "description": "The operator to compare properties by.", "enum": [ "is", "isNot" @@ -641,11 +641,11 @@ "values": [ { "value": "is", - "description": "A media type is the same type or a subtype." + "description": "The property is of the type defined by value." }, { "value": "isNot", - "description": "A media type is not the same type or a subtype." + "description": "The property is not of the type defined by value." } ], "modelAsString": true @@ -663,17 +663,17 @@ "properties": { "baseDirectoryPath": { "type": "string", - "description": "Absolute directory for all outputs to the Edge device from this sink.", + "description": "Absolute directory path where media files will be stored.", "example": "/var/media/output/" }, "fileNamePattern": { "type": "string", - "description": "File name pattern for creating new files on the Edge device. The pattern must include at least one system variable. See the documentation for available variables and additional examples.", - "example": "mySampleFile-${System.PipelineTopologyName}-${System.LivePipelineName}-${System.DateTime}" + "description": "File name pattern for creating new files when performing event based recording. The pattern must include at least one system variable.", + "example": "mySampleFile-${System.TopologyName}-${System.PipelineName}-${System.Runtime.DateTime}" }, "maximumSizeMiB": { "type": "string", - "description": "Maximum amount of disk space that can be used for storing files from this sink." 
+ "description": "Maximum amount of disk space that can be used for storing files from this sink. Once this limit is reached, the oldest files from this sink will be automatically deleted." } }, "required": [ @@ -683,89 +683,52 @@ ], "allOf": [ { - "$ref": "#/definitions/Sink" + "$ref": "#/definitions/SinkNodeBase" } ], - "description": "Enables a topology to write/store media (video and audio) to a file on the Edge device.", + "description": "File sink allows for video and audio content to be recorded on the file system on the edge device.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.FileSink" }, - "AssetSink": { - "type": "object", - "properties": { - "assetContainerSasUrl": { - "type": "string", - "description": "An Azure Storage SAS Url which points to container, such as the one created for an Azure Media Services asset.", - "example": "https://azurestorage.com/mycontainer?sig=mysecretkey" - }, - "segmentLength": { - "type": "string", - "example": "PT30S", - "description": "When writing media to an asset, wait until at least this duration of media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum of 30 seconds and a recommended maximum of 5 minutes." - }, - "localMediaCachePath": { - "type": "string", - "description": "Path to a local file system directory for temporary caching of media before writing to an Asset. Used when the Edge device is temporarily disconnected from Azure.", - "example": "/var/lib/tmp/" - }, - "localMediaCacheMaximumSizeMiB": { - "type": "string", - "description": "Maximum amount of disk space that can be used for temporary caching of media." - } - }, - "required": [ - "@type", - "assetContainerSasUrl", - "localMediaCachePath", - "localMediaCacheMaximumSizeMiB" - ], - "allOf": [ - { - "$ref": "#/definitions/Sink" - } - ], - "description": "Enables a pipeline topology to record media to an Azure Media Services asset for subsequent playback.", - "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.AssetSink" - }, "VideoCreationProperties": { "type": "object", "properties": { "title": { "type": "string", - "description": "An optional title for the video." + "description": "Optional video title provided by the user. Value can be up to 256 characters long." }, "description": { "type": "string", - "description": "An optional description for the video." + "description": "Optional video description provided by the user. Value can be up to 2048 characters long." }, "segmentLength": { "type": "string", "example": "PT30S", - "description": "When writing media to video, wait until at least this duration of media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum of 30 seconds and a recommended maximum of 5 minutes." + "description": "Video segment length indicates the length of individual video files (segments) which are persisted to storage. Smaller segments provide lower archive playback latency but generate larger volume of storage transactions. Larger segments reduce the amount of storage transactions while increasing the archive playback latency. Value must be specified in ISO8601 duration format (i.e. \"PT30S\" equals 30 seconds) and can vary between 30 seconds to 5 minutes, in 30 seconds increments. Changing this value after the video is initially created can lead to errors when uploading media to the archive. Default value is 30 seconds." } }, - "description": "Properties which will be used only if a video is being created." 
+ "description": "Optional video properties to be used in case a new video resource needs to be created on the service. These will not take effect if the video already exists." }, "VideoSink": { "type": "object", "properties": { "videoName": { "type": "string", - "description": "Name of a new or existing Video Analyzer video entity to use as media output.", + "description": "Name of a new or existing Video Analyzer video resource used for the media recording.", "example": "myVideo001" }, "videoCreationProperties": { "type": "string", - "description": "Optional properties which will be used only if a video is being created.", + "description": "Optional video properties to be used in case a new video resource needs to be created on the service.", "$ref": "#/definitions/VideoCreationProperties" }, "localMediaCachePath": { "type": "string", - "description": "Path to a local file system directory for temporary caching of media before writing to a video. This local cache will grow if the connection to Azure is not stable.", + "description": "Path to a local file system directory for caching of temporary media files. This will also be used to store content which cannot be immediately uploaded to Azure due to Internet connectivity issues.", "example": "/var/lib/tmp/" }, "localMediaCacheMaximumSizeMiB": { "type": "string", - "description": "Maximum amount of disk space that can be used for temporary caching of media." + "description": "Maximum amount of disk space that can be used for caching of temporary media files. Once this limit is reached, the oldest segments of the media archive will be continuously deleted in order to make space for new media, thus leading to gaps in the cloud recorded content." } }, "required": [ @@ -776,13 +739,13 @@ ], "allOf": [ { - "$ref": "#/definitions/Sink" + "$ref": "#/definitions/SinkNodeBase" } ], - "description": "Enables a pipeline topology to record media to an Azure Video Analyzer video for subsequent playback.", + "description": "Video sink allows for video and audio to be recorded to the Video Analyzer service. The recorded video can be played from anywhere and further managed from the cloud. Due to security reasons, a given Video Analyzer edge module instance can only record content to new video entries, or existing video entries previously recorded by the same module. Any attempt to record content to an existing video which has not been created by the same module instance will result in failure to record.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.VideoSink" }, - "Processor": { + "ProcessorNodeBase": { "type": "object", "required": [ "@type", @@ -793,28 +756,28 @@ "properties": { "@type": { "type": "string", - "description": "The discriminator for derived types." + "description": "Type discriminator for the derived types." }, "name": { "type": "string", - "description": "The name for this processor node." + "description": "Node name. Must be unique within the topology." }, "inputs": { "type": "array", - "description": "An array of the names of the other nodes in the topology, the outputs of which are used as input for this processor node.", + "description": "An array of upstream node references within the topology to be used as inputs for this node.", "items": { "$ref": "#/definitions/NodeInput" } } }, - "description": "A node that represents the desired processing of media in a topology. Takes media and/or events as inputs, and emits media and/or event as output." + "description": "Base class for topology processor nodes." 
}, "MotionDetectionProcessor": { "type": "object", "properties": { "sensitivity": { "type": "string", - "description": "Enumeration that specifies the sensitivity of the motion detection processor.", + "description": "Motion detection sensitivity: low, medium, high.", "enum": [ "low", "medium", @@ -825,15 +788,15 @@ "values": [ { "value": "low", - "description": "Low Sensitivity." + "description": "Low sensitivity." }, { "value": "medium", - "description": "Medium Sensitivity." + "description": "Medium sensitivity." }, { "value": "high", - "description": "High Sensitivity." + "description": "High sensitivity." } ], "modelAsString": true @@ -841,19 +804,19 @@ }, "outputMotionRegion": { "type": "boolean", - "description": "Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. Default is true." + "description": "Indicates whether the processor should detect and output the regions within the video frame where motion was detected. Default is true." }, "eventAggregationWindow": { "type": "string", - "description": "Event aggregation window duration, or 0 for no aggregation." + "description": "Time window duration on which events are aggregated before being emitted. Value must be specified in ISO8601 duration format (i.e. \"PT2S\" equals 2 seconds). Use 0 seconds for no aggregation. Default is 1 second." } }, "allOf": [ { - "$ref": "#/definitions/Processor" + "$ref": "#/definitions/ProcessorNodeBase" } ], - "description": "A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped.", + "description": "Motion detection processor allows for motion detection on the video stream. It generates motion events whenever motion is present on the video.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.MotionDetectionProcessor" }, "ObjectTrackingProcessor": { @@ -861,7 +824,7 @@ "properties": { "accuracy": { "type": "string", - "description": "Enumeration that controls the accuracy of the tracker.", + "description": "Object tracker accuracy: low, medium, high. Higher accuracy leads to higher CPU consumption in average.", "enum": [ "low", "medium", @@ -872,15 +835,15 @@ "values": [ { "value": "low", - "description": "Low Accuracy." + "description": "Low accuracy." }, { "value": "medium", - "description": "Medium Accuracy." + "description": "Medium accuracy." }, { "value": "high", - "description": "High Accuracy." + "description": "High accuracy." } ], "modelAsString": true @@ -889,10 +852,10 @@ }, "allOf": [ { - "$ref": "#/definitions/Processor" + "$ref": "#/definitions/ProcessorNodeBase" } ], - "description": "A node that accepts raw video as input, and detects objects.", + "description": "Object tracker processor allows for continuous tracking of one of more objects over a finite sequence of video frames. It must be used downstream of an object detector extension node, thus allowing for the extension to be configured to to perform inferences on sparse frames through the use of the 'maximumSamplesPerSecond' sampling property. The object tracker node will then track the detected objects over the frames in which the detector is not invoked resulting on a smother tracking of detected objects across the continuum of video frames. 
The tracker will stop tracking objects which are not subsequently detected by the upstream detector on the subsequent detections.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.ObjectTrackingProcessor" }, "LineCrossingProcessor": { @@ -905,16 +868,16 @@ "type": "array", "description": "An array of lines used to compute line crossing events.", "items": { - "$ref": "#/definitions/Line" + "$ref": "#/definitions/NamedLineBase" } } }, "allOf": [ { - "$ref": "#/definitions/Processor" + "$ref": "#/definitions/ProcessorNodeBase" } ], - "description": "A node that accepts raw video as input, and detects when an object crosses a line.", + "description": "Line crossing processor allows for the detection of tracked objects moving across one or more predefined lines. It must be downstream of an object tracker of downstream on an AI extension node that generates sequenceId for objects which are tracked across different frames of the video. Inference events are generated every time objects crosses from one side of the line to another.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.LineCrossingProcessor" }, "ExtensionProcessorBase": { @@ -925,42 +888,26 @@ ], "properties": { "endpoint": { - "description": "Endpoint to which this processor should connect.", - "$ref": "#/definitions/Endpoint" + "description": "Endpoint details of the pipeline extension plugin.", + "$ref": "#/definitions/EndpointBase" }, "image": { - "description": "Describes the parameters of the image that is sent as input to the endpoint.", - "$ref": "#/definitions/Image" + "description": "Image transformations and formatting options to be applied to the video frame(s) prior submission to the pipeline extension plugin.", + "$ref": "#/definitions/ImageProperties" }, "samplingOptions": { - "description": "Describes the sampling options to be applied when forwarding samples to the extension.", + "description": "Media sampling parameters that define how often media is submitted to the extension plugin.", "$ref": "#/definitions/SamplingOptions" } }, "allOf": [ { - "$ref": "#/definitions/Processor" + "$ref": "#/definitions/ProcessorNodeBase" } ], - "description": "Processor that allows for extensions outside of the Azure Video Analyzer Edge module to be integrated into the pipeline topology. It is the base class for various different kinds of extension processor types.", + "description": "Base class for pipeline extension processors. Pipeline extensions allow for custom media analysis and processing to be plugged into the Video Analyzer pipeline.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.ExtensionProcessorBase" }, - "CognitiveServicesVisionExtension": { - "type": "object", - "properties": { - "extensionConfiguration": { - "type": "string", - "description": "Optional configuration to pass to the CognitiveServicesVision extension." - } - }, - "allOf": [ - { - "$ref": "#/definitions/ExtensionProcessorBase" - } - ], - "description": "A processor that allows the pipeline topology to send video frames to a Cognitive Services Vision extension. 
Inference results are relayed to downstream nodes.", - "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.CognitiveServicesVisionExtension" - }, "GrpcExtension": { "type": "object", "required": [ @@ -968,12 +915,12 @@ ], "properties": { "dataTransfer": { - "description": "How media should be transferred to the inference engine.", + "description": "Specifies how media is transferred to the extension plugin.", "$ref": "#/definitions/GrpcExtensionDataTransfer" }, "extensionConfiguration": { "type": "string", - "description": "Optional configuration to pass to the gRPC extension." + "description": "An optional configuration string that is sent to the extension plugin. The configuration string is specific to each custom extension and it not understood neither validated by Video Analyzer. Please see https://aka.ms/ava-extension-grpc for details." } }, "allOf": [ @@ -981,7 +928,7 @@ "$ref": "#/definitions/ExtensionProcessorBase" } ], - "description": "A processor that allows the pipeline topology to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes.", + "description": "GRPC extension processor allows pipeline extension plugins to be connected to the pipeline through over a gRPC channel. Extension plugins must act as an gRPC server. Please see https://aka.ms/ava-extension-grpc for details.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.GrpcExtension" }, "GrpcExtensionDataTransfer": { @@ -992,11 +939,11 @@ "properties": { "sharedMemorySizeMiB": { "type": "string", - "description": "The size of the buffer for all in-flight frames in mebibytes if mode is SharedMemory. Should not be specified otherwise." + "description": "The share memory buffer for sample transfers, in mebibytes. It can only be used with the 'SharedMemory' transfer mode." }, "mode": { "type": "string", - "description": "How frame data should be transmitted to the inference engine.", + "description": "Data transfer mode: embedded or sharedMemory.", "enum": [ "embedded", "sharedMemory" @@ -1006,18 +953,18 @@ "values": [ { "value": "embedded", - "description": "Frames are transferred embedded into the gRPC messages." + "description": "Media samples are embedded into the gRPC messages. This mode is less efficient but it requires a simpler implementations and can be used with plugins which are not on the same node as the Video Analyzer module." }, { "value": "sharedMemory", - "description": "Frames are transferred through shared memory." + "description": "Media samples are made available through shared memory. This mode enables efficient data transfers but it requires that the extension plugin to be co-located on the same node and sharing the same shared memory space." } ], "modelAsString": true } } }, - "description": "Describes how media should be transferred to the inference engine.", + "description": "Describes how media is transferred to the extension plugin.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.GrpcExtensionDataTransfer" }, "HttpExtension": { @@ -1027,41 +974,41 @@ "$ref": "#/definitions/ExtensionProcessorBase" } ], - "description": "A processor that allows the pipeline topology to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. 
Inference results are relayed to downstream nodes.", + "description": "HTTP extension processor allows pipeline extension plugins to be connected to the pipeline through over the HTTP protocol. Extension plugins must act as an HTTP server. Please see https://aka.ms/ava-extension-http for details.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.HttpExtension" }, - "Image": { + "ImageProperties": { "type": "object", "properties": { "scale": { "$ref": "#/definitions/ImageScale" }, "format": { - "$ref": "#/definitions/ImageFormat" + "$ref": "#/definitions/ImageFormatProperties" } }, - "description": "Describes the properties of an image frame." + "description": "Image transformations and formatting options to be applied to the video frame(s)." }, "SamplingOptions": { "type": "object", "properties": { "skipSamplesWithoutAnnotation": { "type": "string", - "description": "If true, limits the samples submitted to the extension to only samples which have associated inference(s)" + "description": "When set to 'true', prevents frames without upstream inference data to be sent to the extension plugin. This is useful to limit the frames sent to the extension to pre-analyzed frames only. For example, when used downstream from a motion detector, this can enable for only frames in which motion has been detected to be further analyzed." }, "maximumSamplesPerSecond": { "type": "string", - "description": "Maximum rate of samples submitted to the extension" + "description": "Maximum rate of samples submitted to the extension. This prevents an extension plugin to be overloaded with data." } }, - "description": "Describes the properties of a sample." + "description": "Defines how often media is submitted to the extension plugin." }, "ImageScale": { "type": "object", "properties": { "mode": { "type": "string", - "description": "Describes the modes for scaling an input video frame into an image, before it is sent to an inference engine.", + "description": "Describes the image scaling mode to be applied. Default mode is 'pad'.", "enum": [ "preserveAspectRatio", "pad", @@ -1072,15 +1019,15 @@ "values": [ { "value": "preserveAspectRatio", - "description": "Use the same aspect ratio as the input frame." + "description": "Preserves the same aspect ratio as the input image. If only one image dimension is provided, the second dimension is calculated based on the input image aspect ratio. When 2 dimensions are provided, the image is resized to fit the most constraining dimension, considering the input image size and aspect ratio." }, { "value": "pad", - "description": "Center pad the input frame to match the given dimensions." + "description": "Pads the image with black horizontal stripes (letterbox) or black vertical stripes (pillar-box) so the image is resized to the specified dimensions while not altering the content aspect ratio." }, { "value": "stretch", - "description": "Stretch input frame to match given dimensions." + "description": "Stretches the original image so it resized to the specified dimensions." } ], "modelAsString": true @@ -1088,16 +1035,16 @@ }, "width": { "type": "string", - "description": "The desired output width of the image." + "description": "The desired output image width." }, "height": { "type": "string", - "description": "The desired output height of the image." + "description": "The desired output image height." } }, - "description": "The scaling mode for the image." + "description": "Image scaling mode." 
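> Note (illustrative sketch only): an extension processor's `image` property combining the scale settings above with one of the formats defined below could be written as in the following example. The 416x416 dimensions and the JPEG quality value are assumptions, not defaults.

```json
{
  "scale": {
    "mode": "pad",
    "width": "416",
    "height": "416"
  },
  "format": {
    "@type": "#Microsoft.VideoAnalyzer.ImageFormatJpeg",
    "quality": "90"
  }
}
```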
}, - "ImageFormat": { + "ImageFormatProperties": { "type": "object", "required": [ "@type" @@ -1106,11 +1053,10 @@ "properties": { "@type": { "type": "string", - "description": "The discriminator for derived types." + "description": "Type discriminator for the derived types." } }, - "description": "Encoding settings for an image.", - "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.ImageFormat" + "description": "Base class for image formatting properties." }, "ImageFormatRaw": { "type": "object", @@ -1120,7 +1066,7 @@ "properties": { "pixelFormat": { "type": "string", - "description": "The pixel format that will be used to encode images.", + "description": "Pixel format to be applied to the raw image.", "enum": [ "yuv420p", "rgb565be", @@ -1188,10 +1134,10 @@ }, "allOf": [ { - "$ref": "#/definitions/ImageFormat" + "$ref": "#/definitions/ImageFormatProperties" } ], - "description": "Encoding settings for raw images.", + "description": "Raw image formatting.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.ImageFormatRaw" }, "ImageFormatJpeg": { @@ -1199,15 +1145,15 @@ "properties": { "quality": { "type": "string", - "description": "The image quality. Value must be between 0 to 100 (best quality)." + "description": "Image quality value between 0 to 100 (best quality)." } }, "allOf": [ { - "$ref": "#/definitions/ImageFormat" + "$ref": "#/definitions/ImageFormatProperties" } ], - "description": "Encoding settings for Jpeg images.", + "description": "JPEG image encoding.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.ImageFormatJpeg" }, "ImageFormatBmp": { @@ -1215,10 +1161,10 @@ "properties": {}, "allOf": [ { - "$ref": "#/definitions/ImageFormat" + "$ref": "#/definitions/ImageFormatProperties" } ], - "description": "Encoding settings for Bmp images.", + "description": "BMP image encoding.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.ImageFormatBmp" }, "ImageFormatPng": { @@ -1226,65 +1172,89 @@ "properties": {}, "allOf": [ { - "$ref": "#/definitions/ImageFormat" + "$ref": "#/definitions/ImageFormatProperties" } ], - "description": "Encoding settings for Png images.", + "description": "PNG image encoding.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.ImageFormatPng" }, - "Line": { + "NamedLineBase": { "type": "object", "required": [ - "name", - "line" + "@type", + "name" ], + "discriminator": "@type", "properties": { - "line": { - "$ref": "#/definitions/LineCoordinates", - "description": "Sets the properties of the line." + "@type": { + "type": "string", + "description": "The Type discriminator for the derived types." }, "name": { "type": "string", - "description": "The name of the line." + "description": "Line name. Must be unique within the node." } }, - "description": "Describes the properties of a line." + "description": "Base class for named lines." }, - "LineCoordinates": { + "NamedLineString": { "type": "object", "required": [ - "start", - "end" + "line" ], "properties": { - "start": { - "$ref": "#/definitions/Point", - "description": "Sets the coordinates of the starting point for the line." - }, - "end": { - "$ref": "#/definitions/Point", - "description": "Sets the coordinates of the ending point for the line." + "line": { + "type": "string", + "example": "[[0.3,0.2],[0.9,0.8]]", + "description": "Point coordinates for the line start and end, respectively. Example: '[[0.3, 0.2],[0.9, 0.8]]'. 
Each point is expressed as [LEFT, TOP] coordinate ratios ranging from 0.0 to 1.0, where [0,0] is the upper-left frame corner and [1, 1] is the bottom-right frame corner."
         }
       },
-      "description": "Describes the start point and end point of a line in the frame."
+      "allOf": [
+        {
+          "$ref": "#/definitions/NamedLineBase"
+        }
+      ],
+      "description": "Describes a line configuration.",
+      "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.NamedLineString"
     },
-    "Point": {
+    "NamedPolygonBase": {
       "type": "object",
       "required": [
-        "x",
-        "y"
+        "@type",
+        "name"
       ],
+      "discriminator": "@type",
       "properties": {
-        "x": {
+        "@type": {
           "type": "string",
-          "description": "The X value of the point ranging from 0 to 1 starting from the left side of the frame."
+          "description": "The Type discriminator for the derived types."
         },
-        "y": {
+        "name": {
           "type": "string",
-          "description": "The Y value of the point ranging from 0 to 1 starting from the upper side of the frame."
+          "description": "Polygon name. Must be unique within the node."
         }
       },
-      "description": "Describes the x and y value of a point in the frame."
+      "description": "Describes the named polygon."
+    },
+    "NamedPolygonString": {
+      "type": "object",
+      "required": [
+        "polygon"
+      ],
+      "properties": {
+        "polygon": {
+          "type": "string",
+          "example": "[[0.3, 0.2],[0.9, 0.8],[0.7, 0.6]]",
+          "description": "Point coordinates for the polygon. Example: '[[0.3, 0.2],[0.9, 0.8],[0.7, 0.6]]'. Each point is expressed as [LEFT, TOP] coordinate ratios ranging from 0.0 to 1.0, where [0,0] is the upper-left frame corner and [1, 1] is the bottom-right frame corner."
+        }
+      },
+      "allOf": [
+        {
+          "$ref": "#/definitions/NamedPolygonBase"
+        }
+      ],
+      "description": "Describes a closed polygon configuration.",
+      "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.NamedPolygonString"
     },
     "SignalGateProcessor": {
       "type": "object",
@@ -1297,26 +1267,443 @@
         "activationSignalOffset": {
           "type": "string",
           "example": "-PT1.0S",
-          "description": "Signal offset once the gate is activated (can be negative). It is an offset between the time the event is received, and the timestamp of the first media sample (eg. video frame) that is allowed through by the gate."
+          "description": "Signal offset once the gate is activated (can be negative). It determines how far behind or ahead of the activation time media is let through. A negative offset indicates that data prior to the activation time must be included in the signal that is let through once the gate is activated. When used upstream of a file or video sink, this allows for scenarios such as recording buffered media prior to an event, for example recording video from 5 seconds before motion is detected."
         },
         "minimumActivationTime": {
           "type": "string",
           "example": "PT1S",
-          "description": "The minimum period for which the gate remains open in the absence of subsequent triggers (events)."
+          "description": "The minimum period for which the gate remains open in the absence of subsequent triggers (events). When used upstream of a file or video sink, it determines the minimum length of the recorded video clip."
         },
         "maximumActivationTime": {
           "type": "string",
           "example": "PT2S",
-          "description": "The maximum period for which the gate remains open in the presence of subsequent events."
+          "description": "The maximum period for which the gate remains open in the presence of subsequent triggers (events). When used upstream of a file or video sink, it determines the maximum length of the recorded video clip."
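Taken together, these properties support event-based recording. Below is a minimal sketch of how a signal gate might be configured to record roughly 30-second clips that start about 5 seconds before the triggering event. The `inputs` shape and the `activationEvaluationWindow` property are defined outside this hunk, and all values (ISO 8601 durations, node names) are illustrative assumptions rather than recommended settings.

```json
{
  "@type": "#Microsoft.VideoAnalyzer.SignalGateProcessor",
  "name": "signalGate",
  "inputs": [
    { "nodeName": "motionDetection" },
    { "nodeName": "rtspSource" }
  ],
  "activationEvaluationWindow": "PT1S",
  "activationSignalOffset": "-PT5S",
  "minimumActivationTime": "PT30S",
  "maximumActivationTime": "PT30S"
}
```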
} }, "allOf": [ { - "$ref": "#/definitions/Processor" + "$ref": "#/definitions/ProcessorNodeBase" } ], - "description": "A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate.", + "description": "A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. See https://aka.ms/ava-signalgate for more information.", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.SignalGateProcessor" + }, + "SpatialAnalysisOperationBase": { + "type": "object", + "required": [ + "@type" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The Type discriminator for the derived types." + } + }, + "description": "Base class for Azure Cognitive Services Spatial Analysis operations." + }, + "SpatialAnalysisCustomOperation": { + "type": "object", + "required": [ + "extensionConfiguration" + ], + "properties": { + "extensionConfiguration": { + "type": "string", + "description": "Custom configuration to pass to the Azure Cognitive Services Spatial Analysis module." + } + }, + "allOf": [ + { + "$ref": "#/definitions/SpatialAnalysisOperationBase" + } + ], + "description": "Defines a Spatial Analysis custom operation. This requires the Azure Cognitive Services Spatial analysis module to be deployed alongside the Video Analyzer module, please see https://aka.ms/ava-spatial-analysis for more information.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.SpatialAnalysisCustomOperation" + }, + "SpatialAnalysisTypedOperationBase": { + "type": "object", + "properties": { + "debug": { + "type": "string", + "description": "If set to 'true', enables debugging mode for this operation." + }, + "cameraConfiguration": { + "type": "string", + "description": "Advanced camera configuration." + }, + "detectorNodeConfiguration": { + "type": "string", + "description": "Advanced detector node configuration." + }, + "enableFaceMaskClassifier": { + "type": "string", + "description": "If set to 'true', enables face mask detection for this operation." + } + }, + "allOf": [ + { + "$ref": "#/definitions/SpatialAnalysisOperationBase" + } + ], + "description": "Base class for Azure Cognitive Services Spatial Analysis typed operations." + }, + "SpatialAnalysisOperationEventBase": { + "type": "object", + "properties": { + "threshold": { + "type": "string", + "description": "The event threshold." + }, + "focus": { + "type": "string", + "description": "The operation focus type.", + "enum": [ + "center", + "bottomCenter", + "footprint" + ], + "x-ms-enum": { + "name": "spatialAnalysisOperationFocus", + "values": [ + { + "value": "center", + "description": "The center of the object." + }, + { + "value": "bottomCenter", + "description": "The bottom center of the object." + }, + { + "value": "footprint", + "description": "The footprint." + } + ], + "modelAsString": true + } + } + }, + "description": "Defines the Azure Cognitive Services Spatial Analysis operation eventing configuration." 
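Of the Spatial Analysis operations defined here, the custom operation is the simplest: it only carries an opaque configuration string for the Spatial Analysis module. A hedged sketch of such an operation is shown below; the contents of `extensionConfiguration` are specific to the Spatial Analysis module, and the string used here is purely illustrative.

```json
{
  "@type": "#Microsoft.VideoAnalyzer.SpatialAnalysisCustomOperation",
  "extensionConfiguration": "{\"version\": 1, \"enabled\": true}"
}
```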
+ }, + "SpatialAnalysisPersonCountEvent": { + "type": "object", + "properties": { + "trigger": { + "type": "string", + "description": "The event trigger type.", + "enum": [ + "event", + "interval" + ], + "x-ms-enum": { + "name": "spatialAnalysisPersonCountEventTrigger", + "values": [ + { + "value": "event", + "description": "Event trigger." + }, + { + "value": "interval", + "description": "Interval trigger." + } + ], + "modelAsString": true + } + }, + "outputFrequency": { + "type": "string", + "description": "The event or interval output frequency." + } + }, + "allOf": [ + { + "$ref": "#/definitions/SpatialAnalysisOperationEventBase" + } + ], + "description": "Defines a Spatial Analysis person count operation eventing configuration." + }, + "SpatialAnalysisPersonCountZoneEvents": { + "type": "object", + "required": [ + "zone" + ], + "properties": { + "zone": { + "description": "The named zone.", + "$ref": "#/definitions/NamedPolygonBase" + }, + "events": { + "type": "array", + "description": "The event configuration.", + "items": { + "$ref": "#/definitions/SpatialAnalysisPersonCountEvent" + } + } + } + }, + "SpatialAnalysisPersonCountOperation": { + "type": "object", + "required": [ + "zones" + ], + "properties": { + "zones": { + "type": "array", + "description": "The list of zones and optional events.", + "items": { + "$ref": "#/definitions/SpatialAnalysisPersonCountZoneEvents" + } + } + }, + "allOf": [ + { + "$ref": "#/definitions/SpatialAnalysisTypedOperationBase" + } + ], + "description": "Defines a Spatial Analysis person count operation. This requires the Azure Cognitive Services Spatial analysis module to be deployed alongside the Video Analyzer module, please see https://aka.ms/ava-spatial-analysis for more information.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.SpatialAnalysisPersonCountOperation" + }, + "SpatialAnalysisPersonZoneCrossingEvent": { + "type": "object", + "properties": { + "eventType": { + "type": "string", + "description": "The event type.", + "enum": [ + "zoneCrossing", + "zoneDwellTime" + ], + "x-ms-enum": { + "name": "spatialAnalysisPersonZoneCrossingEventType", + "values": [ + { + "value": "zoneCrossing", + "description": "Zone crossing event type." + }, + { + "value": "zoneDwellTime", + "description": "Zone dwell time event type." + } + ], + "modelAsString": true + } + } + }, + "allOf": [ + { + "$ref": "#/definitions/SpatialAnalysisOperationEventBase" + } + ], + "description": "Defines a Spatial Analysis person crossing zone operation eventing configuration." + }, + "SpatialAnalysisPersonZoneCrossingZoneEvents": { + "type": "object", + "required": [ + "zone" + ], + "properties": { + "zone": { + "description": "The named zone.", + "$ref": "#/definitions/NamedPolygonBase" + }, + "events": { + "type": "array", + "description": "The event configuration.", + "items": { + "$ref": "#/definitions/SpatialAnalysisPersonZoneCrossingEvent" + } + } + } + }, + "SpatialAnalysisPersonZoneCrossingOperation": { + "type": "object", + "required": [ + "zones" + ], + "properties": { + "zones": { + "type": "array", + "description": "The list of zones with optional events.", + "items": { + "$ref": "#/definitions/SpatialAnalysisPersonZoneCrossingZoneEvents" + } + } + }, + "allOf": [ + { + "$ref": "#/definitions/SpatialAnalysisTypedOperationBase" + } + ], + "description": "Defines a Spatial Analysis person zone crossing operation. 
This requires the Azure Cognitive Services Spatial analysis module to be deployed alongside the Video Analyzer module, please see https://aka.ms/ava-spatial-analysis for more information.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.SpatialAnalysisPersonZoneCrossingOperation" + }, + "SpatialAnalysisPersonDistanceEvent": { + "type": "object", + "properties": { + "trigger": { + "type": "string", + "description": "The event trigger type.", + "enum": [ + "event", + "interval" + ], + "x-ms-enum": { + "name": "spatialAnalysisPersonDistanceEventTrigger", + "values": [ + { + "value": "event", + "description": "Event trigger." + }, + { + "value": "interval", + "description": "Interval trigger." + } + ], + "modelAsString": true + } + }, + "outputFrequency": { + "type": "string", + "description": "The event or interval output frequency." + }, + "minimumDistanceThreshold": { + "type": "string", + "description": "The minimum distance threshold" + }, + "maximumDistanceThreshold": { + "type": "string", + "description": "The maximum distance threshold" + } + }, + "allOf": [ + { + "$ref": "#/definitions/SpatialAnalysisOperationEventBase" + } + ], + "description": "Defines a Spatial Analysis person distance operation eventing configuration." + }, + "SpatialAnalysisPersonDistanceZoneEvents": { + "type": "object", + "required": [ + "zone" + ], + "properties": { + "zone": { + "description": "The named zone.", + "$ref": "#/definitions/NamedPolygonBase" + }, + "events": { + "type": "array", + "description": "The event configuration.", + "items": { + "$ref": "#/definitions/SpatialAnalysisPersonDistanceEvent" + } + } + } + }, + "SpatialAnalysisPersonDistanceOperation": { + "type": "object", + "required": [ + "zones" + ], + "properties": { + "zones": { + "type": "array", + "description": "The list of zones with optional events.", + "items": { + "$ref": "#/definitions/SpatialAnalysisPersonDistanceZoneEvents" + } + } + }, + "allOf": [ + { + "$ref": "#/definitions/SpatialAnalysisTypedOperationBase" + } + ], + "description": "Defines a Spatial Analysis person distance operation. This requires the Azure Cognitive Services Spatial analysis module to be deployed alongside the Video Analyzer module, please see https://aka.ms/ava-spatial-analysis for more information.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.SpatialAnalysisPersonDistanceOperation" + }, + "SpatialAnalysisPersonLineCrossingEvent": { + "type": "object", + "allOf": [ + { + "$ref": "#/definitions/SpatialAnalysisOperationEventBase" + } + ], + "description": "Defines a Spatial Analysis person line crossing operation eventing configuration." + }, + "SpatialAnalysisPersonLineCrossingLineEvents": { + "type": "object", + "required": [ + "line" + ], + "properties": { + "line": { + "description": "The named line.", + "$ref": "#/definitions/NamedLineBase" + }, + "events": { + "type": "array", + "description": "The event configuration.", + "items": { + "$ref": "#/definitions/SpatialAnalysisPersonLineCrossingEvent" + } + } + } + }, + "SpatialAnalysisPersonLineCrossingOperation": { + "type": "object", + "required": [ + "lines" + ], + "properties": { + "lines": { + "type": "array", + "description": "The list of lines with optional events.", + "items": { + "$ref": "#/definitions/SpatialAnalysisPersonLineCrossingLineEvents" + } + } + }, + "allOf": [ + { + "$ref": "#/definitions/SpatialAnalysisTypedOperationBase" + } + ], + "description": "Defines a Spatial Analysis person line crossing operation. 
This requires the Azure Cognitive Services Spatial analysis module to be deployed alongside the Video Analyzer module, please see https://aka.ms/ava-spatial-analysis for more information.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.SpatialAnalysisPersonLineCrossingOperation" + }, + "CognitiveServicesVisionProcessor": { + "type": "object", + "required": [ + "operation", + "endpoint" + ], + "properties": { + "endpoint": { + "description": "Endpoint to which this processor should connect.", + "$ref": "#/definitions/EndpointBase" + }, + "image": { + "description": "Describes the parameters of the image that is sent as input to the endpoint.", + "$ref": "#/definitions/ImageProperties" + }, + "samplingOptions": { + "description": "Describes the sampling options to be applied when forwarding samples to the extension.", + "$ref": "#/definitions/SamplingOptions" + }, + "operation": { + "description": "Describes the Spatial Analysis operation to be used in the Cognitive Services Vision processor.", + "$ref": "#/definitions/SpatialAnalysisOperationBase" + } + }, + "allOf": [ + { + "$ref": "#/definitions/ProcessorNodeBase" + } + ], + "description": "A processor that allows the pipeline topology to send video frames to a Cognitive Services Vision extension. Inference results are relayed to downstream nodes.", + "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.CognitiveServicesVisionProcessor" } } } diff --git a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json index 3d6aa2098cf0..7ab744ddd856 100644 --- a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json +++ b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json @@ -1,11 +1,11 @@ { "swagger": "2.0", "info": { - "description": "Direct Methods for Azure Video Analyzer on IoT Edge.", "version": "1.0.0", - "title": "Direct Methods for Azure Video Analyzer on IoT Edge", + "title": "Azure Video Analyzer for Edge", + "description": "Azure Video Analyzer helpers for IoT Edge direct method calls.", "contact": { - "email": "amshelp@microsoft.com" + "email": "videoanalyzerhelp@microsoft.com" } }, "security": [ @@ -30,12 +30,12 @@ "properties": { "methodName": { "type": "string", - "description": "method name", + "description": "Direct method method name.", "readOnly": true }, "@apiVersion": { "type": "string", - "description": "api version", + "description": "Video Analyzer API version.", "enum": [ "1.0" ], @@ -46,7 +46,7 @@ } }, "discriminator": "methodName", - "description": "Base Class for Method Requests." + "description": "Base class for direct method calls." }, "PipelineTopologySetRequest": { "type": "object", @@ -64,7 +64,7 @@ "$ref": "./AzureVideoAnalyzer.json#/definitions/PipelineTopology" } }, - "description": "Represents the pipelineTopologySet request." + "description": "Creates a new pipeline topology or updates an existing one." }, "PipelineTopologySetRequestBody": { "type": "object", @@ -76,7 +76,7 @@ "$ref": "./AzureVideoAnalyzer.json#/definitions/PipelineTopology" } ], - "description": "Represents the pipelineTopologySet request body." + "description": "Pipeline topology resource representation." 
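Based on these definitions, a pipelineTopologySet direct method payload is the pipeline topology resource itself with the `@apiVersion` property added. The sketch below is an assumption about the overall shape: the `name` and `properties` fields follow the PipelineTopology definition in AzureVideoAnalyzer.json, which is not reproduced in this hunk, and a real topology would need at least one source and one sink node rather than the empty arrays shown.

```json
{
  "@apiVersion": "1.0",
  "name": "MyPipelineTopology",
  "properties": {
    "description": "Example topology (sources, processors and sinks omitted)",
    "sources": [],
    "processors": [],
    "sinks": []
  }
}
```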
}, "LivePipelineSetRequest": { "type": "object", @@ -94,7 +94,7 @@ "$ref": "./AzureVideoAnalyzer.json#/definitions/LivePipeline" } }, - "description": "Represents the livePipelineSet request." + "description": "Creates a new live pipeline or updates an existing one." }, "LivePipelineSetRequestBody": { "type": "object", @@ -106,9 +106,9 @@ "$ref": "./AzureVideoAnalyzer.json#/definitions/LivePipeline" } ], - "description": "Represents the livePipelineSet request body." + "description": "Live pipeline resource representation." }, - "ItemNonSetRequestBase": { + "MethodRequestEmptyBodyBase": { "type": "object", "allOf": [ { @@ -121,7 +121,7 @@ "properties": { "name": { "type": "string", - "description": "method name" + "description": "Resource name." } } }, @@ -133,27 +133,27 @@ "$ref": "#/definitions/MethodRequest" } ], - "description": "Represents the pipelineTopologyList request." + "description": "List all existing pipeline topologies." }, "PipelineTopologyGetRequest": { "type": "object", "x-ms-discriminator-value": "pipelineTopologyGet", "allOf": [ { - "$ref": "#/definitions/ItemNonSetRequestBase" + "$ref": "#/definitions/MethodRequestEmptyBodyBase" } ], - "description": "Represents the pipelineTopologyGet request." + "description": "Retrieves an existing pipeline topology." }, "PipelineTopologyDeleteRequest": { "type": "object", "x-ms-discriminator-value": "pipelineTopologyDelete", "allOf": [ { - "$ref": "#/definitions/ItemNonSetRequestBase" + "$ref": "#/definitions/MethodRequestEmptyBodyBase" } ], - "description": "Represents the pipelineTopologyDelete request." + "description": "Deletes an existing pipeline topology." }, "LivePipelineListRequest": { "type": "object", @@ -163,47 +163,46 @@ "$ref": "#/definitions/MethodRequest" } ], - "description": "Represents the livePipelineList request." + "description": "List all existing live pipelines." }, "LivePipelineGetRequest": { "type": "object", "x-ms-discriminator-value": "livePipelineGet", "allOf": [ { - "$ref": "#/definitions/ItemNonSetRequestBase" + "$ref": "#/definitions/MethodRequestEmptyBodyBase" } ], - "description": "Represents the livePipelineGet request." + "description": "Retrieves an existing live pipeline." }, "LivePipelineActivateRequest": { "type": "object", "x-ms-discriminator-value": "livePipelineActivate", "allOf": [ { - "$ref": "#/definitions/ItemNonSetRequestBase" + "$ref": "#/definitions/MethodRequestEmptyBodyBase" } ], - "description": "Represents the livePipelineActivate request." + "description": "Activates an existing live pipeline." }, "LivePipelineDeactivateRequest": { "type": "object", "x-ms-discriminator-value": "livePipelineDeactivate", "allOf": [ { - "$ref": "#/definitions/ItemNonSetRequestBase" + "$ref": "#/definitions/MethodRequestEmptyBodyBase" } ], - "description": "Represents the livePipelineDeactivate request." + "description": "Deactivates an existing live pipeline." }, "LivePipelineDeleteRequest": { "type": "object", "x-ms-discriminator-value": "livePipelineDelete", "allOf": [ { - "$ref": "#/definitions/ItemNonSetRequestBase" + "$ref": "#/definitions/MethodRequestEmptyBodyBase" } ], - "description": "Represents the livePipelineDelete request." + "description": "Deletes an existing live pipeline." 
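The empty-body requests above (get, delete, activate and deactivate, for both topologies and live pipelines) all reduce to the same minimal payload: the API version plus the resource name. For example, a livePipelineActivate call might carry the body below. The method name itself is expected to travel as the IoT Hub direct method name rather than in the payload, which is consistent with `methodName` being marked readOnly, though that routing detail is an assumption not spelled out in this file.

```json
{
  "@apiVersion": "1.0",
  "name": "my-live-pipeline"
}
```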
} - } } From 86f6b22fcbda40bea06eac2c7c84cba2824a2d9c Mon Sep 17 00:00:00 2001 From: giakas Date: Wed, 28 Apr 2021 12:10:05 -0700 Subject: [PATCH 18/19] fix linting issue --- .../preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json | 1 + 1 file changed, 1 insertion(+) diff --git a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json index 7ab744ddd856..8daf4143a58f 100644 --- a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json +++ b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzerSdkDefinitions.json @@ -205,4 +205,5 @@ ], "description": "Deletes an existing live pipeline." } + } } From f062a7d0912d869acdc8ca083ce2e6af40128cc1 Mon Sep 17 00:00:00 2001 From: giakas Date: Wed, 28 Apr 2021 12:14:26 -0700 Subject: [PATCH 19/19] removed an unused definition --- .../preview/1.0.0/AzureVideoAnalyzer.json | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json index 842cc8c39c1b..7b6e7f136211 100644 --- a/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json +++ b/specification/videoanalyzer/data-plane/VideoAnalyzer.Edge/preview/1.0.0/AzureVideoAnalyzer.json @@ -493,25 +493,6 @@ "description": "TLS endpoint describes an endpoint that the pipeline can connect to over TLS transport (data is encrypted in transit).", "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.TlsEndpoint" }, - "SymmetricKeyCredentials": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "Symmetric key credential." - } - }, - "required": [ - "key" - ], - "allOf": [ - { - "$ref": "#/definitions/CredentialsBase" - } - ], - "description": "Symmetric key credential.", - "x-ms-discriminator-value": "#Microsoft.VideoAnalyzer.SymmetricKeyCredentials" - }, "CertificateSource": { "type": "object", "required": [