diff --git a/specification/cognitiveservices/data-plane/Common/Parameters.json b/specification/cognitiveservices/data-plane/Common/Parameters.json index 05038c300269..8831df933942 100644 --- a/specification/cognitiveservices/data-plane/Common/Parameters.json +++ b/specification/cognitiveservices/data-plane/Common/Parameters.json @@ -38,6 +38,7 @@ ], "properties": { "url": { + "description": "Publicly reachable URL of an image", "type": "string" } } diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/ComputerVision.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/ComputerVision.json index 2269e878b2f3..823aa264008c 100644 --- a/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/ComputerVision.json +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/ComputerVision.json @@ -58,7 +58,7 @@ } } }, - "/analyze?overload=url": { + "/analyze": { "post": { "description": "This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response.", "operationId": "AnalyzeImage", @@ -93,21 +93,7 @@ } }, { - "name": "language", - "in": "query", - "description": "A string indicating which language to return. The service will return recognition results in specified language. 
If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.zh - Simplified Chinese.", - "type": "string", - "required": false, - "default": "en", - "x-nullable": false, - "x-ms-enum": { - "name": "Language", - "modelAsString": false - }, - "enum": [ - "en", - "zh" - ] + "$ref": "#/parameters/ServiceLanguage" }, { "$ref": "../../../Common/Parameters.json#/parameters/ImageUrl" @@ -134,7 +120,7 @@ } } }, - "/generateThumbnail?overload=url": { + "/generateThumbnail": { "post": { "description": "This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong.", "operationId": "GenerateThumbnail", @@ -191,12 +177,12 @@ }, "x-ms-examples": { "Successful Generate Thumbnail request": { - "$ref": "./examples/SuccessfulGenerateThumbnail.json" + "$ref": "./examples/SuccessfulGenerateThumbnailWithUrl.json" } } } }, - "/ocr?overload=url": { + "/ocr": { "post": { "description": "Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. 
The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError.", "operationId": "RecognizePrintedText", @@ -238,7 +224,7 @@ } } }, - "/describe?overload=url": { + "/describe": { "post": { "description": "This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.", "operationId": "DescribeImage", @@ -257,6 +243,9 @@ "required": false, "default": "1" }, + { + "$ref": "#/parameters/ServiceLanguage" + }, { "$ref": "../../../Common/Parameters.json#/parameters/ImageUrl" } @@ -282,9 +271,9 @@ } } }, - "/tag?overload=url": { + "/tag": { "post": { - "description": "This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag “cello” may be accompanied by the hint “musical instrument”. All tags are in English.", + "description": "This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. 
Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English.", "operationId": "TagImage", "consumes": [ "application/json" @@ -293,6 +282,9 @@ "application/json" ], "parameters": [ + { + "$ref": "#/parameters/ServiceLanguage" + }, { "$ref": "../../../Common/Parameters.json#/parameters/ImageUrl" } @@ -318,7 +310,7 @@ } } }, - "/models/{model}/analyze?overload=url": { + "/models/{model}/analyze": { "post": { "description": "This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.", "operationId": "AnalyzeImageByDomain", @@ -370,9 +362,9 @@ } } }, - "/recognizeText?overload=url": { + "/recognizeText": { "post": { - "description": "Recognize Text operation. When you use the Recognize Text interface, the response contains a field called “Operation-Location”. The “Operation-Location” field contains the URL that you must use for your Get Handwritten Text Operation Result operation.", + "description": "Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. 
The 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation.", "operationId": "RecognizeText", "parameters": [ { @@ -456,7 +448,8 @@ "description": "This operation extracts a rich set of visual features based on the image content.", "operationId": "AnalyzeImageInStream", "consumes": [ - "application/octet-stream" + "application/octet-stream", + "multipart/form-data" ], "produces": [ "application/json" @@ -477,16 +470,7 @@ ] }, { - "name": "language", - "in": "query", - "description": "A string indicating which language to return. The service will return recognition results in specified language. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.zh - Simplified Chinese.", - "type": "string", - "required": false, - "default": "en", - "enum": [ - "en", - "zh" - ] + "$ref": "#/parameters/ServiceLanguage" }, { "$ref": "../../../Common/Parameters.json#/parameters/ImageStream" @@ -505,6 +489,11 @@ "$ref": "#/definitions/ComputerVisionError" } } + }, + "x-ms-examples": { + "Successful Analyze with Url request": { + "$ref": "./examples/SuccessfulAnalyzeWithStream.json" + } } } }, @@ -513,11 +502,11 @@ "description": "This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. 
If the request failed, the response contains an error code and a message to help determine what went wrong.", "operationId": "GenerateThumbnailInStream", "consumes": [ - "application/octet-stream" + "application/octet-stream", + "multipart/form-data" ], "produces": [ - "application/octet-stream", - "application/json" + "application/octet-stream" ], "parameters": [ { @@ -563,6 +552,11 @@ "$ref": "#/definitions/ComputerVisionError" } } + }, + "x-ms-examples": { + "Successful Generate Thumbnail request": { + "$ref": "./examples/SuccessfulGenerateThumbnailWithStream.json" + } } } }, @@ -571,7 +565,8 @@ "description": "Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError.", "operationId": "RecognizePrintedTextInStream", "consumes": [ - "application/octet-stream" + "application/octet-stream", + "multipart/form-data" ], "produces": [ "application/json" @@ -600,6 +595,11 @@ "$ref": "#/definitions/ComputerVisionError" } } + }, + "x-ms-examples": { + "Successful Ocr request": { + "$ref": "./examples/SuccessfulOcrWithStream.json" + } } } }, @@ -608,7 +608,8 @@ "description": "This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. 
If the request failed, the response will contain an error code and a message to help understand what went wrong.", "operationId": "DescribeImageInStream", "consumes": [ - "application/octet-stream" + "application/octet-stream", + "multipart/form-data" ], "produces": [ "application/json" @@ -622,6 +623,9 @@ "required": false, "default": "1" }, + { + "$ref": "#/parameters/ServiceLanguage" + }, { "$ref": "../../../Common/Parameters.json#/parameters/ImageStream" } @@ -639,15 +643,21 @@ "$ref": "#/definitions/ComputerVisionError" } } + }, + "x-ms-examples": { + "Successful Describe request": { + "$ref": "./examples/SuccessfulDescribeWithStream.json" + } } } }, "/tag?overload=stream": { "post": { - "description": "This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag “cello” may be accompanied by the hint “musical instrument”. All tags are in English.", + "description": "This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. 
All tags are in English.", "operationId": "TagImageInStream", "consumes": [ - "application/octet-stream" + "application/octet-stream", + "multipart/form-data" ], "produces": [ "application/json" @@ -670,6 +680,11 @@ "$ref": "#/definitions/ComputerVisionError" } } + }, + "x-ms-examples": { + "Successful Tag request": { + "$ref": "./examples/SuccessfulTagWithStream.json" + } } } }, @@ -678,7 +693,8 @@ "description": "This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.", "operationId": "AnalyzeImageByDomainInStream", "consumes": [ - "application/octet-stream" + "application/octet-stream", + "multipart/form-data" ], "produces": [ "application/json" @@ -708,12 +724,17 @@ "$ref": "#/definitions/ComputerVisionError" } } + }, + "x-ms-examples": { + "Successful Domain Model analysis request": { + "$ref": "./examples/SuccessfulDomainModelWithStream.json" + } } } }, "/recognizeText?overload=stream": { "post": { - "description": "Recognize Text operation. When you use the Recognize Text interface, the response contains a field called “Operation-Location”. The “Operation-Location” field contains the URL that you must use for your Get Handwritten Text Operation Result operation.", + "description": "Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. 
The 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation.", "operationId": "RecognizeTextInStream", "parameters": [ { @@ -745,6 +766,11 @@ "$ref": "#/definitions/ComputerVisionError" } } + }, + "x-ms-examples": { + "Successful Domain Model analysis request": { + "$ref": "./examples/SuccessfulRecognizeTextWithStream.json" + } } } } @@ -831,15 +857,12 @@ } }, "adult": { - "description": "A property scoring on whether the image is adult-oriented and/or racy.", "$ref": "#/definitions/AdultInfo" }, "color": { - "description": "A property scoring on color spectrums.", "$ref": "#/definitions/ColorInfo" }, "imageType": { - "description": "A property indicating type of image (whether it's clipart or line drawing)", "$ref": "#/definitions/ImageType" }, "tags": { @@ -850,7 +873,6 @@ } }, "description": { - "description": "Description of the image.", "$ref": "#/definitions/ImageDescriptionDetails" }, "faces": { @@ -865,7 +887,6 @@ "description": "Id of the request for tracking purposes." }, "metadata": { - "description": "Image metadata", "$ref": "#/definitions/ImageMetadata" } } @@ -975,7 +996,6 @@ "description": "Result of image analysis using a specific domain model including additional metadata.", "properties": { "result": { - "description": "Result of the image analysis.", "x-ms-client-flatten": true, "$ref": "#/definitions/DomainModelResult" }, @@ -984,7 +1004,6 @@ "description": "Id of the REST API request." }, "metadata": { - "description": "Additional image metadata", "$ref": "#/definitions/ImageMetadata" } } @@ -1027,7 +1046,6 @@ "description": "Id of the REST API request." }, "metadata": { - "description": "Image metadata", "$ref": "#/definitions/ImageMetadata" } } @@ -1055,7 +1073,6 @@ "description": "Id of the REST API request." 
}, "metadata": { - "description": "Image metadata", "$ref": "#/definitions/ImageMetadata" } } @@ -1161,6 +1178,10 @@ "gender": { "type": "string", "description": "Possible gender of the face.", + "x-ms-enum": { + "name": "Gender-", + "modelAsString": false + }, "enum": [ "Male", "Female" @@ -1256,8 +1277,6 @@ "description": "Scoring of the category." }, "detail": { - "type": "object", - "description": "Additional category detail if available.", "$ref": "#/definitions/CategoryDetail" } } @@ -1314,6 +1333,9 @@ "description": "A unique request identifier." } } + }, + "ServiceLanguage": { + "type": "string" } }, "parameters": { @@ -1398,11 +1420,27 @@ "HandwritingBoolean": { "name": "detectHandwriting", "in": "query", - "description": "If “true” is specified, handwriting recognition is performed. If this parameter is set to “false” or is not specified, printed text recognition is performed.", + "description": "If 'true' is specified, handwriting recognition is performed. If this parameter is set to 'false' or is not specified, printed text recognition is performed.", "required": false, "x-ms-parameter-location": "method", "type": "boolean", "default": false + }, + "ServiceLanguage": { + "name": "language", + "in": "query", + "description": "The desired language for output generation. 
If this parameter is not specified, the default value is \"en\".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese.", + "type": "string", + "required": false, + "x-ms-parameter-location": "method", + "x-nullable": false, + "default": "en", + "enum": [ + "en", + "ja", + "pt", + "zh" + ] } } } \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulAnalyzeWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulAnalyzeWithStream.json new file mode 100644 index 000000000000..4b711e44d75d --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulAnalyzeWithStream.json @@ -0,0 +1,118 @@ +{ + "parameters": { + "Content-Type": "application/octet-stream", + "Ocp-Apim-Subscription-Key": "{API key}", + "visualFeatures": "Categories,Adult,Tags,Description,Faces,Color,ImageType", + "details": "Celebrities", + "language": "en", + "body": "{binary}" + }, + "responses": { + "200": { + "headers": {}, + "body": { + "categories": [ + { + "name": "abstract_", + "score": 0.00390625 + }, + { + "name": "people_", + "score": 0.83984375, + "detail": { + "celebrities": [ + { + "name": "Satya Nadella", + "faceRectangle": { + "left": 597, + "top": 162, + "width": 248, + "height": 248 + }, + "confidence": 0.999028444 + } + ], + "landmarks": [ + { + "name": "Forbidden City", + "confidence": 0.9978346 + } + ] + } + } + ], + "adult": { + "isAdultContent": false, + "isRacyContent": false, + "adultScore": 0.0934349000453949, + "racyScore": 0.068613491952419281 + }, + "tags": [ + { + "name": "person", + "confidence": 0.98979085683822632 + }, + { + "name": "man", + "confidence": 0.94493889808654785 + }, + { + "name": "outdoor", + "confidence": 0.938492476940155 + }, + { + "name": "window", + "confidence": 0.89513939619064331 + } + ], + "description": { + "tags": [ + "person", + "man",
+ "outdoor", + "window", + "glasses" + ], + "captions": [ + { + "text": "Satya Nadella sitting on a bench", + "confidence": 0.48293603002174407 + } + ] + }, + "requestId": "0dbec5ad-a3d3-4f7e-96b4-dfd57efe967d", + "metadata": { + "width": 1500, + "height": 1000, + "format": "Jpeg" + }, + "faces": [ + { + "age": 44, + "gender": "Male", + "faceRectangle": { + "left": 593, + "top": 160, + "width": 250, + "height": 250 + } + } + ], + "color": { + "dominantColorForeground": "Brown", + "dominantColorBackground": "Brown", + "dominantColors": [ + "Brown", + "Black" + ], + "accentColor": "873B59", + "isBWImg": false + }, + "imageType": { + "clipArtType": 0, + "lineDrawingType": 0 + } + } + } + } +} \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulDescribeWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulDescribeWithStream.json new file mode 100644 index 000000000000..307cc72fd052 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulDescribeWithStream.json @@ -0,0 +1,43 @@ +{ + "parameters": { + "Content-Type": "application/octet-stream", + "Ocp-Apim-Subscription-Key": "{API key}", + "maxCandidates": "1", + "body": "{binary}" + }, + "responses": { + "200": { + "body": { + "description": { + "tags": [ + "person", + "man", + "outdoor", + "window", + "glasses" + ], + "captions": [ + { + "text": "Satya Nadella sitting on a bench", + "confidence": 0.48293603002174407 + }, + { + "text": "Satya Nadella is sitting on a bench", + "confidence": 0.40037006815422832 + }, + { + "text": "Satya Nadella sitting in front of a building", + "confidence": 0.38035155997373377 + } + ] + }, + "requestId": "ed2de1c6-fb55-4686-b0da-4da6e05d283f", + "metadata": { + "width": 1500, + "height": 1000, + "format": "Jpeg" + } + } + } + } +} \ No newline at end of file diff --git 
a/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulDomainModelWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulDomainModelWithStream.json new file mode 100644 index 000000000000..3ea4f78af418 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulDomainModelWithStream.json @@ -0,0 +1,34 @@ +{ + "parameters": { + "Content-Type": "application/octet-stream", + "Ocp-Apim-Subscription-Key": "{API key}", + "Model": "Celebrities", + "body": "{binary}" + }, + "responses": { + "200": { + "body": { + "requestId": "f0027b4b-dc0d-4082-9228-1545ed246b03", + "metadata": { + "width": 1500, + "height": 1000, + "format": "Jpeg" + }, + "result": { + "celebrities": [ + { + "name": "Satya Nadella", + "faceRectangle": { + "left": 597, + "top": 162, + "width": 248, + "height": 248 + }, + "confidence": 0.999028444 + } + ] + } + } + } + } +} \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulGenerateThumbnailWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulGenerateThumbnailWithStream.json new file mode 100644 index 000000000000..353d43c7e0a2 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulGenerateThumbnailWithStream.json @@ -0,0 +1,16 @@ +{ + "parameters": { + "Content-Type": "application/octet-stream", + "Ocp-Apim-Subscription-Key": "{API key}", + "width": "500", + "height": "500", + "smartCropping": true, + "body": "{binary}" + }, + "responses": { + "200": { + "headers": {}, + "body": "{Binary}" + } + } +} \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulGenerateThumbnail.json 
b/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulGenerateThumbnailWithUrl.json similarity index 100% rename from specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulGenerateThumbnail.json rename to specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulGenerateThumbnailWithUrl.json diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulOcrWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulOcrWithStream.json new file mode 100644 index 000000000000..5cd77b68f477 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulOcrWithStream.json @@ -0,0 +1,77 @@ +{ + "parameters": { + "Content-Type": "application/octet-stream", + "Ocp-Apim-Subscription-Key": "{API key}", + "detectOrientation": "true", + "language": "en", + "body": "{binary}" + }, + "responses": { + "200": { + "headers": {}, + "body": { + "language": "en", + "textAngle": -2.0000000000000338, + "orientation": "Up", + "regions": [ + { + "boundingBox": "462,379,497,258", + "lines": [ + { + "boundingBox": "462,379,497,74", + "words": [ + { + "boundingBox": "462,379,41,73", + "text": "A" + }, + { + "boundingBox": "523,379,153,73", + "text": "GOAL" + }, + { + "boundingBox": "694,379,265,74", + "text": "WITHOUT" + } + ] + }, + { + "boundingBox": "565,471,289,74", + "words": [ + { + "boundingBox": "565,471,41,73", + "text": "A" + }, + { + "boundingBox": "626,471,150,73", + "text": "PLAN" + }, + { + "boundingBox": "801,472,53,73", + "text": "IS" + } + ] + }, + { + "boundingBox": "519,563,375,74", + "words": [ + { + "boundingBox": "519,563,149,74", + "text": "JUST" + }, + { + "boundingBox": "683,564,41,72", + "text": "A" + }, + { + "boundingBox": "741,564,153,73", + "text": "WISH" + } + ] + } + ] + } + ] + } + } + } +} \ No newline at end of file diff --git 
a/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulRecognizeTextWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulRecognizeTextWithStream.json new file mode 100644 index 000000000000..1bb35b0b7cd2 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulRecognizeTextWithStream.json @@ -0,0 +1,15 @@ +{ + "parameters": { + "Content-Type": "application/octet-stream", + "Ocp-Apim-Subscription-Key": "{API key}", + "detectHandwriting": true, + "body": "{binary}" + }, + "responses": { + "202": { + "headers": { + "Operation-Location": "https://{domain}/vision/v1.0/textOperations/49a36324-fc4b-4387-aa06-090cfbf0064f" + } + } + } +} \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulTagWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulTagWithStream.json new file mode 100644 index 000000000000..ac649a3afb3d --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v1.0/examples/SuccessfulTagWithStream.json @@ -0,0 +1,53 @@ +{ + "parameters": { + "Content-Type": "application/octet-stream", + "Ocp-Apim-Subscription-Key": "{API key}", + "body": "{binary}" + }, + "responses": { + "200": { + "body": { + "tags": [ + { + "name": "grass", + "confidence": 0.9999997615814209 + }, + { + "name": "outdoor", + "confidence": 0.99997067451477051 + }, + { + "name": "sky", + "confidence": 0.99928975105285645 + }, + { + "name": "building", + "confidence": 0.99646323919296265 + }, + { + "name": "house", + "confidence": 0.99279803037643433 + }, + { + "name": "lawn", + "confidence": 0.82268029451370239 + }, + { + "name": "green", + "confidence": 0.64122253656387329 + }, + { + "name": "residential", + "confidence": 0.31403225660324097 + } + ], + "requestId": "1ad0e45e-b7b4-4be3-8042-53be96103337",
+ "metadata": { + "width": 400, + "height": 400, + "format": "Jpeg" + } + } + } + } +} \ No newline at end of file