diff --git a/README.md b/README.md index 0ef9f84..591004d 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,7 @@ This project framework provides examples for the following services: * Using the **Bing Image Search SDK** [azure-cognitiveservices-search-imagesearch](http://pypi.python.org/pypi/azure-cognitiveservices-search-imagesearch) for the [Image Search API](https://azure.microsoft.com/services/cognitive-services/bing-image-search-api/) * Using the **Bing News Search SDK** [azure-cognitiveservices-search-newssearch](http://pypi.python.org/pypi/azure-cognitiveservices-search-newssearch) for the [News Search API](https://azure.microsoft.com/services/cognitive-services/bing-news-search-api/) * Using the **Bing Video Search SDK** [azure-cognitiveservices-search-videosearch](http://pypi.python.org/pypi/azure-cognitiveservices-search-videosearch) for the [Video Search API](https://azure.microsoft.com/services/cognitive-services/bing-video-search-api/) +* Using the **Bing Visual Search SDK** [azure-cognitiveservices-search-visualsearch](http://pypi.python.org/pypi/azure-cognitiveservices-search-visualsearch) for the [Visual Search API](https://azure.microsoft.com/services/cognitive-services/bing-visual-search-api/) * Using the **Bing Web Search SDK** [azure-cognitiveservices-search-websearch](http://pypi.python.org/pypi/azure-cognitiveservices-search-websearch) for the [Web Search API](https://azure.microsoft.com/services/cognitive-services/bing-web-search-api/) ### Vision @@ -83,6 +84,7 @@ We provide several meta-packages to help you install several packages at a time. 4. Set up the environment variable `IMAGESEARCH_SUBSCRIPTION_KEY` with your key if you want to execute ImageSearch tests. 4. Set up the environment variable `NEWSSEARCH_SUBSCRIPTION_KEY` with your key if you want to execute NewsSearch tests. 4. Set up the environment variable `VIDEOSEARCH_SUBSCRIPTION_KEY` with your key if you want to execute VideoSearch tests. +4. 
Set up the environment variable `VISUALSEARCH_SUBSCRIPTION_KEY` with your key if you want to execute VisualSearch tests. 4. Set up the environment variable `WEBSEARCH_SUBSCRIPTION_KEY` with your key if you want to execute WebSearch tests. 4. Set up the environment variable `COMPUTERVISION_SUBSCRIPTION_KEY` with your key if you want to execute Computer Vision tests. You might override too `COMPUTERVISION_LOCATION` (westcentralus by default). 4. Set up the environment variable `CONTENTMODERATOR_SUBSCRIPTION_KEY` with your key if you want to execute Content Moderator tests. You might override too `CONTENTMODERATOR_LOCATION` (westcentralus by default). diff --git a/requirements.txt b/requirements.txt index 8144a70..4f3482d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,6 +5,7 @@ azure-cognitiveservices-search-entitysearch azure-cognitiveservices-search-imagesearch azure-cognitiveservices-search-newssearch azure-cognitiveservices-search-videosearch +azure-cognitiveservices-search-visualsearch azure-cognitiveservices-search-websearch azure-cognitiveservices-vision-computervision azure-cognitiveservices-vision-contentmoderator diff --git a/samples/search/TestImages/image.jpg b/samples/search/TestImages/image.jpg new file mode 100644 index 0000000..57020d8 Binary files /dev/null and b/samples/search/TestImages/image.jpg differ diff --git a/samples/search/visual_search_samples.py b/samples/search/visual_search_samples.py new file mode 100644 index 0000000..98a7464 --- /dev/null +++ b/samples/search/visual_search_samples.py @@ -0,0 +1,202 @@ +import json +import os.path + +from azure.cognitiveservices.search.visualsearch import VisualSearchAPI +from azure.cognitiveservices.search.visualsearch.models import ( + VisualSearchRequest, + CropArea, + ImageInfo, + Filters, + KnowledgeRequest, +) +from msrest.authentication import CognitiveServicesCredentials + +SUBSCRIPTION_KEY_ENV_NAME = "VISUALSEARCH_SUBSCRIPTION_KEY" + +CWD = os.path.dirname(__file__) +TEST_IMAGES = 
os.path.join(CWD, "TestImages") + +def search_image_binary(subscription_key): + """VisualSearchImageBinary. + + This will send an image binary in the body of the post request and print out the imageInsightsToken, the number of tags, the number of actions, and the first actionType. + """ + client = VisualSearchAPI(CognitiveServicesCredentials(subscription_key)) + + image_path = os.path.join(TEST_IMAGES, "image.jpg") + with open(image_path, "rb") as image_fd: + + # You need to pass the serialized form of the model + knowledge_request = json.dumps(VisualSearchRequest().serialize()) + + print("Search visual search request with binary of dog image") + result = client.images.visual_search(image=image_fd, knowledge_request=knowledge_request) + + if not result: + print("No visual search result data.") + return + + # Visual Search results + if result.image.image_insights_token: + print("Uploaded image insights token: {}".format(result.image.image_insights_token)) + else: + print("Couldn't find image insights token!") + + # List of tags + if result.tags: + first_tag = result.tags[0] + print("Visual search tag count: {}".format(len(result.tags))) + + # List of actions in first tag + if first_tag.actions: + first_tag_action = first_tag.actions[0] + print("First tag action count: {}".format(len(first_tag.actions))) + print("First tag action type: {}".format(first_tag_action.action_type)) + else: + print("Couldn't find tag actions!") + else: + print("Couldn't find image tags!") + +def search_image_binary_with_crop_area(subscription_key): + """VisualSearchImageBinaryWithCropArea. + + This will send an image binary in the body of the post request, along with a cropArea object, and print out the imageInsightsToken, the number of tags, the number of actions, and the first actionType. 
+ """ + client = VisualSearchAPI(CognitiveServicesCredentials(subscription_key)) + + image_path = os.path.join(TEST_IMAGES, "image.jpg") + with open(image_path, "rb") as image_fd: + + crop_area = CropArea(top=0.1,bottom=0.5,left=0.1,right=0.9) + knowledge_request = VisualSearchRequest(image_info=ImageInfo(crop_area=crop_area)) + + # You need to pass the serialized form of the model + knowledge_request = json.dumps(knowledge_request.serialize()) + + print("Search visual search request with binary of dog image") + result = client.images.visual_search(image=image_fd, knowledge_request=knowledge_request) + + if not result: + print("No visual search result data.") + return + + # Visual Search results + if result.image.image_insights_token: + print("Uploaded image insights token: {}".format(result.image.image_insights_token)) + else: + print("Couldn't find image insights token!") + + # List of tags + if result.tags: + first_tag = result.tags[0] + print("Visual search tag count: {}".format(len(result.tags))) + + # List of actions in first tag + if first_tag.actions: + first_tag_action = first_tag.actions[0] + print("First tag action count: {}".format(len(first_tag.actions))) + print("First tag action type: {}".format(first_tag_action.action_type)) + else: + print("Couldn't find tag actions!") + else: + print("Couldn't find image tags!") + +def search_url_with_filters(subscription_key): + """VisualSearchUrlWithFilters. + + This will send an image url in the knowledgeRequest parameter, along with a \"site:www.bing.com\" filter, and print out the imageInsightsToken, the number of tags, the number of actions, and the first actionType. 
+ """ + client = VisualSearchAPI(CognitiveServicesCredentials(subscription_key)) + + image_url = "https://images.unsplash.com/photo-1512546148165-e50d714a565a?w=600&q=80" + filters = Filters(site="www.bing.com") + + knowledge_request = VisualSearchRequest( + image_info=ImageInfo(url=image_url), + knowledge_request=KnowledgeRequest(filters=filters) + ) + + # You need to pass the serialized form of the model + knowledge_request = json.dumps(knowledge_request.serialize()) + + print("Search visual search request with url of dog image") + result = client.images.visual_search(knowledge_request=knowledge_request) + + if not result: + print("No visual search result data.") + return + + # Visual Search results + if result.image.image_insights_token: + print("Uploaded image insights token: {}".format(result.image.image_insights_token)) + else: + print("Couldn't find image insights token!") + + # List of tags + if result.tags: + first_tag = result.tags[0] + print("Visual search tag count: {}".format(len(result.tags))) + + # List of actions in first tag + if first_tag.actions: + first_tag_action = first_tag.actions[0] + print("First tag action count: {}".format(len(first_tag.actions))) + print("First tag action type: {}".format(first_tag_action.action_type)) + else: + print("Couldn't find tag actions!") + else: + print("Couldn't find image tags!") + +def search_insights_token_with_crop_area(subscription_key): + """VisualSearchInsightsTokenWithCropArea. + + This will send an image insights token in the knowledgeRequest parameter, along with a cropArea object, and print out the imageInsightsToken, the number of tags, the number of actions, and the first actionType. 
+ """ + client = VisualSearchAPI(CognitiveServicesCredentials(subscription_key)) + + image_insights_token = "bcid_113F29C079F18F385732D8046EC80145*ccid_oV/QcH95*mid_687689FAFA449B35BC11A1AE6CEAB6F9A9B53708*thid_R.113F29C079F18F385732D8046EC80145" + crop_area = CropArea(top=0.1,bottom=0.5,left=0.1,right=0.9) + + knowledge_request = VisualSearchRequest( + image_info=ImageInfo( + image_insights_token=image_insights_token, + crop_area=crop_area + ), + ) + + # You need to pass the serialized form of the model + knowledge_request = json.dumps(knowledge_request.serialize()) + + print("Search visual search request with url of dog image") + result = client.images.visual_search(knowledge_request=knowledge_request) + + if not result: + print("No visual search result data.") + return + + # Visual Search results + if result.image.image_insights_token: + print("Uploaded image insights token: {}".format(result.image.image_insights_token)) + else: + print("Couldn't find image insights token!") + + # List of tags + if result.tags: + first_tag = result.tags[0] + print("Visual search tag count: {}".format(len(result.tags))) + + # List of actions in first tag + if first_tag.actions: + first_tag_action = first_tag.actions[0] + print("First tag action count: {}".format(len(first_tag.actions))) + print("First tag action type: {}".format(first_tag_action.action_type)) + else: + print("Couldn't find tag actions!") + else: + print("Couldn't find image tags!") + +if __name__ == "__main__": + import sys, os.path + sys.path.append(os.path.abspath(os.path.join(__file__, "..", ".."))) + from tools import execute_samples + execute_samples(globals(), SUBSCRIPTION_KEY_ENV_NAME) \ No newline at end of file