diff --git a/Instructions/Exercises/04-face-service.md b/Instructions/Exercises/04-face-service.md
index 57c1e90..3632df7 100644
--- a/Instructions/Exercises/04-face-service.md
+++ b/Instructions/Exercises/04-face-service.md
@@ -50,13 +50,13 @@ In this exercise, you'll complete a partially implemented client application tha
     **C#**
 
     ```
-    dotnet add package Azure.AI.Vision.ImageAnalysis -v 0.15.1-beta.1
+    dotnet add package Azure.AI.Vision.ImageAnalysis -v 1.0.0-beta.1
     ```
 
     **Python**
 
     ```
-    pip install azure-ai-vision==0.15.1b1
+    pip install azure-ai-vision-imageanalysis==1.0.0b1
     ```
 
 3. View the contents of the **computer-vision** folder, and note that it contains a file for configuration settings:
@@ -75,8 +75,7 @@ In this exercise, you'll complete a partially implemented client application tha
     **C#**
 
     ```C#
-    // import namespaces
-    using Azure.AI.Vision.Common;
+    // Import namespaces
     using Azure.AI.Vision.ImageAnalysis;
     ```
 
     **Python**
 
     ```Python
     # import namespaces
-    import azure.ai.vision as sdk
+    from azure.ai.vision.imageanalysis import ImageAnalysisClient
+    from azure.ai.vision.imageanalysis.models import VisualFeatures
+    from azure.core.credentials import AzureKeyCredential
     ```
 
 ## View the image you will analyze
@@ -104,8 +105,8 @@ Now you're ready to use the SDK to call the Vision service and detect faces in a
     ```C#
     // Authenticate Azure AI Vision client
-    var cvClient = new VisionServiceOptions(
-        aiSvcEndpoint,
+    ImageAnalysisClient cvClient = new ImageAnalysisClient(
+        new Uri(aiSvcEndpoint),
         new AzureKeyCredential(aiSvcKey));
     ```
 
@@ -113,136 +114,70 @@ Now you're ready to use the SDK to call the Vision service and detect faces in a
     ```Python
     # Authenticate Azure AI Vision client
-    cv_client = sdk.VisionServiceOptions(ai_endpoint, ai_key)
+    cv_client = ImageAnalysisClient(
+        endpoint=ai_endpoint,
+        credential=AzureKeyCredential(ai_key)
+    )
     ```
 
 2. In the **Main** function, under the code you just added, note that the code specifies the path to an image file and then passes the image path to a function named **AnalyzeImage**. This function is not yet fully implemented.
 
-3. In the **AnalyzeImage** function, under the comment **Specify features to be retrieved (PEOPLE)**, add the following code:
+3. In the **AnalyzeImage** function, under the comment **Get result with specified features to be retrieved (PEOPLE)**, add the following code:
 
     **C#**
 
     ```C#
-    // Specify features to be retrieved (PEOPLE)
-    Features =
-        ImageAnalysisFeature.People
+    // Get result with specified features to be retrieved (PEOPLE)
+    ImageAnalysisResult result = client.Analyze(
+        BinaryData.FromStream(stream),
+        VisualFeatures.People);
    ```
 
     **Python**
 
    ```Python
-    # Specify features to be retrieved (PEOPLE)
-    analysis_options = sdk.ImageAnalysisOptions()
-
-    features = analysis_options.features = (
-        sdk.ImageAnalysisFeature.PEOPLE
-    )
+    # Get result with specified features to be retrieved (PEOPLE)
+    result = cv_client.analyze(
+        image_data=image_data,
+        visual_features=[
+            VisualFeatures.PEOPLE],
+    )
    ```
 
-4. In the **AnalyzeImage** function, under the comment **Get image analysis**, add the following code:
+4. In the **AnalyzeImage** function, under the comment **Draw bounding box around detected people**, add the following code:
 
     **C#**
 
    ```C
-    // Get image analysis
-    using var imageSource = VisionSource.FromFile(imageFile);
-
-    using var analyzer = new ImageAnalyzer(serviceOptions, imageSource, analysisOptions);
-
-    var result = analyzer.Analyze();
-
-    if (result.Reason == ImageAnalysisResultReason.Analyzed)
+    // Draw bounding box around detected people
+    foreach (DetectedPerson person in result.People.Values)
     {
-        // Get people in the image
-        if (result.People != null)
+        if (person.Confidence > 0.5)
         {
-            Console.WriteLine($" People:");
-
-            // Prepare image for drawing
-            System.Drawing.Image image = System.Drawing.Image.FromFile(imageFile);
-            Graphics graphics = Graphics.FromImage(image);
-            Pen pen = new Pen(Color.Cyan, 3);
-            Font font = new Font("Arial", 16);
-            SolidBrush brush = new SolidBrush(Color.WhiteSmoke);
-
-            foreach (var person in result.People)
-            {
-                // Draw object bounding box if confidence > 50%
-                if (person.Confidence > 0.5)
-                {
-                    // Draw object bounding box
-                    var r = person.BoundingBox;
-                    Rectangle rect = new Rectangle(r.X, r.Y, r.Width, r.Height);
-                    graphics.DrawRectangle(pen, rect);
-
-                    // Return the confidence of the person detected
-                    Console.WriteLine($" Bounding box {person.BoundingBox}, Confidence {person.Confidence:0.0000}");
-                }
-            }
-
-            // Save annotated image
-            String output_file = "detected_people.jpg";
-            image.Save(output_file);
-            Console.WriteLine(" Results saved in " + output_file + "\n");
+            // Draw object bounding box
+            var r = person.BoundingBox;
+            Rectangle rect = new Rectangle(r.X, r.Y, r.Width, r.Height);
+            graphics.DrawRectangle(pen, rect);
         }
+
+        // Return the confidence of the person detected
+        //Console.WriteLine($" Bounding box {person.BoundingBox.ToString()}, Confidence: {person.Confidence:F2}");
     }
-    else
-    {
-        var errorDetails = ImageAnalysisErrorDetails.FromResult(result);
-        Console.WriteLine(" Analysis failed.");
-        Console.WriteLine($" Error reason : {errorDetails.Reason}");
-        Console.WriteLine($" Error code : {errorDetails.ErrorCode}");
-        Console.WriteLine($" Error message: {errorDetails.Message}\n");
-    }
-
    ```
 
     **Python**
 
    ```Python
-    # Get image analysis
-    image = sdk.VisionSource(image_file)
-
-    image_analyzer = sdk.ImageAnalyzer(cv_client, image, analysis_options)
-
-    result = image_analyzer.analyze()
-
-    if result.reason == sdk.ImageAnalysisResultReason.ANALYZED:
-        # Get people in the image
-        if result.people is not None:
-            print("\nPeople in image:")
-
-            # Prepare image for drawing
-            image = Image.open(image_file)
-            fig = plt.figure(figsize=(image.width/100, image.height/100))
-            plt.axis('off')
-            draw = ImageDraw.Draw(image)
-            color = 'cyan'
-
-            for detected_people in result.people:
-                # Draw object bounding box if confidence > 50%
-                if detected_people.confidence > 0.5:
-                    # Draw object bounding box
-                    r = detected_people.bounding_box
-                    bounding_box = ((r.x, r.y), (r.x + r.w, r.y + r.h))
-                    draw.rectangle(bounding_box, outline=color, width=3)
-
-                    # Return the confidence of the person detected
-                    print(" {} (confidence: {:.2f}%)".format(detected_people.bounding_box, detected_people.confidence * 100))
-
-            # Save annotated image
-            plt.imshow(image)
-            plt.tight_layout(pad=0)
-            outputfile = 'detected_people.jpg'
-            fig.savefig(outputfile)
-            print(' Results saved in', outputfile)
-
-        else:
-            error_details = sdk.ImageAnalysisErrorDetails.from_result(result)
-            print(" Analysis failed.")
-            print(" Error reason: {}".format(error_details.reason))
{}".format(error_details.error_code)) - print(" Error message: {}".format(error_details.message)) + # Draw bounding box around detected people + for detected_people in result.people.list: + if(detected_people.confidence > 0.5): + # Draw object bounding box + r = detected_people.bounding_box + bounding_box = ((r.x, r.y), (r.x + r.width, r.y + r.height)) + draw.rectangle(bounding_box, outline=color, width=3) + + # Return the confidence of the person detected + #print(" {} (confidence: {:.2f}%)".format(detected_people.bounding_box, detected_people.confidence * 100)) ``` 5. Save your changes and return to the integrated terminal for the **computer-vision** folder, and enter the following command to run the program: @@ -260,7 +195,9 @@ Now you're ready to use the SDK to call the Vision service and detect faces in a ``` 6. Observe the output, which should indicate the number of faces detected. -7. View the **detected_people.jpg** file that is generated in the same folder as your code file to see the annotated faces. In this case, your code has used the attributes of the face to label the location of the top left of the box, and the bounding box coordinates to draw a rectangle around each face. +7. View the **people.jpg** file that is generated in the same folder as your code file to see the annotated faces. In this case, your code has used the attributes of the face to label the location of the top left of the box, and the bounding box coordinates to draw a rectangle around each face. + +If you'd like to see the confidence score of all people the service detected, you can uncomment the code line under the comment `Return the confidence of the person detected` and rerun the code. ## Prepare to use the Face SDK @@ -477,7 +414,7 @@ with open(image_file, mode="rb") as image_data: dotnet run ``` - *The C# output may display warnings about asynchronous functions now using the **await** operator. You can ignore these.* + *The C# output may display warnings about asynchronous functions not using the **await** operator. 
 
 ## Prepare to use the Face SDK
 
@@ -477,7 +414,7 @@ with open(image_file, mode="rb") as image_data:
     dotnet run
    ```
 
-    *The C# output may display warnings about asynchronous functions now using the **await** operator. You can ignore these.*
+    *The C# output may display warnings about asynchronous functions not using the **await** operator. You can ignore these.*
 
     **Python**
 
diff --git a/Labfiles/04-face/C-Sharp/computer-vision/Program.cs b/Labfiles/04-face/C-Sharp/computer-vision/Program.cs
index 5d584fd..8c909a3 100644
--- a/Labfiles/04-face/C-Sharp/computer-vision/Program.cs
+++ b/Labfiles/04-face/C-Sharp/computer-vision/Program.cs
@@ -42,17 +42,38 @@ static void Main(string[] args)
         }
     }
 
-    static void AnalyzeImage(string imageFile, VisionServiceOptions serviceOptions)
+    static void AnalyzeImage(string imageFile, ImageAnalysisClient client)
     {
         Console.WriteLine($"\nAnalyzing {imageFile} \n");
 
-        var analysisOptions = new ImageAnalysisOptions()
+        // Use a file stream to pass the image data to the analyze call
+        using FileStream stream = new FileStream(imageFile,
+            FileMode.Open);
+
+        // Get result with specified features to be retrieved (PEOPLE)
+
+
+        // Close the stream
+        stream.Close();
+
+        // Get people in the image
+        if (result.People.Values.Count > 0)
         {
-            // Specify features to be retrieved
+            Console.WriteLine($" People:");
 
-        };
+            // Prepare image for drawing
+            System.Drawing.Image image = System.Drawing.Image.FromFile(imageFile);
+            Graphics graphics = Graphics.FromImage(image);
+            Pen pen = new Pen(Color.Cyan, 3);
 
+            // Draw bounding box around detected people
+
 
-        // Get image analysis
+            // Save annotated image
+            String output_file = "people.jpg";
+            image.Save(output_file);
+            Console.WriteLine(" Results saved in " + output_file + "\n");
+        }
     }
diff --git a/Labfiles/04-face/Python/computer-vision/detect-people.py b/Labfiles/04-face/Python/computer-vision/detect-people.py
index 206f3ae..078f3f3 100644
--- a/Labfiles/04-face/Python/computer-vision/detect-people.py
+++ b/Labfiles/04-face/Python/computer-vision/detect-people.py
@@ -1,13 +1,11 @@
 from dotenv import load_dotenv
 import os
-from array import array
 from PIL import Image, ImageDraw
 import sys
-import time
 from matplotlib import pyplot as plt
 import numpy as np
 
-# Import namespaces
+# import namespaces
 
 
@@ -25,24 +23,45 @@ def main():
         if len(sys.argv) > 1:
             image_file = sys.argv[1]
 
-        # Authenticate Azure AI Vision client
+        with open(image_file, "rb") as f:
+            image_data = f.read()
 
+        # Authenticate Azure AI Vision client
+
 
         # Analyze image
-        AnalyzeImage(image_file, cv_client)
+        AnalyzeImage(image_file, image_data, cv_client)
 
     except Exception as ex:
         print(ex)
 
-def AnalyzeImage(image_file, cv_client):
-    print('\nAnalyzing', image_file)
+def AnalyzeImage(filename, image_data, cv_client):
+    print('\nAnalyzing ', filename)
+
+    # Get result with specified features to be retrieved (PEOPLE)
+
 
-    # Specify features to be retrieved (PEOPLE)
+    # Identify people in the image
+    if result.people is not None:
+        print("\nPeople in image:")
 
+        # Prepare image for drawing
+        image = Image.open(filename)
+        fig = plt.figure(figsize=(image.width/100, image.height/100))
+        plt.axis('off')
+        draw = ImageDraw.Draw(image)
+        color = 'cyan'
 
-    # Get image analysis
+        # Draw bounding box around detected people
+
 
+        # Save annotated image
+        plt.imshow(image)
+        plt.tight_layout(pad=0)
+        outputfile = 'people.jpg'
+        fig.savefig(outputfile)
+        print(' Results saved in', outputfile)
 
 if __name__ == "__main__":
     main()
\ No newline at end of file
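
For reference, once the snippets from the instructions are pasted into the placeholders above, the completed **AnalyzeImage** function in Program.cs should look roughly like this. It is a sketch assembled from the fragments shown in this patch, not necessarily the exact lab solution:

```C#
// Rough assembly of the completed AnalyzeImage function (illustrative)
static void AnalyzeImage(string imageFile, ImageAnalysisClient client)
{
    Console.WriteLine($"\nAnalyzing {imageFile} \n");

    // Use a file stream to pass the image data to the analyze call
    using FileStream stream = new FileStream(imageFile,
        FileMode.Open);

    // Get result with specified features to be retrieved (PEOPLE)
    ImageAnalysisResult result = client.Analyze(
        BinaryData.FromStream(stream),
        VisualFeatures.People);

    // Close the stream
    stream.Close();

    // Get people in the image
    if (result.People.Values.Count > 0)
    {
        Console.WriteLine($" People:");

        // Prepare image for drawing
        System.Drawing.Image image = System.Drawing.Image.FromFile(imageFile);
        Graphics graphics = Graphics.FromImage(image);
        Pen pen = new Pen(Color.Cyan, 3);

        // Draw bounding box around detected people
        foreach (DetectedPerson person in result.People.Values)
        {
            if (person.Confidence > 0.5)
            {
                var r = person.BoundingBox;
                graphics.DrawRectangle(pen, new Rectangle(r.X, r.Y, r.Width, r.Height));
            }
        }

        // Save annotated image
        String output_file = "people.jpg";
        image.Save(output_file);
        Console.WriteLine(" Results saved in " + output_file + "\n");
    }
}
```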