Skip to content

Commit

Permalink
update Image Analysis SDK for face lab
Browse files Browse the repository at this point in the history
  • Loading branch information
ivorb committed Sep 6, 2024
1 parent a151845 commit 777dd79
Show file tree
Hide file tree
Showing 3 changed files with 102 additions and 125 deletions.
159 changes: 48 additions & 111 deletions Instructions/Exercises/04-face-service.md
Original file line number Diff line number Diff line change
Expand Up @@ -50,13 +50,13 @@ In this exercise, you'll complete a partially implemented client application tha
**C#**

```
dotnet add package Azure.AI.Vision.ImageAnalysis -v 0.15.1-beta.1
dotnet add package Azure.AI.Vision.ImageAnalysis -v 1.0.0-beta.1
```

**Python**

```
pip install azure-ai-vision==0.15.1b1
pip install azure-ai-vision==1.0.0b1
```

3. View the contents of the **computer-vision** folder, and note that it contains a file for configuration settings:
Expand All @@ -75,16 +75,17 @@ In this exercise, you'll complete a partially implemented client application tha
**C#**

```C#
// import namespaces
using Azure.AI.Vision.Common;
// Import namespaces
using Azure.AI.Vision.ImageAnalysis;
```

**Python**

```Python
# import namespaces
import azure.ai.vision as sdk
from azure.ai.vision.imageanalysis import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential
```

## View the image you will analyze
Expand All @@ -104,145 +105,79 @@ Now you're ready to use the SDK to call the Vision service and detect faces in a

```C#
// Authenticate Azure AI Vision client
var cvClient = new VisionServiceOptions(
aiSvcEndpoint,
ImageAnalysisClient cvClient = new ImageAnalysisClient(
new Uri(aiSvcEndpoint),
new AzureKeyCredential(aiSvcKey));
```

**Python**

```Python
# Authenticate Azure AI Vision client
cv_client = sdk.VisionServiceOptions(ai_endpoint, ai_key)
cv_client = ImageAnalysisClient(
endpoint=ai_endpoint,
credential=AzureKeyCredential(ai_key)
)
```

2. In the **Main** function, under the code you just added, note that the code specifies the path to an image file and then passes the image path to a function named **AnalyzeImage**. This function is not yet fully implemented.

3. In the **AnalyzeImage** function, under the comment **Specify features to be retrieved (PEOPLE)**, add the following code:
3. In the **AnalyzeImage** function, under the comment **Get result with specified features to be retrieved (PEOPLE)**, add the following code:

**C#**

```C#
// Specify features to be retrieved (PEOPLE)
Features =
ImageAnalysisFeature.People
// Get result with specified features to be retrieved (PEOPLE)
ImageAnalysisResult result = client.Analyze(
BinaryData.FromStream(stream),
VisualFeatures.People);
```

**Python**

```Python
# Specify features to be retrieved (PEOPLE)
analysis_options = sdk.ImageAnalysisOptions()

features = analysis_options.features = (
sdk.ImageAnalysisFeature.PEOPLE
)
# Get result with specified features to be retrieved (PEOPLE)
result = cv_client.analyze(
image_data=image_data,
visual_features=[
VisualFeatures.PEOPLE],
)
```

4. In the **AnalyzeImage** function, under the comment **Get image analysis**, add the following code:
4. In the **AnalyzeImage** function, under the comment **Draw bounding box around detected people**, add the following code:

**C#**

```C
// Get image analysis
using var imageSource = VisionSource.FromFile(imageFile);

using var analyzer = new ImageAnalyzer(serviceOptions, imageSource, analysisOptions);

var result = analyzer.Analyze();

if (result.Reason == ImageAnalysisResultReason.Analyzed)
// Draw bounding box around detected people
foreach (DetectedPerson person in result.People.Values)
{
// Get people in the image
if (result.People != null)
if (person.Confidence > 0.5)
{
Console.WriteLine($" People:");

// Prepare image for drawing
System.Drawing.Image image = System.Drawing.Image.FromFile(imageFile);
Graphics graphics = Graphics.FromImage(image);
Pen pen = new Pen(Color.Cyan, 3);
Font font = new Font("Arial", 16);
SolidBrush brush = new SolidBrush(Color.WhiteSmoke);

foreach (var person in result.People)
{
// Draw object bounding box if confidence > 50%
if (person.Confidence > 0.5)
{
// Draw object bounding box
var r = person.BoundingBox;
Rectangle rect = new Rectangle(r.X, r.Y, r.Width, r.Height);
graphics.DrawRectangle(pen, rect);

// Return the confidence of the person detected
Console.WriteLine($" Bounding box {person.BoundingBox}, Confidence {person.Confidence:0.0000}");
}
}

// Save annotated image
String output_file = "detected_people.jpg";
image.Save(output_file);
Console.WriteLine(" Results saved in " + output_file + "\n");
// Draw object bounding box
var r = person.BoundingBox;
Rectangle rect = new Rectangle(r.X, r.Y, r.Width, r.Height);
graphics.DrawRectangle(pen, rect);
}

// Return the confidence of the person detected
//Console.WriteLine($" Bounding box {person.BoundingBox.ToString()}, Confidence: {person.Confidence:F2}");
}
else
{
var errorDetails = ImageAnalysisErrorDetails.FromResult(result);
Console.WriteLine(" Analysis failed.");
Console.WriteLine($" Error reason : {errorDetails.Reason}");
Console.WriteLine($" Error code : {errorDetails.ErrorCode}");
Console.WriteLine($" Error message: {errorDetails.Message}\n");
}

```

**Python**

```Python
# Get image analysis
image = sdk.VisionSource(image_file)

image_analyzer = sdk.ImageAnalyzer(cv_client, image, analysis_options)

result = image_analyzer.analyze()

if result.reason == sdk.ImageAnalysisResultReason.ANALYZED:
# Get people in the image
if result.people is not None:
print("\nPeople in image:")

# Prepare image for drawing
image = Image.open(image_file)
fig = plt.figure(figsize=(image.width/100, image.height/100))
plt.axis('off')
draw = ImageDraw.Draw(image)
color = 'cyan'

for detected_people in result.people:
# Draw object bounding box if confidence > 50%
if detected_people.confidence > 0.5:
# Draw object bounding box
r = detected_people.bounding_box
bounding_box = ((r.x, r.y), (r.x + r.w, r.y + r.h))
draw.rectangle(bounding_box, outline=color, width=3)

# Return the confidence of the person detected
print(" {} (confidence: {:.2f}%)".format(detected_people.bounding_box, detected_people.confidence * 100))

# Save annotated image
plt.imshow(image)
plt.tight_layout(pad=0)
outputfile = 'detected_people.jpg'
fig.savefig(outputfile)
print(' Results saved in', outputfile)

else:
error_details = sdk.ImageAnalysisErrorDetails.from_result(result)
print(" Analysis failed.")
print(" Error reason: {}".format(error_details.reason))
print(" Error code: {}".format(error_details.error_code))
print(" Error message: {}".format(error_details.message))
# Draw bounding box around detected people
for detected_people in result.people.list:
if(detected_people.confidence > 0.5):
# Draw object bounding box
r = detected_people.bounding_box
bounding_box = ((r.x, r.y), (r.x + r.width, r.y + r.height))
draw.rectangle(bounding_box, outline=color, width=3)

# Return the confidence of the person detected
#print(" {} (confidence: {:.2f}%)".format(detected_people.bounding_box, detected_people.confidence * 100))
```

5. Save your changes and return to the integrated terminal for the **computer-vision** folder, and enter the following command to run the program:
Expand All @@ -260,7 +195,9 @@ Now you're ready to use the SDK to call the Vision service and detect faces in a
```

6. Observe the output, which should indicate the number of faces detected.
7. View the **detected_people.jpg** file that is generated in the same folder as your code file to see the annotated faces. In this case, your code has used the attributes of the face to label the location of the top left of the box, and the bounding box coordinates to draw a rectangle around each face.
7. View the **people.jpg** file that is generated in the same folder as your code file to see the annotated faces. In this case, your code has used the attributes of the face to label the location of the top left of the box, and the bounding box coordinates to draw a rectangle around each face.

If you'd like to see the confidence score of all people the service detected, you can uncomment the code line under the comment `Return the confidence of the person detected` and rerun the code.

## Prepare to use the Face SDK

Expand Down Expand Up @@ -477,7 +414,7 @@ with open(image_file, mode="rb") as image_data:
dotnet run
```

*The C# output may display warnings about asynchronous functions not using the **await** operator. You can ignore these.*
*The C# output may display warnings about asynchronous functions not using the **await** operator. You can ignore these.*

**Python**

Expand Down
31 changes: 26 additions & 5 deletions Labfiles/04-face/C-Sharp/computer-vision/Program.cs
Original file line number Diff line number Diff line change
Expand Up @@ -42,17 +42,38 @@ static void Main(string[] args)
}
}

static void AnalyzeImage(string imageFile, VisionServiceOptions serviceOptions)
static void AnalyzeImage(string imageFile, ImageAnalysisClient client)
{
Console.WriteLine($"\nAnalyzing {imageFile} \n");

var analysisOptions = new ImageAnalysisOptions()
// Use a file stream to pass the image data to the analyze call
using FileStream stream = new FileStream(imageFile,
FileMode.Open);

// Get result with specified features to be retrieved (PEOPLE)


// Close the stream
stream.Close();

// Get people in the image
if (result.People.Values.Count > 0)
{
// Specify features to be retrieved
Console.WriteLine($" People:");

};
// Prepare image for drawing
System.Drawing.Image image = System.Drawing.Image.FromFile(imageFile);
Graphics graphics = Graphics.FromImage(image);
Pen pen = new Pen(Color.Cyan, 3);

// Draw bounding box around detected people


// Get image analysis
// Save annotated image
String output_file = "people.jpg";
image.Save(output_file);
Console.WriteLine(" Results saved in " + output_file + "\n");
}

}

Expand Down
37 changes: 28 additions & 9 deletions Labfiles/04-face/Python/computer-vision/detect-people.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,11 @@
from dotenv import load_dotenv
import os
from array import array
from PIL import Image, ImageDraw
import sys
import time
from matplotlib import pyplot as plt
import numpy as np

# Import namespaces
# import namespaces



Expand All @@ -25,24 +23,45 @@ def main():
if len(sys.argv) > 1:
image_file = sys.argv[1]

# Authenticate Azure AI Vision client
with open(image_file, "rb") as f:
image_data = f.read()

# Authenticate Azure AI Vision client


# Analyze image
AnalyzeImage(image_file, cv_client)
AnalyzeImage(image_file, image_data, cv_client)

except Exception as ex:
print(ex)


def AnalyzeImage(image_file, cv_client):
print('\nAnalyzing', image_file)
def AnalyzeImage(filename, image_data, cv_client):
print('\nAnalyzing ', filename)

# Get result with specified features to be retrieved (PEOPLE)


# Specify features to be retrieved (PEOPLE)
# Identify people in the image
if result.people is not None:
print("\nPeople in image:")

# Prepare image for drawing
image = Image.open(filename)
fig = plt.figure(figsize=(image.width/100, image.height/100))
plt.axis('off')
draw = ImageDraw.Draw(image)
color = 'cyan'

# Get image analysis
# Draw bounding box around detected people


# Save annotated image
plt.imshow(image)
plt.tight_layout(pad=0)
outputfile = 'people.jpg'
fig.savefig(outputfile)
print(' Results saved in', outputfile)

if __name__ == "__main__":
main()

0 comments on commit 777dd79

Please sign in to comment.