DetectPersonGcs.java
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package video;
// [START video_detect_person_gcs]
import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1.DetectedAttribute;
import com.google.cloud.videointelligence.v1.DetectedLandmark;
import com.google.cloud.videointelligence.v1.Feature;
import com.google.cloud.videointelligence.v1.PersonDetectionAnnotation;
import com.google.cloud.videointelligence.v1.PersonDetectionConfig;
import com.google.cloud.videointelligence.v1.TimestampedObject;
import com.google.cloud.videointelligence.v1.Track;
import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
import com.google.cloud.videointelligence.v1.VideoContext;
import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1.VideoSegment;

public class DetectPersonGcs {

  public static void detectPersonGcs() throws Exception {
    // TODO(developer): Replace these variables before running the sample.
    String gcsUri = "gs://cloud-samples-data/video/googlework_short.mp4";
    detectPersonGcs(gcsUri);
  }

  // Detects people in a video stored in Google Cloud Storage using
  // the Cloud Video Intelligence API.
  public static void detectPersonGcs(String gcsUri) throws Exception {
    try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
        VideoIntelligenceServiceClient.create()) {
      // Configure person detection options for the request.
      PersonDetectionConfig personDetectionConfig =
          PersonDetectionConfig.newBuilder()
              // Must set includeBoundingBoxes to true to get poses and attributes.
              .setIncludeBoundingBoxes(true)
              .setIncludePoseLandmarks(true)
              .setIncludeAttributes(true)
              .build();
      VideoContext videoContext =
          VideoContext.newBuilder().setPersonDetectionConfig(personDetectionConfig).build();

      AnnotateVideoRequest request =
          AnnotateVideoRequest.newBuilder()
              .setInputUri(gcsUri)
              .addFeatures(Feature.PERSON_DETECTION)
              .setVideoContext(videoContext)
              .build();

      // Detects people in a video
      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
          videoIntelligenceServiceClient.annotateVideoAsync(request);

      System.out.println("Waiting for operation to complete...");
      AnnotateVideoResponse response = future.get();

      // Get the first response, since we sent only one video.
      VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);

      // Annotations for the list of people detected, tracked, and recognized in the video.
      for (PersonDetectionAnnotation personDetectionAnnotation :
          annotationResult.getPersonDetectionAnnotationsList()) {
        System.out.print("Person detected:\n");
        for (Track track : personDetectionAnnotation.getTracksList()) {
          VideoSegment segment = track.getSegment();
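          // The printf calls below show each offset as whole seconds followed by the
          // nanosecond component converted to milliseconds (getNanos() / 1e6, with the
          // decimals dropped by "%.0f").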
          System.out.printf(
              "\tStart: %d.%.0fs\n",
              segment.getStartTimeOffset().getSeconds(),
              segment.getStartTimeOffset().getNanos() / 1e6);
          System.out.printf(
              "\tEnd: %d.%.0fs\n",
              segment.getEndTimeOffset().getSeconds(),
              segment.getEndTimeOffset().getNanos() / 1e6);
          // Each segment includes timestamped objects that include characteristics of the
          // person detected, e.g. clothes and posture. Grab the first timestamped object.
          TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);

          // Attributes include unique pieces of clothing and poses of the person detected.
          for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
            System.out.printf(
                "\tAttribute: %s; Value: %s\n", attribute.getName(), attribute.getValue());
          }

          // Landmarks in person detection include body parts.
          for (DetectedLandmark attribute : firstTimestampedObject.getLandmarksList()) {
            System.out.printf(
                "\tLandmark: %s; Vertex: %f, %f\n",
                attribute.getName(), attribute.getPoint().getX(), attribute.getPoint().getY());
          }
        }
      }
    }
  }
}
// [END video_detect_person_gcs]
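
// Example invocation (a minimal sketch, assuming the google-cloud-video-intelligence
// client library is on the classpath and Application Default Credentials are available):
//
//   DetectPersonGcs.detectPersonGcs("gs://cloud-samples-data/video/googlework_short.mp4");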