aws.rekognition.StreamProcessor
Explore with Pulumi AI
Resource for managing an AWS Rekognition Stream Processor.
This resource must be configured specifically for your use case, and not all options are compatible with one another. See Stream Processor API documentation for configuration information.
Stream Processors configured for Face Recognition cannot have any properties updated after creation; attempting to update one results in an AWS API error.
Example Usage
Label Detection
// Label-detection (connected-home) example: Rekognition analyzes video from a
// Kinesis Video Stream, writing results to S3 and notifying via SNS.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
// S3 bucket that receives the stream processor's output.
const example = new aws.s3.BucketV2("example", {bucket: "example-bucket"});
// SNS topic for stream processor status notifications.
const exampleTopic = new aws.sns.Topic("example", {name: "example-topic"});
// Kinesis Video Stream used as the processor's video input.
const exampleVideoStream = new aws.kinesis.VideoStream("example", {
    name: "example-kinesis-input",
    dataRetentionInHours: 1,
    deviceName: "kinesis-video-device-name",
    mediaType: "video/h264",
});
// IAM role assumed by Rekognition; the inline policy grants write access to
// the bucket and topic, and read access to the video stream.
const exampleRole = new aws.iam.Role("example", {
    name: "example-role",
    inlinePolicies: [{
        name: "Rekognition-Access",
        policy: pulumi.jsonStringify({
            Version: "2012-10-17",
            Statement: [
                {
                    Action: ["s3:PutObject"],
                    Effect: "Allow",
                    Resource: [pulumi.interpolate`${example.arn}/*`],
                },
                {
                    Action: ["sns:Publish"],
                    Effect: "Allow",
                    Resource: [exampleTopic.arn],
                },
                {
                    Action: [
                        "kinesis:Get*",
                        "kinesis:DescribeStreamSummary",
                    ],
                    Effect: "Allow",
                    Resource: [exampleVideoStream.arn],
                },
            ],
        }),
    }],
    // Trust policy allowing the Rekognition service to assume this role.
    assumeRolePolicy: JSON.stringify({
        Version: "2012-10-17",
        Statement: [{
            Action: "sts:AssumeRole",
            Effect: "Allow",
            Principal: {
                Service: "rekognition.amazonaws.com",
            },
        }],
    }),
});
// The stream processor itself; the labels to detect are listed under
// settings.connectedHome.labels.
const exampleStreamProcessor = new aws.rekognition.StreamProcessor("example", {
    roleArn: exampleRole.arn,
    name: "example-processor",
    dataSharingPreference: {
        optIn: false,
    },
    output: {
        s3Destination: {
            bucket: example.bucket,
        },
    },
    settings: {
        connectedHome: {
            labels: [
                "PERSON",
                "PET",
            ],
        },
    },
    input: {
        kinesisVideoStream: {
            arn: exampleVideoStream.arn,
        },
    },
    notificationChannel: {
        snsTopicArn: exampleTopic.arn,
    },
});
# Label-detection (connected-home) example: Rekognition analyzes video from a
# Kinesis Video Stream, writing results to S3 and notifying via SNS.
import pulumi
import json
import pulumi_aws as aws
# S3 bucket that receives the stream processor's output.
example = aws.s3.BucketV2("example", bucket="example-bucket")
# SNS topic for stream processor status notifications.
example_topic = aws.sns.Topic("example", name="example-topic")
# Kinesis Video Stream used as the processor's video input.
example_video_stream = aws.kinesis.VideoStream("example",
    name="example-kinesis-input",
    data_retention_in_hours=1,
    device_name="kinesis-video-device-name",
    media_type="video/h264")
# IAM role assumed by Rekognition; the inline policy grants write access to
# the bucket and topic, and read access to the video stream.
example_role = aws.iam.Role("example",
    name="example-role",
    inline_policies=[{
        "name": "Rekognition-Access",
        "policy": pulumi.Output.json_dumps({
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Action": ["s3:PutObject"],
                    "Effect": "Allow",
                    "Resource": [example.arn.apply(lambda arn: f"{arn}/*")],
                },
                {
                    "Action": ["sns:Publish"],
                    "Effect": "Allow",
                    "Resource": [example_topic.arn],
                },
                {
                    "Action": [
                        "kinesis:Get*",
                        "kinesis:DescribeStreamSummary",
                    ],
                    "Effect": "Allow",
                    "Resource": [example_video_stream.arn],
                },
            ],
        }),
    }],
    # Trust policy allowing the Rekognition service to assume this role.
    assume_role_policy=json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Action": "sts:AssumeRole",
            "Effect": "Allow",
            "Principal": {
                "Service": "rekognition.amazonaws.com",
            },
        }],
    }))
# The stream processor itself; the labels to detect are listed under
# settings["connected_home"]["labels"].
example_stream_processor = aws.rekognition.StreamProcessor("example",
    role_arn=example_role.arn,
    name="example-processor",
    data_sharing_preference={
        "opt_in": False,
    },
    output={
        "s3_destination": {
            "bucket": example.bucket,
        },
    },
    settings={
        "connected_home": {
            "labels": [
                "PERSON",
                "PET",
            ],
        },
    },
    input={
        "kinesis_video_stream": {
            "arn": example_video_stream.arn,
        },
    },
    notification_channel={
        "sns_topic_arn": example_topic.arn,
    })
// Label-detection (connected-home) example: Rekognition analyzes video from a
// Kinesis Video Stream, writing results to S3 and notifying via SNS.
package main
import (
	"encoding/json"
	"fmt"
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam"
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/rekognition"
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3"
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/sns"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// S3 bucket that receives the stream processor's output.
		example, err := s3.NewBucketV2(ctx, "example", &s3.BucketV2Args{
			Bucket: pulumi.String("example-bucket"),
		})
		if err != nil {
			return err
		}
		// SNS topic for stream processor status notifications.
		exampleTopic, err := sns.NewTopic(ctx, "example", &sns.TopicArgs{
			Name: pulumi.String("example-topic"),
		})
		if err != nil {
			return err
		}
		// Kinesis Video Stream used as the processor's video input.
		exampleVideoStream, err := kinesis.NewVideoStream(ctx, "example", &kinesis.VideoStreamArgs{
			Name:                 pulumi.String("example-kinesis-input"),
			DataRetentionInHours: pulumi.Int(1),
			DeviceName:           pulumi.String("kinesis-video-device-name"),
			MediaType:            pulumi.String("video/h264"),
		})
		if err != nil {
			return err
		}
		// Trust policy allowing the Rekognition service to assume the role.
		tmpJSON0, err := json.Marshal(map[string]interface{}{
			"Version": "2012-10-17",
			"Statement": []map[string]interface{}{
				map[string]interface{}{
					"Action": "sts:AssumeRole",
					"Effect": "Allow",
					"Principal": map[string]interface{}{
						"Service": "rekognition.amazonaws.com",
					},
				},
			},
		})
		if err != nil {
			return err
		}
		json0 := string(tmpJSON0)
		// IAM role assumed by Rekognition; the inline policy grants write
		// access to the bucket and topic, and read access to the video stream.
		// The policy JSON is built inside ApplyT because it interpolates
		// resource ARNs that are only known after deployment.
		exampleRole, err := iam.NewRole(ctx, "example", &iam.RoleArgs{
			Name: pulumi.String("example-role"),
			InlinePolicies: iam.RoleInlinePolicyArray{
				&iam.RoleInlinePolicyArgs{
					Name: pulumi.String("Rekognition-Access"),
					Policy: pulumi.All(example.Arn, exampleTopic.Arn, exampleVideoStream.Arn).ApplyT(func(_args []interface{}) (string, error) {
						exampleArn := _args[0].(string)
						exampleTopicArn := _args[1].(string)
						exampleVideoStreamArn := _args[2].(string)
						var _zero string
						tmpJSON1, err := json.Marshal(map[string]interface{}{
							"Version": "2012-10-17",
							"Statement": []interface{}{
								map[string]interface{}{
									"Action": []string{
										"s3:PutObject",
									},
									"Effect": "Allow",
									"Resource": []string{
										fmt.Sprintf("%v/*", exampleArn),
									},
								},
								map[string]interface{}{
									"Action": []string{
										"sns:Publish",
									},
									"Effect": "Allow",
									"Resource": []string{
										exampleTopicArn,
									},
								},
								map[string]interface{}{
									"Action": []string{
										"kinesis:Get*",
										"kinesis:DescribeStreamSummary",
									},
									"Effect": "Allow",
									"Resource": []string{
										exampleVideoStreamArn,
									},
								},
							},
						})
						if err != nil {
							return _zero, err
						}
						json1 := string(tmpJSON1)
						return json1, nil
					}).(pulumi.StringOutput),
				},
			},
			AssumeRolePolicy: pulumi.String(json0),
		})
		if err != nil {
			return err
		}
		// The stream processor itself; the labels to detect are listed under
		// Settings.ConnectedHome.Labels.
		_, err = rekognition.NewStreamProcessor(ctx, "example", &rekognition.StreamProcessorArgs{
			RoleArn: exampleRole.Arn,
			Name:    pulumi.String("example-processor"),
			DataSharingPreference: &rekognition.StreamProcessorDataSharingPreferenceArgs{
				OptIn: pulumi.Bool(false),
			},
			Output: &rekognition.StreamProcessorOutputTypeArgs{
				S3Destination: &rekognition.StreamProcessorOutputS3DestinationArgs{
					Bucket: example.Bucket,
				},
			},
			Settings: &rekognition.StreamProcessorSettingsArgs{
				ConnectedHome: &rekognition.StreamProcessorSettingsConnectedHomeArgs{
					Labels: pulumi.StringArray{
						pulumi.String("PERSON"),
						pulumi.String("PET"),
					},
				},
			},
			Input: &rekognition.StreamProcessorInputTypeArgs{
				KinesisVideoStream: &rekognition.StreamProcessorInputKinesisVideoStreamArgs{
					Arn: exampleVideoStream.Arn,
				},
			},
			NotificationChannel: &rekognition.StreamProcessorNotificationChannelArgs{
				SnsTopicArn: exampleTopic.Arn,
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Label-detection (connected-home) example: Rekognition analyzes video from a
// Kinesis Video Stream, writing results to S3 and notifying via SNS.
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() => 
{
    // S3 bucket that receives the stream processor's output.
    var example = new Aws.S3.BucketV2("example", new()
    {
        Bucket = "example-bucket",
    });
    // SNS topic for stream processor status notifications.
    var exampleTopic = new Aws.Sns.Topic("example", new()
    {
        Name = "example-topic",
    });
    // Kinesis Video Stream used as the processor's video input.
    var exampleVideoStream = new Aws.Kinesis.VideoStream("example", new()
    {
        Name = "example-kinesis-input",
        DataRetentionInHours = 1,
        DeviceName = "kinesis-video-device-name",
        MediaType = "video/h264",
    });
    // IAM role assumed by Rekognition; the inline policy grants write access
    // to the bucket and topic, and read access to the video stream.
    var exampleRole = new Aws.Iam.Role("example", new()
    {
        Name = "example-role",
        InlinePolicies = new[]
        {
            new Aws.Iam.Inputs.RoleInlinePolicyArgs
            {
                Name = "Rekognition-Access",
                Policy = Output.JsonSerialize(Output.Create(new Dictionary<string, object?>
                {
                    ["Version"] = "2012-10-17",
                    ["Statement"] = new[]
                    {
                        new Dictionary<string, object?>
                        {
                            ["Action"] = new[]
                            {
                                "s3:PutObject",
                            },
                            ["Effect"] = "Allow",
                            ["Resource"] = new[]
                            {
                                example.Arn.Apply(arn => $"{arn}/*"),
                            },
                        },
                        new Dictionary<string, object?>
                        {
                            ["Action"] = new[]
                            {
                                "sns:Publish",
                            },
                            ["Effect"] = "Allow",
                            ["Resource"] = new[]
                            {
                                exampleTopic.Arn,
                            },
                        },
                        new Dictionary<string, object?>
                        {
                            ["Action"] = new[]
                            {
                                "kinesis:Get*",
                                "kinesis:DescribeStreamSummary",
                            },
                            ["Effect"] = "Allow",
                            ["Resource"] = new[]
                            {
                                exampleVideoStream.Arn,
                            },
                        },
                    },
                })),
            },
        },
        // Trust policy allowing the Rekognition service to assume this role.
        AssumeRolePolicy = JsonSerializer.Serialize(new Dictionary<string, object?>
        {
            ["Version"] = "2012-10-17",
            ["Statement"] = new[]
            {
                new Dictionary<string, object?>
                {
                    ["Action"] = "sts:AssumeRole",
                    ["Effect"] = "Allow",
                    ["Principal"] = new Dictionary<string, object?>
                    {
                        ["Service"] = "rekognition.amazonaws.com",
                    },
                },
            },
        }),
    });
    // The stream processor itself; the labels to detect are listed under
    // Settings.ConnectedHome.Labels.
    var exampleStreamProcessor = new Aws.Rekognition.StreamProcessor("example", new()
    {
        RoleArn = exampleRole.Arn,
        Name = "example-processor",
        DataSharingPreference = new Aws.Rekognition.Inputs.StreamProcessorDataSharingPreferenceArgs
        {
            OptIn = false,
        },
        Output = new Aws.Rekognition.Inputs.StreamProcessorOutputArgs
        {
            S3Destination = new Aws.Rekognition.Inputs.StreamProcessorOutputS3DestinationArgs
            {
                Bucket = example.Bucket,
            },
        },
        Settings = new Aws.Rekognition.Inputs.StreamProcessorSettingsArgs
        {
            ConnectedHome = new Aws.Rekognition.Inputs.StreamProcessorSettingsConnectedHomeArgs
            {
                Labels = new[]
                {
                    "PERSON",
                    "PET",
                },
            },
        },
        Input = new Aws.Rekognition.Inputs.StreamProcessorInputArgs
        {
            KinesisVideoStream = new Aws.Rekognition.Inputs.StreamProcessorInputKinesisVideoStreamArgs
            {
                Arn = exampleVideoStream.Arn,
            },
        },
        NotificationChannel = new Aws.Rekognition.Inputs.StreamProcessorNotificationChannelArgs
        {
            SnsTopicArn = exampleTopic.Arn,
        },
    });
});
// Label-detection (connected-home) example: Rekognition analyzes video from a
// Kinesis Video Stream, writing results to S3 and notifying via SNS.
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.s3.BucketV2;
import com.pulumi.aws.s3.BucketV2Args;
import com.pulumi.aws.sns.Topic;
import com.pulumi.aws.sns.TopicArgs;
import com.pulumi.aws.kinesis.VideoStream;
import com.pulumi.aws.kinesis.VideoStreamArgs;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.iam.inputs.RoleInlinePolicyArgs;
import com.pulumi.aws.rekognition.StreamProcessor;
import com.pulumi.aws.rekognition.StreamProcessorArgs;
import com.pulumi.aws.rekognition.inputs.StreamProcessorDataSharingPreferenceArgs;
import com.pulumi.aws.rekognition.inputs.StreamProcessorOutputArgs;
import com.pulumi.aws.rekognition.inputs.StreamProcessorOutputS3DestinationArgs;
import com.pulumi.aws.rekognition.inputs.StreamProcessorSettingsArgs;
import com.pulumi.aws.rekognition.inputs.StreamProcessorSettingsConnectedHomeArgs;
import com.pulumi.aws.rekognition.inputs.StreamProcessorInputArgs;
import com.pulumi.aws.rekognition.inputs.StreamProcessorInputKinesisVideoStreamArgs;
import com.pulumi.aws.rekognition.inputs.StreamProcessorNotificationChannelArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        // S3 bucket that receives the stream processor's output.
        var example = new BucketV2("example", BucketV2Args.builder()
            .bucket("example-bucket")
            .build());
        // SNS topic for stream processor status notifications.
        var exampleTopic = new Topic("exampleTopic", TopicArgs.builder()
            .name("example-topic")
            .build());
        // Kinesis Video Stream used as the processor's video input.
        var exampleVideoStream = new VideoStream("exampleVideoStream", VideoStreamArgs.builder()
            .name("example-kinesis-input")
            .dataRetentionInHours(1)
            .deviceName("kinesis-video-device-name")
            .mediaType("video/h264")
            .build());
        // IAM role assumed by Rekognition; the inline policy grants write
        // access to the bucket and topic, and read access to the video stream.
        // The policy JSON is built inside applyValue because it interpolates
        // resource ARNs that are only known after deployment.
        var exampleRole = new Role("exampleRole", RoleArgs.builder()
            .name("example-role")
            .inlinePolicies(RoleInlinePolicyArgs.builder()
                .name("Rekognition-Access")
                .policy(Output.tuple(example.arn(), exampleTopic.arn(), exampleVideoStream.arn()).applyValue(values -> {
                    var exampleArn = values.t1;
                    var exampleTopicArn = values.t2;
                    var exampleVideoStreamArn = values.t3;
                    return serializeJson(
                        jsonObject(
                            jsonProperty("Version", "2012-10-17"),
                            jsonProperty("Statement", jsonArray(
                                jsonObject(
                                    jsonProperty("Action", jsonArray("s3:PutObject")),
                                    jsonProperty("Effect", "Allow"),
                                    jsonProperty("Resource", jsonArray(String.format("%s/*", exampleArn)))
                                ), 
                                jsonObject(
                                    jsonProperty("Action", jsonArray("sns:Publish")),
                                    jsonProperty("Effect", "Allow"),
                                    jsonProperty("Resource", jsonArray(exampleTopicArn))
                                ), 
                                jsonObject(
                                    jsonProperty("Action", jsonArray(
                                        "kinesis:Get*", 
                                        "kinesis:DescribeStreamSummary"
                                    )),
                                    jsonProperty("Effect", "Allow"),
                                    jsonProperty("Resource", jsonArray(exampleVideoStreamArn))
                                )
                            ))
                        ));
                }))
                .build())
            // Trust policy allowing the Rekognition service to assume this role.
            .assumeRolePolicy(serializeJson(
                jsonObject(
                    jsonProperty("Version", "2012-10-17"),
                    jsonProperty("Statement", jsonArray(jsonObject(
                        jsonProperty("Action", "sts:AssumeRole"),
                        jsonProperty("Effect", "Allow"),
                        jsonProperty("Principal", jsonObject(
                            jsonProperty("Service", "rekognition.amazonaws.com")
                        ))
                    )))
                )))
            .build());
        // The stream processor itself; the labels to detect are listed under
        // settings.connectedHome.labels.
        var exampleStreamProcessor = new StreamProcessor("exampleStreamProcessor", StreamProcessorArgs.builder()
            .roleArn(exampleRole.arn())
            .name("example-processor")
            .dataSharingPreference(StreamProcessorDataSharingPreferenceArgs.builder()
                .optIn(false)
                .build())
            .output(StreamProcessorOutputArgs.builder()
                .s3Destination(StreamProcessorOutputS3DestinationArgs.builder()
                    .bucket(example.bucket())
                    .build())
                .build())
            .settings(StreamProcessorSettingsArgs.builder()
                .connectedHome(StreamProcessorSettingsConnectedHomeArgs.builder()
                    .labels(                    
                        "PERSON",
                        "PET")
                    .build())
                .build())
            .input(StreamProcessorInputArgs.builder()
                .kinesisVideoStream(StreamProcessorInputKinesisVideoStreamArgs.builder()
                    .arn(exampleVideoStream.arn())
                    .build())
                .build())
            .notificationChannel(StreamProcessorNotificationChannelArgs.builder()
                .snsTopicArn(exampleTopic.arn())
                .build())
            .build());
    }
}
# Label-detection (connected-home) example: Rekognition analyzes video from a
# Kinesis Video Stream, writing results to S3 and notifying via SNS.
resources:
  # S3 bucket that receives the stream processor's output.
  example:
    type: aws:s3:BucketV2
    properties:
      bucket: example-bucket
  # SNS topic for stream processor status notifications.
  exampleTopic:
    type: aws:sns:Topic
    name: example
    properties:
      name: example-topic
  # Kinesis Video Stream used as the processor's video input.
  exampleVideoStream:
    type: aws:kinesis:VideoStream
    name: example
    properties:
      name: example-kinesis-input
      dataRetentionInHours: 1
      deviceName: kinesis-video-device-name
      mediaType: video/h264
  # IAM role assumed by Rekognition; the inline policy grants write access to
  # the bucket and topic, and read access to the video stream.
  exampleRole:
    type: aws:iam:Role
    name: example
    properties:
      name: example-role
      inlinePolicies:
        - name: Rekognition-Access
          policy:
            fn::toJSON:
              Version: 2012-10-17
              Statement:
                - Action:
                    - s3:PutObject
                  Effect: Allow
                  Resource:
                    - ${example.arn}/*
                - Action:
                    - sns:Publish
                  Effect: Allow
                  Resource:
                    - ${exampleTopic.arn}
                - Action:
                    - kinesis:Get*
                    - kinesis:DescribeStreamSummary
                  Effect: Allow
                  Resource:
                    - ${exampleVideoStream.arn}
      # Trust policy allowing the Rekognition service to assume this role.
      assumeRolePolicy:
        fn::toJSON:
          Version: 2012-10-17
          Statement:
            - Action: sts:AssumeRole
              Effect: Allow
              Principal:
                Service: rekognition.amazonaws.com
  # The stream processor itself; the labels to detect are listed under
  # settings.connectedHome.labels.
  exampleStreamProcessor:
    type: aws:rekognition:StreamProcessor
    name: example
    properties:
      roleArn: ${exampleRole.arn}
      name: example-processor
      dataSharingPreference:
        optIn: false
      output:
        s3Destination:
          bucket: ${example.bucket}
      settings:
        connectedHome:
          labels:
            - PERSON
            - PET
      input:
        kinesisVideoStream:
          arn: ${exampleVideoStream.arn}
      notificationChannel:
        snsTopicArn: ${exampleTopic.arn}
Face Detection Usage
// Face-search example: Rekognition matches faces from a Kinesis Video Stream
// against a collection, writing results to a Kinesis Data Stream.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
// Kinesis Video Stream used as the processor's video input.
const example = new aws.kinesis.VideoStream("example", {
    name: "example-kinesis-input",
    dataRetentionInHours: 1,
    deviceName: "kinesis-video-device-name",
    mediaType: "video/h264",
});
// Kinesis Data Stream that receives the processor's output.
const exampleStream = new aws.kinesis.Stream("example", {
    name: "pulumi-kinesis-example",
    shardCount: 1,
});
// IAM role assumed by Rekognition; the inline policy grants read access to
// the video stream and write access to the data stream.
const exampleRole = new aws.iam.Role("example", {
    name: "example-role",
    inlinePolicies: [{
        name: "Rekognition-Access",
        policy: pulumi.jsonStringify({
            Version: "2012-10-17",
            Statement: [
                {
                    Action: [
                        "kinesis:Get*",
                        "kinesis:DescribeStreamSummary",
                    ],
                    Effect: "Allow",
                    Resource: [example.arn],
                },
                {
                    Action: ["kinesis:PutRecord"],
                    Effect: "Allow",
                    Resource: [exampleStream.arn],
                },
            ],
        }),
    }],
    // Trust policy allowing the Rekognition service to assume this role.
    assumeRolePolicy: JSON.stringify({
        Version: "2012-10-17",
        Statement: [{
            Action: "sts:AssumeRole",
            Effect: "Allow",
            Principal: {
                Service: "rekognition.amazonaws.com",
            },
        }],
    }),
});
// Face collection the processor searches against.
const exampleCollection = new aws.rekognition.Collection("example", {collectionId: "example-collection"});
const exampleStreamProcessor = new aws.rekognition.StreamProcessor("example", {
    roleArn: exampleRole.arn,
    name: "example-processor",
    dataSharingPreference: {
        optIn: false,
    },
    // Region of interest within the frame, given as polygon vertices.
    // NOTE(review): all three vertices are identical here, which describes a
    // degenerate polygon — these look like placeholder values; confirm before
    // reusing verbatim.
    regionsOfInterests: [{
        polygons: [
            {
                x: 0.5,
                y: 0.5,
            },
            {
                x: 0.5,
                y: 0.5,
            },
            {
                x: 0.5,
                y: 0.5,
            },
        ],
    }],
    input: {
        kinesisVideoStream: {
            arn: example.arn,
        },
    },
    output: {
        kinesisDataStream: {
            arn: exampleStream.arn,
        },
    },
    settings: {
        faceSearch: {
            collectionId: exampleCollection.id,
        },
    },
});
# Face-search example: Rekognition matches faces from a Kinesis Video Stream
# against a collection, writing results to a Kinesis Data Stream.
import pulumi
import json
import pulumi_aws as aws
# Kinesis Video Stream used as the processor's video input.
example = aws.kinesis.VideoStream("example",
    name="example-kinesis-input",
    data_retention_in_hours=1,
    device_name="kinesis-video-device-name",
    media_type="video/h264")
# Kinesis Data Stream that receives the processor's output.
example_stream = aws.kinesis.Stream("example",
    name="pulumi-kinesis-example",
    shard_count=1)
# IAM role assumed by Rekognition; the inline policy grants read access to
# the video stream and write access to the data stream.
example_role = aws.iam.Role("example",
    name="example-role",
    inline_policies=[{
        "name": "Rekognition-Access",
        "policy": pulumi.Output.json_dumps({
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Action": [
                        "kinesis:Get*",
                        "kinesis:DescribeStreamSummary",
                    ],
                    "Effect": "Allow",
                    "Resource": [example.arn],
                },
                {
                    "Action": ["kinesis:PutRecord"],
                    "Effect": "Allow",
                    "Resource": [example_stream.arn],
                },
            ],
        }),
    }],
    # Trust policy allowing the Rekognition service to assume this role.
    assume_role_policy=json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Action": "sts:AssumeRole",
            "Effect": "Allow",
            "Principal": {
                "Service": "rekognition.amazonaws.com",
            },
        }],
    }))
# Face collection the processor searches against.
example_collection = aws.rekognition.Collection("example", collection_id="example-collection")
example_stream_processor = aws.rekognition.StreamProcessor("example",
    role_arn=example_role.arn,
    name="example-processor",
    data_sharing_preference={
        "opt_in": False,
    },
    # Region of interest within the frame, given as polygon vertices.
    # NOTE(review): all three vertices are identical here, which describes a
    # degenerate polygon — these look like placeholder values; confirm before
    # reusing verbatim.
    regions_of_interests=[{
        "polygons": [
            {
                "x": 0.5,
                "y": 0.5,
            },
            {
                "x": 0.5,
                "y": 0.5,
            },
            {
                "x": 0.5,
                "y": 0.5,
            },
        ],
    }],
    input={
        "kinesis_video_stream": {
            "arn": example.arn,
        },
    },
    output={
        "kinesis_data_stream": {
            "arn": example_stream.arn,
        },
    },
    settings={
        "face_search": {
            "collection_id": example_collection.id,
        },
    })
// Face-search example: Rekognition matches faces from a Kinesis Video Stream
// against a collection, writing results to a Kinesis Data Stream.
package main
import (
	"encoding/json"
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam"
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/rekognition"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Kinesis Video Stream used as the processor's video input.
		example, err := kinesis.NewVideoStream(ctx, "example", &kinesis.VideoStreamArgs{
			Name:                 pulumi.String("example-kinesis-input"),
			DataRetentionInHours: pulumi.Int(1),
			DeviceName:           pulumi.String("kinesis-video-device-name"),
			MediaType:            pulumi.String("video/h264"),
		})
		if err != nil {
			return err
		}
		// Kinesis Data Stream that receives the processor's output.
		exampleStream, err := kinesis.NewStream(ctx, "example", &kinesis.StreamArgs{
			Name:       pulumi.String("pulumi-kinesis-example"),
			ShardCount: pulumi.Int(1),
		})
		if err != nil {
			return err
		}
		// Trust policy allowing the Rekognition service to assume the role.
		tmpJSON0, err := json.Marshal(map[string]interface{}{
			"Version": "2012-10-17",
			"Statement": []map[string]interface{}{
				map[string]interface{}{
					"Action": "sts:AssumeRole",
					"Effect": "Allow",
					"Principal": map[string]interface{}{
						"Service": "rekognition.amazonaws.com",
					},
				},
			},
		})
		if err != nil {
			return err
		}
		json0 := string(tmpJSON0)
		// IAM role assumed by Rekognition; the inline policy grants read
		// access to the video stream and write access to the data stream.
		// The policy JSON is built inside ApplyT because it interpolates
		// resource ARNs that are only known after deployment.
		exampleRole, err := iam.NewRole(ctx, "example", &iam.RoleArgs{
			Name: pulumi.String("example-role"),
			InlinePolicies: iam.RoleInlinePolicyArray{
				&iam.RoleInlinePolicyArgs{
					Name: pulumi.String("Rekognition-Access"),
					Policy: pulumi.All(example.Arn, exampleStream.Arn).ApplyT(func(_args []interface{}) (string, error) {
						exampleArn := _args[0].(string)
						exampleStreamArn := _args[1].(string)
						var _zero string
						tmpJSON1, err := json.Marshal(map[string]interface{}{
							"Version": "2012-10-17",
							"Statement": []interface{}{
								map[string]interface{}{
									"Action": []string{
										"kinesis:Get*",
										"kinesis:DescribeStreamSummary",
									},
									"Effect": "Allow",
									"Resource": []string{
										exampleArn,
									},
								},
								map[string]interface{}{
									"Action": []string{
										"kinesis:PutRecord",
									},
									"Effect": "Allow",
									"Resource": []string{
										exampleStreamArn,
									},
								},
							},
						})
						if err != nil {
							return _zero, err
						}
						json1 := string(tmpJSON1)
						return json1, nil
					}).(pulumi.StringOutput),
				},
			},
			AssumeRolePolicy: pulumi.String(json0),
		})
		if err != nil {
			return err
		}
		// Face collection the processor searches against.
		exampleCollection, err := rekognition.NewCollection(ctx, "example", &rekognition.CollectionArgs{
			CollectionId: pulumi.String("example-collection"),
		})
		if err != nil {
			return err
		}
		_, err = rekognition.NewStreamProcessor(ctx, "example", &rekognition.StreamProcessorArgs{
			RoleArn: exampleRole.Arn,
			Name:    pulumi.String("example-processor"),
			DataSharingPreference: &rekognition.StreamProcessorDataSharingPreferenceArgs{
				OptIn: pulumi.Bool(false),
			},
			// Region of interest within the frame, given as polygon vertices.
			// NOTE(review): all three vertices are identical here, which
			// describes a degenerate polygon — these look like placeholder
			// values; confirm before reusing verbatim.
			RegionsOfInterests: rekognition.StreamProcessorRegionsOfInterestArray{
				&rekognition.StreamProcessorRegionsOfInterestArgs{
					Polygons: rekognition.StreamProcessorRegionsOfInterestPolygonArray{
						&rekognition.StreamProcessorRegionsOfInterestPolygonArgs{
							X: pulumi.Float64(0.5),
							Y: pulumi.Float64(0.5),
						},
						&rekognition.StreamProcessorRegionsOfInterestPolygonArgs{
							X: pulumi.Float64(0.5),
							Y: pulumi.Float64(0.5),
						},
						&rekognition.StreamProcessorRegionsOfInterestPolygonArgs{
							X: pulumi.Float64(0.5),
							Y: pulumi.Float64(0.5),
						},
					},
				},
			},
			Input: &rekognition.StreamProcessorInputTypeArgs{
				KinesisVideoStream: &rekognition.StreamProcessorInputKinesisVideoStreamArgs{
					Arn: example.Arn,
				},
			},
			Output: &rekognition.StreamProcessorOutputTypeArgs{
				KinesisDataStream: &rekognition.StreamProcessorOutputKinesisDataStreamArgs{
					Arn: exampleStream.Arn,
				},
			},
			Settings: &rekognition.StreamProcessorSettingsArgs{
				FaceSearch: &rekognition.StreamProcessorSettingsFaceSearchArgs{
					CollectionId: exampleCollection.ID(),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Aws = Pulumi.Aws;
// Face-search example: streams a Kinesis video feed through Rekognition,
// matching faces against a collection and writing results to a Kinesis data stream.
return await Deployment.RunAsync(() => 
{
    // Kinesis video stream that supplies the footage to analyze.
    var example = new Aws.Kinesis.VideoStream("example", new()
    {
        Name = "example-kinesis-input",
        DataRetentionInHours = 1,
        DeviceName = "kinesis-video-device-name",
        MediaType = "video/h264",
    });
    // Kinesis data stream that receives the face-search results.
    var exampleStream = new Aws.Kinesis.Stream("example", new()
    {
        Name = "pulumi-kinesis-example",
        ShardCount = 1,
    });
    // IAM role assumed by Rekognition: read from the video stream,
    // write results to the data stream.
    var exampleRole = new Aws.Iam.Role("example", new()
    {
        Name = "example-role",
        InlinePolicies = new[]
        {
            new Aws.Iam.Inputs.RoleInlinePolicyArgs
            {
                Name = "Rekognition-Access",
                // Output.JsonSerialize is used (instead of JsonSerializer) because
                // the policy embeds resource ARNs that resolve at deploy time.
                Policy = Output.JsonSerialize(Output.Create(new Dictionary<string, object?>
                {
                    ["Version"] = "2012-10-17",
                    ["Statement"] = new[]
                    {
                        new Dictionary<string, object?>
                        {
                            ["Action"] = new[]
                            {
                                "kinesis:Get*",
                                "kinesis:DescribeStreamSummary",
                            },
                            ["Effect"] = "Allow",
                            ["Resource"] = new[]
                            {
                                example.Arn,
                            },
                        },
                        new Dictionary<string, object?>
                        {
                            ["Action"] = new[]
                            {
                                "kinesis:PutRecord",
                            },
                            ["Effect"] = "Allow",
                            ["Resource"] = new[]
                            {
                                exampleStream.Arn,
                            },
                        },
                    },
                })),
            },
        },
        // Trust policy allowing the Rekognition service to assume this role.
        AssumeRolePolicy = JsonSerializer.Serialize(new Dictionary<string, object?>
        {
            ["Version"] = "2012-10-17",
            ["Statement"] = new[]
            {
                new Dictionary<string, object?>
                {
                    ["Action"] = "sts:AssumeRole",
                    ["Effect"] = "Allow",
                    ["Principal"] = new Dictionary<string, object?>
                    {
                        ["Service"] = "rekognition.amazonaws.com",
                    },
                },
            },
        }),
    });
    // Face collection the stream processor searches against.
    var exampleCollection = new Aws.Rekognition.Collection("example", new()
    {
        CollectionId = "example-collection",
    });
    // NOTE: per the resource docs above, face-search stream processors cannot
    // have properties updated after creation without an AWS API error.
    var exampleStreamProcessor = new Aws.Rekognition.StreamProcessor("example", new()
    {
        RoleArn = exampleRole.Arn,
        Name = "example-processor",
        DataSharingPreference = new Aws.Rekognition.Inputs.StreamProcessorDataSharingPreferenceArgs
        {
            OptIn = false,
        },
        RegionsOfInterests = new[]
        {
            new Aws.Rekognition.Inputs.StreamProcessorRegionsOfInterestArgs
            {
                Polygons = new[]
                {
                    new Aws.Rekognition.Inputs.StreamProcessorRegionsOfInterestPolygonArgs
                    {
                        X = 0.5,
                        Y = 0.5,
                    },
                    new Aws.Rekognition.Inputs.StreamProcessorRegionsOfInterestPolygonArgs
                    {
                        X = 0.5,
                        Y = 0.5,
                    },
                    new Aws.Rekognition.Inputs.StreamProcessorRegionsOfInterestPolygonArgs
                    {
                        X = 0.5,
                        Y = 0.5,
                    },
                },
            },
        },
        Input = new Aws.Rekognition.Inputs.StreamProcessorInputArgs
        {
            KinesisVideoStream = new Aws.Rekognition.Inputs.StreamProcessorInputKinesisVideoStreamArgs
            {
                Arn = example.Arn,
            },
        },
        Output = new Aws.Rekognition.Inputs.StreamProcessorOutputArgs
        {
            KinesisDataStream = new Aws.Rekognition.Inputs.StreamProcessorOutputKinesisDataStreamArgs
            {
                Arn = exampleStream.Arn,
            },
        },
        Settings = new Aws.Rekognition.Inputs.StreamProcessorSettingsArgs
        {
            FaceSearch = new Aws.Rekognition.Inputs.StreamProcessorSettingsFaceSearchArgs
            {
                CollectionId = exampleCollection.Id,
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.kinesis.VideoStream;
import com.pulumi.aws.kinesis.VideoStreamArgs;
import com.pulumi.aws.kinesis.Stream;
import com.pulumi.aws.kinesis.StreamArgs;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.iam.inputs.RoleInlinePolicyArgs;
import com.pulumi.aws.rekognition.Collection;
import com.pulumi.aws.rekognition.CollectionArgs;
import com.pulumi.aws.rekognition.StreamProcessor;
import com.pulumi.aws.rekognition.StreamProcessorArgs;
import com.pulumi.aws.rekognition.inputs.StreamProcessorDataSharingPreferenceArgs;
import com.pulumi.aws.rekognition.inputs.StreamProcessorRegionsOfInterestArgs;
import com.pulumi.aws.rekognition.inputs.StreamProcessorRegionsOfInterestPolygonArgs;
import com.pulumi.aws.rekognition.inputs.StreamProcessorInputArgs;
import com.pulumi.aws.rekognition.inputs.StreamProcessorInputKinesisVideoStreamArgs;
import com.pulumi.aws.rekognition.inputs.StreamProcessorOutputArgs;
import com.pulumi.aws.rekognition.inputs.StreamProcessorOutputKinesisDataStreamArgs;
import com.pulumi.aws.rekognition.inputs.StreamProcessorSettingsArgs;
import com.pulumi.aws.rekognition.inputs.StreamProcessorSettingsFaceSearchArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
// Face-search example: Kinesis video stream in, Kinesis data stream out,
// Rekognition stream processor matching faces against a collection.
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        // Kinesis video stream that supplies the footage to analyze.
        var example = new VideoStream("example", VideoStreamArgs.builder()
            .name("example-kinesis-input")
            .dataRetentionInHours(1)
            .deviceName("kinesis-video-device-name")
            .mediaType("video/h264")
            .build());
        // Kinesis data stream that receives the face-search results.
        var exampleStream = new Stream("exampleStream", StreamArgs.builder()
            .name("pulumi-kinesis-example")
            .shardCount(1)
            .build());
        // IAM role assumed by Rekognition: read from the video stream,
        // write results to the data stream.
        var exampleRole = new Role("exampleRole", RoleArgs.builder()
            .name("example-role")
            .inlinePolicies(RoleInlinePolicyArgs.builder()
                .name("Rekognition-Access")
                // Output.tuple resolves both ARNs before the policy is serialized.
                .policy(Output.tuple(example.arn(), exampleStream.arn()).applyValue(values -> {
                    var exampleArn = values.t1;
                    var exampleStreamArn = values.t2;
                    return serializeJson(
                        jsonObject(
                            jsonProperty("Version", "2012-10-17"),
                            jsonProperty("Statement", jsonArray(
                                jsonObject(
                                    jsonProperty("Action", jsonArray(
                                        "kinesis:Get*", 
                                        "kinesis:DescribeStreamSummary"
                                    )),
                                    jsonProperty("Effect", "Allow"),
                                    jsonProperty("Resource", jsonArray(exampleArn))
                                ), 
                                jsonObject(
                                    jsonProperty("Action", jsonArray("kinesis:PutRecord")),
                                    jsonProperty("Effect", "Allow"),
                                    jsonProperty("Resource", jsonArray(exampleStreamArn))
                                )
                            ))
                        ));
                }))
                .build())
            // Trust policy letting the Rekognition service assume this role.
            .assumeRolePolicy(serializeJson(
                jsonObject(
                    jsonProperty("Version", "2012-10-17"),
                    jsonProperty("Statement", jsonArray(jsonObject(
                        jsonProperty("Action", "sts:AssumeRole"),
                        jsonProperty("Effect", "Allow"),
                        jsonProperty("Principal", jsonObject(
                            jsonProperty("Service", "rekognition.amazonaws.com")
                        ))
                    )))
                )))
            .build());
        // Face collection the stream processor searches against.
        var exampleCollection = new Collection("exampleCollection", CollectionArgs.builder()
            .collectionId("example-collection")
            .build());
        var exampleStreamProcessor = new StreamProcessor("exampleStreamProcessor", StreamProcessorArgs.builder()
            .roleArn(exampleRole.arn())
            .name("example-processor")
            .dataSharingPreference(StreamProcessorDataSharingPreferenceArgs.builder()
                .optIn(false)
                .build())
            .regionsOfInterests(StreamProcessorRegionsOfInterestArgs.builder()
                .polygons(                
                    StreamProcessorRegionsOfInterestPolygonArgs.builder()
                        .x(0.5)
                        .y(0.5)
                        .build(),
                    StreamProcessorRegionsOfInterestPolygonArgs.builder()
                        .x(0.5)
                        .y(0.5)
                        .build(),
                    StreamProcessorRegionsOfInterestPolygonArgs.builder()
                        .x(0.5)
                        .y(0.5)
                        .build())
                .build())
            .input(StreamProcessorInputArgs.builder()
                .kinesisVideoStream(StreamProcessorInputKinesisVideoStreamArgs.builder()
                    .arn(example.arn())
                    .build())
                .build())
            .output(StreamProcessorOutputArgs.builder()
                .kinesisDataStream(StreamProcessorOutputKinesisDataStreamArgs.builder()
                    .arn(exampleStream.arn())
                    .build())
                .build())
            .settings(StreamProcessorSettingsArgs.builder()
                .faceSearch(StreamProcessorSettingsFaceSearchArgs.builder()
                    .collectionId(exampleCollection.id())
                    .build())
                .build())
            .build());
    }
}
# Face-search example: Kinesis video stream in, Kinesis data stream out,
# matching against a Rekognition face collection.
resources:
  # Kinesis video stream that supplies the footage to analyze.
  example:
    type: aws:kinesis:VideoStream
    properties:
      name: example-kinesis-input
      dataRetentionInHours: 1
      deviceName: kinesis-video-device-name
      mediaType: video/h264
  # Kinesis data stream that receives the face-search results.
  exampleStream:
    type: aws:kinesis:Stream
    name: example
    properties:
      name: pulumi-kinesis-example
      shardCount: 1
  # IAM role assumed by Rekognition: read from the video stream,
  # write results to the data stream.
  exampleRole:
    type: aws:iam:Role
    name: example
    properties:
      name: example-role
      inlinePolicies:
        - name: Rekognition-Access
          policy:
            fn::toJSON:
              Version: 2012-10-17
              Statement:
                - Action:
                    - kinesis:Get*
                    - kinesis:DescribeStreamSummary
                  Effect: Allow
                  Resource:
                    - ${example.arn}
                - Action:
                    - kinesis:PutRecord
                  Effect: Allow
                  Resource:
                    - ${exampleStream.arn}
      # Trust policy letting the Rekognition service assume this role.
      assumeRolePolicy:
        fn::toJSON:
          Version: 2012-10-17
          Statement:
            - Action: sts:AssumeRole
              Effect: Allow
              Principal:
                Service: rekognition.amazonaws.com
  # Face collection the stream processor searches against.
  exampleCollection:
    type: aws:rekognition:Collection
    name: example
    properties:
      collectionId: example-collection
  exampleStreamProcessor:
    type: aws:rekognition:StreamProcessor
    name: example
    properties:
      roleArn: ${exampleRole.arn}
      name: example-processor
      dataSharingPreference:
        optIn: false
      regionsOfInterests:
        - polygons:
            - x: 0.5
              y: 0.5
            - x: 0.5
              y: 0.5
            - x: 0.5
              y: 0.5
      input:
        kinesisVideoStream:
          arn: ${example.arn}
      output:
        kinesisDataStream:
          arn: ${exampleStream.arn}
      settings:
        faceSearch:
          collectionId: ${exampleCollection.id}
Create StreamProcessor Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new StreamProcessor(name: string, args: StreamProcessorArgs, opts?: CustomResourceOptions);
@overload
def StreamProcessor(resource_name: str,
                    args: StreamProcessorArgs,
                    opts: Optional[ResourceOptions] = None)
@overload
def StreamProcessor(resource_name: str,
                    opts: Optional[ResourceOptions] = None,
                    role_arn: Optional[str] = None,
                    data_sharing_preference: Optional[StreamProcessorDataSharingPreferenceArgs] = None,
                    input: Optional[StreamProcessorInputArgs] = None,
                    kms_key_id: Optional[str] = None,
                    name: Optional[str] = None,
                    notification_channel: Optional[StreamProcessorNotificationChannelArgs] = None,
                    output: Optional[StreamProcessorOutputArgs] = None,
                    regions_of_interests: Optional[Sequence[StreamProcessorRegionsOfInterestArgs]] = None,
                    settings: Optional[StreamProcessorSettingsArgs] = None,
                    tags: Optional[Mapping[str, str]] = None,
                    timeouts: Optional[StreamProcessorTimeoutsArgs] = None)
func NewStreamProcessor(ctx *Context, name string, args StreamProcessorArgs, opts ...ResourceOption) (*StreamProcessor, error)
public StreamProcessor(string name, StreamProcessorArgs args, CustomResourceOptions? opts = null)
public StreamProcessor(String name, StreamProcessorArgs args)
public StreamProcessor(String name, StreamProcessorArgs args, CustomResourceOptions options)
type: aws:rekognition:StreamProcessor
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args StreamProcessorArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args StreamProcessorArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args StreamProcessorArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args StreamProcessorArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args StreamProcessorArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
// Reference example using placeholder values for every StreamProcessor input.
// Note: not all options are compatible with one another — see the resource docs.
var streamProcessorResource = new Aws.Rekognition.StreamProcessor("streamProcessorResource", new()
{
    RoleArn = "string",
    DataSharingPreference = new Aws.Rekognition.Inputs.StreamProcessorDataSharingPreferenceArgs
    {
        OptIn = false,
    },
    Input = new Aws.Rekognition.Inputs.StreamProcessorInputArgs
    {
        KinesisVideoStream = new Aws.Rekognition.Inputs.StreamProcessorInputKinesisVideoStreamArgs
        {
            Arn = "string",
        },
    },
    KmsKeyId = "string",
    Name = "string",
    NotificationChannel = new Aws.Rekognition.Inputs.StreamProcessorNotificationChannelArgs
    {
        SnsTopicArn = "string",
    },
    Output = new Aws.Rekognition.Inputs.StreamProcessorOutputArgs
    {
        KinesisDataStream = new Aws.Rekognition.Inputs.StreamProcessorOutputKinesisDataStreamArgs
        {
            Arn = "string",
        },
        S3Destination = new Aws.Rekognition.Inputs.StreamProcessorOutputS3DestinationArgs
        {
            Bucket = "string",
            KeyPrefix = "string",
        },
    },
    RegionsOfInterests = new[]
    {
        new Aws.Rekognition.Inputs.StreamProcessorRegionsOfInterestArgs
        {
            Polygons = new[]
            {
                new Aws.Rekognition.Inputs.StreamProcessorRegionsOfInterestPolygonArgs
                {
                    X = 0,
                    Y = 0,
                },
            },
            BoundingBox = new Aws.Rekognition.Inputs.StreamProcessorRegionsOfInterestBoundingBoxArgs
            {
                Height = 0,
                Left = 0,
                Top = 0,
                Width = 0,
            },
        },
    },
    Settings = new Aws.Rekognition.Inputs.StreamProcessorSettingsArgs
    {
        ConnectedHome = new Aws.Rekognition.Inputs.StreamProcessorSettingsConnectedHomeArgs
        {
            Labels = new[]
            {
                "string",
            },
            MinConfidence = 0,
        },
        FaceSearch = new Aws.Rekognition.Inputs.StreamProcessorSettingsFaceSearchArgs
        {
            CollectionId = "string",
            FaceMatchThreshold = 0,
        },
    },
    Tags = 
    {
        { "string", "string" },
    },
    Timeouts = new Aws.Rekognition.Inputs.StreamProcessorTimeoutsArgs
    {
        Create = "string",
        Delete = "string",
        Update = "string",
    },
});
// Reference example using placeholder values for every StreamProcessor input.
// Note: not all options are compatible with one another — see the resource docs.
example, err := rekognition.NewStreamProcessor(ctx, "streamProcessorResource", &rekognition.StreamProcessorArgs{
	RoleArn: pulumi.String("string"),
	DataSharingPreference: &rekognition.StreamProcessorDataSharingPreferenceArgs{
		OptIn: pulumi.Bool(false),
	},
	Input: &rekognition.StreamProcessorInputTypeArgs{
		KinesisVideoStream: &rekognition.StreamProcessorInputKinesisVideoStreamArgs{
			Arn: pulumi.String("string"),
		},
	},
	KmsKeyId: pulumi.String("string"),
	Name:     pulumi.String("string"),
	NotificationChannel: &rekognition.StreamProcessorNotificationChannelArgs{
		SnsTopicArn: pulumi.String("string"),
	},
	Output: &rekognition.StreamProcessorOutputTypeArgs{
		KinesisDataStream: &rekognition.StreamProcessorOutputKinesisDataStreamArgs{
			Arn: pulumi.String("string"),
		},
		S3Destination: &rekognition.StreamProcessorOutputS3DestinationArgs{
			Bucket:    pulumi.String("string"),
			KeyPrefix: pulumi.String("string"),
		},
	},
	RegionsOfInterests: rekognition.StreamProcessorRegionsOfInterestArray{
		&rekognition.StreamProcessorRegionsOfInterestArgs{
			Polygons: rekognition.StreamProcessorRegionsOfInterestPolygonArray{
				&rekognition.StreamProcessorRegionsOfInterestPolygonArgs{
					X: pulumi.Float64(0),
					Y: pulumi.Float64(0),
				},
			},
			BoundingBox: &rekognition.StreamProcessorRegionsOfInterestBoundingBoxArgs{
				Height: pulumi.Float64(0),
				Left:   pulumi.Float64(0),
				Top:    pulumi.Float64(0),
				Width:  pulumi.Float64(0),
			},
		},
	},
	Settings: &rekognition.StreamProcessorSettingsArgs{
		ConnectedHome: &rekognition.StreamProcessorSettingsConnectedHomeArgs{
			Labels: pulumi.StringArray{
				pulumi.String("string"),
			},
			MinConfidence: pulumi.Float64(0),
		},
		FaceSearch: &rekognition.StreamProcessorSettingsFaceSearchArgs{
			CollectionId:       pulumi.String("string"),
			FaceMatchThreshold: pulumi.Float64(0),
		},
	},
	Tags: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	Timeouts: &rekognition.StreamProcessorTimeoutsArgs{
		Create: pulumi.String("string"),
		Delete: pulumi.String("string"),
		Update: pulumi.String("string"),
	},
})
// Reference example using placeholder values for every StreamProcessor input.
// Note: not all options are compatible with one another — see the resource docs.
var streamProcessorResource = new StreamProcessor("streamProcessorResource", StreamProcessorArgs.builder()
    .roleArn("string")
    .dataSharingPreference(StreamProcessorDataSharingPreferenceArgs.builder()
        .optIn(false)
        .build())
    .input(StreamProcessorInputArgs.builder()
        .kinesisVideoStream(StreamProcessorInputKinesisVideoStreamArgs.builder()
            .arn("string")
            .build())
        .build())
    .kmsKeyId("string")
    .name("string")
    .notificationChannel(StreamProcessorNotificationChannelArgs.builder()
        .snsTopicArn("string")
        .build())
    .output(StreamProcessorOutputArgs.builder()
        .kinesisDataStream(StreamProcessorOutputKinesisDataStreamArgs.builder()
            .arn("string")
            .build())
        .s3Destination(StreamProcessorOutputS3DestinationArgs.builder()
            .bucket("string")
            .keyPrefix("string")
            .build())
        .build())
    .regionsOfInterests(StreamProcessorRegionsOfInterestArgs.builder()
        .polygons(StreamProcessorRegionsOfInterestPolygonArgs.builder()
            .x(0)
            .y(0)
            .build())
        .boundingBox(StreamProcessorRegionsOfInterestBoundingBoxArgs.builder()
            .height(0)
            .left(0)
            .top(0)
            .width(0)
            .build())
        .build())
    .settings(StreamProcessorSettingsArgs.builder()
        .connectedHome(StreamProcessorSettingsConnectedHomeArgs.builder()
            .labels("string")
            .minConfidence(0)
            .build())
        .faceSearch(StreamProcessorSettingsFaceSearchArgs.builder()
            .collectionId("string")
            .faceMatchThreshold(0)
            .build())
        .build())
    .tags(Map.of("string", "string"))
    .timeouts(StreamProcessorTimeoutsArgs.builder()
        .create("string")
        .delete("string")
        .update("string")
        .build())
    .build());
# Reference example using placeholder values for every StreamProcessor input.
# Note: not all options are compatible with one another — see the resource docs.
stream_processor_resource = aws.rekognition.StreamProcessor("streamProcessorResource",
    role_arn="string",
    data_sharing_preference={
        "opt_in": False,
    },
    input={
        "kinesis_video_stream": {
            "arn": "string",
        },
    },
    kms_key_id="string",
    name="string",
    notification_channel={
        "sns_topic_arn": "string",
    },
    output={
        "kinesis_data_stream": {
            "arn": "string",
        },
        "s3_destination": {
            "bucket": "string",
            "key_prefix": "string",
        },
    },
    regions_of_interests=[{
        "polygons": [{
            "x": 0,
            "y": 0,
        }],
        "bounding_box": {
            "height": 0,
            "left": 0,
            "top": 0,
            "width": 0,
        },
    }],
    settings={
        "connected_home": {
            "labels": ["string"],
            "min_confidence": 0,
        },
        "face_search": {
            "collection_id": "string",
            "face_match_threshold": 0,
        },
    },
    tags={
        "string": "string",
    },
    timeouts={
        "create": "string",
        "delete": "string",
        "update": "string",
    })
// Reference example using placeholder values for every StreamProcessor input.
// Note: not all options are compatible with one another — see the resource docs.
const streamProcessorResource = new aws.rekognition.StreamProcessor("streamProcessorResource", {
    roleArn: "string",
    dataSharingPreference: {
        optIn: false,
    },
    input: {
        kinesisVideoStream: {
            arn: "string",
        },
    },
    kmsKeyId: "string",
    name: "string",
    notificationChannel: {
        snsTopicArn: "string",
    },
    output: {
        kinesisDataStream: {
            arn: "string",
        },
        s3Destination: {
            bucket: "string",
            keyPrefix: "string",
        },
    },
    regionsOfInterests: [{
        polygons: [{
            x: 0,
            y: 0,
        }],
        boundingBox: {
            height: 0,
            left: 0,
            top: 0,
            width: 0,
        },
    }],
    settings: {
        connectedHome: {
            labels: ["string"],
            minConfidence: 0,
        },
        faceSearch: {
            collectionId: "string",
            faceMatchThreshold: 0,
        },
    },
    tags: {
        string: "string",
    },
    timeouts: {
        create: "string",
        "delete": "string",
        update: "string",
    },
});
# Reference example using placeholder values for every StreamProcessor input.
# Note: not all options are compatible with one another — see the resource docs.
type: aws:rekognition:StreamProcessor
properties:
    dataSharingPreference:
        optIn: false
    input:
        kinesisVideoStream:
            arn: string
    kmsKeyId: string
    name: string
    notificationChannel:
        snsTopicArn: string
    output:
        kinesisDataStream:
            arn: string
        s3Destination:
            bucket: string
            keyPrefix: string
    regionsOfInterests:
        - boundingBox:
            height: 0
            left: 0
            top: 0
            width: 0
          polygons:
            - x: 0
              "y": 0
    roleArn: string
    settings:
        connectedHome:
            labels:
                - string
            minConfidence: 0
        faceSearch:
            collectionId: string
            faceMatchThreshold: 0
    tags:
        string: string
    timeouts:
        create: string
        delete: string
        update: string
StreamProcessor Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The StreamProcessor resource accepts the following input properties:
- RoleArn string
- The Amazon Resource Number (ARN) of the IAM role that allows access to the stream processor. The IAM role provides Rekognition read permissions for a Kinesis stream. It also provides write permissions to an Amazon S3 bucket and Amazon Simple Notification Service topic for a label detection stream processor. This is required for both face search and label detection stream processors.
- DataSharingPreference StreamProcessorDataSharingPreference
- See data_sharing_preference.
- Input StreamProcessorInput
- Input video stream. See input.
- KmsKeyId string
- Optional parameter for label detection stream processors.
- Name string
- The name of the Stream Processor.
- NotificationChannel StreamProcessorNotificationChannel
- The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status. See notification_channel.
- Output StreamProcessorOutput
- Kinesis data stream or Amazon S3 bucket location to which Amazon Rekognition Video puts the analysis results. See output.
- RegionsOfInterests List<StreamProcessorRegionsOfInterest>
- Specifies locations in the frames where Amazon Rekognition checks for objects or people. See regions_of_interest.
- Settings StreamProcessorSettings
- Input parameters used in a streaming video analyzed by a stream processor. See settings. The following arguments are optional:
- Tags Dictionary<string, string>
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- Timeouts StreamProcessorTimeouts
- RoleArn string
- The Amazon Resource Number (ARN) of the IAM role that allows access to the stream processor. The IAM role provides Rekognition read permissions for a Kinesis stream. It also provides write permissions to an Amazon S3 bucket and Amazon Simple Notification Service topic for a label detection stream processor. This is required for both face search and label detection stream processors.
- DataSharingPreference StreamProcessorDataSharingPreferenceArgs
- See data_sharing_preference.
- Input StreamProcessorInputTypeArgs
- Input video stream. See input.
- KmsKeyId string
- Optional parameter for label detection stream processors.
- Name string
- The name of the Stream Processor.
- NotificationChannel StreamProcessorNotificationChannelArgs
- The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status. See notification_channel.
- Output StreamProcessorOutputTypeArgs
- Kinesis data stream or Amazon S3 bucket location to which Amazon Rekognition Video puts the analysis results. See output.
- RegionsOfInterests []StreamProcessorRegionsOfInterestArgs
- Specifies locations in the frames where Amazon Rekognition checks for objects or people. See regions_of_interest.
- Settings StreamProcessorSettingsArgs
- Input parameters used in a streaming video analyzed by a stream processor. See settings. The following arguments are optional:
- Tags map[string]string
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- Timeouts StreamProcessorTimeoutsArgs
- roleArn String
- The Amazon Resource Number (ARN) of the IAM role that allows access to the stream processor. The IAM role provides Rekognition read permissions for a Kinesis stream. It also provides write permissions to an Amazon S3 bucket and Amazon Simple Notification Service topic for a label detection stream processor. This is required for both face search and label detection stream processors.
- dataSharingPreference StreamProcessorDataSharingPreference
- See data_sharing_preference.
- input StreamProcessorInput
- Input video stream. See input.
- kmsKeyId String
- Optional parameter for label detection stream processors.
- name String
- The name of the Stream Processor.
- notificationChannel StreamProcessorNotificationChannel
- The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status. See notification_channel.
- output StreamProcessorOutput
- Kinesis data stream or Amazon S3 bucket location to which Amazon Rekognition Video puts the analysis results. See output.
- regionsOfInterests List<StreamProcessorRegionsOfInterest>
- Specifies locations in the frames where Amazon Rekognition checks for objects or people. See regions_of_interest.
- settings StreamProcessorSettings
- Input parameters used in a streaming video analyzed by a stream processor. See settings. The following arguments are optional:
- tags Map<String,String>
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- timeouts StreamProcessorTimeouts
- roleArn string
- The Amazon Resource Number (ARN) of the IAM role that allows access to the stream processor. The IAM role provides Rekognition read permissions for a Kinesis stream. It also provides write permissions to an Amazon S3 bucket and Amazon Simple Notification Service topic for a label detection stream processor. This is required for both face search and label detection stream processors.
- dataSharingPreference StreamProcessorDataSharingPreference
- See data_sharing_preference.
- input StreamProcessorInput
- Input video stream. See input.
- kmsKeyId string
- Optional parameter for label detection stream processors.
- name string
- The name of the Stream Processor.
- notificationChannel StreamProcessorNotificationChannel
- The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status. See notification_channel.
- output StreamProcessorOutput
- Kinesis data stream or Amazon S3 bucket location to which Amazon Rekognition Video puts the analysis results. See output.
- regionsOfInterests StreamProcessorRegionsOfInterest[]
- Specifies locations in the frames where Amazon Rekognition checks for objects or people. See regions_of_interest.
- settings StreamProcessorSettings
- Input parameters used in a streaming video analyzed by a stream processor. See settings. The following arguments are optional:
- tags {[key: string]: string}
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- timeouts StreamProcessorTimeouts
- role_arn str
- The Amazon Resource Number (ARN) of the IAM role that allows access to the stream processor. The IAM role provides Rekognition read permissions for a Kinesis stream. It also provides write permissions to an Amazon S3 bucket and Amazon Simple Notification Service topic for a label detection stream processor. This is required for both face search and label detection stream processors.
- data_sharing_preference StreamProcessorDataSharingPreferenceArgs
- See data_sharing_preference.
- input StreamProcessorInputArgs
- Input video stream. See input.
- kms_key_id str
- Optional parameter for label detection stream processors.
- name str
- The name of the Stream Processor.
- notification_channel StreamProcessorNotificationChannelArgs
- The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status. See notification_channel.
- output StreamProcessorOutputArgs
- Kinesis data stream or Amazon S3 bucket location to which Amazon Rekognition Video puts the analysis results. See output.
- regions_of_interests Sequence[StreamProcessorRegionsOfInterestArgs]
- Specifies locations in the frames where Amazon Rekognition checks for objects or people. See regions_of_interest.
- settings StreamProcessorSettingsArgs
- Input parameters used in a streaming video analyzed by a stream processor. See settings. The following arguments are optional:
- tags Mapping[str, str]
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- timeouts StreamProcessorTimeoutsArgs
- roleArn String
- The Amazon Resource Number (ARN) of the IAM role that allows access to the stream processor. The IAM role provides Rekognition read permissions for a Kinesis stream. It also provides write permissions to an Amazon S3 bucket and Amazon Simple Notification Service topic for a label detection stream processor. This is required for both face search and label detection stream processors.
- dataSharingPreference Property Map
- See data_sharing_preference.
- input Property Map
- Input video stream. See input.
- kmsKeyId String
- Optional parameter for label detection stream processors.
- name String
- The name of the Stream Processor.
- notificationChannel Property Map
- The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status. See notification_channel.
- output Property Map
- Kinesis data stream or Amazon S3 bucket location to which Amazon Rekognition Video puts the analysis results. See output.
- regionsOfInterests List<Property Map>
- Specifies locations in the frames where Amazon Rekognition checks for objects or people. See regions_of_interest.
- settings Property Map
- Input parameters used in a streaming video analyzed by a stream processor. See settings. The following arguments are optional:
- tags Map<String>
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- timeouts Property Map
Outputs
All input properties are implicitly available as output properties. Additionally, the StreamProcessor resource produces the following output properties:
- Arn string
- ARN of the Stream Processor.
- Id string
- The provider-assigned unique ID for this managed resource.
- StreamProcessorArn string
- (Deprecated) ARN of the Stream Processor.
Use arn instead.
- TagsAll Dictionary<string, string>
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- Arn string
- ARN of the Stream Processor.
- Id string
- The provider-assigned unique ID for this managed resource.
- StreamProcessorArn string
- (Deprecated) ARN of the Stream Processor.
Use arn instead.
- TagsAll map[string]string
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- arn String
- ARN of the Stream Processor.
- id String
- The provider-assigned unique ID for this managed resource.
- streamProcessorArn String
- (Deprecated) ARN of the Stream Processor.
Use arn instead.
- tagsAll Map<String,String>
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- arn string
- ARN of the Stream Processor.
- id string
- The provider-assigned unique ID for this managed resource.
- streamProcessorArn string
- (Deprecated) ARN of the Stream Processor.
Use arn instead.
- tagsAll {[key: string]: string}
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- arn str
- ARN of the Stream Processor.
- id str
- The provider-assigned unique ID for this managed resource.
- stream_processor_arn str
- (Deprecated) ARN of the Stream Processor.
Use arn instead.
- tags_all Mapping[str, str]
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- arn String
- ARN of the Stream Processor.
- id String
- The provider-assigned unique ID for this managed resource.
- streamProcessorArn String
- (Deprecated) ARN of the Stream Processor.
Use arn instead.
- tagsAll Map<String>
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
Look up Existing StreamProcessor Resource
Get an existing StreamProcessor resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: StreamProcessorState, opts?: CustomResourceOptions): StreamProcessor
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        arn: Optional[str] = None,
        data_sharing_preference: Optional[StreamProcessorDataSharingPreferenceArgs] = None,
        input: Optional[StreamProcessorInputArgs] = None,
        kms_key_id: Optional[str] = None,
        name: Optional[str] = None,
        notification_channel: Optional[StreamProcessorNotificationChannelArgs] = None,
        output: Optional[StreamProcessorOutputArgs] = None,
        regions_of_interests: Optional[Sequence[StreamProcessorRegionsOfInterestArgs]] = None,
        role_arn: Optional[str] = None,
        settings: Optional[StreamProcessorSettingsArgs] = None,
        stream_processor_arn: Optional[str] = None,
        tags: Optional[Mapping[str, str]] = None,
        tags_all: Optional[Mapping[str, str]] = None,
        timeouts: Optional[StreamProcessorTimeoutsArgs] = None) -> StreamProcessor
func GetStreamProcessor(ctx *Context, name string, id IDInput, state *StreamProcessorState, opts ...ResourceOption) (*StreamProcessor, error)
public static StreamProcessor Get(string name, Input<string> id, StreamProcessorState? state, CustomResourceOptions? opts = null)
public static StreamProcessor get(String name, Output<String> id, StreamProcessorState state, CustomResourceOptions options)
resources:
  _:
    type: aws:rekognition:StreamProcessor
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Arn string
- ARN of the Stream Processor.
- DataSharingPreference StreamProcessorDataSharingPreference
- See data_sharing_preference.
- Input StreamProcessorInput
- Input video stream. See input.
- KmsKeyId string
- Optional parameter for label detection stream processors.
- Name string
- The name of the Stream Processor.
- NotificationChannel StreamProcessorNotificationChannel
- The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status. See notification_channel.
- Output StreamProcessorOutput
- Kinesis data stream or Amazon S3 bucket location to which Amazon Rekognition Video puts the analysis results. See output.
- RegionsOfInterests List<StreamProcessorRegionsOfInterest>
- Specifies locations in the frames where Amazon Rekognition checks for objects or people. See regions_of_interest.
- RoleArn string
- The Amazon Resource Number (ARN) of the IAM role that allows access to the stream processor. The IAM role provides Rekognition read permissions for a Kinesis stream. It also provides write permissions to an Amazon S3 bucket and Amazon Simple Notification Service topic for a label detection stream processor. This is required for both face search and label detection stream processors.
- Settings StreamProcessorSettings
- Input parameters used in a streaming video analyzed by a stream processor. See settings. The following arguments are optional:
- StreamProcessorArn string
- (Deprecated) ARN of the Stream Processor.
Use arn instead.
- Tags Dictionary<string, string>
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- TagsAll Dictionary<string, string>
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- Timeouts StreamProcessorTimeouts
- Arn string
- ARN of the Stream Processor.
- DataSharingPreference StreamProcessorDataSharingPreferenceArgs
- See data_sharing_preference.
- Input StreamProcessorInputTypeArgs
- Input video stream. See input.
- KmsKeyId string
- Optional parameter for label detection stream processors.
- Name string
- The name of the Stream Processor.
- NotificationChannel StreamProcessorNotificationChannelArgs
- The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status. See notification_channel.
- Output StreamProcessorOutputTypeArgs
- Kinesis data stream or Amazon S3 bucket location to which Amazon Rekognition Video puts the analysis results. See output.
- RegionsOfInterests []StreamProcessorRegionsOfInterestArgs
- Specifies locations in the frames where Amazon Rekognition checks for objects or people. See regions_of_interest.
- RoleArn string
- The Amazon Resource Number (ARN) of the IAM role that allows access to the stream processor. The IAM role provides Rekognition read permissions for a Kinesis stream. It also provides write permissions to an Amazon S3 bucket and Amazon Simple Notification Service topic for a label detection stream processor. This is required for both face search and label detection stream processors.
- Settings StreamProcessorSettingsArgs
- Input parameters used in a streaming video analyzed by a stream processor. See settings. The following arguments are optional:
- StreamProcessorArn string
- (Deprecated) ARN of the Stream Processor.
Use arn instead.
- Tags map[string]string
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- TagsAll map[string]string
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- Timeouts StreamProcessorTimeoutsArgs
- arn String
- ARN of the Stream Processor.
- dataSharingPreference StreamProcessorDataSharingPreference
- See data_sharing_preference.
- input StreamProcessorInput
- Input video stream. See input.
- kmsKeyId String
- Optional parameter for label detection stream processors.
- name String
- The name of the Stream Processor.
- notificationChannel StreamProcessorNotificationChannel
- The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status. See notification_channel.
- output StreamProcessorOutput
- Kinesis data stream or Amazon S3 bucket location to which Amazon Rekognition Video puts the analysis results. See output.
- regionsOfInterests List<StreamProcessorRegionsOfInterest>
- Specifies locations in the frames where Amazon Rekognition checks for objects or people. See regions_of_interest.
- roleArn String
- The Amazon Resource Number (ARN) of the IAM role that allows access to the stream processor. The IAM role provides Rekognition read permissions for a Kinesis stream. It also provides write permissions to an Amazon S3 bucket and Amazon Simple Notification Service topic for a label detection stream processor. This is required for both face search and label detection stream processors.
- settings StreamProcessorSettings
- Input parameters used in a streaming video analyzed by a stream processor. See settings. The following arguments are optional:
- streamProcessorArn String
- (Deprecated) ARN of the Stream Processor.
Use arn instead.
- tags Map<String,String>
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- tagsAll Map<String,String>
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- timeouts StreamProcessorTimeouts
- arn string
- ARN of the Stream Processor.
- dataSharingPreference StreamProcessorDataSharingPreference
- See data_sharing_preference.
- input StreamProcessorInput
- Input video stream. See input.
- kmsKeyId string
- Optional parameter for label detection stream processors.
- name string
- The name of the Stream Processor.
- notificationChannel StreamProcessorNotificationChannel
- The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status. See notification_channel.
- output StreamProcessorOutput
- Kinesis data stream or Amazon S3 bucket location to which Amazon Rekognition Video puts the analysis results. See output.
- regionsOfInterests StreamProcessorRegionsOfInterest[]
- Specifies locations in the frames where Amazon Rekognition checks for objects or people. See regions_of_interest.
- roleArn string
- The Amazon Resource Number (ARN) of the IAM role that allows access to the stream processor. The IAM role provides Rekognition read permissions for a Kinesis stream. It also provides write permissions to an Amazon S3 bucket and Amazon Simple Notification Service topic for a label detection stream processor. This is required for both face search and label detection stream processors.
- settings StreamProcessorSettings
- Input parameters used in a streaming video analyzed by a stream processor. See settings. The following arguments are optional:
- streamProcessorArn string
- (Deprecated) ARN of the Stream Processor.
Use arn instead.
- tags {[key: string]: string}
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- tagsAll {[key: string]: string}
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- timeouts StreamProcessorTimeouts
- arn str
- ARN of the Stream Processor.
- data_sharing_preference StreamProcessorDataSharingPreferenceArgs
- See data_sharing_preference.
- input StreamProcessorInputArgs
- Input video stream. See input.
- kms_key_id str
- Optional parameter for label detection stream processors.
- name str
- The name of the Stream Processor.
- notification_channel StreamProcessorNotificationChannelArgs
- The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status. See notification_channel.
- output StreamProcessorOutputArgs
- Kinesis data stream or Amazon S3 bucket location to which Amazon Rekognition Video puts the analysis results. See output.
- regions_of_interests Sequence[StreamProcessorRegionsOfInterestArgs]
- Specifies locations in the frames where Amazon Rekognition checks for objects or people. See regions_of_interest.
- role_arn str
- The Amazon Resource Number (ARN) of the IAM role that allows access to the stream processor. The IAM role provides Rekognition read permissions for a Kinesis stream. It also provides write permissions to an Amazon S3 bucket and Amazon Simple Notification Service topic for a label detection stream processor. This is required for both face search and label detection stream processors.
- settings StreamProcessorSettingsArgs
- Input parameters used in a streaming video analyzed by a stream processor. See settings. The following arguments are optional:
- stream_processor_arn str
- (Deprecated) ARN of the Stream Processor.
Use arn instead.
- tags Mapping[str, str]
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- tags_all Mapping[str, str]
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- timeouts StreamProcessorTimeoutsArgs
- arn String
- ARN of the Stream Processor.
- dataSharingPreference Property Map
- See data_sharing_preference.
- input Property Map
- Input video stream. See input.
- kmsKeyId String
- Optional parameter for label detection stream processors.
- name String
- The name of the Stream Processor.
- notificationChannel Property Map
- The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status. See notification_channel.
- output Property Map
- Kinesis data stream or Amazon S3 bucket location to which Amazon Rekognition Video puts the analysis results. See output.
- regionsOfInterests List<Property Map>
- Specifies locations in the frames where Amazon Rekognition checks for objects or people. See regions_of_interest.
- roleArn String
- The Amazon Resource Number (ARN) of the IAM role that allows access to the stream processor. The IAM role provides Rekognition read permissions for a Kinesis stream. It also provides write permissions to an Amazon S3 bucket and Amazon Simple Notification Service topic for a label detection stream processor. This is required for both face search and label detection stream processors.
- settings Property Map
- Input parameters used in a streaming video analyzed by a stream processor. See settings. The following arguments are optional:
- streamProcessorArn String
- (Deprecated) ARN of the Stream Processor.
Use arn instead.
- tags Map<String>
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- tagsAll Map<String>
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- timeouts Property Map
Supporting Types
StreamProcessorDataSharingPreference, StreamProcessorDataSharingPreferenceArgs          
- OptIn bool
- Whether you are sharing data with Rekognition to improve model performance.
- OptIn bool
- Whether you are sharing data with Rekognition to improve model performance.
- optIn Boolean
- Whether you are sharing data with Rekognition to improve model performance.
- optIn boolean
- Whether you are sharing data with Rekognition to improve model performance.
- opt_in bool
- Whether you are sharing data with Rekognition to improve model performance.
- optIn Boolean
- Whether you are sharing data with Rekognition to improve model performance.
StreamProcessorInput, StreamProcessorInputArgs      
- KinesisVideoStream StreamProcessorInputKinesisVideoStream
- Kinesis input stream. See kinesis_video_stream.
- KinesisVideoStream StreamProcessorInputKinesisVideoStream
- Kinesis input stream. See kinesis_video_stream.
- kinesisVideoStream StreamProcessorInputKinesisVideoStream
- Kinesis input stream. See kinesis_video_stream.
- kinesisVideoStream StreamProcessorInputKinesisVideoStream
- Kinesis input stream. See kinesis_video_stream.
- kinesis_video_stream StreamProcessorInputKinesisVideoStream
- Kinesis input stream. See kinesis_video_stream.
- kinesisVideoStream Property Map
- Kinesis input stream. See kinesis_video_stream.
StreamProcessorInputKinesisVideoStream, StreamProcessorInputKinesisVideoStreamArgs            
- Arn string
- ARN of the Kinesis video stream that streams the source video.
- Arn string
- ARN of the Kinesis video stream that streams the source video.
- arn String
- ARN of the Kinesis video stream that streams the source video.
- arn string
- ARN of the Kinesis video stream that streams the source video.
- arn str
- ARN of the Kinesis video stream that streams the source video.
- arn String
- ARN of the Kinesis video stream that streams the source video.
StreamProcessorNotificationChannel, StreamProcessorNotificationChannelArgs        
- SnsTopicArn string
- The Amazon Resource Number (ARN) of the Amazon Simple Notification Service topic to which Amazon Rekognition posts the completion status.
- SnsTopicArn string
- The Amazon Resource Number (ARN) of the Amazon Simple Notification Service topic to which Amazon Rekognition posts the completion status.
- snsTopicArn String
- The Amazon Resource Number (ARN) of the Amazon Simple Notification Service topic to which Amazon Rekognition posts the completion status.
- snsTopicArn string
- The Amazon Resource Number (ARN) of the Amazon Simple Notification Service topic to which Amazon Rekognition posts the completion status.
- sns_topic_arn str
- The Amazon Resource Number (ARN) of the Amazon Simple Notification Service topic to which Amazon Rekognition posts the completion status.
- snsTopicArn String
- The Amazon Resource Number (ARN) of the Amazon Simple Notification Service topic to which Amazon Rekognition posts the completion status.
StreamProcessorOutput, StreamProcessorOutputArgs      
- KinesisDataStream StreamProcessorOutputKinesisDataStream
- The Amazon Kinesis Data Streams stream to which the Amazon Rekognition stream processor streams the analysis results. See kinesis_data_stream.
- S3Destination StreamProcessorOutputS3Destination
- The Amazon S3 bucket location to which Amazon Rekognition publishes the detailed inference results of a video analysis operation. See s3_destination.
- KinesisDataStream StreamProcessorOutputKinesisDataStream
- The Amazon Kinesis Data Streams stream to which the Amazon Rekognition stream processor streams the analysis results. See kinesis_data_stream.
- S3Destination StreamProcessorOutputS3Destination
- The Amazon S3 bucket location to which Amazon Rekognition publishes the detailed inference results of a video analysis operation. See s3_destination.
- kinesisDataStream StreamProcessorOutputKinesisDataStream
- The Amazon Kinesis Data Streams stream to which the Amazon Rekognition stream processor streams the analysis results. See kinesis_data_stream.
- s3Destination StreamProcessorOutputS3Destination
- The Amazon S3 bucket location to which Amazon Rekognition publishes the detailed inference results of a video analysis operation. See s3_destination.
- kinesisDataStream StreamProcessorOutputKinesisDataStream
- The Amazon Kinesis Data Streams stream to which the Amazon Rekognition stream processor streams the analysis results. See kinesis_data_stream.
- s3Destination StreamProcessorOutputS3Destination
- The Amazon S3 bucket location to which Amazon Rekognition publishes the detailed inference results of a video analysis operation. See s3_destination.
- kinesis_data_stream StreamProcessorOutputKinesisDataStream
- The Amazon Kinesis Data Streams stream to which the Amazon Rekognition stream processor streams the analysis results. See kinesis_data_stream.
- s3_destination StreamProcessorOutputS3Destination
- The Amazon S3 bucket location to which Amazon Rekognition publishes the detailed inference results of a video analysis operation. See s3_destination.
- kinesisDataStream Property Map
- The Amazon Kinesis Data Streams stream to which the Amazon Rekognition stream processor streams the analysis results. See kinesis_data_stream.
- s3Destination Property Map
- The Amazon S3 bucket location to which Amazon Rekognition publishes the detailed inference results of a video analysis operation. See s3_destination.
StreamProcessorOutputKinesisDataStream, StreamProcessorOutputKinesisDataStreamArgs            
- Arn string
- ARN of the output Amazon Kinesis Data Streams stream.
- Arn string
- ARN of the output Amazon Kinesis Data Streams stream.
- arn String
- ARN of the output Amazon Kinesis Data Streams stream.
- arn string
- ARN of the output Amazon Kinesis Data Streams stream.
- arn str
- ARN of the output Amazon Kinesis Data Streams stream.
- arn String
- ARN of the output Amazon Kinesis Data Streams stream.
StreamProcessorOutputS3Destination, StreamProcessorOutputS3DestinationArgs        
- bucket str
- Name of the Amazon S3 bucket you want to associate with the streaming video project.
- key_prefix str
- The prefix value of the location within the bucket that you want the information to be published to.
StreamProcessorRegionsOfInterest, StreamProcessorRegionsOfInterestArgs          
- Polygons
List<StreamProcessor Regions Of Interest Polygon> 
- Shape made up of up to 10 Point objects to define a region of interest. See polygon.
- BoundingBox StreamProcessor Regions Of Interest Bounding Box 
- Box representing a region of interest on screen. Only 1 per region is allowed. See bounding_box.
- Polygons
[]StreamProcessor Regions Of Interest Polygon 
- Shape made up of up to 10 Point objects to define a region of interest. See polygon.
- BoundingBox StreamProcessor Regions Of Interest Bounding Box 
- Box representing a region of interest on screen. Only 1 per region is allowed. See bounding_box.
- polygons
List<StreamProcessor Regions Of Interest Polygon> 
- Shape made up of up to 10 Point objects to define a region of interest. See polygon.
- boundingBox StreamProcessor Regions Of Interest Bounding Box 
- Box representing a region of interest on screen. Only 1 per region is allowed. See bounding_box.
- polygons
StreamProcessor Regions Of Interest Polygon[] 
- Shape made up of up to 10 Point objects to define a region of interest. See polygon.
- boundingBox StreamProcessor Regions Of Interest Bounding Box 
- Box representing a region of interest on screen. Only 1 per region is allowed. See bounding_box.
- polygons
Sequence[StreamProcessor Regions Of Interest Polygon] 
- Shape made up of up to 10 Point objects to define a region of interest. See polygon.
- bounding_box StreamProcessor Regions Of Interest Bounding Box 
- Box representing a region of interest on screen. Only 1 per region is allowed. See bounding_box.
- polygons List<Property Map>
- Shape made up of up to 10 Point objects to define a region of interest. See polygon.
- boundingBox Property Map
- Box representing a region of interest on screen. Only 1 per region is allowed. See bounding_box.
StreamProcessorRegionsOfInterestBoundingBox, StreamProcessorRegionsOfInterestBoundingBoxArgs              
- Height double
- Height of the bounding box as a ratio of the overall image height.
- Left double
- Left coordinate of the bounding box as a ratio of overall image width.
- Top double
- Top coordinate of the bounding box as a ratio of overall image height.
- Width double
- Width of the bounding box as a ratio of the overall image width.
- Height float64
- Height of the bounding box as a ratio of the overall image height.
- Left float64
- Left coordinate of the bounding box as a ratio of overall image width.
- Top float64
- Top coordinate of the bounding box as a ratio of overall image height.
- Width float64
- Width of the bounding box as a ratio of the overall image width.
- height Double
- Height of the bounding box as a ratio of the overall image height.
- left Double
- Left coordinate of the bounding box as a ratio of overall image width.
- top Double
- Top coordinate of the bounding box as a ratio of overall image height.
- width Double
- Width of the bounding box as a ratio of the overall image width.
- height number
- Height of the bounding box as a ratio of the overall image height.
- left number
- Left coordinate of the bounding box as a ratio of overall image width.
- top number
- Top coordinate of the bounding box as a ratio of overall image height.
- width number
- Width of the bounding box as a ratio of the overall image width.
- height float
- Height of the bounding box as a ratio of the overall image height.
- left float
- Left coordinate of the bounding box as a ratio of overall image width.
- top float
- Top coordinate of the bounding box as a ratio of overall image height.
- width float
- Width of the bounding box as a ratio of the overall image width.
- height Number
- Height of the bounding box as a ratio of the overall image height.
- left Number
- Left coordinate of the bounding box as a ratio of overall image width.
- top Number
- Top coordinate of the bounding box as a ratio of overall image height.
- width Number
- Width of the bounding box as a ratio of the overall image width.
StreamProcessorRegionsOfInterestPolygon, StreamProcessorRegionsOfInterestPolygonArgs            
StreamProcessorSettings, StreamProcessorSettingsArgs      
- ConnectedHome StreamProcessor Settings Connected Home 
- Label detection settings to use on a streaming video. See connected_home.
- FaceSearch StreamProcessor Settings Face Search 
- Input face recognition parameters for an Amazon Rekognition stream processor. See face_search.
- ConnectedHome StreamProcessor Settings Connected Home 
- Label detection settings to use on a streaming video. See connected_home.
- FaceSearch StreamProcessor Settings Face Search 
- Input face recognition parameters for an Amazon Rekognition stream processor. See face_search.
- connectedHome StreamProcessor Settings Connected Home 
- Label detection settings to use on a streaming video. See connected_home.
- faceSearch StreamProcessor Settings Face Search 
- Input face recognition parameters for an Amazon Rekognition stream processor. See face_search.
- connectedHome StreamProcessor Settings Connected Home 
- Label detection settings to use on a streaming video. See connected_home.
- faceSearch StreamProcessor Settings Face Search 
- Input face recognition parameters for an Amazon Rekognition stream processor. See face_search.
- connected_home StreamProcessor Settings Connected Home 
- Label detection settings to use on a streaming video. See connected_home.
- face_search StreamProcessor Settings Face Search 
- Input face recognition parameters for an Amazon Rekognition stream processor. See face_search.
- connectedHome Property Map
- Label detection settings to use on a streaming video. See connected_home.
- faceSearch Property Map
- Input face recognition parameters for an Amazon Rekognition stream processor. See face_search.
StreamProcessorSettingsConnectedHome, StreamProcessorSettingsConnectedHomeArgs          
- Labels List<string>
- Specifies what you want to detect in the video, such as people, packages, or pets. The current valid labels you can include in this list are: PERSON, PET, PACKAGE, and ALL.
- MinConfidence double
- Minimum confidence required to label an object in the video.
- Labels []string
- Specifies what you want to detect in the video, such as people, packages, or pets. The current valid labels you can include in this list are: PERSON, PET, PACKAGE, and ALL.
- MinConfidence float64
- Minimum confidence required to label an object in the video.
- labels List<String>
- Specifies what you want to detect in the video, such as people, packages, or pets. The current valid labels you can include in this list are: PERSON, PET, PACKAGE, and ALL.
- minConfidence Double
- Minimum confidence required to label an object in the video.
- labels string[]
- Specifies what you want to detect in the video, such as people, packages, or pets. The current valid labels you can include in this list are: PERSON, PET, PACKAGE, and ALL.
- minConfidence number
- Minimum confidence required to label an object in the video.
- labels Sequence[str]
- Specifies what you want to detect in the video, such as people, packages, or pets. The current valid labels you can include in this list are: PERSON, PET, PACKAGE, and ALL.
- min_confidence float
- Minimum confidence required to label an object in the video.
- labels List<String>
- Specifies what you want to detect in the video, such as people, packages, or pets. The current valid labels you can include in this list are: PERSON, PET, PACKAGE, and ALL.
- minConfidence Number
- Minimum confidence required to label an object in the video.
StreamProcessorSettingsFaceSearch, StreamProcessorSettingsFaceSearchArgs          
- CollectionId string
- ID of a collection that contains faces that you want to search for.
- FaceMatchThreshold double
- Minimum face match confidence score that must be met to return a result for a recognized face.
- CollectionId string
- ID of a collection that contains faces that you want to search for.
- FaceMatchThreshold float64
- Minimum face match confidence score that must be met to return a result for a recognized face.
- collectionId String
- ID of a collection that contains faces that you want to search for.
- faceMatchThreshold Double
- Minimum face match confidence score that must be met to return a result for a recognized face.
- collectionId string
- ID of a collection that contains faces that you want to search for.
- faceMatchThreshold number
- Minimum face match confidence score that must be met to return a result for a recognized face.
- collection_id str
- ID of a collection that contains faces that you want to search for.
- face_match_threshold float
- Minimum face match confidence score that must be met to return a result for a recognized face.
- collectionId String
- ID of a collection that contains faces that you want to search for.
- faceMatchThreshold Number
- Minimum face match confidence score that must be met to return a result for a recognized face.
StreamProcessorTimeouts, StreamProcessorTimeoutsArgs      
- Create string
- A string that can be parsed as a duration consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
- Delete string
- A string that can be parsed as a duration consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
- Update string
- A string that can be parsed as a duration consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
- Create string
- A string that can be parsed as a duration consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
- Delete string
- A string that can be parsed as a duration consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
- Update string
- A string that can be parsed as a duration consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
- create String
- A string that can be parsed as a duration consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
- delete String
- A string that can be parsed as a duration consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
- update String
- A string that can be parsed as a duration consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
- create string
- A string that can be parsed as a duration consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
- delete string
- A string that can be parsed as a duration consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
- update string
- A string that can be parsed as a duration consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
- create str
- A string that can be parsed as a duration consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
- delete str
- A string that can be parsed as a duration consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
- update str
- A string that can be parsed as a duration consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
- create String
- A string that can be parsed as a duration consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
- delete String
- A string that can be parsed as a duration consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
- update String
- A string that can be parsed as a duration consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
Import
Using pulumi import, import Rekognition Stream Processor using the name. For example:
$ pulumi import aws:rekognition/streamProcessor:StreamProcessor example my-stream
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- AWS Classic pulumi/pulumi-aws
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the aws Terraform Provider.