From 22ab033d1e637b6715a59a5bfcfd3670d5eb9dc3 Mon Sep 17 00:00:00 2001 From: github-actions Date: Tue, 27 May 2025 16:06:09 +0000 Subject: [PATCH] chore(schema): update --- samtranslator/schema/schema.json | 62 +++------ schema_source/cloudformation-docs.json | 168 ++++++++++++++--------- schema_source/cloudformation.schema.json | 62 +++------ 3 files changed, 135 insertions(+), 157 deletions(-) diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index 69fd3fb0f..cd2310ad9 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -62916,41 +62916,27 @@ "items": { "type": "string" }, - "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads from your on-premises storage system's management interface. You can only specify one ARN.", - "title": "AgentArns", "type": "array" }, "CloudWatchLogGroupArn": { - "markdownDescription": "Specifies the ARN of the Amazon CloudWatch log group for monitoring and logging discovery job events.", - "title": "CloudWatchLogGroupArn", "type": "string" }, "Name": { - "markdownDescription": "Specifies a familiar name for your on-premises storage system.", - "title": "Name", "type": "string" }, "ServerConfiguration": { - "$ref": "#/definitions/AWS::DataSync::StorageSystem.ServerConfiguration", - "markdownDescription": "Specifies the server name and network port required to connect with the management interface of your on-premises storage system.", - "title": "ServerConfiguration" + "$ref": "#/definitions/AWS::DataSync::StorageSystem.ServerConfiguration" }, "ServerCredentials": { - "$ref": "#/definitions/AWS::DataSync::StorageSystem.ServerCredentials", - "markdownDescription": "Specifies the user name and password for accessing your on-premises storage system's management interface.", - "title": "ServerCredentials" + "$ref": "#/definitions/AWS::DataSync::StorageSystem.ServerCredentials" }, "SystemType": { - "markdownDescription": "Specifies the type of on-premises storage system that you want DataSync Discovery to collect information about.\n\n> DataSync Discovery currently supports NetApp Fabric-Attached Storage (FAS) and All Flash FAS (AFF) systems running ONTAP 9.7 or later.", - "title": "SystemType", "type": "string" }, "Tags": { "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "Specifies labels that help you categorize, filter, and search for your AWS resources. 
We recommend creating at least a name tag for your on-premises storage system.", - "title": "Tags", "type": "array" } }, @@ -62986,13 +62972,9 @@ "additionalProperties": false, "properties": { "ServerHostname": { - "markdownDescription": "The domain name or IP address of your storage system's management interface.", - "title": "ServerHostname", "type": "string" }, "ServerPort": { - "markdownDescription": "The network port for accessing the storage system's management interface.", - "title": "ServerPort", "type": "number" } }, @@ -63005,13 +62987,9 @@ "additionalProperties": false, "properties": { "Password": { - "markdownDescription": "Specifies the password for your storage system's management interface.", - "title": "Password", "type": "string" }, "Username": { - "markdownDescription": "Specifies the user name for your storage system's management interface.", - "title": "Username", "type": "string" } }, @@ -83844,7 +83822,7 @@ }, "Options": { "additionalProperties": true, - "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using Fargate.Optional when using EC2.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. 
The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n\nThe following options apply to all supported log drivers.\n\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to the log driver specified using `logDriver` . The delivery mode you choose affects application availability when the flow of logs from container is interrupted.\n\nIf you use the `blocking` mode and the flow of logs is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n\nYou can set a default `mode` for all containers in a specific AWS Region by using the `defaultLogDriverMode` account setting. If you don't specify the `mode` option or configure the account setting, Amazon ECS will default to the `blocking` mode. For more information about the account setting, see [Default log driver mode](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#default-log-driver-mode) in the *Amazon Elastic Container Service Developer Guide* .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. 
When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` . For more information, see [Under the hood: FireLens for Amazon ECS Tasks](https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/) .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using Fargate. Optional when using EC2.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to.
If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. By doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n\nThe following options apply to all supported log drivers.\n\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to the log driver specified using `logDriver` . The delivery mode you choose affects application availability when the flow of logs from the container is interrupted.\n\nIf you use the `blocking` mode and the flow of logs is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent.
We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n\nYou can set a default `mode` for all containers in a specific AWS Region by using the `defaultLogDriverMode` account setting. If you don't specify the `mode` option or configure the account setting, Amazon ECS will default to the `blocking` mode. For more information about the account setting, see [Default log driver mode](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#default-log-driver-mode) in the *Amazon Elastic Container Service Developer Guide* .\n\n> On June 25, 2025, Amazon ECS is changing the default log driver mode from `blocking` to `non-blocking` to prioritize task availability over logging. To continue using the `blocking` mode after this change, do one of the following:\n> \n> - Set the `mode` option in your container definition's `logConfiguration` to `blocking` .\n> - Set the `defaultLogDriverMode` account setting to `blocking` .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issues because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` . For more information, see [Under the hood: FireLens for Amazon ECS Tasks](https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/) .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance.
To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -84065,12 +84043,12 @@ "additionalProperties": false, "properties": { "Encrypted": { - "markdownDescription": "Indicates whether the volume should be encrypted. If no value is specified, encryption is turned on by default. This parameter maps 1:1 with the `Encrypted` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .", + "markdownDescription": "Indicates whether the volume should be encrypted. If you turn on Region-level Amazon EBS encryption by default but set this value to `false` , the setting is overridden and the volume is encrypted with the KMS key specified for Amazon EBS encryption by default. This parameter maps 1:1 with the `Encrypted` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .", "title": "Encrypted", "type": "boolean" }, "FilesystemType": { - "markdownDescription": "The filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the task will fail to start.\n\nThe available Linux filesystem types are `ext3` , `ext4` , and `xfs` . If no value is specified, the `xfs` filesystem type is used by default.\n\nThe available Windows filesystem types are `NTFS` .", + "markdownDescription": "The filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the tasks will fail to start.\n\nThe available Linux filesystem types are `ext3` , `ext4` , and `xfs` . If no value is specified, the `xfs` filesystem type is used by default.\n\nThe available Windows filesystem type is `NTFS` .", "title": "FilesystemType", "type": "string" }, @@ -84080,7 +84058,7 @@ "type": "number" }, "KmsKeyId": { - "markdownDescription": "The Amazon Resource Name (ARN) identifier of the AWS Key Management Service key to use for Amazon EBS encryption. When encryption is turned on and no AWS Key Management Service key is specified, the default AWS managed key for Amazon EBS volumes is used. This parameter maps 1:1 with the `KmsKeyId` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .\n\n> AWS authenticates the AWS Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.", + "markdownDescription": "The Amazon Resource Name (ARN) identifier of the AWS Key Management Service key to use for Amazon EBS encryption. When a key is specified using this parameter, it overrides Amazon EBS default encryption or any KMS key that you specified for cluster-level managed storage encryption. This parameter maps 1:1 with the `KmsKeyId` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .
For more information about encrypting Amazon EBS volumes attached to tasks, see [Encrypt data stored in Amazon EBS volumes attached to Amazon ECS tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ebs-kms-encryption.html) .\n\n> AWS authenticates the AWS Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.", "title": "KmsKeyId", "type": "string" }, @@ -84095,7 +84073,7 @@ "type": "number" }, "SnapshotId": { - "markdownDescription": "The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot ID or a volume size. This parameter maps 1:1 with the `SnapshotId` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .", + "markdownDescription": "The snapshot that Amazon ECS uses to create volumes for attachment to tasks maintained by the service. You must specify either `snapshotId` or `sizeInGiB` in your volume configuration. This parameter maps 1:1 with the `SnapshotId` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .", "title": "SnapshotId", "type": "string" }, @@ -85014,7 +84992,7 @@ }, "Options": { "additionalProperties": true, - "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using Fargate.Optional when using EC2.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. 
Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n\nThe following options apply to all supported log drivers.\n\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to the log driver specified using `logDriver` . The delivery mode you choose affects application availability when the flow of logs from container is interrupted.\n\nIf you use the `blocking` mode and the flow of logs is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n\nYou can set a default `mode` for all containers in a specific AWS Region by using the `defaultLogDriverMode` account setting. If you don't specify the `mode` option or configure the account setting, Amazon ECS will default to the `blocking` mode. 
For more information about the account setting, see [Default log driver mode](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#default-log-driver-mode) in the *Amazon Elastic Container Service Developer Guide* .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` . For more information, see [Under the hood: FireLens for Amazon ECS Tasks](https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/) .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. 
Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using Fargate. Optional when using EC2.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. By doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n\nThe following options apply to all supported log drivers.\n\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to the log driver specified using `logDriver` .
The delivery mode you choose affects application availability when the flow of logs from the container is interrupted.\n\nIf you use the `blocking` mode and the flow of logs is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n\nYou can set a default `mode` for all containers in a specific AWS Region by using the `defaultLogDriverMode` account setting. If you don't specify the `mode` option or configure the account setting, Amazon ECS will default to the `blocking` mode. For more information about the account setting, see [Default log driver mode](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#default-log-driver-mode) in the *Amazon Elastic Container Service Developer Guide* .\n\n> On June 25, 2025, Amazon ECS is changing the default log driver mode from `blocking` to `non-blocking` to prioritize task availability over logging. To continue using the `blocking` mode after this change, do one of the following:\n> \n> - Set the `mode` option in your container definition's `logConfiguration` to `blocking` .\n> - Set the `defaultLogDriverMode` account setting to `blocking` .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issues because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` .
For more information, see [Under the hood: FireLens for Amazon ECS Tasks](https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/) .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -85197,7 +85175,7 @@ "type": "string" }, "Value": { - "markdownDescription": "The namespaced kernel parameter to set a `value` for.\n\nValid IPC namespace values: `\"kernel.msgmax\" | \"kernel.msgmnb\" | \"kernel.msgmni\" | \"kernel.sem\" | \"kernel.shmall\" | \"kernel.shmmax\" | \"kernel.shmmni\" | \"kernel.shm_rmid_forced\"` , and `Sysctls` that start with `\"fs.mqueue.*\"`\n\nValid network namespace values: `Sysctls` that start with `\"net.*\"`\n\nAll of these values are supported by Fargate.", + "markdownDescription": "The namespaced kernel parameter to set a `value` for.\n\nValid IPC namespace values: `\"kernel.msgmax\" | \"kernel.msgmnb\" | \"kernel.msgmni\" | \"kernel.sem\" | \"kernel.shmall\" | \"kernel.shmmax\" | \"kernel.shmmni\" | \"kernel.shm_rmid_forced\"` , and `Sysctls` that start with `\"fs.mqueue.*\"`\n\nValid network namespace values: `Sysctls` that start with `\"net.*\"` . Only namespaced `Sysctls` that exist within the container starting with \"net.*\" are accepted.\n\nAll of these values are supported by Fargate.", "title": "Value", "type": "string" } @@ -90891,7 +90869,7 @@ "type": "string" }, "ClusterMode": { - "markdownDescription": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Valkey or Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/modify-cluster-mode.html) .", + "markdownDescription": "The mode can be enabled or disabled. To change the cluster mode from disabled to enabled, you must first set the cluster mode to compatible. The compatible mode allows your Valkey or Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/modify-cluster-mode.html) .", "title": "ClusterMode", "type": "string" }, @@ -93718,7 +93696,7 @@ "items": { "type": "string" }, - "markdownDescription": "The host names.
The maximum size of each name is 128 characters. The comparison is case insensitive. The following wildcard characters are supported: * (matches 0 or more characters) and ? (matches exactly 1 character). You must include at least one \".\" character. You can include only alphabetical characters after the final \".\" character.\n\nIf you specify multiple strings, the condition is satisfied if one of the strings matches the host name.", "title": "Values", "type": "array" } @@ -93985,7 +93963,7 @@ "items": { "$ref": "#/definitions/AWS::ElasticLoadBalancingV2::LoadBalancer.LoadBalancerAttribute" }, - "markdownDescription": "The load balancer attributes.", + "markdownDescription": "The load balancer attributes. Attributes that you do not modify retain their current values.", "title": "LoadBalancerAttributes", "type": "array" }, @@ -94216,7 +94194,7 @@ "items": { "$ref": "#/definitions/AWS::ElasticLoadBalancingV2::TargetGroup.TargetGroupAttribute" }, - "markdownDescription": "The target group attributes.", + "markdownDescription": "The target group attributes. Attributes that you do not modify retain their current values.", "title": "TargetGroupAttributes", "type": "array" }, @@ -103231,8 +103209,6 @@ "type": "string" }, "DesiredEC2Instances": { - "markdownDescription": "The number of EC2 instances that you want this fleet to host. When creating a new fleet, GameLift automatically sets this value to \"1\" and initiates a single instance. Once the fleet is active, update this value to trigger GameLift to add or remove instances from the fleet.", - "title": "DesiredEC2Instances", "type": "number" }, "EC2InboundPermissions": { @@ -103272,8 +103248,6 @@ "type": "array" }, "MaxSize": { - "markdownDescription": "The maximum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 1.", - "title": "MaxSize", "type": "number" }, "MetricGroups": { @@ -103285,8 +103259,6 @@ "type": "array" }, "MinSize": { - "markdownDescription": "The minimum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 0.", - "title": "MinSize", "type": "number" }, "Name": { @@ -163677,7 +163649,7 @@ "items": { "$ref": "#/definitions/AWS::MediaPackageV2::OriginEndpoint.HlsManifestConfiguration" }, - "markdownDescription": "The HLS manfiests associated with the origin endpoint configuration.", + "markdownDescription": "The HLS manifests associated with the origin endpoint configuration.", "title": "HlsManifests", "type": "array" }, @@ -163891,7 +163863,7 @@ "title": "FilterConfiguration" }, "ManifestName": { - "markdownDescription": "A short short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, `index` . MediaPackage automatically inserts the format extension, such as `.m3u8` . You can't use the same manifest name if you use HLS manifest and low-latency HLS manifest. The `manifestName` on the `HLSManifest` object overrides the `manifestName` you provided on the `originEndpoint` object.", + "markdownDescription": "A short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, `index` . MediaPackage automatically inserts the format extension, such as `.m3u8` . You can't use the same manifest name if you use HLS manifest and low-latency HLS manifest. 
The `manifestName` on the `HLSManifest` object overrides the `manifestName` you provided on the `originEndpoint` object.", "title": "ManifestName", "type": "string" }, @@ -268788,7 +268760,7 @@ "type": "string" }, "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the headers of the request are more numerous or larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to AWS WAF .\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available headers normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.", + "markdownDescription": "What AWS WAF should do if the headers determined by your match scope are more numerous or larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to AWS WAF .\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available headers normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.", "title": "OversizeHandling", "type": "string" } @@ -270290,7 +270262,7 @@ "type": "string" }, "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the headers of the request are more numerous or larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to AWS WAF .\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available headers normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.", + "markdownDescription": "What AWS WAF should do if the headers determined by your match scope are more numerous or larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to AWS WAF .\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available headers normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.", "title": "OversizeHandling", "type": "string" } diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json index 5fcf6db2f..23ea4180f 100644 --- a/schema_source/cloudformation-docs.json +++ b/schema_source/cloudformation-docs.json @@ -3171,7 +3171,7 @@ "ApiId": "The `Api` ID.", "CodeHandlers": "The event handler functions that run custom business logic to process published events and subscribe requests.", "CodeS3Location": "The Amazon S3 endpoint where the code is located.", - "HandlerConfigs": "", + "HandlerConfigs": "The configuration for the `OnPublish` and `OnSubscribe` handlers.", "Name": "The name of the channel namespace. This name must be unique within the `Api` .", "PublishAuthModes": "The authorization mode to use for publishing messages on the channel namespace. This configuration overrides the default `Api` authorization configuration.", "SubscribeAuthModes": "The authorization mode to use for subscribing to messages on the channel namespace. This configuration overrides the default `Api` authorization configuration.", @@ -3181,19 +3181,19 @@ "AuthType": "The authorization type." }, "AWS::AppSync::ChannelNamespace HandlerConfig": { - "Behavior": "", - "Integration": "" + "Behavior": "The behavior for the handler.", + "Integration": "The integration data source configuration for the handler." }, "AWS::AppSync::ChannelNamespace HandlerConfigs": { - "OnPublish": "", - "OnSubscribe": "" + "OnPublish": "The configuration for the `OnPublish` handler.", + "OnSubscribe": "The configuration for the `OnSubscribe` handler." }, "AWS::AppSync::ChannelNamespace Integration": { - "DataSourceName": "", - "LambdaConfig": "" + "DataSourceName": "The unique name of the data source that has been configured on the API.", + "LambdaConfig": "The configuration for a Lambda data source." }, "AWS::AppSync::ChannelNamespace LambdaConfig": { - "InvokeType": "" + "InvokeType": "The invocation type for a Lambda data source." }, "AWS::AppSync::ChannelNamespace Tag": { "Key": "Describes the key of the tag.", @@ -6097,12 +6097,12 @@ "GuardrailIdentifier": "The unique identifier of the guardrail. This can be an ID or the ARN." }, "AWS::Bedrock::IntelligentPromptRouter": { - "Description": "", - "FallbackModel": "", - "Models": "", - "PromptRouterName": "", + "Description": "An optional description of the prompt router to help identify its purpose.", + "FallbackModel": "The default model to use when the routing criteria is not met.", + "Models": "A list of foundation models that the prompt router can route requests to. At least one model must be specified.", + "PromptRouterName": "The name of the prompt router. The name must be unique within your AWS account in the current region.", "RoutingCriteria": "Routing criteria for a prompt router.", - "Tags": "" + "Tags": "An array of key-value pairs to apply to this resource as tags. You can use tags to categorize and manage your AWS resources." }, "AWS::Bedrock::IntelligentPromptRouter PromptRouterTargetModel": { "ModelArn": "The target model's ARN." @@ -6111,7 +6111,7 @@ "ResponseQualityDifference": "The criteria's response quality difference." }, "AWS::Bedrock::IntelligentPromptRouter Tag": { - "Key": "The key associated with a tag.", + "Key": "The tag's key.", "Value": "The value associated with a tag." 
}, "AWS::Bedrock::KnowledgeBase": { @@ -8171,7 +8171,7 @@ "HTTPSPort": "The HTTPS port of the CloudFront VPC origin endpoint configuration. The default value is `443` .", "Name": "The name of the CloudFront VPC origin endpoint configuration.", "OriginProtocolPolicy": "The origin protocol policy for the CloudFront VPC origin endpoint configuration.", - "OriginSSLProtocols": "" + "OriginSSLProtocols": "Specifies the minimum SSL/TLS protocol that CloudFront uses when connecting to your origin over HTTPS. Valid values include `SSLv3` , `TLSv1` , `TLSv1.1` , and `TLSv1.2` .\n\nFor more information, see [Minimum Origin SSL Protocol](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginSSLProtocols) in the *Amazon CloudFront Developer Guide* ." }, "AWS::CloudTrail::Channel": { "Destinations": "One or more event data stores to which events arriving through a channel will be logged.", @@ -8526,6 +8526,7 @@ }, "AWS::CodeBuild::Fleet ComputeConfiguration": { "disk": "The amount of disk space of the instance type included in your fleet.", + "instanceType": "The EC2 instance type to be launched in your fleet.", "machineType": "The machine type of the instance type included in your fleet.", "memory": "The amount of memory of the instance type included in your fleet.", "vCpu": "The number of vCPUs of the instance type included in your fleet." @@ -9069,6 +9070,7 @@ }, "AWS::CodePipeline::Pipeline EnvironmentVariable": { "Name": "The environment variable name in the key-value pair.", + "Type": "Specifies the type of use for the environment variable value. The value can be either `PLAINTEXT` or `SECRETS_MANAGER` . If the value is `SECRETS_MANAGER` , provide the Secrets reference in the EnvironmentVariable value.", "Value": "The environment variable value in the key-value pair." }, "AWS::CodePipeline::Pipeline FailureConditions": { @@ -12498,27 +12500,6 @@ "Key": "The key for an AWS resource tag.", "Value": "The value for an AWS resource tag." }, - "AWS::DataSync::StorageSystem": { - "AgentArns": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads from your on-premises storage system's management interface. You can only specify one ARN.", - "CloudWatchLogGroupArn": "Specifies the ARN of the Amazon CloudWatch log group for monitoring and logging discovery job events.", - "Name": "Specifies a familiar name for your on-premises storage system.", - "ServerConfiguration": "Specifies the server name and network port required to connect with the management interface of your on-premises storage system.", - "ServerCredentials": "Specifies the user name and password for accessing your on-premises storage system's management interface.", - "SystemType": "Specifies the type of on-premises storage system that you want DataSync Discovery to collect information about.\n\n> DataSync Discovery currently supports NetApp Fabric-Attached Storage (FAS) and All Flash FAS (AFF) systems running ONTAP 9.7 or later.", - "Tags": "Specifies labels that help you categorize, filter, and search for your AWS resources. We recommend creating at least a name tag for your on-premises storage system." - }, - "AWS::DataSync::StorageSystem ServerConfiguration": { - "ServerHostname": "The domain name or IP address of your storage system's management interface.", - "ServerPort": "The network port for accessing the storage system's management interface." 
- }, - "AWS::DataSync::StorageSystem ServerCredentials": { - "Password": "Specifies the password for your storage system's management interface.", - "Username": "Specifies the user name for your storage system's management interface." - }, - "AWS::DataSync::StorageSystem Tag": { - "Key": "The key for an AWS resource tag.", - "Value": "The value for an AWS resource tag." - }, "AWS::DataSync::Task": { "CloudWatchLogGroupArn": "Specifies the Amazon Resource Name (ARN) of an Amazon CloudWatch log group for monitoring your task.\n\nFor Enhanced mode tasks, you don't need to specify anything. DataSync automatically sends logs to a CloudWatch log group named `/aws/datasync` .\n\nFor more information, see [Monitoring data transfers with CloudWatch Logs](https://docs.aws.amazon.com/datasync/latest/userguide/configure-logging.html) .", "DestinationLocationArn": "The Amazon Resource Name (ARN) of an AWS storage resource's location.", @@ -14049,7 +14030,12 @@ "HostRecovery": "Indicates whether to enable or disable host recovery for the Dedicated Host. Host recovery is disabled by default. For more information, see [Host recovery](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-recovery.html) in the *Amazon EC2 User Guide* .\n\nDefault: `off`", "InstanceFamily": "The instance family supported by the Dedicated Host. For example, `m5` .", "InstanceType": "Specifies the instance type to be supported by the Dedicated Hosts. If you specify an instance type, the Dedicated Hosts support instances of the specified instance type only.", - "OutpostArn": "The Amazon Resource Name (ARN) of the AWS Outpost on which the Dedicated Host is allocated." + "OutpostArn": "The Amazon Resource Name (ARN) of the AWS Outpost on which the Dedicated Host is allocated.", + "Tags": "Any tags assigned to the Dedicated Host." + }, + "AWS::EC2::Host Tag": { + "Key": "The key of the tag.\n\nConstraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with `aws:` .", + "Value": "The value of the tag.\n\nConstraints: Tag values are case-sensitive and accept a maximum of 256 Unicode characters." }, "AWS::EC2::IPAM": { "DefaultResourceDiscoveryOrganizationalUnitExclusions": "If your IPAM is integrated with AWS Organizations, you can exclude an [organizational unit (OU)](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_getting-started_concepts.html#organizationalunit) from being managed by IPAM. When you exclude an OU, IPAM will not manage the IP addresses in accounts in that OU. For more information, see [Exclude organizational units from IPAM](https://docs.aws.amazon.com/vpc/latest/ipam/exclude-ous.html) in the *Amazon Virtual Private Cloud IP Address Manager User Guide* .", @@ -14372,6 +14358,7 @@ "KmsKeyId": "Identifier (key ID, key alias, key ARN, or alias ARN) of the customer managed KMS key to use for EBS encryption.", "SnapshotId": "The ID of the snapshot.", "Throughput": "The throughput to provision for a `gp3` volume, with a maximum of 1,000 MiB/s.\n\nValid Range: Minimum value of 125. Maximum value of 1000.", + "VolumeInitializationRate": "Specifies the Amazon EBS Provisioned Rate for Volume Initialization (volume initialization rate), in MiB/s, at which to download the snapshot blocks from Amazon S3 to the volume. This is also known as *volume initialization* . 
Specifying a volume initialization rate ensures that the volume is initialized at a predictable and consistent rate after creation.\n\nThis parameter is supported only for volumes created from snapshots. Omit this parameter if:\n\n- You want to create the volume using fast snapshot restore. You must specify a snapshot that is enabled for fast snapshot restore. In this case, the volume is fully initialized at creation.\n\n> If you specify a snapshot that is enabled for fast snapshot restore and a volume initialization rate, the volume will be initialized at the specified rate instead of fast snapshot restore.\n- You want to create a volume that is initialized at the default rate.\n\nFor more information, see [Initialize Amazon EBS volumes](https://docs.aws.amazon.com/ebs/latest/userguide/initalize-volume.html) in the *Amazon EC2 User Guide* .\n\nValid range: 100 - 300 MiB/s", "VolumeSize": "The size of the volume, in GiBs. You must specify either a snapshot ID or a volume size. The following are the supported volumes sizes for each volume type:\n\n- `gp2` and `gp3` : 1 - 16,384 GiB\n- `io1` : 4 - 16,384 GiB\n- `io2` : 4 - 65,536 GiB\n- `st1` and `sc1` : 125 - 16,384 GiB\n- `standard` : 1 - 1024 GiB", "VolumeType": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) in the *Amazon EBS User Guide* ." }, @@ -14702,6 +14689,7 @@ "AWS::EC2::NetworkInsightsAnalysis": { "AdditionalAccounts": "The member accounts that contain resources that the path can traverse.", "FilterInArns": "The Amazon Resource Names (ARN) of the resources that the path must traverse.", + "FilterOutArns": "The Amazon Resource Names (ARN) of the resources that the path must ignore.", "NetworkInsightsPathId": "The ID of the path.", "Tags": "The tags to apply." }, @@ -16087,8 +16075,8 @@ "S3KeyPrefix": "An optional folder in the S3 bucket to place logs in." }, "AWS::ECS::Cluster ManagedStorageConfiguration": { - "FargateEphemeralStorageKmsKeyId": "Specify the AWS Key Management Service key ID for the Fargate ephemeral storage.\n\nThe key must be a single Region key.", - "KmsKeyId": "Specify a AWS Key Management Service key ID to encrypt the managed storage.\n\nThe key must be a single Region key." + "FargateEphemeralStorageKmsKeyId": "Specify the AWS Key Management Service key ID for Fargate ephemeral storage.\n\nWhen you specify a `fargateEphemeralStorageKmsKeyId` , AWS Fargate uses the key to encrypt data at rest in ephemeral storage. For more information about Fargate ephemeral storage encryption, see [Customer managed keys for AWS Fargate ephemeral storage for Amazon ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/fargate-storage-encryption.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThe key must be a single Region key.", + "KmsKeyId": "Specify an AWS Key Management Service key ID to encrypt Amazon ECS managed storage.\n\nWhen you specify a `kmsKeyId` , Amazon ECS uses the key to encrypt data volumes managed by Amazon ECS that are attached to tasks in the cluster. The following data volumes are managed by Amazon ECS: Amazon EBS. For more information about encryption of Amazon EBS volumes attached to Amazon ECS tasks, see [Encrypt data stored in Amazon EBS volumes for Amazon ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ebs-kms-encryption.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThe key must be a single Region key."
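The expanded `AWS::ECS::Cluster ManagedStorageConfiguration` descriptions above correspond to a cluster definition along these lines; the KMS key ARN is a placeholder, and per the descriptions both properties must reference single-Region keys:

```json
{
  "DemoCluster": {
    "Type": "AWS::ECS::Cluster",
    "Properties": {
      "ClusterName": "demo-cluster",
      "Configuration": {
        "ManagedStorageConfiguration": {
          "KmsKeyId": "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
          "FargateEphemeralStorageKmsKeyId": "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
        }
      }
    }
  }
}
```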
}, "AWS::ECS::Cluster ServiceConnectDefaults": { "Namespace": "The namespace name or full Amazon Resource Name (ARN) of the AWS Cloud Map namespace that's used when you create a service and don't specify a Service Connect configuration. The namespace name can include up to 1024 characters. The name is case-sensitive. The name can't include greater than (>), less than (<), double quotation marks (\"), or slash (/).\n\nIf you enter an existing namespace name or ARN, then that namespace will be used. Any namespace type is supported. The namespace must be in this account and this AWS Region.\n\nIf you enter a new name, a AWS Cloud Map namespace will be created. Amazon ECS creates a AWS Cloud Map namespace with the \"API calls\" method of instance discovery only. This instance discovery method is the \"HTTP\" namespace type in the AWS Command Line Interface . Other types of instance discovery aren't used by Service Connect.\n\nIf you update the cluster with an empty string `\"\"` for the namespace name, the cluster configuration for Service Connect is removed. Note that the namespace will remain in AWS Cloud Map and must be deleted separately.\n\nFor more information about AWS Cloud Map , see [Working with Services](https://docs.aws.amazon.com/cloud-map/latest/dg/working-with-services.html) in the *AWS Cloud Map Developer Guide* ." @@ -16180,7 +16168,7 @@ }, "AWS::ECS::Service LogConfiguration": { "LogDriver": "The log driver to use for the container.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n\nFor more information about using the `awslogs` log driver, see [Send Amazon ECS logs to CloudWatch](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nFor more information about using the `awsfirelens` log driver, see [Send Amazon ECS logs to an AWS service or AWS Partner](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) .\n\n> If you have a custom driver that isn't listed, you can fork the Amazon ECS container agent project that's [available on GitHub](https://docs.aws.amazon.com/https://github.com/aws/amazon-ecs-agent) and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we don't currently provide support for running modified copies of this software.", - "Options": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. 
Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using Fargate.Optional when using EC2.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n\nThe following options apply to all supported log drivers.\n\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to the log driver specified using `logDriver` . 
The delivery mode you choose affects application availability when the flow of logs from container is interrupted.\n\nIf you use the `blocking` mode and the flow of logs is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n\nYou can set a default `mode` for all containers in a specific AWS Region by using the `defaultLogDriverMode` account setting. If you don't specify the `mode` option or configure the account setting, Amazon ECS will default to the `blocking` mode. For more information about the account setting, see [Default log driver mode](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#default-log-driver-mode) in the *Amazon Elastic Container Service Developer Guide* .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` . For more information, see [Under the hood: FireLens for Amazon ECS Tasks](https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/) .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. 
To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "Options": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using Fargate. Optional when using EC2.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. By doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression.
A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n\nThe following options apply to all supported log drivers.\n\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to the log driver specified using `logDriver` . The delivery mode you choose affects application availability when the flow of logs from the container is interrupted.\n\nIf you use the `blocking` mode and the flow of logs is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n\nYou can set a default `mode` for all containers in a specific AWS Region by using the `defaultLogDriverMode` account setting. If you don't specify the `mode` option or configure the account setting, Amazon ECS will default to the `blocking` mode. For more information about the account setting, see [Default log driver mode](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#default-log-driver-mode) in the *Amazon Elastic Container Service Developer Guide* .\n\n> On June 25, 2025, Amazon ECS is changing the default log driver mode from `blocking` to `non-blocking` to prioritize task availability over logging. To continue using the `blocking` mode after this change, do one of the following:\n> \n> - Set the `mode` option in your container definition's `logConfiguration` as `blocking` .\n> - Set the `defaultLogDriverMode` account setting to `blocking` .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored.
Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issues because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` . For more information, see [Under the hood: FireLens for Amazon ECS Tasks](https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/) .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "SecretOptions": "The secrets to pass to the log configuration. For more information, see [Specifying sensitive data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the *Amazon Elastic Container Service Developer Guide* ." }, "AWS::ECS::Service NetworkConfiguration": { @@ -16225,15 +16213,16 @@ "RoleArn": "The Amazon Resource Name (ARN) of the IAM role that's associated with the Service Connect TLS." }, "AWS::ECS::Service ServiceManagedEBSVolumeConfiguration": { - "Encrypted": "Indicates whether the volume should be encrypted. If no value is specified, encryption is turned on by default. This parameter maps 1:1 with the `Encrypted` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .", - "FilesystemType": "The filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the task will fail to start.\n\nThe available Linux filesystem types are `ext3` , `ext4` , and `xfs` . If no value is specified, the `xfs` filesystem type is used by default.\n\nThe available Windows filesystem types are `NTFS` .", + "Encrypted": "Indicates whether the volume should be encrypted. If you turn on Region-level Amazon EBS encryption by default but set this value as `false` , the setting is overridden and the volume is encrypted with the KMS key specified for Amazon EBS encryption by default.
This parameter maps 1:1 with the `Encrypted` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .", + "FilesystemType": "The filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the tasks will fail to start.\n\nThe available Linux filesystem types are `ext3` , `ext4` , and `xfs` . If no value is specified, the `xfs` filesystem type is used by default.\n\nThe available Windows filesystem types are `NTFS` .", "Iops": "The number of I/O operations per second (IOPS). For `gp3` , `io1` , and `io2` volumes, this represents the number of IOPS that are provisioned for the volume. For `gp2` volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.\n\nThe following are the supported values for each volume type.\n\n- `gp3` : 3,000 - 16,000 IOPS\n- `io1` : 100 - 64,000 IOPS\n- `io2` : 100 - 256,000 IOPS\n\nThis parameter is required for `io1` and `io2` volume types. The default for `gp3` volumes is `3,000 IOPS` . This parameter is not supported for `st1` , `sc1` , or `standard` volume types.\n\nThis parameter maps 1:1 with the `Iops` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .", - "KmsKeyId": "The Amazon Resource Name (ARN) identifier of the AWS Key Management Service key to use for Amazon EBS encryption. When encryption is turned on and no AWS Key Management Service key is specified, the default AWS managed key for Amazon EBS volumes is used. This parameter maps 1:1 with the `KmsKeyId` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .\n\n> AWS authenticates the AWS Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.", + "KmsKeyId": "The Amazon Resource Name (ARN) identifier of the AWS Key Management Service key to use for Amazon EBS encryption. When a key is specified using this parameter, it overrides Amazon EBS default encryption or any KMS key that you specified for cluster-level managed storage encryption. This parameter maps 1:1 with the `KmsKeyId` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* . For more information about encrypting Amazon EBS volumes attached to tasks, see [Encrypt data stored in Amazon EBS volumes attached to Amazon ECS tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ebs-kms-encryption.html) .\n\n> AWS authenticates the AWS Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.", "RoleArn": "The ARN of the IAM role to associate with this volume. This is the Amazon ECS infrastructure IAM role that is used to manage your AWS infrastructure. We recommend using the Amazon ECS-managed `AmazonECSInfrastructureRolePolicyForVolumes` IAM policy with this role. 
For more information, see [Amazon ECS infrastructure IAM role](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/infrastructure_IAM_role.html) in the *Amazon ECS Developer Guide* .", "SizeInGiB": "The size of the volume in GiB. You must specify either a volume size or a snapshot ID. If you specify a snapshot ID, the snapshot size is used for the volume size by default. You can optionally specify a volume size greater than or equal to the snapshot size. This parameter maps 1:1 with the `Size` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .\n\nThe following are the supported volume size values for each volume type.\n\n- `gp2` and `gp3` : 1-16,384\n- `io1` and `io2` : 4-16,384\n- `st1` and `sc1` : 125-16,384\n- `standard` : 1-1,024", - "SnapshotId": "The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot ID or a volume size. This parameter maps 1:1 with the `SnapshotId` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .", + "SnapshotId": "The snapshot that Amazon ECS uses to create volumes for attachment to tasks maintained by the service. You must specify either `snapshotId` or `sizeInGiB` in your volume configuration. This parameter maps 1:1 with the `SnapshotId` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .", "TagSpecifications": "The tags to apply to the volume. Amazon ECS applies service-managed tags by default. This parameter maps 1:1 with the `TagSpecifications.N` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .", "Throughput": "The throughput to provision for a volume, in MiB/s, with a maximum of 1,000 MiB/s. This parameter maps 1:1 with the `Throughput` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .\n\n> This parameter is only supported for the `gp3` volume type.", + "VolumeInitializationRate": "The rate, in MiB/s, at which data is fetched from a snapshot of an existing EBS volume to create new volumes for attachment to the tasks maintained by the service. This property can be specified only if you specify a `snapshotId` . For more information, see [Initialize Amazon EBS volumes](https://docs.aws.amazon.com/ebs/latest/userguide/initalize-volume.html) in the *Amazon EBS User Guide* .", "VolumeType": "The volume type. This parameter maps 1:1 with the `VolumeType` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* . For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) in the *Amazon EC2 User Guide* .\n\nThe following are the supported volume types.\n\n- General Purpose SSD: `gp2` | `gp3`\n- Provisioned IOPS SSD: `io1` | `io2`\n- Throughput Optimized HDD: `st1`\n- Cold HDD: `sc1`\n- Magnetic: `standard`\n\n> The magnetic volume type is not supported on Fargate." 
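Taken together, the `ServiceManagedEBSVolumeConfiguration` changes above (including the new `VolumeInitializationRate`, which requires a snapshot) suggest a `VolumeConfigurations` fragment for an `AWS::ECS::Service` along these lines; the role ARN and snapshot ID are placeholders:

```json
"VolumeConfigurations": [
  {
    "Name": "ebs-data",
    "ManagedEBSVolume": {
      "RoleArn": "arn:aws:iam::111122223333:role/ecsInfrastructureRole",
      "SnapshotId": "snap-0123456789abcdef0",
      "VolumeInitializationRate": 200,
      "VolumeType": "gp3",
      "FilesystemType": "xfs",
      "TagSpecifications": [
        { "ResourceType": "volume", "Tags": [ { "Key": "Name", "Value": "ebs-data" } ] }
      ]
    }
  }
]
```

Because `SnapshotId` is set, `SizeInGiB` can be omitted and defaults to the snapshot size, per the `SizeInGiB` description above.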
}, "AWS::ECS::Service ServiceRegistry": { @@ -16407,7 +16396,7 @@ }, "AWS::ECS::TaskDefinition LogConfiguration": { "LogDriver": "The log driver to use for the container.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n\nFor more information about using the `awslogs` log driver, see [Send Amazon ECS logs to CloudWatch](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nFor more information about using the `awsfirelens` log driver, see [Send Amazon ECS logs to an AWS service or AWS Partner](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) .\n\n> If you have a custom driver that isn't listed, you can fork the Amazon ECS container agent project that's [available on GitHub](https://docs.aws.amazon.com/https://github.com/aws/amazon-ecs-agent) and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we don't currently provide support for running modified copies of this software.", - "Options": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using Fargate.Optional when using EC2.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. 
Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n\nThe following options apply to all supported log drivers.\n\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to the log driver specified using `logDriver` . The delivery mode you choose affects application availability when the flow of logs from container is interrupted.\n\nIf you use the `blocking` mode and the flow of logs is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n\nYou can set a default `mode` for all containers in a specific AWS Region by using the `defaultLogDriverMode` account setting. If you don't specify the `mode` option or configure the account setting, Amazon ECS will default to the `blocking` mode. 
For more information about the account setting, see [Default log driver mode](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#default-log-driver-mode) in the *Amazon Elastic Container Service Developer Guide* .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` . For more information, see [Under the hood: FireLens for Amazon ECS Tasks](https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/) .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "Options": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. 
Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using Fargate. Optional when using EC2.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. By doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n\nThe following options apply to all supported log drivers.\n\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to the log driver specified using `logDriver` .
The delivery mode you choose affects application availability when the flow of logs from the container is interrupted.\n\nIf you use the `blocking` mode and the flow of logs is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n\nYou can set a default `mode` for all containers in a specific AWS Region by using the `defaultLogDriverMode` account setting. If you don't specify the `mode` option or configure the account setting, Amazon ECS will default to the `blocking` mode. For more information about the account setting, see [Default log driver mode](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#default-log-driver-mode) in the *Amazon Elastic Container Service Developer Guide* .\n\n> On June 25, 2025, Amazon ECS is changing the default log driver mode from `blocking` to `non-blocking` to prioritize task availability over logging. To continue using the `blocking` mode after this change, do one of the following:\n> \n> - Set the `mode` option in your container definition's `logConfiguration` as `blocking` .\n> - Set the `defaultLogDriverMode` account setting to `blocking` .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issues because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` .
For more information, see [Under the hood: FireLens for Amazon ECS Tasks](https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/) .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "SecretOptions": "The secrets to pass to the log configuration. For more information, see [Specifying sensitive data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the *Amazon Elastic Container Service Developer Guide* ." }, "AWS::ECS::TaskDefinition MountPoint": { @@ -16450,7 +16439,7 @@ }, "AWS::ECS::TaskDefinition SystemControl": { "Namespace": "The namespaced kernel parameter to set a `value` for.", - "Value": "The namespaced kernel parameter to set a `value` for.\n\nValid IPC namespace values: `\"kernel.msgmax\" | \"kernel.msgmnb\" | \"kernel.msgmni\" | \"kernel.sem\" | \"kernel.shmall\" | \"kernel.shmmax\" | \"kernel.shmmni\" | \"kernel.shm_rmid_forced\"` , and `Sysctls` that start with `\"fs.mqueue.*\"`\n\nValid network namespace values: `Sysctls` that start with `\"net.*\"`\n\nAll of these values are supported by Fargate." + "Value": "The namespaced kernel parameter to set a `value` for.\n\nValid IPC namespace values: `\"kernel.msgmax\" | \"kernel.msgmnb\" | \"kernel.msgmni\" | \"kernel.sem\" | \"kernel.shmall\" | \"kernel.shmmax\" | \"kernel.shmmni\" | \"kernel.shm_rmid_forced\"` , and `Sysctls` that start with `\"fs.mqueue.*\"`\n\nValid network namespace values: `Sysctls` that start with `\"net.*\"` . Only namespaced `Sysctls` that exist within the container starting with \"net.*\" are accepted.\n\nAll of these values are supported by Fargate." }, "AWS::ECS::TaskDefinition Tag": { "Key": "One part of a key-value pair that make up a tag. A `key` is a general label that acts like a category for more specific tag values.", @@ -17482,7 +17471,7 @@ "CacheParameterGroupName": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Valkey or Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Valkey or Redis OSS (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Valkey or Redis OSS (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", "CacheSecurityGroupNames": "A list of cache security group names to associate with this replication group.", "CacheSubnetGroupName": "The name of the cache subnet group to be used for the replication group.\n\n> If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see [AWS::ElastiCache::SubnetGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-subnetgroup.html) .", - "ClusterMode": "Enabled or Disabled.
To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Valkey or Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/modify-cluster-mode.html) .", + "ClusterMode": "The mode can be enabled or disabled. To change the cluster mode from disabled to enabled, you must first set the cluster mode to compatible. The compatible mode allows your Valkey or Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/modify-cluster-mode.html) .", "DataTieringEnabled": "Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see [Data tiering](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html) .", "Engine": "The name of the cache engine to be used for the clusters in this replication group. The value must be set to `valkey` or `redis` .\n\n> Upgrading an existing engine from redis to valkey is done through in-place migration, and requires a parameter group.", "EngineVersion": "The version number of the cache engine to be used for the clusters in this replication group. To view the supported cache engine versions, use the `DescribeCacheEngineVersions` operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement) ) in the *ElastiCache User Guide* , but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", @@ -17780,7 +17769,7 @@ "AlpnPolicy": "[TLS listener] The name of the Application-Layer Protocol Negotiation (ALPN) policy.", "Certificates": "The default SSL server certificate for a secure listener. You must provide exactly one certificate if the listener protocol is HTTPS or TLS.\n\nTo create a certificate list for a secure listener, use [AWS::ElasticLoadBalancingV2::ListenerCertificate](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-listenercertificate.html) .", "DefaultActions": "The actions for the default rule. You cannot define a condition for a default rule.\n\nTo create additional rules for an Application Load Balancer, use [AWS::ElasticLoadBalancingV2::ListenerRule](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-listenerrule.html) .", - "ListenerAttributes": "The listener attributes.", + "ListenerAttributes": "The listener attributes. 
Attributes that you do not modify retain their current values.", "LoadBalancerArn": "The Amazon Resource Name (ARN) of the load balancer.", "MutualAuthentication": "The mutual authentication configuration information.", "Port": "The port on which the load balancer is listening. You can't specify a port for a Gateway Load Balancer.", @@ -17916,7 +17905,7 @@ "TargetGroups": "Information about how traffic will be distributed between multiple target groups in a forward rule." }, "AWS::ElasticLoadBalancingV2::ListenerRule HostHeaderConfig": { - "Values": "The host names. The maximum size of each name is 128 characters. The comparison is case insensitive. The following wildcard characters are supported: * (matches 0 or more characters) and ? (matches exactly 1 character).\n\nIf you specify multiple strings, the condition is satisfied if one of the strings matches the host name." + "Values": "The host names. The maximum size of each name is 128 characters. The comparison is case insensitive. The following wildcard characters are supported: * (matches 0 or more characters) and ? (matches exactly 1 character). You must include at least one \".\" character. You can include only alphabetical characters after the final \".\" character.\n\nIf you specify multiple strings, the condition is satisfied if one of the strings matches the host name." }, "AWS::ElasticLoadBalancingV2::ListenerRule HttpHeaderConfig": { "HttpHeaderName": "The name of the HTTP header field. The maximum size is 40 characters. The header name is case insensitive. The allowed characters are specified by RFC 7230. Wildcards are not supported.", @@ -17969,7 +17958,7 @@ "EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic": "Indicates whether to evaluate inbound security group rules for traffic sent to a Network Load Balancer through AWS PrivateLink . The default is `on` .\n\nYou can't configure this property on a Network Load Balancer unless you associated a security group with the load balancer when you created it.", "IpAddressType": "The IP address type. Internal load balancers must use `ipv4` .\n\n[Application Load Balancers] The possible values are `ipv4` (IPv4 addresses), `dualstack` (IPv4 and IPv6 addresses), and `dualstack-without-public-ipv4` (public IPv6 addresses and private IPv4 and IPv6 addresses).\n\nApplication Load Balancer authentication supports IPv4 addresses only when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer can't complete the authentication process, resulting in HTTP 500 errors.\n\n[Network Load Balancers and Gateway Load Balancers] The possible values are `ipv4` (IPv4 addresses) and `dualstack` (IPv4 and IPv6 addresses).", "Ipv4IpamPoolId": "The ID of the IPv4 IPAM pool.", - "LoadBalancerAttributes": "The load balancer attributes.", + "LoadBalancerAttributes": "The load balancer attributes. Attributes that you do not modify retain their current values.", "MinimumLoadBalancerCapacity": "The minimum capacity for a load balancer.", "Name": "The name of the load balancer. This name must be unique per region per account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, must not begin or end with a hyphen, and must not begin with \"internal-\".\n\nIf you don't specify a name, AWS CloudFormation generates a unique physical ID for the load balancer. If you specify a name, you cannot perform updates that require replacement of this resource, but you can perform other updates. 
To replace the resource, specify a new name.", "Scheme": "The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet.\n\nThe nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer.\n\nThe default is an Internet-facing load balancer.\n\nYou can't specify a scheme for a Gateway Load Balancer.", @@ -18012,7 +18001,7 @@ "Protocol": "The protocol to use for routing traffic to the targets. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, or TCP_UDP. For Gateway Load Balancers, the supported protocol is GENEVE. A TCP_UDP listener must be associated with a TCP_UDP target group. If the target is a Lambda function, this parameter does not apply.", "ProtocolVersion": "[HTTP/HTTPS protocol] The protocol version. The possible values are `GRPC` , `HTTP1` , and `HTTP2` .", "Tags": "The tags.", - "TargetGroupAttributes": "The target group attributes.", + "TargetGroupAttributes": "The target group attributes. Attributes that you do not modify retain their current values.", "TargetType": "The type of target that you must specify when registering targets with this target group. You can't specify targets for a target group using more than one target type.\n\n- `instance` - Register targets by instance ID. This is the default value.\n- `ip` - Register targets by IP address. You can specify IP addresses from the subnets of the virtual private cloud (VPC) for the target group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10). You can't specify publicly routable IP addresses.\n- `lambda` - Register a single Lambda function as a target.\n- `alb` - Register a single Application Load Balancer as a target.", "Targets": "The targets.", "UnhealthyThresholdCount": "The number of consecutive health check failures required before considering a target unhealthy. The range is 2-10. If the target group protocol is TCP, TCP_UDP, UDP, TLS, HTTP or HTTPS, the default is 2. For target groups with a protocol of GENEVE, the default is 2. If the target type is `lambda` , the default is 5.", @@ -19599,16 +19588,13 @@ "CertificateConfiguration": "Prompts Amazon GameLift Servers to generate a TLS/SSL certificate for the fleet. Amazon GameLift Servers uses the certificates to encrypt traffic between game clients and the game servers running on Amazon GameLift Servers. By default, the `CertificateConfiguration` is `DISABLED` . You can't change this property after you create the fleet.\n\nAWS Certificate Manager (ACM) certificates expire after 13 months. Certificate expiration can cause fleets to fail, preventing players from connecting to instances in the fleet. We recommend that you replace fleets before 13 months; consider using fleet aliases for a smooth transition.\n\n> ACM isn't available in all AWS regions. A fleet creation request with certificate generation enabled in an unsupported Region fails with a 4xx error. 
For more information about the supported Regions, see [Supported Regions](https://docs.aws.amazon.com/acm/latest/userguide/acm-regions.html) in the *AWS Certificate Manager User Guide* .", "ComputeType": "The type of compute resource used to host your game servers.\n\n- `EC2` \u2013 The game server build is deployed to Amazon EC2 instances for cloud hosting. This is the default setting.\n- `ANYWHERE` \u2013 Game servers and supporting software are deployed to compute resources that you provide and manage. With this compute type, you can also set the `AnywhereConfiguration` parameter.", "Description": "A description for the fleet.", - "DesiredEC2Instances": "The number of EC2 instances that you want this fleet to host. When creating a new fleet, GameLift automatically sets this value to \"1\" and initiates a single instance. Once the fleet is active, update this value to trigger GameLift to add or remove instances from the fleet.", "EC2InboundPermissions": "The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. Set this parameter for managed EC2 fleets. You can leave this parameter empty when creating the fleet, but you must call [](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetPortSettings) to set it before players can connect to game sessions. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished. For Amazon GameLift Servers Realtime fleets, Amazon GameLift Servers automatically sets TCP and UDP ranges.", "EC2InstanceType": "The Amazon GameLift Servers-supported Amazon EC2 instance type to use with managed EC2 fleets. Instance type determines the computing resources that will be used to host your game servers, including CPU, memory, storage, and networking capacity. See [Amazon Elastic Compute Cloud Instance Types](https://docs.aws.amazon.com/ec2/instance-types/) for detailed descriptions of Amazon EC2 instance types.", "FleetType": "Indicates whether to use On-Demand or Spot instances for this fleet. By default, this property is set to `ON_DEMAND` . Learn more about when to use [On-Demand versus Spot Instances](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-ec2-instances.html#gamelift-ec2-instances-spot) . This fleet property can't be changed after the fleet is created.", "InstanceRoleARN": "A unique identifier for an IAM role that manages access to your AWS services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN by using the [IAM dashboard](https://docs.aws.amazon.com/iam/) in the AWS Management Console . Learn more about using on-box credentials for your game servers at [Access external resources from a game server](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is `EC2` .", "InstanceRoleCredentialsProvider": "Indicates that fleet instances maintain a shared credentials file for the IAM role defined in `InstanceRoleArn` . Shared credentials allow applications that are deployed with the game server executable to communicate with other AWS resources. This property is used only when the game server is integrated with the server SDK version 5.x. 
For more information about using shared credentials, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is `EC2` .", "Locations": "A set of remote locations to deploy additional instances to and manage as a multi-location fleet. Use this parameter when creating a fleet in AWS Regions that support multiple locations. You can add any AWS Region or Local Zone that's supported by Amazon GameLift Servers. Provide a list of one or more AWS Region codes, such as `us-west-2` , or Local Zone names. When using this parameter, Amazon GameLift Servers requires you to include your home location in the request. For a list of supported Regions and Local Zones, see [Amazon GameLift Servers service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", - "MaxSize": "The maximum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 1.", "MetricGroups": "The name of an AWS CloudWatch metric group to add this fleet to. A metric group is used to aggregate the metrics for multiple fleets. You can specify an existing metric group name or set a new name to create a new metric group. A fleet can be included in only one metric group at a time.", - "MinSize": "The minimum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 0.", "Name": "A descriptive label that is associated with a fleet. Fleet names do not need to be unique.", "NewGameSessionProtectionPolicy": "The status of termination protection for active game sessions on the fleet. By default, this property is set to `NoProtection` .\n\n- *NoProtection* - Game sessions can be terminated during active gameplay as a result of a scale-down event.\n- *FullProtection* - Game sessions in `ACTIVE` status cannot be terminated during a scale-down event.", "PeerVpcAwsAccountId": "Used when peering your Amazon GameLift Servers fleet with a VPC, the unique identifier for the AWS account that owns the VPC. You can find your account ID in the AWS Management Console under account settings.", @@ -19616,7 +19602,8 @@ "ResourceCreationLimitPolicy": "A policy that limits the number of game sessions that an individual player can create on instances in this fleet within a specified span of time.", "RuntimeConfiguration": "Instructions for how to launch and maintain server processes on instances in the fleet. The runtime configuration defines one or more server process configurations, each identifying a build executable or Realtime script file and the number of processes of that type to run concurrently.\n\n> The `RuntimeConfiguration` parameter is required unless the fleet is being configured using the older parameters `ServerLaunchPath` and `ServerLaunchParameters` , which are still supported for backward compatibility.", "ScalingPolicies": "Rule that controls how a fleet is scaled. Scaling policies are uniquely identified by the combination of name and fleet ID.", - "ScriptId": "The unique identifier for a Realtime configuration script to be deployed on fleet instances. You can use either the script ID or ARN. Scripts must be uploaded to Amazon GameLift Servers prior to creating the fleet. 
This fleet property cannot be changed later.\n\n> You can't use the `!Ref` command to reference a script created with a CloudFormation template for the fleet property `ScriptId` . Instead, use `Fn::GetAtt Script.Arn` or `Fn::GetAtt Script.Id` to retrieve either of these properties as input for `ScriptId` . Alternatively, enter a `ScriptId` string manually." + "ScriptId": "The unique identifier for a Realtime configuration script to be deployed on fleet instances. You can use either the script ID or ARN. Scripts must be uploaded to Amazon GameLift Servers prior to creating the fleet. This fleet property cannot be changed later.\n\n> You can't use the `!Ref` command to reference a script created with a CloudFormation template for the fleet property `ScriptId` . Instead, use `Fn::GetAtt Script.Arn` or `Fn::GetAtt Script.Id` to retrieve either of these properties as input for `ScriptId` . Alternatively, enter a `ScriptId` string manually.", + "Tags": "" }, "AWS::GameLift::Fleet AnywhereConfiguration": { "Cost": "The cost to run your fleet per hour. Amazon GameLift Servers uses the provided cost of your fleet to balance usage in queues. For more information about queues, see [Setting up queues](https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-intro.html) in the *Amazon GameLift Servers Developer Guide* ." @@ -19667,6 +19654,10 @@ "LaunchPath": "The location of a game build executable or Realtime script. Game builds and Realtime scripts are installed on instances at the root:\n\n- Windows (custom game builds only): `C:\\game` . Example: \" `C:\\game\\MyGame\\server.exe` \"\n- Linux: `/local/game` . Examples: \" `/local/game/MyGame/server.exe` \" or \" `/local/game/MyRealtimeScript.js` \"\n\n> Amazon GameLift Servers doesn't support the use of setup scripts that launch the game executable. For custom game builds, this parameter must indicate the executable that calls the server SDK operations `initSDK()` and `ProcessReady()` .", "Parameters": "An optional list of parameters to pass to the server executable or Realtime script on launch.\n\nLength Constraints: Minimum length of 1. Maximum length of 1024.\n\nPattern: [A-Za-z0-9_:.+\\/\\\\\\- =@{},?'\\[\\]\"]+" }, + "AWS::GameLift::Fleet Tag": { + "Key": "The key for a developer-defined key value pair for tagging an AWS resource.", + "Value": "The value for a developer-defined key value pair for tagging an AWS resource." + }, "AWS::GameLift::Fleet TargetConfiguration": { "TargetValue": "Desired value to use with a target-based scaling policy. The value must be relevant for whatever metric the scaling policy is using. For example, in a policy using the metric PercentAvailableGameSessions, the target value should be the preferred size of the fleet's buffer (the percent of capacity that should be idle and ready for new game sessions)." }, @@ -19814,12 +19805,12 @@ "Tags": "A list of labels to assign to the new stream group resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management and cost allocation. See [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference* ." }, "AWS::GameLiftStreams::StreamGroup DefaultApplication": { - "Arn": "An [Amazon Resource Name (ARN)](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html) that uniquely identifies the application resource. 
Format example: `arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6` .", - "Id": "An ID that uniquely identifies the application resource. For example: `a-9ZY8X7Wv6` ." + "Arn": "An [Amazon Resource Name (ARN)](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html) that uniquely identifies the application resource. Example ARN: `arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6` .", + "Id": "An ID that uniquely identifies the application resource. Example ID: `a-9ZY8X7Wv6` ." }, "AWS::GameLiftStreams::StreamGroup LocationConfiguration": { "AlwaysOnCapacity": "The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.", - "LocationName": "A location's name. For example, `us-east-1` . For a complete list of locations that Amazon GameLift Streams supports, refer to [Regions and quotas](https://docs.aws.amazon.com/gameliftstreams/latest/developerguide/regions-quotas.html) in the *Amazon GameLift Streams Developer Guide* .", + "LocationName": "A location's name. For example, `us-east-1` . For a complete list of locations that Amazon GameLift Streams supports, refer to [Regions, quotas, and limitations](https://docs.aws.amazon.com/gameliftstreams/latest/developerguide/regions-quotas.html) in the *Amazon GameLift Streams Developer Guide* .", "OnDemandCapacity": "The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes)." }, "AWS::GlobalAccelerator::Accelerator": { @@ -30365,7 +30356,7 @@ "DashManifests": "A DASH manifest configuration.", "Description": "The description associated with the origin endpoint.", "ForceEndpointErrorConfiguration": "The failover settings for the endpoint.", - "HlsManifests": "The HLS manfiests associated with the origin endpoint configuration.", + "HlsManifests": "The HLS manifests associated with the origin endpoint configuration.", "LowLatencyHlsManifests": "The low-latency HLS (LL-HLS) manifests associated with the origin endpoint.", "OriginEndpointName": "The name of the origin endpoint associated with the origin endpoint configuration.", "Segment": "The segment associated with the origin endpoint.", @@ -30427,7 +30418,7 @@ "AWS::MediaPackageV2::OriginEndpoint LowLatencyHlsManifestConfiguration": { "ChildManifestName": "The name of the child manifest associated with the low-latency HLS (LL-HLS) manifest configuration of the origin endpoint.", "FilterConfiguration": "", - "ManifestName": "A short short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, `index` . MediaPackage automatically inserts the format extension, such as `.m3u8` . You can't use the same manifest name if you use HLS manifest and low-latency HLS manifest. The `manifestName` on the `HLSManifest` object overrides the `manifestName` you provided on the `originEndpoint` object.", + "ManifestName": "A short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, `index` . MediaPackage automatically inserts the format extension, such as `.m3u8` . 
You can't use the same manifest name if you use HLS manifest and low-latency HLS manifest. The `manifestName` on the `HLSManifest` object overrides the `manifestName` you provided on the `originEndpoint` object.", "ManifestWindowSeconds": "The total duration (in seconds) of the manifest's content.", "ProgramDateTimeIntervalSeconds": "Inserts `EXT-X-PROGRAM-DATE-TIME` tags in the output manifest at the interval that you specify. If you don't enter an interval, `EXT-X-PROGRAM-DATE-TIME` tags aren't included in the manifest. The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player.\n\nIrrespective of this parameter, if any `ID3Timed` metadata is in the HLS input, MediaPackage passes through that metadata to the HLS output.", "ScteHls": "The SCTE-35 HLS configuration associated with the low-latency HLS (LL-HLS) manifest configuration of the origin endpoint.", @@ -31706,12 +31697,31 @@ "Name": "The workflow's name.", "ParameterTemplate": "The workflow's parameter template.", "StorageCapacity": "The default static storage capacity (in gibibytes) for runs that use this workflow or workflow version.", + "StorageType": "", "Tags": "Tags for the workflow." }, "AWS::Omics::Workflow WorkflowParameter": { "Description": "The parameter's description.", "Optional": "Whether the parameter is optional." }, + "AWS::Omics::WorkflowVersion": { + "Accelerators": "", + "DefinitionUri": "", + "Description": "The description of the workflow version.", + "Engine": "", + "Main": "", + "ParameterTemplate": "", + "StorageCapacity": "", + "StorageType": "", + "Tags": "", + "VersionName": "The name of the workflow version.", + "WorkflowBucketOwnerId": "", + "WorkflowId": "The workflow's ID." + }, + "AWS::Omics::WorkflowVersion WorkflowParameter": { + "Description": "The parameter's description.", + "Optional": "Whether the parameter is optional." + }, "AWS::OpenSearchServerless::AccessPolicy": { "Description": "The description of the policy.", "Name": "The name of the policy.", @@ -46644,6 +46654,7 @@ "Alias": "The unique and identifiable alias of the contact or escalation plan.", "DisplayName": "The full name of the contact or escalation plan.", "Plan": "A list of stages. A contact has an engagement plan with stages that contact specified contact channels. An escalation plan uses stages that contact specified contacts.", + "Tags": "", "Type": "The type of contact.\n\n- `PERSONAL` : A single, individual contact.\n- `ESCALATION` : An escalation plan.\n- `ONCALL_SCHEDULE` : An on-call schedule." }, "AWS::SSMContacts::Contact ChannelTargetInfo": { @@ -46659,6 +46670,10 @@ "RotationIds": "The Amazon Resource Names (ARNs) of the on-call rotations associated with the plan.", "Targets": "The contacts or contact methods that the escalation plan or engagement plan is engaging." }, + "AWS::SSMContacts::Contact Tag": { + "Key": "Name of the object key.", + "Value": "Value of the tag." + }, "AWS::SSMContacts::Contact Targets": { "ChannelTargetInfo": "Information about the contact channel that Incident Manager engages.", "ContactTargetInfo": "The contact that Incident Manager is engaging during an incident." @@ -46825,8 +46840,8 @@ "Tags": "Key-value pairs of metadata to assign to the configuration manager." 
}, "AWS::SSMQuickSetup::ConfigurationManager ConfigurationDefinition": { - "LocalDeploymentAdministrationRoleArn": "The ARN of the IAM role used to administrate local configuration deployments.", - "LocalDeploymentExecutionRoleName": "The name of the IAM role used to deploy local configurations.", + "LocalDeploymentAdministrationRoleArn": "The ARN of the IAM role used to administrate local configuration deployments.\n\n> Although this element is listed as \"Required: No\", a value can be omitted only for organizational deployments of types other than `AWSQuickSetupType-PatchPolicy` . A value must be provided when you are running an organizational deployment for a patch policy or running any type of deployment for a single account.", + "LocalDeploymentExecutionRoleName": "The name of the IAM role used to deploy local configurations.\n\n> Although this element is listed as \"Required: No\", a value can be omitted only for organizational deployments of types other than `AWSQuickSetupType-PatchPolicy` . A value must be provided when you are running an organizational deployment for a patch policy or running any type of deployment for a single account.", "Parameters": "The parameters for the configuration definition type. Parameters for configuration definitions vary based the configuration type. The following lists outline the parameters for each configuration type.\n\n- **AWS Config Recording (Type: AWS QuickSetupType-CFGRecording)** - - `RecordAllResources`\n\n- Description: (Optional) A boolean value that determines whether all supported resources are recorded. The default value is \" `true` \".\n- `ResourceTypesToRecord`\n\n- Description: (Optional) A comma separated list of resource types you want to record.\n- `RecordGlobalResourceTypes`\n\n- Description: (Optional) A boolean value that determines whether global resources are recorded with all resource configurations. The default value is \" `false` \".\n- `GlobalResourceTypesRegion`\n\n- Description: (Optional) Determines the AWS Region where global resources are recorded.\n- `UseCustomBucket`\n\n- Description: (Optional) A boolean value that determines whether a custom Amazon S3 bucket is used for delivery. The default value is \" `false` \".\n- `DeliveryBucketName`\n\n- Description: (Optional) The name of the Amazon S3 bucket you want AWS Config to deliver configuration snapshots and configuration history files to.\n- `DeliveryBucketPrefix`\n\n- Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.\n- `NotificationOptions`\n\n- Description: (Optional) Determines the notification configuration for the recorder. The valid values are `NoStreaming` , `UseExistingTopic` , and `CreateTopic` . The default value is `NoStreaming` .\n- `CustomDeliveryTopicAccountId`\n\n- Description: (Optional) The ID of the AWS account where the Amazon SNS topic you want to use for notifications resides. You must specify a value for this parameter if you use the `UseExistingTopic` notification option.\n- `CustomDeliveryTopicName`\n\n- Description: (Optional) The name of the Amazon SNS topic you want to use for notifications. You must specify a value for this parameter if you use the `UseExistingTopic` notification option.\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(7 days)` , `rate(1 days)` , and `none` . 
The default value is \" `none` \".\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Change Manager (Type: AWS QuickSetupType-SSMChangeMgr)** - - `DelegatedAccountId`\n\n- Description: (Required) The ID of the delegated administrator account.\n- `JobFunction`\n\n- Description: (Required) The name for the Change Manager job function.\n- `PermissionType`\n\n- Description: (Optional) Specifies whether you want to use default administrator permissions for the job function role, or provide a custom IAM policy. The valid values are `CustomPermissions` and `AdminPermissions` . The default value for the parameter is `CustomerPermissions` .\n- `CustomPermissions`\n\n- Description: (Optional) A JSON string containing the IAM policy you want your job function to use. You must provide a value for this parameter if you specify `CustomPermissions` for the `PermissionType` parameter.\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Conformance Packs (Type: AWS QuickSetupType-CFGCPacks)** - - `DelegatedAccountId`\n\n- Description: (Optional) The ID of the delegated administrator account. This parameter is required for Organization deployments.\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(2 days)` , and `none` . The default value is \" `none` \".\n- `CPackNames`\n\n- Description: (Required) A comma separated list of AWS Config conformance packs.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Default Host Management Configuration (Type: AWS QuickSetupType-DHMC)** - - `UpdateSSMAgent`\n\n- Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is \" `true` \".\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) The AWS Regions to deploy the configuration to. 
For this type, the parameter only accepts a value of `AllRegions` .\n- **DevOps\u00a0Guru (Type: AWS QuickSetupType-DevOpsGuru)** - - `AnalyseAllResources`\n\n- Description: (Optional) A boolean value that determines whether DevOps\u00a0Guru analyzes all AWS CloudFormation stacks in the account. The default value is \" `false` \".\n- `EnableSnsNotifications`\n\n- Description: (Optional) A boolean value that determines whether DevOps\u00a0Guru sends notifications when an insight is created. The default value is \" `true` \".\n- `EnableSsmOpsItems`\n\n- Description: (Optional) A boolean value that determines whether DevOps\u00a0Guru creates an OpsCenter OpsItem when an insight is created. The default value is \" `true` \".\n- `EnableDriftRemediation`\n\n- Description: (Optional) A boolean value that determines whether a drift remediation schedule is used. The default value is \" `false` \".\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(1 days)` , and `none` . The default value is \" `none` \".\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Distributor (Type: AWS QuickSetupType-Distributor)** - - `PackagesToInstall`\n\n- Description: (Required) A comma separated list of packages you want to install on the target instances. The valid values are `AWSEFSTools` , `AWSCWAgent` , and `AWSEC2LaunchAgent` .\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(2 days)` , and `none` . The default value is \" `rate(30 days)` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instance profiles already associated with the target instances. The default value is \" `false` \".\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Required) The name of the resource group associated with the instances you want to target. 
You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Host Management (Type: AWS QuickSetupType-SSMHostMgmt)** - - `UpdateSSMAgent`\n\n- Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is \" `true` \".\n- `UpdateEc2LaunchAgent`\n\n- Description: (Optional) A boolean value that determines whether the EC2 Launch agent is updated on the target instances every month. The default value is \" `false` \".\n- `CollectInventory`\n\n- Description: (Optional) A boolean value that determines whether instance metadata is collected on the target instances every 30 minutes. The default value is \" `true` \".\n- `ScanInstances`\n\n- Description: (Optional) A boolean value that determines whether the target instances are scanned daily for available patches. The default value is \" `true` \".\n- `InstallCloudWatchAgent`\n\n- Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is installed on the target instances. The default value is \" `false` \".\n- `UpdateCloudWatchAgent`\n\n- Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is updated on the target instances every month. The default value is \" `false` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instance profiles already associated with the target instances. The default value is \" `false` \".\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Optional) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Optional) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Optional) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. 
A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **OpsCenter (Type: AWS QuickSetupType-SSMOpsCenter)** - - `DelegatedAccountId`\n\n- Description: (Required) The ID of the delegated administrator account.\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Patch Policy (Type: AWS QuickSetupType-PatchPolicy)** - - `PatchPolicyName`\n\n- Description: (Required) A name for the patch policy. The value you provide is applied to target Amazon EC2 instances as a tag.\n- `SelectedPatchBaselines`\n\n- Description: (Required) An array of JSON objects containing the information for the patch baselines to include in your patch policy.\n- `PatchBaselineUseDefault`\n\n- Description: (Optional) A value that determines whether the selected patch baselines are all AWS provided. Supported values are `default` and `custom` .\n- `PatchBaselineRegion`\n\n- Description: (Required) The AWS Region where the patch baseline exists.\n- `ConfigurationOptionsPatchOperation`\n\n- Description: (Optional) Determines whether target instances scan for available patches, or scan and install available patches. The valid values are `Scan` and `ScanAndInstall` . The default value for the parameter is `Scan` .\n- `ConfigurationOptionsScanValue`\n\n- Description: (Optional) A cron expression that is used as the schedule for when instances scan for available patches.\n- `ConfigurationOptionsInstallValue`\n\n- Description: (Optional) A cron expression that is used as the schedule for when instances install available patches.\n- `ConfigurationOptionsScanNextInterval`\n\n- Description: (Optional) A boolean value that determines whether instances should scan for available patches at the next cron interval. The default value is \" `false` \".\n- `ConfigurationOptionsInstallNextInterval`\n\n- Description: (Optional) A boolean value that determines whether instances should install available patches at the next cron interval. The default value is \" `false` \".\n- `RebootOption`\n\n- Description: (Optional) Determines whether instances are rebooted after patches are installed. Valid values are `RebootIfNeeded` and `NoReboot` .\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instance profiles already associated with the target instances. 
The default value is \" `false` \".\n- `OutputLogEnableS3`\n\n- Description: (Optional) A boolean value that determines whether command output logs are sent to Amazon S3.\n- `OutputS3Location`\n\n- Description: (Optional) Information about the Amazon S3 bucket where you want to store the output details of the request.\n\n- `OutputBucketRegion`\n\n- Description: (Optional) The AWS Region where the Amazon S3 bucket you want to deliver command output to is located.\n- `OutputS3BucketName`\n\n- Description: (Optional) The name of the Amazon S3 bucket you want to deliver command output to.\n- `OutputS3KeyPrefix`\n\n- Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. 
A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Resource Explorer (Type: AWS QuickSetupType-ResourceExplorer)** - - `SelectedAggregatorRegion`\n\n- Description: (Required) The AWS Region where you want to create the aggregator index.\n- `ReplaceExistingAggregator`\n\n- Description: (Required) A boolean value that determines whether to demote an existing aggregator if it is in a Region that differs from the value you specify for the `SelectedAggregatorRegion` .\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Resource Scheduler (Type: AWS QuickSetupType-Scheduler)** - - `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target.\n- `ICalendarString`\n\n- Description: (Required) An iCalendar formatted string containing the schedule you want Change Manager to use.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.", "Type": "The type of the Quick Setup configuration.", "TypeVersion": "The version of the Quick Setup type used.", @@ -46920,6 +46935,7 @@ "AppName": "The name of the app.", "AppType": "The type of app.", "DomainId": "The domain ID.", + "RecoveryMode": "", "ResourceSpec": "Specifies the ARNs of a SageMaker image and SageMaker image version, and the instance type that the version runs on.", "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", "UserProfileName": "The user profile name." @@ -47178,6 +47194,7 @@ }, "AWS::SageMaker::Domain CodeEditorAppSettings": { "AppLifecycleManagement": "Settings that are used to configure and manage the lifecycle of CodeEditor applications.", + "BuiltInLifecycleConfigArn": "The lifecycle configuration that runs before the default lifecycle configuration. It can override changes made in the default lifecycle configuration.", "CustomImages": "A list of custom SageMaker images that are configured to run as a Code Editor app.", "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the Code Editor app.", "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Code Editor application lifecycle configuration." 
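As an illustration of the `BuiltInLifecycleConfigArn` property documented in the hunk above, here is a minimal sketch of a CloudFormation template fragment. The resource name, domain name, VPC/subnet IDs, role, and lifecycle-configuration ARNs are hypothetical placeholders, not values taken from this schema:

```json
{
  "Resources": {
    "ExampleDomain": {
      "Type": "AWS::SageMaker::Domain",
      "Properties": {
        "DomainName": "example-domain",
        "AuthMode": "IAM",
        "VpcId": "vpc-00000000000000000",
        "SubnetIds": ["subnet-00000000000000000"],
        "DefaultUserSettings": {
          "ExecutionRole": "arn:aws:iam::111122223333:role/ExampleSageMakerExecutionRole",
          "CodeEditorAppSettings": {
            "BuiltInLifecycleConfigArn": "arn:aws:sagemaker:us-west-2:111122223333:studio-lifecycle-config/example-builtin-config",
            "LifecycleConfigArns": [
              "arn:aws:sagemaker:us-west-2:111122223333:studio-lifecycle-config/example-default-config"
            ]
          }
        }
      }
    }
  }
}
```

Per the description above, the configuration referenced by `BuiltInLifecycleConfigArn` runs before the default lifecycle configuration and can override changes that the default configuration makes.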
@@ -47233,6 +47250,10 @@ "FileSystemId": "The globally unique, 17-digit, ID of the file system, assigned by Amazon FSx for Lustre.", "FileSystemPath": "The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below." }, + "AWS::SageMaker::Domain HiddenSageMakerImage": { + "SageMakerImageName": "The SageMaker image name that you are hiding from the Studio user interface.", + "VersionAliases": "The version aliases you are hiding from the Studio user interface." + }, "AWS::SageMaker::Domain IdleSettings": { "IdleTimeoutInMinutes": "The time that SageMaker waits after the application becomes idle before shutting it down.", "LifecycleManagement": "Indicates whether idle shutdown is activated for the application type.", @@ -47241,6 +47262,7 @@ }, "AWS::SageMaker::Domain JupyterLabAppSettings": { "AppLifecycleManagement": "Indicates whether idle shutdown is activated for JupyterLab applications.", + "BuiltInLifecycleConfigArn": "The lifecycle configuration that runs before the default lifecycle configuration. It can override changes made in the default lifecycle configuration.", "CodeRepositories": "A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterLab application.", "CustomImages": "A list of custom SageMaker images that are configured to run as a JupyterLab app.", "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterLab app.", @@ -47282,13 +47304,16 @@ }, "AWS::SageMaker::Domain StudioWebPortalSettings": { "HiddenAppTypes": "The [Applications supported in Studio](https://docs.aws.amazon.com/sagemaker/latest/dg/studio-updated-apps.html) that are hidden from the Studio left navigation pane.", - "HiddenMlTools": "The machine learning tools that are hidden from the Studio left navigation pane." + "HiddenInstanceTypes": "The instance types you are hiding from the Studio user interface.", + "HiddenMlTools": "The machine learning tools that are hidden from the Studio left navigation pane.", + "HiddenSageMakerImageVersionAliases": "The version aliases you are hiding from the Studio user interface." }, "AWS::SageMaker::Domain Tag": { "Key": "The tag key. Tag keys must be unique per resource.", "Value": "The tag value." }, "AWS::SageMaker::Domain UserSettings": { + "AutoMountHomeEFS": "Indicates whether auto-mounting of an EFS volume is supported for the user profile. The `DefaultAsDomain` value is only supported for user profiles. Do not use the `DefaultAsDomain` value when setting this parameter for a domain.\n\nSageMaker applies this setting only to private spaces that the user creates in the domain. SageMaker doesn't apply this setting to shared spaces.", "CodeEditorAppSettings": "The Code Editor application settings.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "CustomFileSystemConfigs": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker AI Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "CustomPosixUserConfig": "Details about the POSIX identity that is used for file system operations.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. 
SageMaker doesn't apply these settings to shared spaces.", @@ -48737,6 +48762,7 @@ }, "AWS::SageMaker::UserProfile CodeEditorAppSettings": { "AppLifecycleManagement": "Settings that are used to configure and manage the lifecycle of CodeEditor applications.", + "BuiltInLifecycleConfigArn": "The lifecycle configuration that runs before the default lifecycle configuration. It can override changes made in the default lifecycle configuration.", "CustomImages": "A list of custom SageMaker images that are configured to run as a Code Editor app.", "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the Code Editor app.", "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Code Editor application lifecycle configuration." @@ -48772,6 +48798,10 @@ "FileSystemId": "The globally unique, 17-digit, ID of the file system, assigned by Amazon FSx for Lustre.", "FileSystemPath": "The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below." }, + "AWS::SageMaker::UserProfile HiddenSageMakerImage": { + "SageMakerImageName": "The SageMaker image name that you are hiding from the Studio user interface.", + "VersionAliases": "The version aliases you are hiding from the Studio user interface." + }, "AWS::SageMaker::UserProfile IdleSettings": { "IdleTimeoutInMinutes": "The time that SageMaker waits after the application becomes idle before shutting it down.", "LifecycleManagement": "Indicates whether idle shutdown is activated for the application type.", @@ -48780,6 +48810,7 @@ }, "AWS::SageMaker::UserProfile JupyterLabAppSettings": { "AppLifecycleManagement": "Indicates whether idle shutdown is activated for JupyterLab applications.", + "BuiltInLifecycleConfigArn": "The lifecycle configuration that runs before the default lifecycle configuration. It can override changes made in the default lifecycle configuration.", "CodeRepositories": "A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterLab application.", "CustomImages": "A list of custom SageMaker images that are configured to run as a JupyterLab app.", "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterLab app.", @@ -48811,13 +48842,16 @@ }, "AWS::SageMaker::UserProfile StudioWebPortalSettings": { "HiddenAppTypes": "The [Applications supported in Studio](https://docs.aws.amazon.com/sagemaker/latest/dg/studio-updated-apps.html) that are hidden from the Studio left navigation pane.", - "HiddenMlTools": "The machine learning tools that are hidden from the Studio left navigation pane." + "HiddenInstanceTypes": "The instance types you are hiding from the Studio user interface.", + "HiddenMlTools": "The machine learning tools that are hidden from the Studio left navigation pane.", + "HiddenSageMakerImageVersionAliases": "The version aliases you are hiding from the Studio user interface." }, "AWS::SageMaker::UserProfile Tag": { "Key": "The tag key. Tag keys must be unique per resource.", "Value": "The tag value." }, "AWS::SageMaker::UserProfile UserSettings": { + "AutoMountHomeEFS": "Indicates whether auto-mounting of an EFS volume is supported for the user profile. The `DefaultAsDomain` value is only supported for user profiles. 
Do not use the `DefaultAsDomain` value when setting this parameter for a domain.\n\nSageMaker applies this setting only to private spaces that the user creates in the domain. SageMaker doesn't apply this setting to shared spaces.", "CodeEditorAppSettings": "The Code Editor application settings.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "CustomFileSystemConfigs": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker AI Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "CustomPosixUserConfig": "Details about the POSIX identity that is used for file system operations.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", @@ -51052,7 +51086,7 @@ "AWS::WAFv2::RuleGroup Headers": { "MatchPattern": "The filter to use to identify the subset of headers to inspect in a web request.\n\nYou must specify exactly one setting: either `All` , `IncludedHeaders` , or `ExcludedHeaders` .\n\nExample JSON: `\"MatchPattern\": { \"ExcludedHeaders\": [ \"KeyToExclude1\", \"KeyToExclude2\" ] }`", "MatchScope": "The parts of the headers to match with the rule inspection criteria. If you specify `ALL` , AWS WAF inspects both keys and values.\n\n`All` does not require a match to be found in the keys and a match to be found in the values. It requires a match to be found in the keys or the values or both. To require a match in the keys and in the values, use a logical `AND` statement to combine two match rules, one that inspects the keys and another that inspects the values.", - "OversizeHandling": "What AWS WAF should do if the headers of the request are more numerous or larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to AWS WAF .\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available headers normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement." + "OversizeHandling": "What AWS WAF should do if the headers determined by your match scope are more numerous or larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to AWS WAF .\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available headers normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement." 
}, "AWS::WAFv2::RuleGroup IPSetForwardedIPConfiguration": { "FallbackBehavior": "The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.\n\n> If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all. \n\nYou can specify the following fallback behaviors:\n\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.", @@ -51374,7 +51408,7 @@ "AWS::WAFv2::WebACL Headers": { "MatchPattern": "The filter to use to identify the subset of headers to inspect in a web request.\n\nYou must specify exactly one setting: either `All` , `IncludedHeaders` , or `ExcludedHeaders` .\n\nExample JSON: `\"MatchPattern\": { \"ExcludedHeaders\": [ \"KeyToExclude1\", \"KeyToExclude2\" ] }`", "MatchScope": "The parts of the headers to match with the rule inspection criteria. If you specify `ALL` , AWS WAF inspects both keys and values.\n\n`All` does not require a match to be found in the keys and a match to be found in the values. It requires a match to be found in the keys or the values or both. To require a match in the keys and in the values, use a logical `AND` statement to combine two match rules, one that inspects the keys and another that inspects the values.", - "OversizeHandling": "What AWS WAF should do if the headers of the request are more numerous or larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to AWS WAF .\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available headers normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement." + "OversizeHandling": "What AWS WAF should do if the headers determined by your match scope are more numerous or larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to AWS WAF .\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available headers normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement." }, "AWS::WAFv2::WebACL IPSetForwardedIPConfiguration": { "FallbackBehavior": "The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.\n\n> If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all. \n\nYou can specify the following fallback behaviors:\n\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.", @@ -52054,7 +52088,7 @@ "Description": "The description of the pool.", "DirectoryId": "The identifier of the directory used by the pool.", "PoolName": "The name of the pool.", - "RunningMode": "", + "RunningMode": "The running mode of the pool.", "TimeoutSettings": "The amount of time that a pool session remains active after users disconnect. If they try to reconnect to the pool session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new pool instance." }, "AWS::WorkSpaces::WorkspacesPool ApplicationSettings": { diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index 67711e8b4..4ba98880a 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -62888,41 +62888,27 @@ "items": { "type": "string" }, - "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads from your on-premises storage system's management interface. You can only specify one ARN.", - "title": "AgentArns", "type": "array" }, "CloudWatchLogGroupArn": { - "markdownDescription": "Specifies the ARN of the Amazon CloudWatch log group for monitoring and logging discovery job events.", - "title": "CloudWatchLogGroupArn", "type": "string" }, "Name": { - "markdownDescription": "Specifies a familiar name for your on-premises storage system.", - "title": "Name", "type": "string" }, "ServerConfiguration": { - "$ref": "#/definitions/AWS::DataSync::StorageSystem.ServerConfiguration", - "markdownDescription": "Specifies the server name and network port required to connect with the management interface of your on-premises storage system.", - "title": "ServerConfiguration" + "$ref": "#/definitions/AWS::DataSync::StorageSystem.ServerConfiguration" }, "ServerCredentials": { - "$ref": "#/definitions/AWS::DataSync::StorageSystem.ServerCredentials", - "markdownDescription": "Specifies the user name and password for accessing your on-premises storage system's management interface.", - "title": "ServerCredentials" + "$ref": "#/definitions/AWS::DataSync::StorageSystem.ServerCredentials" }, "SystemType": { - "markdownDescription": "Specifies the type of on-premises storage system that you want DataSync Discovery to collect information about.\n\n> DataSync Discovery currently supports NetApp Fabric-Attached Storage (FAS) and All Flash FAS (AFF) systems running ONTAP 9.7 or later.", - "title": "SystemType", "type": "string" }, "Tags": { "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "Specifies labels that help you categorize, filter, and search for your AWS resources. 
We recommend creating at least a name tag for your on-premises storage system.", - "title": "Tags", "type": "array" } }, @@ -62958,13 +62944,9 @@ "additionalProperties": false, "properties": { "ServerHostname": { - "markdownDescription": "The domain name or IP address of your storage system's management interface.", - "title": "ServerHostname", "type": "string" }, "ServerPort": { - "markdownDescription": "The network port for accessing the storage system's management interface.", - "title": "ServerPort", "type": "number" } }, @@ -62977,13 +62959,9 @@ "additionalProperties": false, "properties": { "Password": { - "markdownDescription": "Specifies the password for your storage system's management interface.", - "title": "Password", "type": "string" }, "Username": { - "markdownDescription": "Specifies the user name for your storage system's management interface.", - "title": "Username", "type": "string" } }, @@ -83809,7 +83787,7 @@ }, "Options": { "additionalProperties": true, - "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using Fargate.Optional when using EC2.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. 
The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n\nThe following options apply to all supported log drivers.\n\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to the log driver specified using `logDriver` . The delivery mode you choose affects application availability when the flow of logs from container is interrupted.\n\nIf you use the `blocking` mode and the flow of logs is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n\nYou can set a default `mode` for all containers in a specific AWS Region by using the `defaultLogDriverMode` account setting. If you don't specify the `mode` option or configure the account setting, Amazon ECS will default to the `blocking` mode. For more information about the account setting, see [Default log driver mode](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#default-log-driver-mode) in the *Amazon Elastic Container Service Developer Guide* .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. 
When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` . For more information, see [Under the hood: FireLens for Amazon ECS Tasks](https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/) .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`",
+      "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using Fargate. Optional when using EC2.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. 
If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n\nThe following options apply to all supported log drivers.\n\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to the log driver specified using `logDriver` . The delivery mode you choose affects application availability when the flow of logs from the container is interrupted.\n\nIf you use the `blocking` mode and the flow of logs is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent. 
We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n\nYou can set a default `mode` for all containers in a specific AWS Region by using the `defaultLogDriverMode` account setting. If you don't specify the `mode` option or configure the account setting, Amazon ECS will default to the `blocking` mode. For more information about the account setting, see [Default log driver mode](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#default-log-driver-mode) in the *Amazon Elastic Container Service Developer Guide* .\n\n> On June 25, 2025, Amazon ECS is changing the default log driver mode from `blocking` to `non-blocking` to prioritize task availability over logging. To continue using the `blocking` mode after this change, do one of the following:\n> \n> - Set the `mode` option in your container definition's `logConfiguration` as `blocking` .\n> - Set the `defaultLogDriverMode` account setting to `blocking` .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issues because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` . For more information, see [Under the hood: FireLens for Amazon ECS Tasks](https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/) .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. 
To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -84030,12 +84008,12 @@ "additionalProperties": false, "properties": { "Encrypted": { - "markdownDescription": "Indicates whether the volume should be encrypted. If no value is specified, encryption is turned on by default. This parameter maps 1:1 with the `Encrypted` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .", + "markdownDescription": "Indicates whether the volume should be encrypted. If you turn on Region-level Amazon EBS encryption by default but set this value as `false` , the setting is overridden and the volume is encrypted with the KMS key specified for Amazon EBS encryption by default. This parameter maps 1:1 with the `Encrypted` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .", "title": "Encrypted", "type": "boolean" }, "FilesystemType": { - "markdownDescription": "The filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the task will fail to start.\n\nThe available Linux filesystem types are `ext3` , `ext4` , and `xfs` . If no value is specified, the `xfs` filesystem type is used by default.\n\nThe available Windows filesystem types are `NTFS` .", + "markdownDescription": "The filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the tasks will fail to start.\n\nThe available Linux filesystem types are `ext3` , `ext4` , and `xfs` . If no value is specified, the `xfs` filesystem type is used by default.\n\nThe available Windows filesystem types are `NTFS` .", "title": "FilesystemType", "type": "string" }, @@ -84045,7 +84023,7 @@ "type": "number" }, "KmsKeyId": { - "markdownDescription": "The Amazon Resource Name (ARN) identifier of the AWS Key Management Service key to use for Amazon EBS encryption. When encryption is turned on and no AWS Key Management Service key is specified, the default AWS managed key for Amazon EBS volumes is used. This parameter maps 1:1 with the `KmsKeyId` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .\n\n> AWS authenticates the AWS Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.", + "markdownDescription": "The Amazon Resource Name (ARN) identifier of the AWS Key Management Service key to use for Amazon EBS encryption. When a key is specified using this parameter, it overrides Amazon EBS default encryption or any KMS key that you specified for cluster-level managed storage encryption. This parameter maps 1:1 with the `KmsKeyId` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* . 
For more information about encrypting Amazon EBS volumes attached to tasks, see [Encrypt data stored in Amazon EBS volumes attached to Amazon ECS tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ebs-kms-encryption.html) .\n\n> AWS authenticates the AWS Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.", "title": "KmsKeyId", "type": "string" }, @@ -84060,7 +84038,7 @@ "type": "number" }, "SnapshotId": { - "markdownDescription": "The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot ID or a volume size. This parameter maps 1:1 with the `SnapshotId` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .", + "markdownDescription": "The snapshot that Amazon ECS uses to create volumes for attachment to tasks maintained by the service. You must specify either `snapshotId` or `sizeInGiB` in your volume configuration. This parameter maps 1:1 with the `SnapshotId` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .", "title": "SnapshotId", "type": "string" }, @@ -84979,7 +84957,7 @@ }, "Options": { "additionalProperties": true, - "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using Fargate.Optional when using EC2.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. 
Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n\nThe following options apply to all supported log drivers.\n\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to the log driver specified using `logDriver` . The delivery mode you choose affects application availability when the flow of logs from container is interrupted.\n\nIf you use the `blocking` mode and the flow of logs is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n\nYou can set a default `mode` for all containers in a specific AWS Region by using the `defaultLogDriverMode` account setting. If you don't specify the `mode` option or configure the account setting, Amazon ECS will default to the `blocking` mode. 
For more information about the account setting, see [Default log driver mode](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#default-log-driver-mode) in the *Amazon Elastic Container Service Developer Guide* .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` . For more information, see [Under the hood: FireLens for Amazon ECS Tasks](https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/) .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. 
Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using Fargate. Optional when using EC2.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n\nThe following options apply to all supported log drivers.\n\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to the log driver specified using `logDriver` . 
The delivery mode you choose affects application availability when the flow of logs from the container is interrupted.\n\nIf you use the `blocking` mode and the flow of logs is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n\nYou can set a default `mode` for all containers in a specific AWS Region by using the `defaultLogDriverMode` account setting. If you don't specify the `mode` option or configure the account setting, Amazon ECS will default to the `blocking` mode. For more information about the account setting, see [Default log driver mode](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#default-log-driver-mode) in the *Amazon Elastic Container Service Developer Guide* .\n\n> On June 25, 2025, Amazon ECS is changing the default log driver mode from `blocking` to `non-blocking` to prioritize task availability over logging. To continue using the `blocking` mode after this change, do one of the following:\n> \n> - Set the `mode` option in your container definition's `logConfiguration` as `blocking` .\n> - Set the `defaultLogDriverMode` account setting to `blocking` .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issues because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` . 
For more information, see [Under the hood: FireLens for Amazon ECS Tasks](https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/) .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`",
      "patternProperties": {
        "^[a-zA-Z0-9]+$": {
          "type": "string"
@@ -85162,7 +85140,7 @@
          "type": "string"
        },
        "Value": {
-          "markdownDescription": "The namespaced kernel parameter to set a `value` for.\n\nValid IPC namespace values: `\"kernel.msgmax\" | \"kernel.msgmnb\" | \"kernel.msgmni\" | \"kernel.sem\" | \"kernel.shmall\" | \"kernel.shmmax\" | \"kernel.shmmni\" | \"kernel.shm_rmid_forced\"` , and `Sysctls` that start with `\"fs.mqueue.*\"`\n\nValid network namespace values: `Sysctls` that start with `\"net.*\"`\n\nAll of these values are supported by Fargate.",
+          "markdownDescription": "The namespaced kernel parameter to set a `value` for.\n\nValid IPC namespace values: `\"kernel.msgmax\" | \"kernel.msgmnb\" | \"kernel.msgmni\" | \"kernel.sem\" | \"kernel.shmall\" | \"kernel.shmmax\" | \"kernel.shmmni\" | \"kernel.shm_rmid_forced\"` , and `Sysctls` that start with `\"fs.mqueue.*\"`\n\nValid network namespace values: `Sysctls` that start with `\"net.*\"` . Only namespaced `Sysctls` that exist within the container starting with \"net.*\" are accepted.\n\nAll of these values are supported by Fargate.",
          "title": "Value",
          "type": "string"
        }
@@ -90856,7 +90834,7 @@
          "type": "string"
        },
        "ClusterMode": {
-          "markdownDescription": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Valkey or Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/modify-cluster-mode.html) .",
+          "markdownDescription": "The mode can be enabled or disabled. To change the cluster mode from disabled to enabled, you must first set the cluster mode to compatible. The compatible mode allows your Valkey or Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/modify-cluster-mode.html) .",
          "title": "ClusterMode",
          "type": "string"
        },
@@ -93683,7 +93661,7 @@
          "items": {
            "type": "string"
          },
-          "markdownDescription": "The host names. The maximum size of each name is 128 characters. The comparison is case insensitive. The following wildcard characters are supported: * (matches 0 or more characters) and ? (matches exactly 1 character).\n\nIf you specify multiple strings, the condition is satisfied if one of the strings matches the host name.",
+          "markdownDescription": "The host names. 
The maximum size of each name is 128 characters. The comparison is case insensitive. The following wildcard characters are supported: * (matches 0 or more characters) and ? (matches exactly 1 character). You must include at least one \".\" character. You can include only alphabetical characters after the final \".\" character.\n\nIf you specify multiple strings, the condition is satisfied if one of the strings matches the host name.",
          "title": "Values",
          "type": "array"
        }
@@ -93950,7 +93928,7 @@
          "items": {
            "$ref": "#/definitions/AWS::ElasticLoadBalancingV2::LoadBalancer.LoadBalancerAttribute"
          },
-          "markdownDescription": "The load balancer attributes.",
+          "markdownDescription": "The load balancer attributes. Attributes that you do not modify retain their current values.",
          "title": "LoadBalancerAttributes",
          "type": "array"
        },
@@ -94181,7 +94159,7 @@
          "items": {
            "$ref": "#/definitions/AWS::ElasticLoadBalancingV2::TargetGroup.TargetGroupAttribute"
          },
-          "markdownDescription": "The target group attributes.",
+          "markdownDescription": "The target group attributes. Attributes that you do not modify retain their current values.",
          "title": "TargetGroupAttributes",
          "type": "array"
        },
@@ -103189,8 +103167,6 @@
          "type": "string"
        },
        "DesiredEC2Instances": {
-          "markdownDescription": "The number of EC2 instances that you want this fleet to host. When creating a new fleet, GameLift automatically sets this value to \"1\" and initiates a single instance. Once the fleet is active, update this value to trigger GameLift to add or remove instances from the fleet.",
-          "title": "DesiredEC2Instances",
          "type": "number"
        },
        "EC2InboundPermissions": {
@@ -103230,8 +103206,6 @@
          "type": "array"
        },
        "MaxSize": {
-          "markdownDescription": "The maximum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 1.",
-          "title": "MaxSize",
          "type": "number"
        },
        "MetricGroups": {
@@ -103243,8 +103217,6 @@
          "type": "array"
        },
        "MinSize": {
-          "markdownDescription": "The minimum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 0.",
-          "title": "MinSize",
          "type": "number"
        },
        "Name": {
@@ -163628,7 +163600,7 @@
          "items": {
            "$ref": "#/definitions/AWS::MediaPackageV2::OriginEndpoint.HlsManifestConfiguration"
          },
-          "markdownDescription": "The HLS manfiests associated with the origin endpoint configuration.",
+          "markdownDescription": "The HLS manifests associated with the origin endpoint configuration.",
          "title": "HlsManifests",
          "type": "array"
        },
@@ -163842,7 +163814,7 @@
          "title": "FilterConfiguration"
        },
        "ManifestName": {
-          "markdownDescription": "A short short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, `index` . MediaPackage automatically inserts the format extension, such as `.m3u8` . You can't use the same manifest name if you use HLS manifest and low-latency HLS manifest. The `manifestName` on the `HLSManifest` object overrides the `manifestName` you provided on the `originEndpoint` object.",
+          "markdownDescription": "A short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, `index` . MediaPackage automatically inserts the format extension, such as `.m3u8` . You can't use the same manifest name if you use an HLS manifest and a low-latency HLS manifest. 
The `manifestName` on the `HLSManifest` object overrides the `manifestName` you provided on the `originEndpoint` object.", "title": "ManifestName", "type": "string" }, @@ -268711,7 +268683,7 @@ "type": "string" }, "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the headers of the request are more numerous or larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to AWS WAF .\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available headers normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.", + "markdownDescription": "What AWS WAF should do if the headers determined by your match scope are more numerous or larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to AWS WAF .\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available headers normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.", "title": "OversizeHandling", "type": "string" } @@ -270213,7 +270185,7 @@ "type": "string" }, "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the headers of the request are more numerous or larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to AWS WAF .\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available headers normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.", + "markdownDescription": "What AWS WAF should do if the headers determined by your match scope are more numerous or larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to AWS WAF .\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available headers normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.", "title": "OversizeHandling", "type": "string" }