@awsless/awsless 0.0.454 → 0.0.455

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/bin.js CHANGED
@@ -38,6 +38,7 @@ import { confirm, isCancel, log } from "@clack/prompts";
  // src/util/workspace.ts
  import {
  DynamoLockBackend,
+ enableDebug,
  S3StateBackend,
  Terraform,
  WorkSpace
@@ -123,6 +124,9 @@ var createWorkSpace = async (props) => {
  const terraform = new Terraform({
  providerLocation: join2(homedir(), `.awsless/providers`)
  });
+ if (process.env.VERBOSE) {
+ enableDebug();
+ }
  const aws = await terraform.install("hashicorp", "aws", "5.94.1");
  const workspace = new WorkSpace({
  providers: [
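
Note: the hunk above gates the new enableDebug() call behind a VERBOSE environment variable, so any truthy value (for example VERBOSE=1 in the CLI's environment) switches on debug output while the workspace is created. A minimal TypeScript sketch of the same pattern; the module specifier is an assumption, since the bundled import above does not show where enableDebug comes from:

// Sketch only: mirrors the VERBOSE gate added in createWorkSpace.
// Assumption: enableDebug is exported by @awsless/formation, like the
// other workspace primitives imported next to it in the bundle.
import { enableDebug } from "@awsless/formation";

if (process.env.VERBOSE) {
  // e.g. launch the CLI with VERBOSE=1 set in the shell environment
  enableDebug();
}
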
@@ -992,35 +996,33 @@ var AppSchema = z19.object({
  import { z as z32 } from "zod";
 
  // src/feature/cache/schema.ts
+ import { gibibytes as gibibytes2 } from "@awsless/size";
  import { z as z20 } from "zod";
- var TypeSchema = z20.enum([
- "t4g.small",
- "t4g.medium",
- "r6g.large",
- "r6g.xlarge",
- "r6g.2xlarge",
- "r6g.4xlarge",
- "r6g.8xlarge",
- "r6g.12xlarge",
- "r6g.16xlarge",
- "r6gd.xlarge",
- "r6gd.2xlarge",
- "r6gd.4xlarge",
- "r6gd.8xlarge"
- ]);
- var PortSchema = z20.number().int().min(1).max(5e4);
- var ShardsSchema = z20.number().int().min(0).max(100);
- var ReplicasPerShardSchema = z20.number().int().min(0).max(5);
- var EngineVersionSchema = z20.enum(["7.3", "7.2"]);
+ var StorageSchema = SizeSchema.refine(sizeMin(gibibytes2(1)), "Minimum storage size is 1 GB").refine(
+ sizeMax(gibibytes2(5e3)),
+ "Maximum storage size is 5000 GB"
+ );
+ var MinimumStorageSchema = StorageSchema.describe(
+ "The lower limit for data storage the cache is set to use. You can specify a size value from 1 GB to 5000 GB."
+ );
+ var MaximumStorageSchema = StorageSchema.describe(
+ "The upper limit for data storage the cache is set to use. You can specify a size value from 1 GB to 5000 GB."
+ );
+ var EcpuSchema = z20.number().int().min(1e3).max(15e6);
+ var MinimumEcpuSchema = EcpuSchema.describe(
+ "The minimum number of ECPUs the cache can consume per second. You can specify a integer from 1,000 to 15,000,000."
+ );
+ var MaximumEcpuSchema = EcpuSchema.describe(
+ "The maximum number of ECPUs the cache can consume per second. You can specify a integer from 1,000 to 15,000,000."
+ );
  var CachesSchema = z20.record(
  ResourceIdSchema,
  z20.object({
- type: TypeSchema.default("t4g.small"),
- port: PortSchema.default(6379),
- shards: ShardsSchema.default(1),
- replicasPerShard: ReplicasPerShardSchema.default(1),
- engineVersion: EngineVersionSchema.default("7.3"),
- dataTiering: z20.boolean().default(false)
+ minStorage: MinimumStorageSchema.optional(),
+ maxStorage: MaximumStorageSchema.optional(),
+ minECPU: MinimumEcpuSchema.optional(),
+ maxECPU: MaximumEcpuSchema.optional(),
+ snapshotRetentionLimit: z20.number().int().positive().default(1)
  })
  ).optional().describe("Define the caches in your stack. For access to the cache put your functions inside the global VPC.");
 
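
Note: the new CachesSchema replaces the MemoryDB node settings (type, port, shards, replicasPerShard, engineVersion, dataTiering) with ElastiCache Serverless usage limits. A hypothetical cache entry under the new schema could look like the TypeScript sketch below; the "sessions" id and the concrete values are made up, only the field names, string size format, and ranges come from the schema above:

// Sketch of one entry in a stack's caches block under the new schema.
const caches = {
  sessions: {
    minStorage: "1 GB", // 1 GB to 5000 GB
    maxStorage: "10 GB", // 1 GB to 5000 GB
    minECPU: 1000, // 1,000 to 15,000,000 ECPUs per second
    maxECPU: 100000,
    snapshotRetentionLimit: 1, // positive integer, defaults to 1
  },
};
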
@@ -1111,10 +1113,10 @@ var CronsSchema = z24.record(
  ).optional().describe(`Define the cron jobs in your stack.`);
 
  // src/feature/search/schema.ts
- import { gibibytes as gibibytes2 } from "@awsless/size";
+ import { gibibytes as gibibytes3 } from "@awsless/size";
  import { z as z25 } from "zod";
  var VersionSchema = z25.enum(["2.13", "2.11", "2.9", "2.7", "2.5", "2.3", "1.3"]);
- var TypeSchema2 = z25.enum([
+ var TypeSchema = z25.enum([
  "t3.small",
  "t3.medium",
  "m3.medium",
@@ -1188,11 +1190,11 @@ var TypeSchema2 = z25.enum([
  "r6gd.12xlarge",
  "r6gd.16xlarge"
  ]);
- var StorageSizeSchema = SizeSchema.refine(sizeMin(gibibytes2(10)), "Minimum storage size is 10 GB").refine(sizeMax(gibibytes2(100)), "Maximum storage size is 100 GB").describe("The size of the function's /tmp directory. You can specify a size value from 512 MB to 10 GiB.");
+ var StorageSizeSchema = SizeSchema.refine(sizeMin(gibibytes3(10)), "Minimum storage size is 10 GB").refine(sizeMax(gibibytes3(100)), "Maximum storage size is 100 GB").describe("The size of the function's /tmp directory. You can specify a size value from 512 MB to 10 GiB.");
  var SearchsSchema = z25.record(
  ResourceIdSchema,
  z25.object({
- type: TypeSchema2.default("t3.small"),
+ type: TypeSchema.default("t3.small"),
  count: z25.number().int().min(1).default(1),
  version: VersionSchema.default("2.13"),
  storage: StorageSizeSchema.default("10 GB")
@@ -1899,6 +1901,7 @@ var authFeature = defineFeature({
  // src/feature/cache/index.ts
  import { $ as $2, Group as Group2 } from "@awsless/formation";
  import { constantCase as constantCase3 } from "change-case";
+ import { toGibibytes } from "@awsless/size";
  var typeGenCode = `
  import { Cluster, CommandOptions } from '@awsless/redis'
 
@@ -1936,72 +1939,84 @@ var cacheFeature = defineFeature({
  resourceName: id,
  seperator: "-"
  });
- const subnetGroup = new $2.aws.memorydb.SubnetGroup(
- group,
- "subnets",
- {
- name,
- subnetIds: ctx.shared.get("vpc", "private-subnets")
- },
- {
- // retainOnDelete: retain,
- // import: ctx.import ? name : undefined,
- }
- );
- const securityGroup = new $2.aws.security.Group(
- group,
- "security",
- {
- name,
- vpcId: ctx.shared.get("vpc", "id"),
- description: name
- },
- {
- // retainOnDelete: retain,
- // import: ctx.import ? name : undefined,
- }
- );
- new $2.aws.vpc.SecurityGroupIngressRule(group, "rule-ip-v4", {
+ const securityGroup = new $2.aws.security.Group(group, "security", {
+ name,
+ vpcId: ctx.shared.get("vpc", "id"),
+ description: name
+ });
+ const cache = new $2.aws.elasticache.ServerlessCache(group, "cache", {
+ name,
+ engine: "valkey",
+ dailySnapshotTime: "02:00",
+ majorEngineVersion: "8",
+ snapshotRetentionLimit: props.snapshotRetentionLimit,
+ securityGroupIds: [securityGroup.id],
+ subnetIds: ctx.shared.get("vpc", "private-subnets"),
+ cacheUsageLimits: [
+ {
+ dataStorage: props.minStorage || props.maxStorage ? [
+ {
+ minimum: props.minStorage && toGibibytes(props.minStorage),
+ maximum: props.maxStorage && toGibibytes(props.maxStorage),
+ unit: "GB"
+ }
+ ] : [],
+ ecpuPerSecond: props.minECPU || props.maxECPU ? [
+ {
+ minimum: props.minECPU,
+ maximum: props.maxECPU
+ }
+ ] : []
+ }
+ ]
+ });
+ const masterHost = cache.endpoint.pipe((v) => v.at(0).address);
+ const masterPort = cache.endpoint.pipe((v) => v.at(0).port);
+ new $2.aws.vpc.SecurityGroupIngressRule(group, "master-rule-ip-v4", {
  securityGroupId: securityGroup.id,
- description: `Allow ipv4 on port: ${props.port}`,
+ description: masterPort.pipe((port) => `Allow ipv4 on port: ${port}`),
  ipProtocol: "tcp",
  cidrIpv4: "0.0.0.0/0",
- fromPort: props.port,
- toPort: props.port
+ fromPort: masterPort,
+ toPort: masterPort
  });
- new $2.aws.vpc.SecurityGroupIngressRule(group, "rule-ip-v6", {
+ new $2.aws.vpc.SecurityGroupIngressRule(group, "master-rule-ip-v6", {
  securityGroupId: securityGroup.id,
- description: `Allow ipv6 on port: ${props.port}`,
+ description: masterPort.pipe((port) => `Allow ipv6 on port: ${port}`),
  ipProtocol: "tcp",
  cidrIpv6: "::/0",
- fromPort: props.port,
- toPort: props.port
+ fromPort: masterPort,
+ toPort: masterPort
+ });
+ const slaveHost = cache.readerEndpoint.pipe((v) => v.at(0).address);
+ const slavePort = cache.readerEndpoint.pipe((v) => v.at(0).port);
+ new $2.aws.vpc.SecurityGroupIngressRule(group, "slave-rule-ip-v4", {
+ securityGroupId: securityGroup.id,
+ description: slavePort.pipe((port) => `Allow ipv4 on port: ${port}`),
+ ipProtocol: "tcp",
+ cidrIpv4: "0.0.0.0/0",
+ fromPort: slavePort,
+ toPort: slavePort
+ });
+ new $2.aws.vpc.SecurityGroupIngressRule(group, "slave-rule-ip-v6", {
+ securityGroupId: securityGroup.id,
+ description: slavePort.pipe((port) => `Allow ipv6 on port: ${port}`),
+ ipProtocol: "tcp",
+ cidrIpv6: "::/0",
+ fromPort: slavePort,
+ toPort: slavePort
  });
- const cluster = new $2.aws.memorydb.Cluster(
- group,
- "cluster",
- {
- name,
- aclName: "open-access",
- nodeType: `db.${props.type}`,
- engine: "valkey",
- engineVersion: props.engineVersion,
- port: props.port,
- securityGroupIds: [securityGroup.id],
- subnetGroupName: subnetGroup.name,
- dataTiering: props.dataTiering,
- numReplicasPerShard: props.replicasPerShard,
- numShards: props.shards
- },
- {
- // retainOnDelete: retain,
- // import: ctx.import ? name : undefined,
- }
- );
  const prefix = `CACHE_${constantCase3(ctx.stack.name)}_${constantCase3(id)}`;
- const host = cluster.clusterEndpoint.pipe((v) => v[0].address);
- ctx.addEnv(`${prefix}_HOST`, host);
- ctx.addEnv(`${prefix}_PORT`, props.port.toString());
+ ctx.addEnv(`${prefix}_HOST`, masterHost);
+ ctx.addEnv(
+ `${prefix}_PORT`,
+ masterPort.pipe((p2) => p2.toString())
+ );
+ ctx.addEnv(`${prefix}_SLAVE_HOST`, slaveHost);
+ ctx.addEnv(
+ `${prefix}_SLAVE_PORT`,
+ slavePort.pipe((p2) => p2.toString())
+ );
  }
  }
  });
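
Note: the cache feature now injects reader endpoints as well, adding CACHE_<STACK>_<ID>_SLAVE_HOST and _SLAVE_PORT next to the existing _HOST and _PORT. A hedged TypeScript sketch of how a function could read them; the APP/SESSIONS segments are placeholder stack and cache names produced by the constantCase prefix shown above:

// Reads the endpoints injected by the cache feature; names follow the
// CACHE_<STACK>_<ID>_* prefix built above. Hand them to whatever
// Valkey/Redis client the function already uses.
const writer = {
  host: process.env.CACHE_APP_SESSIONS_HOST,
  port: Number(process.env.CACHE_APP_SESSIONS_PORT),
};
const reader = {
  host: process.env.CACHE_APP_SESSIONS_SLAVE_HOST,
  port: Number(process.env.CACHE_APP_SESSIONS_SLAVE_PORT),
};
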
@@ -3991,7 +4006,7 @@ var rpcFeature = defineFeature({
  import { Group as Group14 } from "@awsless/formation";
  import { constantCase as constantCase9 } from "change-case";
  import { $ as $14 } from "@awsless/formation";
- import { toGibibytes } from "@awsless/size";
+ import { toGibibytes as toGibibytes2 } from "@awsless/size";
  var typeGenCode4 = `
  import { AnyStruct, Table } from '@awsless/open-search'
 
@@ -4033,7 +4048,7 @@ var searchFeature = defineFeature({
  },
  ebsOptions: {
  ebsEnabled: true,
- volumeSize: toGibibytes(props.storage),
+ volumeSize: toGibibytes2(props.storage),
  volumeType: "gp2"
  },
  domainEndpointOptions: {
@@ -583,35 +583,33 @@ var AppSchema = z19.object({
  import { z as z32 } from "zod";
 
  // src/feature/cache/schema.ts
+ import { gibibytes as gibibytes2 } from "@awsless/size";
  import { z as z20 } from "zod";
- var TypeSchema = z20.enum([
- "t4g.small",
- "t4g.medium",
- "r6g.large",
- "r6g.xlarge",
- "r6g.2xlarge",
- "r6g.4xlarge",
- "r6g.8xlarge",
- "r6g.12xlarge",
- "r6g.16xlarge",
- "r6gd.xlarge",
- "r6gd.2xlarge",
- "r6gd.4xlarge",
- "r6gd.8xlarge"
- ]);
- var PortSchema = z20.number().int().min(1).max(5e4);
- var ShardsSchema = z20.number().int().min(0).max(100);
- var ReplicasPerShardSchema = z20.number().int().min(0).max(5);
- var EngineVersionSchema = z20.enum(["7.3", "7.2"]);
+ var StorageSchema = SizeSchema.refine(sizeMin(gibibytes2(1)), "Minimum storage size is 1 GB").refine(
+ sizeMax(gibibytes2(5e3)),
+ "Maximum storage size is 5000 GB"
+ );
+ var MinimumStorageSchema = StorageSchema.describe(
+ "The lower limit for data storage the cache is set to use. You can specify a size value from 1 GB to 5000 GB."
+ );
+ var MaximumStorageSchema = StorageSchema.describe(
+ "The upper limit for data storage the cache is set to use. You can specify a size value from 1 GB to 5000 GB."
+ );
+ var EcpuSchema = z20.number().int().min(1e3).max(15e6);
+ var MinimumEcpuSchema = EcpuSchema.describe(
+ "The minimum number of ECPUs the cache can consume per second. You can specify a integer from 1,000 to 15,000,000."
+ );
+ var MaximumEcpuSchema = EcpuSchema.describe(
+ "The maximum number of ECPUs the cache can consume per second. You can specify a integer from 1,000 to 15,000,000."
+ );
  var CachesSchema = z20.record(
  ResourceIdSchema,
  z20.object({
- type: TypeSchema.default("t4g.small"),
- port: PortSchema.default(6379),
- shards: ShardsSchema.default(1),
- replicasPerShard: ReplicasPerShardSchema.default(1),
- engineVersion: EngineVersionSchema.default("7.3"),
- dataTiering: z20.boolean().default(false)
+ minStorage: MinimumStorageSchema.optional(),
+ maxStorage: MaximumStorageSchema.optional(),
+ minECPU: MinimumEcpuSchema.optional(),
+ maxECPU: MaximumEcpuSchema.optional(),
+ snapshotRetentionLimit: z20.number().int().positive().default(1)
  })
  ).optional().describe("Define the caches in your stack. For access to the cache put your functions inside the global VPC.");
 
@@ -702,10 +700,10 @@ var CronsSchema = z24.record(
  ).optional().describe(`Define the cron jobs in your stack.`);
 
  // src/feature/search/schema.ts
- import { gibibytes as gibibytes2 } from "@awsless/size";
+ import { gibibytes as gibibytes3 } from "@awsless/size";
  import { z as z25 } from "zod";
  var VersionSchema = z25.enum(["2.13", "2.11", "2.9", "2.7", "2.5", "2.3", "1.3"]);
- var TypeSchema2 = z25.enum([
+ var TypeSchema = z25.enum([
  "t3.small",
  "t3.medium",
  "m3.medium",
@@ -779,11 +777,11 @@ var TypeSchema2 = z25.enum([
  "r6gd.12xlarge",
  "r6gd.16xlarge"
  ]);
- var StorageSizeSchema = SizeSchema.refine(sizeMin(gibibytes2(10)), "Minimum storage size is 10 GB").refine(sizeMax(gibibytes2(100)), "Maximum storage size is 100 GB").describe("The size of the function's /tmp directory. You can specify a size value from 512 MB to 10 GiB.");
+ var StorageSizeSchema = SizeSchema.refine(sizeMin(gibibytes3(10)), "Minimum storage size is 10 GB").refine(sizeMax(gibibytes3(100)), "Maximum storage size is 100 GB").describe("The size of the function's /tmp directory. You can specify a size value from 512 MB to 10 GiB.");
  var SearchsSchema = z25.record(
  ResourceIdSchema,
  z25.object({
- type: TypeSchema2.default("t3.small"),
+ type: TypeSchema.default("t3.small"),
  count: z25.number().int().min(1).default(1),
  version: VersionSchema.default("2.13"),
  storage: StorageSizeSchema.default("10 GB")
Binary file