Open6
mysqldumpするだけのFargateタスクを作ってるメモ

lib/fargate-cluster-stack.ts
// lib/fargate-cluster-stack.ts
import * as cdk from 'aws-cdk-lib';
import { Construct } from 'constructs';
import * as ec2 from 'aws-cdk-lib/aws-ec2';
import * as ecs from 'aws-cdk-lib/aws-ecs';
interface FargateClusterStackProps extends cdk.StackProps {
vpc: ec2.IVpc;
}
export class FargateClusterStack extends cdk.Stack {
  // Exposed so later stacks (task runners, schedulers) can reference the cluster.
  public readonly cluster: ecs.Cluster;

  constructor(scope: Construct, id: string, props: FargateClusterStackProps) {
    super(scope, id, props);

    // ECS cluster on the shared VPC.
    // Optional: enables CloudWatch Container Insights metrics.
    const cluster = new ecs.Cluster(this, 'FargateCluster', {
      vpc: props.vpc,
      containerInsights: true,
    });
    this.cluster = cluster;
  }
}
bin/app.ts
@@ -1,9 +1,9 @@
+import { FargateClusterStack } from '../lib/fargate-cluster-stack';
+new FargateClusterStack(app, `${namePrefix}-fargate-cluster`, {
+ vpc: vpc.vpc,
+ env: commonEnv,
});
これでまずecsクラスターができる

タスク定義付ける
import * as logs from 'aws-cdk-lib/aws-logs';
export class FargateClusterStack extends cdk.Stack {
  // Exposed so later stacks can run tasks against this cluster.
  public readonly cluster: ecs.Cluster;

  constructor(scope: Construct, id: string, props: FargateClusterStackProps) {
    super(scope, id, props);

    // ECS cluster on the shared VPC, with CloudWatch Container Insights enabled.
    this.cluster = new ecs.Cluster(this, 'FargateCluster', {
      vpc: props.vpc,
      containerInsights: true,
    });

    // Minimal Fargate task definition (0.5 vCPU / 1 GiB) — just enough to
    // prove the cluster can run a task before the real dump image exists.
    const taskDef = new ecs.FargateTaskDefinition(this, 'DumpTaskDef', {
      cpu: 512,
      memoryLimitMiB: 1024,
    });

    // Throwaway container: prints one line and exits; stdout goes to
    // CloudWatch Logs with a short (3-day) retention.
    const dummyContainer: ecs.ContainerDefinitionOptions = {
      image: ecs.ContainerImage.fromRegistry('amazonlinux'),
      command: ['echo', 'Hello from dummy task'],
      essential: true,
      logging: ecs.LogDrivers.awsLogs({
        streamPrefix: 'dummy',
        logRetention: logs.RetentionDays.THREE_DAYS,
      }),
    };
    taskDef.addContainer('DummyContainer', dummyContainer);
  }
}
こんな感じでAmazonLinuxを使ったコンテナーができる
当該クラスターからタスクを引く
こんな感じにログに出てくればよいだろう

イメージをカスタムする
lib/fargate-cluster-stack.ts
@@ -4,6 +4,7 @@ import { Construct } from 'constructs';
import * as ec2 from 'aws-cdk-lib/aws-ec2';
import * as ecs from 'aws-cdk-lib/aws-ecs';
import * as logs from 'aws-cdk-lib/aws-logs';
+import * as path from 'path';
interface FargateClusterStackProps extends cdk.StackProps {
vpc: ec2.IVpc;
@@ -26,7 +27,7 @@ export class FargateClusterStack extends cdk.Stack {
});
taskDef.addContainer('DummyContainer', {
- image: ecs.ContainerImage.fromRegistry('amazonlinux'),
+ image: ecs.ContainerImage.fromAsset(path.join(__dirname, '../docker/mysqldump')),
command: ['echo', 'Hello from dummy task'],
essential: true,
logging: ecs.LogDrivers.awsLogs({
関連ファイルを作成する
docker/mysqldump/Dockerfile
FROM amazonlinux:2

# mysql      -> provides the mysqldump client
# gzip       -> compresses the dump before upload
# jq         -> parses the Secrets Manager JSON payload
# curl/unzip -> only needed to bootstrap the AWS CLI v2 installer
#
# Everything happens in one RUN layer; the installer archive and the yum
# cache are removed at the end so they never bloat the image.
# `curl -f` makes the build fail on an HTTP error instead of silently
# saving an error page that unzip would then choke on.
RUN yum install -y \
    mysql \
    gzip \
    curl \
    unzip \
    jq && \
    curl -fsSL "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \
    unzip -q awscliv2.zip && \
    ./aws/install && \
    rm -rf awscliv2.zip aws && \
    yum clean all && \
    rm -rf /var/cache/yum

COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

ENTRYPOINT ["/entrypoint.sh"]
docker/mysqldump/entrypoint.sh
#!/bin/bash
# Entrypoint for the mysqldump Fargate task.
# First iteration: only proves the container starts and the mysql client
# tooling is installed; the actual dump logic is added in later steps.
set -euo pipefail
echo "=== Starting mysqldump container ==="
# Sanity check — exits non-zero (stopping the task) if mysqldump is missing.
mysqldump --version
実行するとECRにイメージがpushされ、それを使うようになっちょる
タスクを実行するとログに出てくる

rdsの情報をecsに引き渡すように
bin/app.ts
@@ -23,7 +23,7 @@ const monitoringStack = new MonitoringStack(app, `${namePrefix}-monitoring`, {
webhookUrl: 'https://hooks.slack.com/services/XXXXXXXXX/XXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXX', // 注: 実URLはドキュメントに載せない。漏えいしたWebhookはSlack側で再発行すること
});
-new RdsStack(app, `${namePrefix}-rds`, {
+const rdsStack = new RdsStack(app, `${namePrefix}-rds`, {
vpc: vpc.vpc,
namePrefix,
env: commonEnv,
@@ -39,4 +39,6 @@ new Ec2Stack(app, `${namePrefix}-ec2`, {
new FargateClusterStack(app, `${namePrefix}-fargate-cluster`, {
vpc: vpc.vpc,
env: commonEnv,
+ dbInstance: rdsStack.dbInstance,
+ dbName: rdsStack.dbName,
});
lib/rds-stack.ts
@@ -13,6 +13,9 @@ interface RdsStackProps extends cdk.StackProps {
}
export class RdsStack extends cdk.Stack {
+ public readonly dbInstance: rds.DatabaseInstance;
+ public readonly dbName: string = 'defaultDB';
+
constructor(scope: Construct, id: string, props: RdsStackProps) {
super(scope, id, props);
@@ -68,10 +71,11 @@ export class RdsStack extends cdk.Stack {
// バックアップ7days
backupRetention: cdk.Duration.days(7),
// Default Database
- databaseName: 'defaultDB',
+ databaseName: this.dbName,
parameterGroup,
cloudwatchLogsExports: ['slowquery'],
});
+ this.dbInstance = rdsInstance;
const alarmTopic = new sns.Topic(this, 'AlarmTopic', {
displayName: 'RDS CPU Alarm Topic',
lib/fargate-cluster-stack.ts
@@ -3,11 +3,14 @@ import * as cdk from 'aws-cdk-lib';
import { Construct } from 'constructs';
import * as ec2 from 'aws-cdk-lib/aws-ec2';
import * as ecs from 'aws-cdk-lib/aws-ecs';
+import * as rds from 'aws-cdk-lib/aws-rds';
import * as logs from 'aws-cdk-lib/aws-logs';
import * as path from 'path';
interface FargateClusterStackProps extends cdk.StackProps {
vpc: ec2.IVpc;
+ dbInstance: rds.IDatabaseInstance;
+ dbName: string;
}
export class FargateClusterStack extends cdk.Stack {
@@ -30,6 +33,12 @@ export class FargateClusterStack extends cdk.Stack {
image: ecs.ContainerImage.fromAsset(path.join(__dirname, '../docker/mysqldump')),
command: ['echo', 'Hello from dummy task'],
essential: true,
+ environment: {
+ DB_HOST: props.dbInstance.dbInstanceEndpointAddress,
+ DB_PORT: props.dbInstance.dbInstanceEndpointPort,
+ DB_NAME: props.dbName,
+ DB_USER: 'admin', // SecretsManagerに保存されてるが今回は暫定で直書き
+ },
logging: ecs.LogDrivers.awsLogs({
streamPrefix: 'dummy',
logRetention: logs.RetentionDays.THREE_DAYS,
docker/mysqldump/entrypoint.sh
@@ -3,3 +3,4 @@ set -euo pipefail
echo "=== Starting mysqldump container ==="
mysqldump --version
+env
これで一応環境変数にある程度渡ってくる

secret managerから情報を引き出す
lib/fargate-cluster-stack.ts
@@ -9,7 +9,7 @@ import * as path from 'path';
interface FargateClusterStackProps extends cdk.StackProps {
vpc: ec2.IVpc;
- dbInstance: rds.IDatabaseInstance;
+ dbInstance: rds.DatabaseInstance;
dbName: string;
}
@@ -37,12 +37,15 @@ export class FargateClusterStack extends cdk.Stack {
DB_HOST: props.dbInstance.dbInstanceEndpointAddress,
DB_PORT: props.dbInstance.dbInstanceEndpointPort,
DB_NAME: props.dbName,
- DB_USER: 'admin', // SecretsManagerに保存されてるが今回は暫定で直書き
+ DB_SECRET_NAME: props.dbInstance.secret?.secretName || '',
},
logging: ecs.LogDrivers.awsLogs({
streamPrefix: 'dummy',
logRetention: logs.RetentionDays.THREE_DAYS,
}),
});
+ if (props.dbInstance.secret) {
+ props.dbInstance.secret.grantRead(taskDef.taskRole);
+ }
}
}
docker/mysqldump/entrypoint.sh
@@ -3,3 +3,18 @@ set -euo pipefail
echo "=== Starting mysqldump container ==="
mysqldump --version
+env
+
+if [[ -z "${DB_SECRET_NAME:-}" ]]; then
+ echo "[ERROR] DB_SECRET_NAME is not set"
+ exit 1
+fi
+
+SECRET_JSON=$(aws secretsmanager get-secret-value --secret-id "$DB_SECRET_NAME" --query SecretString --output text)
+DB_USER=$(echo "$SECRET_JSON" | jq -r '.username')
+DB_PASS=$(echo "$SECRET_JSON" | jq -r '.password')
+
+echo "[INFO] Dumping DB: $DB_NAME at $DB_HOST:$DB_PORT with user $DB_USER"  # パスワードはログ(CloudWatch)に出さないこと
+
+mysqldump -h "$DB_HOST" -P "$DB_PORT" -u "$DB_USER" -p"$DB_PASS" "$DB_NAME" | gzip > /dump.sql.gz \
+ && echo "✅ mysqldump completed successfully: /dump.sql.gz created."
これで完全にDBにアクセス可能となり、dumpが成功したはず

S3に転送
書く気力が折れそうw
ここでは事前にバケットを取得している
lib/fargate-cluster-stack.ts
@@ -6,6 +6,7 @@ import * as ecs from 'aws-cdk-lib/aws-ecs';
import * as rds from 'aws-cdk-lib/aws-rds';
import * as logs from 'aws-cdk-lib/aws-logs';
import * as path from 'path';
+import * as iam from 'aws-cdk-lib/aws-iam';
interface FargateClusterStackProps extends cdk.StackProps {
vpc: ec2.IVpc;
@@ -38,12 +39,18 @@ export class FargateClusterStack extends cdk.Stack {
DB_PORT: props.dbInstance.dbInstanceEndpointPort,
DB_NAME: props.dbName,
DB_SECRET_NAME: props.dbInstance.secret?.secretName || '',
+ S3_BUCKET: '<バケット>',
+ S3_PREFIX: 'dbdump',
},
logging: ecs.LogDrivers.awsLogs({
streamPrefix: 'dummy',
logRetention: logs.RetentionDays.THREE_DAYS,
}),
});
+ taskDef.taskRole.addToPrincipalPolicy(new iam.PolicyStatement({
+ actions: ['s3:PutObject'],
+ resources: ['arn:aws:s3:::<バケット>/dbdump/*'],
+ }));
if (props.dbInstance.secret) {
props.dbInstance.secret.grantRead(taskDef.taskRole);
}
docker/mysqldump/entrypoint.sh
@@ -2,4 +2,20 @@
set -euo pipefail
echo "=== Starting mysqldump container ==="
+
mysqldump --version
+
+# Secrets Manager から取得
+echo "=== Fetching credentials from Secrets Manager ==="
+secret=$(aws secretsmanager get-secret-value --secret-id "$DB_SECRET_NAME" --query SecretString --output text)
+DB_USER=$(echo "$secret" | jq -r '.username')
+DB_PASS=$(echo "$secret" | jq -r '.password')
+
+# ダンプファイル生成
+echo "=== Dumping database ==="
+mysqldump -h "$DB_HOST" -P "$DB_PORT" -u "$DB_USER" -p"$DB_PASS" "$DB_NAME" | gzip > /dump.sql.gz
+
+echo "=== Uploading to S3 ==="
+aws s3 cp /dump.sql.gz "s3://$S3_BUCKET/$S3_PREFIX/dump.sql.gz"
+
+echo "=== All done ==="