diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..b010ffd --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +node_modules +.DS_Store* diff --git a/README.md b/README.md new file mode 100644 index 0000000..9122c87 --- /dev/null +++ b/README.md @@ -0,0 +1,52 @@ +# hubot-aws + +Hubot masters aws commands + +### Commands + +``` +hubot autoscaling create --name=[group_name] --launch_name=[launch_configuration_name] - Create an AutoScaling Group +hubot autoscaling create --name=[group_name] --launch_name=[launch_configuration_name] --dry-run - Try creating an AutoScaling Group +hubot autoscaling delete --group_name=[group_name] - Delete the AutoScaling Group +hubot autoscaling delete --group_name=[group_name] --force - Delete the AutoScaling Group with live instances +hubot autoscaling launch create --name=[launch_configuration_name] - Create an AutoScaling LaunchConfiguration +hubot autoscaling launch create --name=[launch_configuration_name] --dry-run - Try creating an AutoScaling LaunchConfiguration +hubot autoscaling launch delete --name=[launch_configuration_name] - Delete the AutoScaling LaunchConfiguration +hubot autoscaling launch ls - Displays all AutoScaling LaunchConfigurations +hubot autoscaling launch ls --name=[launch_configuration_name] - Details an Autoscaling LaunchConfiguration +hubot autoscaling ls - Displays all AutoScaling Groups +hubot autoscaling ls --name=[group_name] - Details an Autoscaling Group +hubot autoscaling notification delete --group_name=[group_name] --arn=[topic_arn] - Delete the AutoScaling Notification +hubot autoscaling notification ls - Displays all AutoScaling NotificationConfigurations +hubot autoscaling notification ls --group_name=[group_name] - Details an Autoscaling NotificationConfiguration +hubot autoscaling notification put --group_name=[group_name] - Put an AutoScaling Notifications +hubot autoscaling notification put --group_name=[group_name] --dry-run - Try putting an AutoScaling Notifications 
+hubot autoscaling policy delete --group_name=[group_name] --policy_name=[policy_name] - Delete the AutoScaling Policy +hubot autoscaling policy ls - Displays all AutoScaling Policies +hubot autoscaling policy ls --group_name=[group_name] - Details an Autoscaling Policy +hubot autoscaling policy put --add --group_name=[group_name] - Put an AutoScaling ScaleOut Policy +hubot autoscaling policy put --add --group_name=[group_name] --dry-run - Try putting an AutoScaling ScaleOut Policy +hubot autoscaling policy put --remove --group_name=[group_name] - Put an AutoScaling ScaleIn Policy +hubot autoscaling policy put --remove --group_name=[group_name] --dry-run - Try putting an AutoScaling ScaleIn Policy +hubot autoscaling update --json=[json] - Update the AutoScaling Group +hubot autoscaling update --name=[name] --desired=[desired] - Update DesiredCapacity of the AutoScaling Group +hubot autoscaling update --name=[name] --max=[max] - Update MaxSize of the AutoScaling Group +hubot autoscaling update --name=[name] --min=[min] - Update MinSize of the AutoScaling Group +hubot cloudwatch alarm delete --name=[alarm_name] - Delete the Alarm +hubot cloudwatch alarm ls - Displays all Alarms +hubot cloudwatch alarm ls --name=[alarm_name] - Details an Alarm +hubot ec2 ls - Displays all Instances +hubot ec2 ls --instance_id=[instance_id] - Details an Instance +hubot ec2 run - Run an Instance +hubot ec2 run --dry-run - Try running an Instance +hubot ec2 sg create --vpc_id=[vpc_id] --group_name=[group_name] --desc=[desc] - Create a SecurityGroup +hubot ec2 sg create --vpc_id=[vpc_id] --group_name=[group_name] --desc=[desc] --dry-run - Try creating a SecurityGroup +hubot ec2 sg delete --group_id=[group_id] - Delete the SecurityGroup +hubot ec2 sg ls - Displays all SecurityGroups +hubot ec2 spot ls - Displays all SpotInstances +hubot ec2 terminate --instance_id=[instance_id] - Terminate the Instance +hubot s3 ls - Displays all S3 buckets +hubot s3 ls --bucket_name=[bucket-name] - Displays 
all objects +hubot s3 ls --bucket_name=[bucket-name] --prefix=[prefix] - Displays all objects with prefix +hubot s3 ls --bucket_name=[bucket-name] --prefix=[prefix] --marker=[marker] - Displays all objects with prefix from marker +``` diff --git a/auth.coffee b/auth.coffee new file mode 100644 index 0000000..63593d2 --- /dev/null +++ b/auth.coffee @@ -0,0 +1,14 @@ +module.exports = { + canAccess: (robot, user) -> + return true if process.env.HUBOT_AWS_DEBUG + + if robot.auth.isAdmin(user) + return true + + role = process.env.HUBOT_AWS_CAN_ACCESS_ROLE + if role && robot.auth.hasRole(user, role) + return true + else + return false +} + diff --git a/aws.coffee b/aws.coffee new file mode 100644 index 0000000..30cfc9b --- /dev/null +++ b/aws.coffee @@ -0,0 +1,8 @@ +module.exports = { + aws: -> + aws = require 'aws-sdk' + aws.config.accessKeyId = process.env.HUBOT_AWS_ACCESS_KEY_ID + aws.config.secretAccessKey = process.env.HUBOT_AWS_SECRET_ACCESS_KEY + aws.config.region = process.env.HUBOT_AWS_REGION + return aws +} diff --git a/index.coffee b/index.coffee new file mode 100644 index 0000000..d29c408 --- /dev/null +++ b/index.coffee @@ -0,0 +1,10 @@ +fs = require 'fs' +path = require 'path' + +module.exports = (robot) -> + scripts_path = path.resolve __dirname, 'scripts' + if fs.existsSync scripts_path + for category_file in fs.readdirSync(scripts_path) + category_path = path.resolve scripts_path, category_file + if fs.existsSync category_path + robot.loadFile category_path, file for file in fs.readdirSync(category_path) diff --git a/package.json b/package.json new file mode 100644 index 0000000..e2cdc70 --- /dev/null +++ b/package.json @@ -0,0 +1,33 @@ +{ + "name": "hubot-aws", + "version": "0.0.1", + "description": "Hubot masters aws commands", + "repository": { + "type": "git", + "url": "ssh://git@github.com/yoheimuta/hubot-aws.git" + }, + "main": "index.coffee", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "dependencies": { + 
"aws-sdk": "2.1.23", + "tsv": "0.2.0", + "async": "0.9.0", + "underscore": "1.8.3", + "cson": "3.0.1", + "moment": "2.10.2" + }, + "keywords": [ + "aws", + "hubot", + "hubot aws", + "hubot-aws", + "aws-cli" + ], + "bugs": { + "url": "https://github.com/yoheimuta/hubot-aws/issues" + }, + "author": "yoheimuta ", + "license": "MIT" +} diff --git a/scripts/autoscaling/create_group.coffee b/scripts/autoscaling/create_group.coffee new file mode 100644 index 0000000..3d2781d --- /dev/null +++ b/scripts/autoscaling/create_group.coffee @@ -0,0 +1,39 @@ +# Description: +# Create autoscaling group +# +# Commands: +# hubot autoscaling create --name=[group_name] --launch_name=[launch_configuration_name] --dry-run - Try creating an AutoScaling Group +# hubot autoscaling create --name=[group_name] --launch_name=[launch_configuration_name] - Create an AutoScaling Group + +fs = require 'fs' +cson = require 'cson' +util = require 'util' + +module.exports = (robot) -> + robot.respond /autoscaling create --name=(.*) --launch_name=(.*?)(| --dry-run)$/i, (msg) -> + unless require('../../auth.coffee').canAccess(robot, msg.envelope.user) + msg.send "You cannot access this feature. Please contact with admin" + return + + name = msg.match[1].trim() + conf = msg.match[2].trim() + dry_run = if msg.match[3] then true else false + + msg.send "Requesting name=#{name}, launch_name=#{conf}, dry-run=#{dry_run}..." 
+ + params = cson.parseCSONFile process.env.HUBOT_AWS_AS_GROUP_CONFIG + params.AutoScalingGroupName = name + params.LaunchConfigurationName = conf + + if dry_run + msg.send util.inspect(params, false, null) + return + + aws = require('../../aws.coffee').aws() + autoscaling = new aws.AutoScaling({apiVersion: '2011-01-01'}) + + autoscaling.createAutoScalingGroup params, (err, res)-> + if err + msg.send "Error: #{err}" + else + msg.send util.inspect(res, false, null) diff --git a/scripts/autoscaling/create_launch.coffee b/scripts/autoscaling/create_launch.coffee new file mode 100644 index 0000000..95b157c --- /dev/null +++ b/scripts/autoscaling/create_launch.coffee @@ -0,0 +1,44 @@ +# Description: +# Create autoscaling launch configurations +# +# Commands: +# hubot autoscaling launch create --name=[launch_configuration_name] --dry-run - Try creating an AutoScaling LaunchConfiguration +# hubot autoscaling launch create --name=[launch_configuration_name] - Create an AutoScaling LaunchConfiguration + +fs = require 'fs' +cson = require 'cson' +util = require 'util' + +module.exports = (robot) -> + robot.respond /autoscaling launch create --name=(.*?)(| --dry-run)$/i, (msg) -> + unless require('../../auth.coffee').canAccess(robot, msg.envelope.user) + msg.send "You cannot access this feature. Please contact with admin" + return + + name = msg.match[1].trim() + dry_run = if msg.match[2] then true else false + + msg.send "Requesting name=#{name}, dry-run=#{dry_run}..." 
+ + launch_configuration_path = process.env.HUBOT_AWS_AS_LAUNCH_CONF_CONFIG + params = cson.parseCSONFile launch_configuration_path + + params.LaunchConfigurationName = name + + userdata_path = process.env.HUBOT_AWS_AS_LAUNCH_CONF_USERDATA_PATH + if fs.existsSync userdata_path + init_file = fs.readFileSync userdata_path, 'utf-8' + params.UserData = new Buffer(init_file).toString('base64') + + if dry_run + msg.send util.inspect(params, false, null) + return + + aws = require('../../aws.coffee').aws() + autoscaling = new aws.AutoScaling({apiVersion: '2011-01-01'}) + + autoscaling.createLaunchConfiguration params, (err, res)-> + if err + msg.send "Error: #{err}" + else + msg.send util.inspect(res, false, null) diff --git a/scripts/autoscaling/delete_group.coffee b/scripts/autoscaling/delete_group.coffee new file mode 100644 index 0000000..754616a --- /dev/null +++ b/scripts/autoscaling/delete_group.coffee @@ -0,0 +1,28 @@ +# Description: +# Delete autoscaling group +# +# Commands: +# hubot autoscaling delete --group_name=[group_name] - Delete the AutoScaling Group +# hubot autoscaling delete --group_name=[group_name] --force - Delete the AutoScaling Group with live instances + +util = require 'util' + +module.exports = (robot) -> + robot.respond /autoscaling delete --group_name=(.*?)(| .*)$/i, (msg) -> + unless require('../../auth.coffee').canAccess(robot, msg.envelope.user) + msg.send "You cannot access this feature. Please contact with admin" + return + + name = msg.match[1].trim() || '' + force = if msg.match[2].trim() == '--force' then true else false + + msg.send "Requesting #{name} #{force}..." 
+ + aws = require('../../aws.coffee').aws() + autoscaling = new aws.AutoScaling({apiVersion: '2011-01-01'}) + + autoscaling.deleteAutoScalingGroup { AutoScalingGroupName: name, ForceDelete: force }, (err, res)-> + if err + msg.send "Error: #{err}" + else + msg.send util.inspect(res, false, null) diff --git a/scripts/autoscaling/delete_launch.coffee b/scripts/autoscaling/delete_launch.coffee new file mode 100644 index 0000000..3f4ea64 --- /dev/null +++ b/scripts/autoscaling/delete_launch.coffee @@ -0,0 +1,29 @@ +# Description: +# Delete autoscaling launch configurations +# +# Commands: +# hubot autoscaling launch delete --name=[launch_configuration_name] - Delete the AutoScaling LaunchConfiguration + +fs = require 'fs' +cson = require 'cson' +util = require 'util' + +module.exports = (robot) -> + robot.respond /autoscaling launch delete --name=(.*)$/i, (msg) -> + unless require('../../auth.coffee').canAccess(robot, msg.envelope.user) + msg.send "You cannot access this feature. Please contact with admin" + return + + name = msg.match[1].trim() || '' + + msg.send "Requesting #{name}..." 
+ + aws = require('../../aws.coffee').aws() + autoscaling = new aws.AutoScaling({apiVersion: '2011-01-01'}) + + autoscaling.deleteLaunchConfiguration { LaunchConfigurationName: name }, (err, res)-> + if err + msg.send "Error: #{err}" + else + msg.send util.inspect(res, false, null) + diff --git a/scripts/autoscaling/delete_notification.coffee b/scripts/autoscaling/delete_notification.coffee new file mode 100644 index 0000000..842a4e1 --- /dev/null +++ b/scripts/autoscaling/delete_notification.coffee @@ -0,0 +1,27 @@ +# Description: +# Delete an autoscaling notification +# +# Commands: +# hubot autoscaling notification delete --group_name=[group_name] --arn=[topic_arn] - Delete the AutoScaling Notification + +util = require 'util' + +module.exports = (robot) -> + robot.respond /autoscaling notification delete --group_name=(.*) --arn=(.*)$/i, (msg) -> + unless require('../../auth.coffee').canAccess(robot, msg.envelope.user) + msg.send "You cannot access this feature. Please contact with admin" + return + + group_name = msg.match[1].trim() + topic_arn = msg.match[2].trim() + + msg.send "Requesting group_name=#{group_name} topic_arn=#{topic_arn}..." 
+ + aws = require('../../aws.coffee').aws() + autoscaling = new aws.AutoScaling({apiVersion: '2011-01-01'}) + + autoscaling.deleteNotificationConfiguration { AutoScalingGroupName: group_name, TopicARN: topic_arn}, (err, res)-> + if err + msg.send "Error: #{err}" + else + msg.send util.inspect(res, false, null) diff --git a/scripts/autoscaling/delete_policy.coffee b/scripts/autoscaling/delete_policy.coffee new file mode 100644 index 0000000..6f613d2 --- /dev/null +++ b/scripts/autoscaling/delete_policy.coffee @@ -0,0 +1,28 @@ +# Description: +# Delete an autoscaling policy +# +# Commands: +# hubot autoscaling policy delete --group_name=[group_name] --policy_name=[policy_name] - Delete the AutoScaling Policy + +util = require 'util' + +module.exports = (robot) -> + robot.respond /autoscaling policy delete --group_name=(.*) --policy_name=(.*)$/i, (msg) -> + unless require('../../auth.coffee').canAccess(robot, msg.envelope.user) + msg.send "You cannot access this feature. Please contact with admin" + return + + group_name = msg.match[1].trim() + policy_name = msg.match[2].trim() + + msg.send "Requesting #{group_name} #{policy_name}..." 
+ + aws = require('../../aws.coffee').aws() + autoscaling = new aws.AutoScaling({apiVersion: '2011-01-01'}) + + autoscaling.deletePolicy { AutoScalingGroupName: group_name, PolicyName: policy_name}, (err, res)-> + if err + msg.send "Error: #{err}" + else + msg.send util.inspect(res, false, null) + diff --git a/scripts/autoscaling/ls_group.coffee b/scripts/autoscaling/ls_group.coffee new file mode 100644 index 0000000..061654f --- /dev/null +++ b/scripts/autoscaling/ls_group.coffee @@ -0,0 +1,55 @@ +# Description: +# List autoscaling group +# +# Commands: +# hubot autoscaling ls - Displays all AutoScaling Groups +# hubot autoscaling ls --name=[group_name] - Details an Autoscaling Group + +moment = require 'moment' +util = require 'util' + +module.exports = (robot) -> + robot.respond /autoscaling ls($| --name=)(.*)$/i, (msg) -> + arg_name = msg.match[2].trim() || '' + + msg.send "Fetching #{arg_name}..." + + aws = require('../../aws.coffee').aws() + autoscaling = new aws.AutoScaling({apiVersion: '2011-01-01'}) + + autoscaling.describeAutoScalingGroups (if arg_name then { AutoScalingGroupNames: [arg_name] } else null), (err, res)-> + if err + msg.send "Error: #{err}" + else + if arg_name + msg.send util.inspect(res, false, null) + else + msg.send "time\tcurrent_size\tdesired_size\tmin_size\tmax_size\taz\telb\tconf\tname" + msg.send "\ttag.Key\ttag.Value" + msg.send Array(130).join('-') + msg.send Array(130).join('-') + + messages = [] + + res.AutoScalingGroups.sort (a, b) -> + moment(a.CreatedTime) - moment(b.CreatedTime) + + for group in res.AutoScalingGroups + time = moment(group.CreatedTime).format('YYYY-MM-DD HH:mm:ssZ') + name = group.AutoScalingGroupName + conf = group.LaunchConfigurationName + az = group.AvailabilityZones.join "," + elb = group.LoadBalancerNames.join "," + min_size = group.MinSize + max_size = group.MaxSize + desired_size = group.DesiredCapacity + current_size = group.Instances.length + + 
messages.push("#{time}\t#{current_size}\t#{desired_size}\t#{min_size}\t#{max_size}\t#{az}\t#{elb}\t#{conf}\t#{name}") + + for tag in group.Tags + messages.push("\t#{tag.Key}\t#{tag.Value}") + + message = messages.join "\n" + message ||= '[None]' + msg.send message diff --git a/scripts/autoscaling/ls_launch.coffee b/scripts/autoscaling/ls_launch.coffee new file mode 100644 index 0000000..132b652 --- /dev/null +++ b/scripts/autoscaling/ls_launch.coffee @@ -0,0 +1,42 @@ +# Description: +# List autoscaling launch configuration +# +# Commands: +# hubot autoscaling launch ls - Displays all AutoScaling LaunchConfigurations +# hubot autoscaling launch ls --name=[launch_configuration_name] - Details an Autoscaling LaunchConfiguration + +moment = require 'moment' +util = require 'util' +tsv = require 'tsv' + +module.exports = (robot) -> + robot.respond /autoscaling launch ls($| --name=)(.*)$/i, (msg) -> + arg_name = msg.match[2].trim() || '' + + msg.send "Fetching #{arg_name}..." + + aws = require('../../aws.coffee').aws() + autoscaling = new aws.AutoScaling({apiVersion: '2011-01-01'}) + + autoscaling.describeLaunchConfigurations (if arg_name then { LaunchConfigurationNames: [arg_name] } else null), (err, res)-> + if err + msg.send "Error: #{err}" + else + if arg_name + msg.send util.inspect(res, false, null) + else + messages = [] + for conf in res.LaunchConfigurations + messages.push({ + time : moment(conf.CreatedTime).format('YYYY-MM-DD HH:mm:ssZ') + name : conf.LaunchConfigurationName + image : conf.ImageId + type : conf.InstanceType + price : conf.SpotPrice || '[NoPrice]' + security : conf.SecurityGroups.join "," + }) + + messages.sort (a, b) -> + moment(a.time) - moment(b.time) + message = tsv.stringify(messages) || '[None]' + msg.send message diff --git a/scripts/autoscaling/ls_notification.coffee b/scripts/autoscaling/ls_notification.coffee new file mode 100644 index 0000000..bff06c5 --- /dev/null +++ b/scripts/autoscaling/ls_notification.coffee @@ -0,0 +1,42 @@ +# 
Description: +# List autoscaling notification configuration +# +# Commands: +# hubot autoscaling notification ls - Displays all AutoScaling NotificationConfigurations +# hubot autoscaling notification ls --group_name=[group_name] - Details an Autoscaling NotificationConfiguration + +util = require 'util' +tsv = require 'tsv' + +module.exports = (robot) -> + robot.respond /autoscaling notification ls($| --group_name=)(.*)$/i, (msg) -> + group_name = msg.match[2].trim() || '' + + msg.send "Fetching #{group_name}..." + + aws = require('../../aws.coffee').aws() + autoscaling = new aws.AutoScaling({apiVersion: '2011-01-01'}) + + autoscaling.describeNotificationConfigurations (if group_name then { AutoScalingGroupNames: [group_name] } else null), (err, res)-> + if err + msg.send "Error: #{err}" + else + if group_name + msg.send util.inspect(res, false, null) + else + messages = [] + res.NotificationConfigurations.sort (a, b) -> + return -1 if a.AutoScalingGroupName < b.AutoScalingGroupName + return 1 if a.AutoScalingGroupName > b.AutoScalingGroupName + return 0 + + for conf in res.NotificationConfigurations + messages.push({ + name : conf.AutoScalingGroupName + type : conf.NotificationType + topic_arn : conf.TopicARN + }) + + message = tsv.stringify(messages) || '[None]' + msg.send message + diff --git a/scripts/autoscaling/ls_policy.coffee b/scripts/autoscaling/ls_policy.coffee new file mode 100644 index 0000000..232451a --- /dev/null +++ b/scripts/autoscaling/ls_policy.coffee @@ -0,0 +1,81 @@ +# Description: +# List autoscaling policies +# +# Commands: +# hubot autoscaling policy ls - Displays all AutoScaling Policies +# hubot autoscaling policy ls --group_name=[group_name] - Details an Autoscaling Policy + +util = require 'util' +async = require 'async' +moment = require 'moment' +_ = require 'underscore' + +module.exports = (robot) -> + robot.respond /autoscaling policy ls($| --group_name=)(.*)$/i, (msg) -> + group_name = msg.match[2].trim() || '' + + msg.send 
"Fetching #{group_name}..." + + aws = require('../../aws.coffee').aws() + autoscaling = new aws.AutoScaling({apiVersion: '2011-01-01'}) + + autoscaling.describePolicies (if group_name then { AutoScalingGroupName: group_name } else null), (err, res)-> + if err + msg.send "Error: #{err}" + else + if group_name + msg.send util.inspect(res, false, null) + else + msg.send "name\ttype\tadjustment\tcooldown\tgroup_name" + msg.send "\ttime\tnamespace\tmetric\tstatistic\tthreshold\tperiod\toperator\talarm_name" + msg.send Array(130).join('-') + msg.send Array(130).join('-') + + messages = [] + + res.ScalingPolicies.sort (a, b) -> + return -1 if a.AutoScalingGroupName < b.AutoScalingGroupName + return 1 if a.AutoScalingGroupName > b.AutoScalingGroupName + return 0 + + async.eachSeries res.ScalingPolicies, (conf, next) -> + name = conf.PolicyName + type = conf.AdjustmentType + adjustment = conf.ScalingAdjustment + cooldown = conf.Cooldown || '[NoValue]' + group_name = conf.AutoScalingGroupName + + messages.push("\n#{name}\t#{type}\t#{adjustment}\t#{cooldown}\t#{group_name}") + + cloudwatch = new aws.CloudWatch({apiVersion: '2010-08-01'}) + for alarm in conf.Alarms + cloudwatch.describeAlarms { AlarmNames: [alarm.AlarmName] }, (err, res)-> + if err + msg.send "DescribeAlarm: #{err}" + else + res.MetricAlarms.sort (a, b) -> + moment(a.AlarmConfigurationUpdatedTimestamp) - moment(b.AlarmConfigurationUpdatedTimestamp) + + for alarm in res.MetricAlarms + time = moment(alarm.AlarmalarmigurationUpdatedTimestamp).format('YYYY-MM-DD HH:mm:ssZ') || '[NoTime]' + alarm_name = alarm.AlarmName + namespace = alarm.Namespace + metric = alarm.MetricName + statistic = alarm.Statistic + threshold = alarm.Threshold + period = alarm.Period + operator = alarm.ComparisonOperator + + messages.push("\t#{time}\t#{namespace}\t#{metric}\t#{statistic}\t#{threshold}\t#{period}\t#{operator}\t#{alarm_name}") + # TODO: wait to complete multi alarm loop + next() + + next() if conf.Alarms.length == 0 + + , 
(err) -> + if err + msg.send "async.each Error: #{err}" + else + message = messages.join "\n" + message ||= '[None]' + msg.send message diff --git a/scripts/autoscaling/put_notification.coffee b/scripts/autoscaling/put_notification.coffee new file mode 100644 index 0000000..163f33d --- /dev/null +++ b/scripts/autoscaling/put_notification.coffee @@ -0,0 +1,39 @@ +# Description: +# Put autoscaling notifications +# +# Commands: +# hubot autoscaling notification put --group_name=[group_name] --dry-run - Try putting an AutoScaling Notifications +# hubot autoscaling notification put --group_name=[group_name] - Put an AutoScaling Notifications + +cson = require 'cson' +util = require 'util' + +module.exports = (robot) -> + robot.respond /autoscaling notification put --group_name=(.*?)(| --dry-run)$/i, (msg) -> + unless require('../../auth.coffee').canAccess(robot, msg.envelope.user) + msg.send "You cannot access this feature. Please contact with admin" + return + + group_name = msg.match[1].trim() + dry_run = if msg.match[2] then true else false + + msg.send "Requesting notifications, AutoScalingGroupName=#{group_name}, dry-run=#{dry_run}..." 
+ + params = cson.parseCSONFile process.env.HUBOT_AWS_AS_NOTIFICATION + + aws = require('../../aws.coffee').aws() + autoscaling = new aws.AutoScaling({apiVersion: '2011-01-01'}) + + for param in params.NotificationConfigurations + param.AutoScalingGroupName = group_name + + if dry_run + msg.send util.inspect(param, false, null) + continue + + autoscaling.putNotificationConfiguration param, (err, res)-> + if err + msg.send "PutNotificationConfigurationError: #{err}" + msg.send util.inspect(param, false, null) + return + msg.send util.inspect(res, false, null) diff --git a/scripts/autoscaling/put_policy.coffee b/scripts/autoscaling/put_policy.coffee new file mode 100644 index 0000000..157dfee --- /dev/null +++ b/scripts/autoscaling/put_policy.coffee @@ -0,0 +1,87 @@ +# Description: +# Put autoscaling scaling policy +# +# Commands: +# hubot autoscaling policy put --add --group_name=[group_name] --dry-run - Try putting an AutoScaling ScaleOut Policy +# hubot autoscaling policy put --add --group_name=[group_name] - Put an AutoScaling ScaleOut Policy +# hubot autoscaling policy put --remove --group_name=[group_name] --dry-run - Try putting an AutoScaling ScaleIn Policy +# hubot autoscaling policy put --remove --group_name=[group_name] - Put an AutoScaling ScaleIn Policy + +cson = require 'cson' +util = require 'util' + +putPolicy = (msg, params, alarm_params) -> + aws = require('../../aws.coffee').aws() + autoscaling = new aws.AutoScaling({apiVersion: '2011-01-01'}) + + autoscaling.putScalingPolicy params, (err, res)-> + if err + msg.send "PutScalingPolicyError: #{err}" + return + msg.send util.inspect(res, false, null) + + alarm_params.AlarmActions ?= [] + alarm_params.AlarmActions.push(res.PolicyARN) + + cloudwatch = new aws.CloudWatch({apiVersion: '2010-08-01'}) + cloudwatch.putMetricAlarm alarm_params, (err, res)-> + if err + msg.send "PutMetricAlarmError: #{err}" + return + msg.send util.inspect(res, false, null) + + +module.exports = (robot) -> + robot.respond /autoscaling policy put 
--add --group_name=(.*?)(| --dry-run)$/i, (msg) -> + unless require('../../auth.coffee').canAccess(robot, msg.envelope.user) + msg.send "You cannot access this feature. Please contact with admin" + return + + group_name = msg.match[1].trim() + dry_run = if msg.match[2] then true else false + + msg.send "Requesting add policy, AutoScalingGroupName=#{group_name}, dry-run=#{dry_run}..." + + params = cson.parseCSONFile process.env.HUBOT_AWS_AS_POLICY_ADD + params.AutoScalingGroupName = group_name + + alarm_params = cson.parseCSONFile process.env.HUBOT_AWS_CW_ALARM_ADD + alarm_params.AlarmName = "awsec2-#{group_name}-add" + alarm_params.Dimensions = [{ + Name: 'AutoScalingGroupName', + Value: group_name + }] + + if dry_run + msg.send util.inspect(params, false, null) + msg.send util.inspect(alarm_params, false, null) + return + + putPolicy msg, params, alarm_params + + robot.respond /autoscaling policy put --remove --group_name=(.*?)(| --dry-run)$/i, (msg) -> + unless require('../../auth.coffee').canAccess(robot, msg.envelope.user) + msg.send "You cannot access this feature. Please contact with admin" + return + + group_name = msg.match[1].trim() + dry_run = if msg.match[2] then true else false + + msg.send "Requesting remove policy, AutoScalingGroupName=#{group_name}, dry-run=#{dry_run}..." 
+ + params = cson.parseCSONFile process.env.HUBOT_AWS_AS_POLICY_REMOVE + params.AutoScalingGroupName = group_name + + alarm_params = cson.parseCSONFile process.env.HUBOT_AWS_CW_ALARM_REMOVE + alarm_params.AlarmName = "awsec2-#{group_name}-remove" + alarm_params.Dimensions = [{ + Name: 'AutoScalingGroupName', + Value: group_name + }] + + if dry_run + msg.send util.inspect(params, false, null) + msg.send util.inspect(alarm_params, false, null) + return + + putPolicy msg, params, alarm_params diff --git a/scripts/autoscaling/update_group.coffee b/scripts/autoscaling/update_group.coffee new file mode 100644 index 0000000..edc9688 --- /dev/null +++ b/scripts/autoscaling/update_group.coffee @@ -0,0 +1,75 @@ +# Description: +# Update autoscaling group +# [json] +# ex) { "AutoScalingGroupName": "test-group", "MinSize": 0, "MaxSize": 0 } +# ex) { "AutoScalingGroupName": "test-group", "DesiredCapacity": 0 } +# +# Commands: +# hubot autoscaling update --json=[json] - Update the AutoScaling Group +# hubot autoscaling update --name=[name] --min=[min] - Update MinSize of the AutoScaling Group +# hubot autoscaling update --name=[name] --max=[max] - Update MaxSize of the AutoScaling Group +# hubot autoscaling update --name=[name] --desired=[desired] - Update DesiredCapacity the AutoScaling Group + +fs = require 'fs' +cson = require 'cson' +util = require 'util' + +update = (msg, json) -> + aws = require('../../aws.coffee').aws() + autoscaling = new aws.AutoScaling({apiVersion: '2011-01-01'}) + + autoscaling.updateAutoScalingGroup json, (err, res)-> + if err + msg.send "Error: #{err}" + else + msg.send util.inspect(res, false, null) + +module.exports = (robot) -> + robot.respond /autoscaling update --json=(.*)$/i, (msg) -> + unless require('../../auth.coffee').canAccess(robot, msg.envelope.user) + msg.send "You cannot access this feature. Please contact with admin" + return + + json_str = msg.match[1].trim() + + msg.send "Requesting #{json_str}..." 
+    update msg, JSON.parse(json_str)
+
+  robot.respond /autoscaling update --name=(.*) --min=(.*)$/i, (msg) ->
+    unless require('../../auth.coffee').canAccess(robot, msg.envelope.user)
+      msg.send "You cannot access this feature. Please contact with admin"
+      return
+
+    name = msg.match[1]
+    min = msg.match[2]
+    return unless name
+    return unless min
+
+    msg.send "Requesting AutoScalingGroupName=#{name}, MinSize=#{min}..."
+    update msg, { AutoScalingGroupName: name, MinSize: min }
+
+  robot.respond /autoscaling update --name=(.*) --max=(.*)$/i, (msg) ->
+    unless require('../../auth.coffee').canAccess(robot, msg.envelope.user)
+      msg.send "You cannot access this feature. Please contact with admin"
+      return
+
+    name = msg.match[1]
+    max = msg.match[2]
+    return unless name
+    return unless max
+
+    msg.send "Requesting AutoScalingGroupName=#{name}, MaxSize=#{max}..."
+    update msg, { AutoScalingGroupName: name, MaxSize: max }
+
+  robot.respond /autoscaling update --name=(.*) --desired=(.*)$/i, (msg) ->
+    unless require('../../auth.coffee').canAccess(robot, msg.envelope.user)
+      msg.send "You cannot access this feature. Please contact with admin"
+      return
+
+    name = msg.match[1]
+    desired = msg.match[2]
+    return unless name
+    return unless desired
+
+    msg.send "Requesting AutoScalingGroupName=#{name}, DesiredCapacity=#{desired}..."
+    update msg, { AutoScalingGroupName: name, DesiredCapacity: desired }
diff --git a/scripts/cloudwatch/delete_alarm.coffee b/scripts/cloudwatch/delete_alarm.coffee
new file mode 100644
index 0000000..f2f987b
--- /dev/null
+++ b/scripts/cloudwatch/delete_alarm.coffee
@@ -0,0 +1,27 @@
+# Description:
+#   Delete a cloudwatch alarm
+#
+# Commands:
+#   hubot cloudwatch alarm delete --name=[alarm_name] - Delete the Alarm
+#
+util = require 'util'
+
+module.exports = (robot) ->
+  robot.respond /cloudwatch alarm delete --name=(.*)$/i, (msg) ->
+    unless require('../../auth.coffee').canAccess(robot, msg.envelope.user)
+      msg.send "You cannot access this feature. Please contact with admin"
+      return
+
+    alarm_name = msg.match[1]
+
+    msg.send "Deleting #{alarm_name}..."
+
+    aws = require('../../aws.coffee').aws()
+    cloudwatch = new aws.CloudWatch({apiVersion: '2010-08-01'})
+
+    cloudwatch.deleteAlarms { AlarmNames: [alarm_name] }, (err, res)->
+      if err
+        msg.send "Error: #{err}"
+      else
+        msg.send util.inspect(res, false, null)
+
diff --git a/scripts/cloudwatch/ls_alarms.coffee b/scripts/cloudwatch/ls_alarms.coffee
new file mode 100644
index 0000000..bc31df2
--- /dev/null
+++ b/scripts/cloudwatch/ls_alarms.coffee
@@ -0,0 +1,55 @@
+# Description:
+#   List cloudwatch alarms
+#
+# Commands:
+#   hubot cloudwatch alarm ls - Displays all Alarms
+#   hubot cloudwatch alarm ls --name=[alarm_name] - Details an Alarm
+
+util = require 'util'
+moment = require 'moment'
+
+module.exports = (robot) ->
+  robot.respond /cloudwatch alarm ls($| --name=)(.*)$/i, (msg) ->
+    alarm_name = msg.match[2].trim() || ''
+
+    msg.send "Fetching #{alarm_name}..."
+
+    aws = require('../../aws.coffee').aws()
+    cloudwatch = new aws.CloudWatch({apiVersion: '2010-08-01'})
+
+    cloudwatch.describeAlarms (if alarm_name then { AlarmNames: [alarm_name] } else null), (err, res)->
+      if err
+        msg.send "Error: #{err}"
+      else
+        if alarm_name
+          msg.send util.inspect(res, false, null)
+        else
+          msg.send "time\tnamespace\tmetric\tstatistic\tthreshold\tperiod\toperator\tname"
+          msg.send "\tdimension.Name\tdimension.Value"
+          msg.send Array(130).join('-')
+          msg.send Array(130).join('-')
+
+          messages = []
+
+          res.MetricAlarms.sort (a, b) ->
+            moment(a.AlarmConfigurationUpdatedTimestamp) - moment(b.AlarmConfigurationUpdatedTimestamp)
+
+          for conf in res.MetricAlarms
+            time = moment(conf.AlarmConfigurationUpdatedTimestamp).format('YYYY-MM-DD HH:mm:ssZ')
+            name = conf.AlarmName
+            namespace = conf.Namespace
+            metric = conf.MetricName
+            statistic = conf.Statistic
+            threshold = conf.Threshold
+            period = conf.Period
+            operator = conf.ComparisonOperator
+
+            messages.push("#{time}\t#{namespace}\t#{metric}\t#{statistic}\t#{threshold}\t#{period}\t#{operator}\t#{name}")
+            for dimension in conf.Dimensions
+              messages.push("\t#{dimension.Name}\t#{dimension.Value}")
+            messages.push("\n")
+
+          message = messages.join "\n"
+          message ||= '[None]'
+          msg.send message
+
diff --git a/scripts/ec2/create_security_groups.coffee b/scripts/ec2/create_security_groups.coffee
new file mode 100644
index 0000000..8b11ea9
--- /dev/null
+++ b/scripts/ec2/create_security_groups.coffee
@@ -0,0 +1,41 @@
+# Description:
+#   Create ec2 security groups
+#
+# Commands:
+#   hubot ec2 sg create --vpc_id=[vpc_id] --group_name=[group_name] --desc=[desc] --dry-run - Try creating a SecurityGroup
+#   hubot ec2 sg create --vpc_id=[vpc_id] --group_name=[group_name] --desc=[desc] - Create a SecurityGroup
+
+util = require 'util'
+
+module.exports = (robot) ->
+  robot.respond /ec2 sg create --vpc_id=(.*) --group_name=(.*) --desc=(.*?)(| --dry-run)$/i, (msg) ->
+    unless require('../../auth.coffee').canAccess(robot, msg.envelope.user)
+      msg.send "You cannot access this feature. Please contact with admin"
+      return
+
+    vpc_id = msg.match[1].trim()
+    group_name = msg.match[2].trim()
+    desc = msg.match[3].trim()
+    dry_run = if msg.match[4] then true else false
+
+    msg.send "Requesting vpc_id=#{vpc_id}, group_name=#{group_name}, desc=#{desc}, dry_run=#{dry_run}..."
+
+    params = {
+      VpcId       : vpc_id,
+      GroupName   : group_name,
+      Description : desc,
+      DryRun      : dry_run,
+    }
+
+    if dry_run
+      msg.send util.inspect(params, false, null)
+      return
+
+    aws = require('../../aws.coffee').aws()
+    ec2 = new aws.EC2({apiVersion: '2014-10-01'})
+
+    ec2.createSecurityGroup params, (err, res)->
+      if err
+        msg.send "Error: #{err}"
+      else
+        msg.send "Success to create SecurityGroupId: #{res.GroupId}"
diff --git a/scripts/ec2/delete_security_groups.coffee b/scripts/ec2/delete_security_groups.coffee
new file mode 100644
index 0000000..a422c5a
--- /dev/null
+++ b/scripts/ec2/delete_security_groups.coffee
@@ -0,0 +1,28 @@
+# Description:
+#   Delete ec2 security groups
+#
+# Commands:
+#   hubot ec2 sg delete --group_id=[group_id] - Delete the SecurityGroup
+
+util = require 'util'
+
+module.exports = (robot) ->
+  robot.respond /ec2 sg delete --group_id=(.*)$/i, (msg) ->
+    unless require('../../auth.coffee').canAccess(robot, msg.envelope.user)
+      msg.send "You cannot access this feature. Please contact with admin"
+      return
+
+    group_id = msg.match[1].trim() || ''
+
+    msg.send "Deleting group_id=#{group_id}..."
+
+    aws = require('../../aws.coffee').aws()
+    ec2 = new aws.EC2({apiVersion: '2014-10-01'})
+
+    ec2.deleteSecurityGroup { GroupId: group_id }, (err, res)->
+      if err
+        msg.send "Error: #{err}"
+      else
+        msg.send "Success to delete sg"
+        msg.send util.inspect(res, false, null)
+
diff --git a/scripts/ec2/ls.coffee b/scripts/ec2/ls.coffee
new file mode 100644
index 0000000..a6a442a
--- /dev/null
+++ b/scripts/ec2/ls.coffee
@@ -0,0 +1,59 @@
+# Description:
+#   List ec2 instances info
+#   Show detail about a instance if specified instance id
+#
+# Commands:
+#   hubot ec2 ls - Displays all Instances
+#   hubot ec2 ls --instance_id=[instance_id] - Details an Instance
+
+moment = require 'moment'
+util = require 'util'
+tsv = require 'tsv'
+
+module.exports = (robot) ->
+  robot.respond /ec2 ls($| --instance_id=)(.*)$/i, (msg) ->
+    ins_id = msg.match[2].trim() || ''
+
+    msg.send "Fetching #{ins_id}..."
+
+    aws = require('../../aws.coffee').aws()
+    ec2 = new aws.EC2({apiVersion: '2014-10-01'})
+
+    ec2.describeInstances (if ins_id then { InstanceIds: [ins_id] } else null), (err, res)->
+      if err
+        msg.send "DescribeInstancesError: #{err}"
+      else
+        if ins_id
+          msg.send util.inspect(res, false, null)
+
+          ec2.describeInstanceAttribute { InstanceId: ins_id, Attribute: 'userData' }, (err, res)->
+            if err
+              msg.send "DescribeInstanceAttributeError: #{err}"
+            else if res.UserData.Value
+              msg.send new Buffer(res.UserData.Value, 'base64').toString('ascii')
+        else
+          messages = []
+          for data in res.Reservations
+            ins = data.Instances[0]
+
+            # reset per instance: CoffeeScript vars are function-scoped, so an
+            # instance without a Name tag would otherwise reuse the previous name
+            name = null
+            for tag in ins.Tags when tag.Key is 'Name'
+              name = tag.Value
+
+            messages.push({
+              time   : moment(ins.LaunchTime).format('YYYY-MM-DD HH:mm:ssZ')
+              state  : ins.State.Name
+              id     : ins.InstanceId
+              image  : ins.ImageId
+              az     : ins.Placement.AvailabilityZone
+              subnet : ins.SubnetId
+              type   : ins.InstanceType
+              ip     : ins.PrivateIpAddress
+              name   : name || '[NoName]'
+            })
+
+          messages.sort (a, b) ->
+            moment(a.time) - moment(b.time)
+          message = tsv.stringify(messages) || '[None]'
+          msg.send message
diff --git a/scripts/ec2/ls_security_groups.coffee b/scripts/ec2/ls_security_groups.coffee
new file mode 100644
index 0000000..9e8638d
--- /dev/null
+++ b/scripts/ec2/ls_security_groups.coffee
@@ -0,0 +1,62 @@
+# Description:
+#   List ec2 security groups info
+#
+# Commands:
+#   hubot ec2 sg ls - Displays all SecurityGroups
+
+module.exports = (robot) ->
+  robot.respond /ec2 sg ls$/i, (msg) ->
+    msg.send "Fetching ..."
+
+    aws = require('../../aws.coffee').aws()
+    ec2 = new aws.EC2({apiVersion: '2014-10-01'})
+
+    ec2.describeSecurityGroups null, (err, res)->
+      if err
+        msg.send "Error: #{err}"
+      else
+        msg.send "vpc_id\tgroup_id\tgroup_name\tname\tdesc"
+        msg.send "\tprotocol\tfrom\tto\trange"
+        msg.send Array(130).join('-')
+        msg.send Array(130).join('-')
+
+        messages = []
+
+        res.SecurityGroups.sort (a, b) ->
+          if a.GroupName < b.GroupName then return -1
+          if b.GroupName < a.GroupName then return 1
+          return 0
+
+        for sg in res.SecurityGroups
+          vpc_id = sg.VpcId
+          group_name = sg.GroupName || '[NoName]'
+          group_id = sg.GroupId
+          desc = sg.Description
+          name = '[NoName]'
+          for tag in sg.Tags when tag.Key is 'Name'
+            name = tag.Value || '[NoName]'
+
+          messages.push("\n#{vpc_id}\t#{group_id}\t#{group_name}\t#{name}\t#{desc}")
+
+          for inbound in sg.IpPermissions
+            protocol = inbound.IpProtocol
+            if protocol == '-1' then protocol = 'All traffic'
+
+            from = inbound.FromPort || 'All'
+            to = inbound.ToPort || 'All'
+            for ipRange in inbound.IpRanges
+              range = ipRange.CidrIp
+              messages.push("\tInbound \t#{protocol}\t#{from}\t#{to}\t#{range}")
+
+          for outbound in sg.IpPermissionsEgress
+            protocol = outbound.IpProtocol
+            if protocol == '-1' then protocol = 'All traffic'
+
+            from = outbound.FromPort || 'All'
+            to = outbound.ToPort || 'All'
+            for ipRange in outbound.IpRanges
+              range = ipRange.CidrIp
+              messages.push("\tOutbound\t#{protocol}\t#{from}\t#{to}\t#{range}")
+
+        message = messages.join "\n"
+        msg.send message
diff --git a/scripts/ec2/ls_spots.coffee b/scripts/ec2/ls_spots.coffee
new file mode 100644
index 0000000..914c879
--- /dev/null
+++ b/scripts/ec2/ls_spots.coffee
@@ -0,0 +1,82 @@
+# Description:
+#   List ec2 spot instances info
+#
+# Commands:
+#   hubot ec2 spot ls - Displays all SpotInstances
+
+moment = require "moment"
+tsv = require 'tsv'
+async = require 'async'
+_ = require 'underscore'
+
+module.exports = (robot) ->
+  robot.respond /ec2 spot ls$/i, (msg) ->
+    msg.send "Fetching ..."
+
+    aws = require('../../aws.coffee').aws()
+    ec2 = new aws.EC2({apiVersion: '2014-10-01'})
+
+    ec2.describeSpotInstanceRequests null, (err, res)->
+      if err
+        msg.send "SpotInstanceRequestError: #{err}"
+      else
+        messages = []
+
+        # eachSeries is safer but slower
+        async.eachSeries res.SpotInstanceRequests, (ins, next) ->
+          request = {
+            time      : moment(ins.Status.UpdateTime).format('YYYY-MM-DD HH:mm:ssZ')
+            code      : ins.Status.Code
+            id        : ins.InstanceId
+            az        : ins.LaunchedAvailabilityZone
+            type      : ins.LaunchSpecification.InstanceType
+            spotPrice : ins.SpotPrice
+          }
+
+          ec2.describeInstances {InstanceIds:[request.id]}, (err, res)->
+            if err
+              msg.send "DescribeInstancesError: #{err}"
+            else
+              request.ip = '[NoIP]'
+              for data in res.Reservations
+                ins = data.Instances[0]
+                request.ip = ins.PrivateIpAddress
+
+              # reuse pricing already fetched for the same type/az pair
+              similar_one = _.find messages, (one)-> return (one.type == request.type && one.az == request.az)
+              if similar_one
+                request.price = similar_one.price
+                request.avg_latest_price = similar_one.avg_latest_price
+                messages.push(request)
+                next()
+                return
+
+              ec2.describeSpotPriceHistory {
+                InstanceTypes       : [request.type],
+                ProductDescriptions : ['Linux/UNIX'],
+                StartTime           : moment().utc().subtract(1, 'hours').toDate(),
+                EndTime             : moment().utc().toDate(),
+                AvailabilityZone    : request.az
+              }, (err, res) ->
+                if err
+                  msg.send "DescribeSpotPriceHistory: #{err}"
+                else
+                  history = res.SpotPriceHistory
+                  request.price = history[0].SpotPrice
+
+                  sum = _.reduce history, (memo, param)->
+                    return memo + (+param.SpotPrice)
+                  , 0
+
+                  request.avg_latest_price = (sum / history.length).toFixed(6)
+
+                  messages.push(request)
+                  next()
+
+        , (err) ->
+          if err
+            msg.send "async Error: #{err}"
+          else
+            messages.sort (a, b) ->
+              moment(a.time) - moment(b.time)
+            message = tsv.stringify(messages)
+            msg.send message
diff --git a/scripts/ec2/run.coffee b/scripts/ec2/run.coffee
new file mode 100644
index 0000000..a727c5f
--- /dev/null
+++ b/scripts/ec2/run.coffee
@@ -0,0 +1,59 @@
+# Description:
+#   Run ec2 instances
+#
+# Commands:
+#   hubot ec2 run --dry-run - Try running an Instance
+#   hubot ec2 run - Run an Instance
+
+fs = require 'fs'
+cson = require 'cson'
+util = require 'util'
+
+module.exports = (robot) ->
+  robot.respond /ec2 run(| --dry-run)$/i, (msg) ->
+    unless require('../../auth.coffee').canAccess(robot, msg.envelope.user)
+      msg.send "You cannot access this feature. Please contact with admin"
+      return
+
+    dry_run = if msg.match[1] then true else false
+
+    msg.send "Requesting dry-run=#{dry_run}..."
+
+    run_configuration_path = process.env.HUBOT_AWS_EC2_RUN_CONFIG
+    params = cson.parseCSONFile run_configuration_path
+
+    userdata_path = process.env.HUBOT_AWS_EC2_RUN_USERDATA_PATH
+    if fs.existsSync userdata_path
+      init_file = fs.readFileSync userdata_path, 'utf-8'
+      params.UserData = new Buffer(init_file).toString('base64')
+
+    if dry_run
+      msg.send util.inspect(params, false, null)
+      return
+
+    aws = require('../../aws.coffee').aws()
+    ec2 = new aws.EC2({apiVersion: '2014-10-01'})
+
+    ec2.runInstances params, (err, res)->
+      if err
+        msg.send "Error: #{err}"
+      else
+        messages = []
+        for ins in res.Instances
+          state = ins.State.Name
+          id = ins.InstanceId
+          type = ins.InstanceType
+          # reset per instance so ip/name don't leak from the previous iteration
+          ip = null
+          name = '[NoName]'
+          for network in ins.NetworkInterfaces
+            ip = network.PrivateIpAddress
+          for tag in ins.Tags when tag.Key is 'Name'
+            name = tag.Value || '[NoName]'
+
+          messages.push("#{state}\t#{id}\t#{type}\t#{ip}\t#{name}")
+
+        messages.sort()
+        message = messages.join "\n"
+        msg.send message
+
diff --git a/scripts/ec2/terminate.coffee b/scripts/ec2/terminate.coffee
new file mode 100644
index 0000000..d1298f6
--- /dev/null
+++ b/scripts/ec2/terminate.coffee
@@ -0,0 +1,33 @@
+# Description:
+#   Terminate ec2 instances
+#
+# Commands:
+#   hubot ec2 terminate --instance_id=[instance_id] - Terminate the Instance
+
+module.exports = (robot) ->
+  robot.respond /ec2 terminate --instance_id=(.*)$/i, (msg) ->
+    unless require('../../auth.coffee').canAccess(robot, msg.envelope.user)
+      msg.send "You cannot access this feature. Please contact with admin"
+      return
+
+    ins_id = msg.match[1]
+
+    msg.send "Terminating #{ins_id}..."
+
+    aws = require('../../aws.coffee').aws()
+    ec2 = new aws.EC2({apiVersion: '2014-10-01'})
+
+    ec2.terminateInstances { DryRun: false, InstanceIds: [ins_id] }, (err, res)->
+      if err
+        msg.send "Error: #{err}"
+      else
+        messages = []
+        for ins in res.TerminatingInstances
+          id = ins.InstanceId
+          state = ins.CurrentState.Name
+
+          messages.push("#{id}\t#{state}")
+
+        messages.sort()
+        message = messages.join "\n"
+        msg.send message
diff --git a/scripts/s3/ls_buckets.coffee b/scripts/s3/ls_buckets.coffee
new file mode 100644
index 0000000..f091315
--- /dev/null
+++ b/scripts/s3/ls_buckets.coffee
@@ -0,0 +1,31 @@
+# Description:
+#   List s3 buckets info
+#
+# Commands:
+#   hubot s3 ls - Displays all S3 buckets
+
+moment = require 'moment'
+tsv = require 'tsv'
+
+module.exports = (robot) ->
+  robot.respond /s3 ls$/i, (msg) ->
+    msg.send "Fetching ..."
+
+    aws = require('../../aws.coffee').aws()
+    s3 = new aws.S3({apiVersion: '2006-03-01'})
+
+    s3.listBuckets (err, res)->
+      if err
+        msg.send "Error: #{err}"
+      else
+        messages = []
+        for bucket in res.Buckets
+          messages.push({
+            time: moment(bucket.CreationDate).format('YYYY-MM-DD HH:mm:ssZ')
+            name: bucket.Name
+          })
+
+        messages.sort (a, b) ->
+          moment(a.time) - moment(b.time)
+        message = tsv.stringify(messages) || '[None]'
+        msg.send message
diff --git a/scripts/s3/ls_objects.coffee b/scripts/s3/ls_objects.coffee
new file mode 100644
index 0000000..3952b28
--- /dev/null
+++ b/scripts/s3/ls_objects.coffee
@@ -0,0 +1,48 @@
+# Description:
+#   List s3 objects info
+#
+# Commands:
+#   hubot s3 ls --bucket_name=[bucket-name] - Displays all objects
+#   hubot s3 ls --bucket_name=[bucket-name] --prefix=[prefix] - Displays all objects with prefix
+#   hubot s3 ls --bucket_name=[bucket-name] --prefix=[prefix] --marker=[marker] - Displays all objects with prefix from marker
+
+moment = require 'moment'
+util = require 'util'
+tsv = require 'tsv'
+_ = require 'underscore'
+
+module.exports = (robot) ->
+  robot.respond /s3 ls --bucket_name=(.*?)($| --prefix=)(.*?)($| --marker=)(.*)$/i, (msg) ->
+    bucket = msg.match[1].trim()
+    prefix = msg.match[3].trim()
+    marker = msg.match[5].trim()
+
+    msg.send "Fetching #{bucket}, #{prefix}, #{marker}..."
+
+    aws = require('../../aws.coffee').aws()
+    s3 = new aws.S3({apiVersion: '2006-03-01'})
+
+    s3.listObjects { Bucket: bucket, Delimiter: '/', Prefix: prefix, Marker: marker }, (err, res)->
+      if err
+        msg.send "Error: #{err}"
+      else
+        messages = []
+        for content in res.Contents
+          messages.push({
+            Key: content.Key
+          })
+
+        message = tsv.stringify(messages)
+        msg.send message
+
+        prefix_msgs = []
+        for p in res.CommonPrefixes
+          prefix_msgs.push({
+            Prefix: p.Prefix
+          })
+
+        prefix_msg = tsv.stringify(prefix_msgs)
+        msg.send prefix_msg
+
+        # `||` previously bound to msg.send()'s return value, so the fallback was unreachable
+        msg.send "NextMarker is #{res.NextMarker || 'none'}"