# REQUIRED: The name of your application
app_name: RepairtechsolutionsCom

# REQUIRED: The system user to run your app servers as
app_user: app

# REQUIRED: Notification emails (e.g. monit) get sent to this address
#
admin_email: "root@#{full_host}"

# OPTIONAL: If not set, you won't be able to access web_tools
# server (munin stats, monit status, etc)
web_tools_user: admin
web_tools_password: password

# REQUIRED: The timezone the server should be in
timezone: US/Eastern

# REQUIRED: the domain all the instances should be associated with
#
domain: repairtechsolutions.com

# OPTIONAL: See rubber-dns.yml for dns configuration
# This lets rubber update a dynamic dns service with the instance alias
# and ip when they are created.  It also allows setting up arbitrary
# dns records (CNAME, MX, Round Robin DNS, etc)

# OPTIONAL: Additional rubber file to pull config from if it exists.  This file
# will also be pushed to the remote host at Rubber.root/config/rubber/rubber-secret.yml
#
# rubber_secret: "#{File.expand_path('~') + '/.ec2' + (Rubber.env == 'production' ? '' : '_dev') + '/rubber-secret.yml' rescue ''}"

# REQUIRED: All known cloud providers with the settings needed to configure them.
# There's only one working cloud provider right now - Amazon Web Services.
# To implement another, clone lib/rubber/cloud/aws.rb or make the fog provider
# work in a generic fashion
#
cloud_providers:
  aws:
    # REQUIRED: The AWS region that you want to use.
    #
    # Options include
    #   us-east-1
    #   eu-west-1
    #   ap-northeast-1
    #   ap-southeast-1
    #   ap-southeast-2
    #
    region: us-east-1

    # REQUIRED: The amazon keys and account ID (digits only, no dashes) used to
    # access the AWS API
    #
    access_key: XXXXXXXXXXXXXXXXXXXX
    secret_access_key: YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY
    account: ZZZZZZZZZZZZ

    # REQUIRED: The name of the amazon keypair and location of its private key
    #
    # NOTE: for some reason Capistrano requires you to have both the public and
    # the private key in the same folder; the public key should have the
    # extension ".pub".  The easiest way to get your hands on this is to create
    # the public key from the private key:
    #   ssh-keygen -y -f gsg-keypair > gsg-keypair.pub
    #
    key_name: gsg-keypair
    key_file: "#{Dir[(File.expand_path('~') rescue '/root') + '/.ec2/*' + cloud_providers.aws.key_name].first}"

    # OPTIONAL: Needed for bundling a running instance using rubber:bundle
    #
    # pk_file: "#{Dir[(File.expand_path('~') rescue '/root') + '/.ec2/pk-*'].first}"
    # cert_file: "#{Dir[(File.expand_path('~') rescue '/root') + '/.ec2/cert-*'].first}"
    # image_bucket: "#{app_name}-images"

    # OPTIONAL: Needed for backing up database to s3
    # backup_bucket: "#{app_name}-backups"

    # REQUIRED: the ami and instance type for creating instances
    # The Ubuntu images at http://alestic.com/ work well
    # Ubuntu 12.04 Precise instance-store 64-bit: ami-3c994355
    #
    # m1.small or m1.large or m1.xlarge
    image_type: m1.small
    #image_id: ami-3c994355
    image_id: ami-8baa73e2

    # OPTIONAL: EC2 spot instance request support.
    #
    # Enables the creation of spot instance requests.  Rubber will wait
    # synchronously until the request is fulfilled, at which point it will
    # begin initializing the instance, unless spot_instance_request_timeout
    # is set.
    # spot_instance: true
    #
    # The maximum price you would like to pay for your spot instance.
    # spot_price: "0.085"
    #
    # If a spot instance request can't be fulfilled in 3 minutes, fall back to
    # on-demand instance creation.  If not set, the default is infinite.
    # spot_instance_request_timeout: 180
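    # A minimal sketch putting the three spot settings above together; the
    # price and timeout are the illustrative values from the comments above,
    # not recommendations:
    #
    # spot_instance: true
    # spot_price: "0.085"
    # spot_instance_request_timeout: 180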
  # Use an alternate cloud provider supported by fog.  This doesn't fully work
  # yet due to differences in providers within fog, but gives you a starting
  # point for contributing a new provider to rubber.  See rubber/lib/rubber/cloud(.rb)
  fog:
    credentials:
      provider: rackspace
      rackspace_api_key: 'XXX'
      rackspace_username: 'YYY'
    image_type: 123
    image_id: 123

# REQUIRED: the cloud provider to use
#
cloud_provider: aws

# OPTIONAL: Where to store instance data.
#
# Allowed forms are:
#   filesystem: "file:#{Rubber.root}/config/rubber/instance-#{Rubber.env}.yml"
#   cloud storage (s3): "storage:#{cloud_provider.aws.backup_bucket}/RubberInstances_#{app_name}/instance-#{Rubber.env}.yml"
#   cloud table (simpledb): "table:RubberInstances_#{app_name}_#{Rubber.env}"
#
# If you need to port between forms, load the rails console, then run:
#   Rubber.instances.save(location)
# where location is one of the allowed forms for this variable
#
# instance_storage: "file:#{Rubber.root}/config/rubber/instance-#{Rubber.env}.yml"

# OPTIONAL: Where to store a backup of the instance data
#
# This is most useful when using a remote store in case you end up
# wiping the single copy of your instance data.  When using the file
# store, the instance file is typically under version control with
# your project code, so that provides some safety.
#
# instance_storage_backup: "storage:#{cloud_providers.aws.backup_bucket}/RubberInstances_#{app_name}/instance-#{Rubber.env}-#{Time.now.strftime('%Y%m%d-%H%M%S')}.yml"

# OPTIONAL: Define security groups
# Each security group is a name associated with a sequence of maps where the
# keys are the parameters to the ec2 AuthorizeSecurityGroupIngress API:
#   source_security_group_name, source_security_group_owner_id
#   ip_protocol, from_port, to_port, cidr_ip
#
security_groups:
  default:
    description: The default security group
    rules:
      - source_group_name: default
        source_group_account: "#{cloud_providers.aws.account}"
      - protocol: tcp
        from_port: 22
        to_port: 22
        source_ips: [0.0.0.0/0]
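  # For illustration only, a second group could open public web traffic using
  # the same rule syntax as the default group above (the group name and ports
  # here are hypothetical, not part of the stock config):
  #
  # web:
  #   description: Allow public http/https traffic
  #   rules:
  #     - protocol: tcp
  #       from_port: 80
  #       to_port: 80
  #       source_ips: [0.0.0.0/0]
  #     - protocol: tcp
  #       from_port: 443
  #       to_port: 443
  #       source_ips: [0.0.0.0/0]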
# OPTIONAL: The default security groups to create instances with
assigned_security_groups: [default]

# OPTIONAL: Automatically create security groups for each host and role.
# EC2 doesn't allow one to change what groups an instance belongs to after
# creation, so it's good to have some empty ones predefined.
auto_security_groups: true

# OPTIONAL: Automatically isolate security groups for each appname/environment
# by mangling their names to be appname_env_groupname.
# This makes it safer to have staging and production coexist on the same EC2
# account, or even multiple apps
isolate_security_groups: true

# OPTIONAL: Prompts one to sync security group rules when the ones in amazon
# differ from those in rubber
prompt_for_security_group_sync: false

# OPTIONAL: The packages to install on all instances
# You can install a specific version of a package by using a sub-array of [pkg, version]
# For example, packages: [[rake, 0.7.1], irb]
packages: [postfix, build-essential, git-core, ec2-ami-tools, libxslt-dev, ntp]

# OPTIONAL: gem sources to setup for rubygems
# gemsources: ["http://rubygems.org", "http://gems.github.com"]

# OPTIONAL: The gems to install on all instances
# You can install a specific version of a gem by using a sub-array of [gem, version]
# For example, gems: [[rails, 2.2.2], open4, aws-s3]
gems: [open4, aws-s3, bundler, [rubber, "#{Rubber.version}"]]

# OPTIONAL: A string prepended to shell command strings that causes multi
# statement shell commands to fail fast.  You may need to comment this out
# on some platforms, but it works for me on linux/osx with a bash shell
#
stop_on_error_cmd: "function error_exit { exit 99; }; trap error_exit ERR"

# OPTIONAL: The default set of roles to use when creating a staging instance
# with "cap rubber:create_staging".  By default this uses all the known roles,
# excluding slave roles, but this is not always desired for staging, so you can
# specify a different set here
#
# staging_roles: "web,app,db:primary=true"

# OPTIONAL: Lets one assign amazon elastic IPs (static IPs) to your instances.
# You should typically set this on the role/host level rather than
# globally, unless you really do want all instances to have a
# static IP
#
# use_static_ip: true

# OPTIONAL: Specifies an instance to be created in the given availability zone.
# Availability zones are specified by amazon to be somewhat isolated
# from each other so that hardware failures in one zone shouldn't
# affect instances in another.  As such, it is good to specify these
# for instances that need to be redundant to reduce your chance of
# downtime.  You should typically set this on the role/host level
# rather than globally.  Use cap rubber:describe_zones to see the list
# of zones
# availability_zone: us-east-1a
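# As a sketch of the host-level placement suggested above (the host name is
# hypothetical), both settings can be combined in a host override:
#
# hosts:
#   web01:
#     availability_zone: us-east-1a
#     use_static_ip: true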
# OPTIONAL: If you want to use Elastic Block Store (EBS) persistent
# volumes, add them to host specific overrides and they will get created
# and assigned to the instance.  On initial creation, the volume will get
# attached _and_ formatted, but if your host disappears and you recreate
# it, the volume will only get remounted, thereby preserving your data
#
# hosts:
#   my_host:
#     availability_zone: us-east-1a
#     volumes:
#       - size: 100 # size of vol in GBs
#         zone: us-east-1a # zone to create volume in, needs to match host's zone
#         device: /dev/sdh # OS device to attach volume to
#         mount: /mnt/mysql # The directory to mount this volume to
#         filesystem: ext3 # the filesystem to create on volume
#       - size: 10 # size of vol in GBs
#         zone: us-east-1a # zone to create volume in, needs to match host's zone
#         device: /dev/sdi # OS device to attach volume to
#         mount: /mnt/logs # The directory to mount this volume to
#         filesystem: ext3 # the filesystem to create on volume
#
#       # volumes without mount/filesystem can be used in raid arrays
#       - size: 50 # size of vol in GBs
#         zone: us-east-1a # zone to create volume in, needs to match host's zone
#         device: /dev/sdx # OS device to attach volume to
#       - size: 50 # size of vol in GBs
#         zone: us-east-1a # zone to create volume in, needs to match host's zone
#         device: /dev/sdy # OS device to attach volume to
#
#     # Use some ephemeral volumes for raid array
#     local_volumes:
#       - partition_device: /dev/sdb
#         zero: false # zeros out disk for improved performance
#       - partition_device: /dev/sdc
#         zero: false # zeros out disk for improved performance
#
#     # for raid array, you'll need to add mdadm to packages.  Likewise,
#     # xfsprogs is needed for xfs filesystem support
#     packages: [xfsprogs, mdadm]
#     raid_volumes:
#       - device: /dev/md0 # OS device to create raid array on
#         mount: /mnt/fast # The directory to mount this array to
#         mount_opts: 'nobootwait' # Recent Ubuntu versions require this flag or SSH will not start on reboot
#         filesystem: xfs # the filesystem to create on array
#         filesystem_opts: -f # the filesystem opts in mkfs
#         raid_level: 0 # the raid level to use for the array
#         # if you're using Ubuntu 11.x or later (Natty, Oneiric, Precise, etc),
#         # you will want to specify the source devices in their /dev/xvd format.
#         # See https://bugs.launchpad.net/ubuntu/+source/linux/+bug/684875 for
#         # more information.
#         # NOTE: Only make this change for raid source_devices, NOT the generic
#         # volume settings above.
#         source_devices: [/dev/sdx, /dev/sdy] # the source EBS devices we are creating raid array from (Ubuntu Lucid or older)
#         source_devices: [/dev/xvdx, /dev/xvdy] # the source EBS devices we are creating raid array from (Ubuntu Natty or newer)
#
#     # for LVM volumes, you'll need to add lvm2 to packages.  Likewise,
#     # xfsprogs is needed for xfs filesystem support
#     packages: [xfsprogs, lvm2]
#     lvm_volume_groups:
#       - name: vg # The volume group name
#         physical_volumes: [/dev/sdx, /dev/sdy] # Devices used for LVM group (you can use just one, but you can't stripe them)
#         extent_size: 32 # Size of the volume extent in MB
#         volumes:
#           - name: lv # Name of the logical volume
#             size: 999.9 # Size of volume in GB (slightly less than sum of all physical volumes because LVM reserves some space)
#             stripes: 2 # Count of stripes for volume
#             filesystem: xfs # The filesystem to create on the logical volume
#             filesystem_opts: -f # the filesystem opts in mkfs
#             mount: /mnt/large_work_dir # The directory to mount this LVM volume to

# OPTIONAL: You can also define your own variables here for use when
# transforming config files, and they will be available in your config
# templates as <%= rubber_env.var_name %>
#
# var_name: var_value

# All variables can also be overridden on the role and/or host level by creating
# a sub level to the config under roles and hosts,
# e.g. to install mysql only on the db role, and awstats only on web01
# (a concrete sketch follows the skeletons below):

# OPTIONAL: Role specific overrides
# roles:
#   somerole:
#     packages: []
#   somerole2:
#     myconfig: someval

# OPTIONAL: Host specific overrides
# hosts:
#   somehost:
#     packages: []
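# A sketch of the mysql/awstats example mentioned above (the package names are
# illustrative; adjust for your distribution):
#
# roles:
#   db:
#     packages: [mysql-server]
# hosts:
#   web01:
#     packages: [awstats]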