Commit d0d3229f authored by Nathan Hartwell

Merge branch 'master' of https://github.com/mitchellh/packer

Conflicts:
	provisioner/salt-masterless/provisioner.go
parents e5c6f1a7 c8b3dfff
sudo: false
language: go
go:
- 1.2
- 1.3
- 1.4
- tip
install: make updatedeps
script:
- GOMAXPROCS=2 make test
#- go test -race ./...
branches:
only:
- master
notifications:
irc:
channels:
- "irc.freenode.org#packer-tool"
skip_join: true
use_notice: true
matrix:
fast_finish: true
allow_failures:
- go: tip
## 0.8.0 (unreleased)
FEATURES:
IMPROVEMENTS:
* builder/openstack: Add rackconnect_wait for Rackspace customers to wait for
RackConnect data to appear
* builder/openstack: Add ssh_interface option for RackConnect users that
have prohibitive firewalls
BUG FIXES:
* builder/amazon: Remove deprecated ec2-upload-bundle parameter [GH-1931]
* builder/digitalocean: Ignore invalid fields from the ever-changing v2 API
* builder/docker: Fixed hang on prompt while copying script
* builder/virtualbox: Added SCSI support
* postprocessor/vagrant-cloud: Fixed failing on response
* provisioner/puppet-masterless: Allow manifest_file to be a directory
* provisioner/salt-masterless: Add `--retcode-passthrough` to salt-call
## 0.7.5 (December 9, 2014)
FEATURES:
* **New command: `packer push`**: Push template and files to HashiCorp's
Atlas for building your templates automatically.
* **New post-processor: `atlas`**: Send artifact to HashiCorp's Atlas for
versioning and storing artifacts. These artifacts can then be queried
using the API, Terraform, etc.
IMPROVEMENTS:
* builder/googlecompute: Support for ubuntu-os-cloud project
* builder/googlecompute: Support for OAuth2 to avoid client secrets file
* builder/googlecompute: GCE image created from persistent disk instead of tarball
* builder/qemu: Checksum type "none" can be used
* provisioner/chef: Generate a node name if none available
* provisioner/chef: Added ssl_verify_mode configuration
BUG FIXES:
* builder/parallels: Fixed attachment of ISO to cdrom device
* builder/parallels: Fixed boot load ordering
* builder/digitalocean: Fixed decoding of size
* builder/digitalocean: Fixed missing content-type header in request
* builder/digitalocean: Fixed use of private IP
* builder/digitalocean: Fixed the artifact ID generation
* builder/vsphere: Fixed credential escaping
* builder/qemu: Fixed use of CDROM with disk_image
* builder/aws: Fixed IP address for SSH in VPC
* builder/aws: Fixed issue with multiple block devices
* builder/vmware: Upload VMX to ESX5 after editing
* communicator/docker: Fix handling of symlinks during upload
* provisioner/chef: Fixed use of sudo in some cases
* core: Fixed build name interpolation
* postprocessor/vagrant: Fixed check for Vagrantfile template
## 0.7.2 (October 28, 2014)
......
TEST?=./...
VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods \
-nilfunc -printf -rangeloops -shift -structtags -unsafeptr
default: test
......@@ -10,6 +12,7 @@ dev:
test:
go test $(TEST) $(TESTARGS) -timeout=10s
@$(MAKE) vet
testrace:
go test -race $(TEST) $(TESTARGS)
......@@ -17,4 +20,14 @@ testrace:
updatedeps:
go get -d -v -p 2 ./...
.PHONY: bin default test updatedeps
vet:
@go tool vet 2>/dev/null ; if [ $$? -eq 3 ]; then \
go get golang.org/x/tools/cmd/vet; \
fi
@go tool vet $(VETARGS) . ; if [ $$? -eq 1 ]; then \
echo ""; \
echo "Vet found suspicious constructs. Please check the reported constructs"; \
echo "and fix them if necessary before submitting the code for reviewal."; \
fi
.PHONY: bin default test updatedeps vet
# Packer
[![Build Status](https://travis-ci.org/mitchellh/packer.svg?branch=master)](https://travis-ci.org/mitchellh/packer)
* Website: http://www.packer.io
* IRC: `#packer-tool` on Freenode
* Mailing list: [Google Groups](http://groups.google.com/group/packer-tool)
......
......@@ -2,12 +2,13 @@ package common
import (
"fmt"
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/ec2"
"github.com/mitchellh/packer/packer"
"log"
"sort"
"strings"
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/ec2"
"github.com/mitchellh/packer/packer"
)
// Artifact is an artifact implementation that contains built AMIs.
......@@ -53,7 +54,12 @@ func (a *Artifact) String() string {
}
func (a *Artifact) State(name string) interface{} {
switch name {
case "atlas.artifact.metadata":
return a.stateAtlasMetadata()
default:
return nil
}
}
func (a *Artifact) Destroy() error {
......@@ -79,3 +85,13 @@ func (a *Artifact) Destroy() error {
return nil
}
func (a *Artifact) stateAtlasMetadata() interface{} {
metadata := make(map[string]string)
for region, imageId := range a.Amis {
k := fmt.Sprintf("region.%s", region)
metadata[k] = imageId
}
return metadata
}
package common
import (
"github.com/mitchellh/packer/packer"
"reflect"
"testing"
"github.com/mitchellh/packer/packer"
)
func TestArtifact_Impl(t *testing.T) {
......@@ -26,6 +28,24 @@ func TestArtifactId(t *testing.T) {
}
}
func TestArtifactState_atlasMetadata(t *testing.T) {
a := &Artifact{
Amis: map[string]string{
"east": "foo",
"west": "bar",
},
}
actual := a.State("atlas.artifact.metadata")
expected := map[string]string{
"region.east": "foo",
"region.west": "bar",
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
}
func TestArtifactString(t *testing.T) {
expected := `AMIs were created:
......
......@@ -60,12 +60,12 @@ func (b *BlockDevices) Prepare(t *packer.ConfigTemplate) []error {
var errs []error
for outer, bds := range lists {
for i := 0; i < len(bds); i++ {
templates := map[string]*string{
"device_name": &bd.DeviceName,
"snapshot_id": &bd.SnapshotId,
"virtual_name": &bd.VirtualName,
"volume_type": &bd.VolumeType,
"device_name": &bds[i].DeviceName,
"snapshot_id": &bds[i].SnapshotId,
"virtual_name": &bds[i].VirtualName,
"volume_type": &bds[i].VolumeType,
}
errs := make([]error, 0)
......
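The hunk above fixes a classic Go slip: the old loop iterated `for i, bd := range bds` and built the templates map from `&bd.DeviceName` and friends, but the range variable `bd` is a single copy reused on every iteration (under the loop semantics of the Go versions in this build matrix; Go 1.22 later changed this), so the processed template results landed in a throwaway copy instead of the slice, and every stored pointer aliased the same variable. Indexing `&bds[i]` points into the slice's backing array instead. A minimal standalone sketch of the pitfall, using a hypothetical `Dev` type:

```go
package main

import "fmt"

type Dev struct{ Name string }

func main() {
	devs := []Dev{{"sda"}, {"sdb"}, {"sdc"}}

	// Buggy: &d aliases the single loop variable (pre-Go 1.22 semantics),
	// so every stored pointer refers to the last element's copy.
	var bad []*string
	for _, d := range devs {
		bad = append(bad, &d.Name)
	}
	fmt.Println(*bad[0], *bad[1], *bad[2]) // sdc sdc sdc

	// Fixed: index into the slice so each pointer targets a distinct element.
	var good []*string
	for i := 0; i < len(devs); i++ {
		good = append(good, &devs[i].Name)
	}
	fmt.Println(*good[0], *good[1], *good[2]) // sda sdb sdc
}
```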
......@@ -16,14 +16,14 @@ func SSHAddress(e *ec2.EC2, port int, private bool) func(multistep.StateBag) (st
for j := 0; j < 2; j++ {
var host string
i := state.Get("instance").(*ec2.Instance)
if i.DNSName != "" {
host = i.DNSName
} else if i.VpcId != "" {
if i.VpcId != "" {
if i.PublicIpAddress != "" && !private {
host = i.PublicIpAddress
} else {
host = i.PrivateIpAddress
}
} else if i.DNSName != "" {
host = i.DNSName
}
if host != "" {
......
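The reordering above makes the VPC check take precedence: previously an instance with both a `DNSName` and a `VpcId` always got the DNS name, so the `private` flag (SSH over the VPC private IP) was silently ignored; this matches the changelog entry "Fixed IP address for SSH in VPC". The corrected precedence, distilled into a standalone helper (field names as in the goamz `ec2.Instance` used above; the helper itself is illustrative, not part of the diff):

```go
// pickSSHHost mirrors the corrected precedence: VPC addresses first,
// DNS name only as a fallback, empty string when nothing is usable yet.
func pickSSHHost(dnsName, vpcId, publicIP, privateIP string, private bool) string {
	if vpcId != "" {
		if publicIP != "" && !private {
			return publicIP
		}
		return privateIP
	}
	if dnsName != "" {
		return dnsName
	}
	return ""
}
```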
......@@ -79,7 +79,7 @@ func amiRegionCopy(state multistep.StateBag, auth aws.Auth, imageId string,
if err != nil {
return "", fmt.Errorf("Error Copying AMI (%s) to region (%s): %s",
imageId, target.Name, err)
}
stateChange := StateChangeConf{
......@@ -91,7 +91,7 @@ func amiRegionCopy(state multistep.StateBag, auth aws.Auth, imageId string,
if _, err := WaitForState(&stateChange); err != nil {
return "", fmt.Errorf("Error waiting for AMI (%s) in region (%s): %s",
resp.ImageId, target.Name, err)
}
return resp.ImageId, nil
......
......@@ -87,7 +87,7 @@ func (s *stepCreateAMI) Cleanup(state multistep.StateBag) {
ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %s", err))
return
} else if resp.Return == false {
ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %s", resp.Return))
ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %t", resp.Return))
return
}
}
......@@ -75,7 +75,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
"-s {{.SecretKey}} " +
"-d {{.BundleDirectory}} " +
"--batch " +
"--region {{.Region}} " +
"--location {{.Region}} " +
"--retry"
}
......
......@@ -5,12 +5,16 @@
package digitalocean
type Region struct {
Slug string `json:"slug"`
Name string `json:"name"`
// v1 only
Id uint `json:"id,omitempty"`
// v2 only
Sizes []string `json:"sizes,omitempty"`
Available bool `json:"available,omitempty"`
Features []string `json:"features,omitempty"`
}
type RegionsResp struct {
......@@ -18,16 +22,19 @@ type RegionsResp struct {
}
type Size struct {
Slug string `json:"slug"`
// v1 only
Id uint `json:"id,omitempty"`
Name string `json:"name,omitempty"`
// v2 only
Memory uint `json:"memory,omitempty"`
VCPUS uint `json:"vcpus,omitempty"`
Disk uint `json:"disk,omitempty"`
Transfer float64 `json:"transfer,omitempty"`
PriceMonthly float64 `json:"price_monthly,omitempty"`
PriceHourly float64 `json:"price_hourly,omitempty"`
}
type SizesResp struct {
......@@ -35,14 +42,15 @@ type SizesResp struct {
}
type Image struct {
Id uint `json:"id"`
Name string `json:"name"`
Slug string `json:"slug"`
Distribution string `json:"distribution"`
// v2 only
Public bool `json:"public,omitempty"`
ActionIds []string `json:"action_ids,omitempty"`
CreatedAt string `json:"created_at,omitempty"`
}
type ImagesResp struct {
......
......@@ -262,8 +262,10 @@ func (d DigitalOceanClientV2) DropletStatus(id uint) (string, string, error) {
}
var ip string
for _, n := range res.Droplet.Networks.V4 {
if n.Type == "public" {
ip = n.IPAddr
}
}
return ip, res.Droplet.Status, err
......@@ -285,17 +287,21 @@ func NewRequestV2(d DigitalOceanClientV2, path string, method string, req interf
enc.Encode(req)
defer buf.Reset()
request, err = http.NewRequest(method, url, buf)
request.Header.Add("Content-Type", "application/json")
} else {
request, err = http.NewRequest(method, url, nil)
}
if err != nil {
return err
}
// Add the authentication parameters
request.Header.Add("Authorization", "Bearer "+d.APIToken)
log.Printf("sending new request to digitalocean: %s", url)
if buf != nil {
log.Printf("sending new request to digitalocean: %s buffer: %s", url, buf)
} else {
log.Printf("sending new request to digitalocean: %s", url)
}
resp, err := client.Do(request)
if err != nil {
return err
......@@ -325,7 +331,10 @@ func NewRequestV2(d DigitalOceanClientV2, path string, method string, req interf
return errors.New(fmt.Sprintf("Failed to decode JSON response %s (HTTP %v) from DigitalOcean: %s", err.Error(),
resp.StatusCode, body))
}
switch resp.StatusCode {
case 403, 401, 429, 422, 404, 503, 500:
return errors.New(fmt.Sprintf("digitalocean request error: %+v", res))
}
return nil
}
......
......@@ -3,6 +3,7 @@ package digitalocean
import (
"fmt"
"log"
"strconv"
)
type Artifact struct {
......@@ -29,8 +30,7 @@ func (*Artifact) Files() []string {
}
func (a *Artifact) Id() string {
return strconv.FormatUint(uint64(a.snapshotId), 10)
}
func (a *Artifact) String() string {
......
package digitalocean
import (
"github.com/mitchellh/packer/packer"
"testing"
"github.com/mitchellh/packer/packer"
)
func TestArtifact_Impl(t *testing.T) {
......@@ -13,6 +14,15 @@ func TestArtifact_Impl(t *testing.T) {
}
}
func TestArtifactId(t *testing.T) {
a := &Artifact{"packer-foobar", 42, "San Francisco", nil}
expected := "42"
if a.Id() != expected {
t.Fatalf("artifact ID should match: %v", expected)
}
}
func TestArtifactString(t *testing.T) {
a := &Artifact{"packer-foobar", 42, "San Francisco", nil}
expected := "A snapshot was created: 'packer-foobar' in region 'San Francisco'"
......
......@@ -17,12 +17,12 @@ import (
)
// see https://api.digitalocean.com/images/?client_id=[client_id]&api_key=[api_key]
// name="Ubuntu 12.04.4 x64", id=3101045,
// name="Ubuntu 12.04.4 x64", id=6374128,
const DefaultImage = "ubuntu-12-04-x64"
// see https://api.digitalocean.com/regions/?client_id=[client_id]&api_key=[api_key]
// name="New York", id=1
const DefaultRegion = "nyc1"
// name="New York 3", id=8
const DefaultRegion = "nyc3"
// see https://api.digitalocean.com/sizes/?client_id=[client_id]&api_key=[api_key]
// name="512MB", id=66 (the smallest droplet size)
......
......@@ -264,7 +264,7 @@ func TestBuilderPrepare_SSHUsername(t *testing.T) {
}
if b.config.SSHUsername != "root" {
t.Errorf("invalid: %d", b.config.SSHUsername)
t.Errorf("invalid: %s", b.config.SSHUsername)
}
// Test set
......@@ -297,7 +297,7 @@ func TestBuilderPrepare_SSHTimeout(t *testing.T) {
}
if b.config.RawSSHTimeout != "1m" {
t.Errorf("invalid: %d", b.config.RawSSHTimeout)
t.Errorf("invalid: %s", b.config.RawSSHTimeout)
}
// Test set
......@@ -338,7 +338,7 @@ func TestBuilderPrepare_StateTimeout(t *testing.T) {
}
if b.config.RawStateTimeout != "6m" {
t.Errorf("invalid: %d", b.config.RawStateTimeout)
t.Errorf("invalid: %s", b.config.RawStateTimeout)
}
// Test set
......@@ -379,7 +379,7 @@ func TestBuilderPrepare_PrivateNetworking(t *testing.T) {
}
if b.config.PrivateNetworking != false {
t.Errorf("invalid: %s", b.config.PrivateNetworking)
t.Errorf("invalid: %t", b.config.PrivateNetworking)
}
// Test set
......@@ -394,7 +394,7 @@ func TestBuilderPrepare_PrivateNetworking(t *testing.T) {
}
if b.config.PrivateNetworking != true {
t.Errorf("invalid: %s", b.config.PrivateNetworking)
t.Errorf("invalid: %t", b.config.PrivateNetworking)
}
}
......
......@@ -75,7 +75,7 @@ func (c *Communicator) Upload(dst string, src io.Reader, fi *os.FileInfo) error
// Copy the file into place by copying the temporary file we put
// into the shared folder into the proper location in the container
cmd := &packer.RemoteCmd{
Command: fmt.Sprintf("cp %s/%s %s", c.ContainerDir,
Command: fmt.Sprintf("command cp %s/%s %s", c.ContainerDir,
filepath.Base(tempfile.Name()), dst),
}
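Prefixing the copy with `command` makes the shell bypass any alias or function named `cp`, so an image whose shell wraps `cp` (for example with `cp -i`) cannot turn the copy into an interactive prompt; this is the fix behind the changelog entry "Fixed hang on prompt while copying script". A small sketch of the effect, driving `sh -c` the way the communicator does (the shadowing function is contrived for the demo):

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// A shell function named cp shadows the real binary;
	// `command cp` skips the function and runs the real cp.
	script := `
cp() { echo "shell function intercepted the copy"; }
cp /etc/hosts /tmp/hosts.copy           # invokes the function, copies nothing
command cp /etc/hosts /tmp/hosts.copy   # invokes the real cp
ls /tmp/hosts.copy`
	out, err := exec.Command("sh", "-c", script).CombinedOutput()
	if err != nil {
		fmt.Println("error:", err)
	}
	fmt.Printf("%s", out)
}
```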
......@@ -117,6 +117,16 @@ func (c *Communicator) UploadDir(dst string, src string, exclude []string) error
return os.MkdirAll(hostpath, info.Mode())
}
if info.Mode() & os.ModeSymlink == os.ModeSymlink {
dest, err := os.Readlink(path)
if err != nil {
return err
}
return os.Symlink(dest, hostpath)
}
// It is a file, copy it over, including mode.
src, err := os.Open(path)
if err != nil {
......@@ -156,7 +166,7 @@ func (c *Communicator) UploadDir(dst string, src string, exclude []string) error
// Make the directory, then copy into it
cmd := &packer.RemoteCmd{
Command: fmt.Sprintf("set -e; mkdir -p %s; cp -R %s/* %s",
Command: fmt.Sprintf("set -e; mkdir -p %s; command cp -R %s/* %s",
containerDst, containerSrc, containerDst),
}
if err := c.Start(cmd); err != nil {
......
......@@ -13,16 +13,6 @@ type accountFile struct {
ClientId string `json:"client_id"`
}
func loadJSON(result interface{}, path string) error {
f, err := os.Open(path)
if err != nil {
......
......@@ -35,7 +35,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
// representing a GCE machine image.
func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
driver, err := NewDriverGCE(
ui, b.config.ProjectId, &b.config.account, &b.config.clientSecrets)
ui, b.config.ProjectId, &b.config.account)
if err != nil {
return nil, err
}
......@@ -49,6 +49,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
// Build the steps.
steps := []multistep.Step{
new(StepCheckExistingImage),
&StepCreateSSHKey{
Debug: b.config.PackerDebug,
DebugKeyPath: fmt.Sprintf("gce_%s.pem", b.config.PackerBuildName),
......@@ -65,10 +66,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
SSHWaitTimeout: 5 * time.Minute,
},
new(common.StepProvision),
new(StepTeardownInstance),
new(StepCreateImage),
}
// Run the steps.
......
......@@ -16,11 +16,10 @@ import (
type Config struct {
common.PackerConfig `mapstructure:",squash"`
AccountFile string `mapstructure:"account_file"`
ProjectId string `mapstructure:"project_id"`
BucketName string `mapstructure:"bucket_name"`
DiskName string `mapstructure:"disk_name"`
DiskSizeGb int64 `mapstructure:"disk_size"`
ImageName string `mapstructure:"image_name"`
ImageDescription string `mapstructure:"image_description"`
......@@ -38,8 +37,6 @@ type Config struct {
Zone string `mapstructure:"zone"`
account accountFile
instanceName string
privateKeyBytes []byte
sshTimeout time.Duration
stateTimeout time.Duration
......@@ -83,6 +80,10 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
c.InstanceName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID())
}
if c.DiskName == "" {
c.DiskName = c.InstanceName
}
if c.MachineType == "" {
c.MachineType = "n1-standard-1"
}
......@@ -105,10 +106,9 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
// Process Templates
templates := map[string]*string{
"account_file": &c.AccountFile,
"client_secrets_file": &c.ClientSecretsFile,
"account_file": &c.AccountFile,
"bucket_name": &c.BucketName,
"disk_name": &c.DiskName,
"image_name": &c.ImageName,
"image_description": &c.ImageDescription,
"instance_name": &c.InstanceName,
......@@ -133,21 +133,6 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
}
// Process required parameters.
if c.BucketName == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("a bucket_name must be specified"))
}
if c.AccountFile == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("an account_file must be specified"))
}
if c.ClientSecretsFile == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("a client_secrets_file must be specified"))
}
if c.ProjectId == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("a project_id must be specified"))
......@@ -185,13 +170,6 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
}
}
if c.ClientSecretsFile != "" {
if err := loadJSON(&c.clientSecrets, c.ClientSecretsFile); err != nil {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Failed parsing client secrets file: %s", err))
}
}
// Check for any errors.
if errs != nil && len(errs.Errors) > 0 {
return nil, nil, errs
......
......@@ -7,12 +7,10 @@ import (
func testConfig(t *testing.T) map[string]interface{} {
return map[string]interface{}{
"account_file": testAccountFile(t),
"bucket_name": "foo",
"client_secrets_file": testClientSecretsFile(t),
"project_id": "hashicorp",
"source_image": "foo",
"zone": "us-east-1a",
"account_file": testAccountFile(t),
"project_id": "hashicorp",
"source_image": "foo",
"zone": "us-east-1a",
}
}
......@@ -58,33 +56,6 @@ func TestConfigPrepare(t *testing.T) {
true,
},
{
"private_key_file",
"/tmp/i/should/not/exist",
......@@ -180,22 +151,6 @@ func testAccountFile(t *testing.T) string {
return tf.Name()
}
// This is just some dummy data that doesn't actually work (it was revoked
// a long time ago).
const testAccountContent = `{}`
......@@ -4,15 +4,23 @@ package googlecompute
// with GCE. The Driver interface exists mostly to allow a mock implementation
// to be used to test the steps.
type Driver interface {
// ImageExists returns true if the specified image exists. If an error
// occurs calling the API, this method returns false.
ImageExists(name string) bool
// CreateImage creates an image from the given disk in Google Compute
// Engine.
CreateImage(name, description, zone, disk string) <-chan error
// DeleteImage deletes the image with the given name.
DeleteImage(name string) <-chan error
// DeleteInstance deletes the given instance, keeping the boot disk.
DeleteInstance(zone, name string) (<-chan error, error)
// DeleteDisk deletes the disk with the given name.
DeleteDisk(zone, name string) (<-chan error, error)
// GetNatIP gets the NAT IP address for the instance.
GetNatIP(zone, name string) (string, error)
......
......@@ -4,12 +4,15 @@ import (
"fmt"
"log"
"net/http"
"runtime"
"time"
"code.google.com/p/goauth2/oauth"
"code.google.com/p/goauth2/oauth/jwt"
"code.google.com/p/google-api-go-client/compute/v1"
"github.com/mitchellh/packer/packer"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"golang.org/x/oauth2/jwt"
"google.golang.org/api/compute/v1"
)
// driverGCE is a Driver implementation that actually talks to GCE.
......@@ -20,39 +23,59 @@ type driverGCE struct {
ui packer.Ui
}
var DriverScopes = []string{"https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/devstorage.full_control"}
func NewDriverGCE(ui packer.Ui, p string, a *accountFile) (Driver, error) {
var err error
var client *http.Client
// Auth with AccountFile first if provided
if a.PrivateKey != "" {
log.Printf("[INFO] Requesting Google token via AccountFile...")
log.Printf("[INFO] -- Email: %s", a.ClientEmail)
log.Printf("[INFO] -- Scopes: %s", DriverScopes)
log.Printf("[INFO] -- Private Key Length: %d", len(a.PrivateKey))
conf := jwt.Config{
Email: a.ClientEmail,
PrivateKey: []byte(a.PrivateKey),
Scopes: DriverScopes,
TokenURL: "https://accounts.google.com/o/oauth2/token",
}
// Initiate an http.Client. The following GET request will be
// authorized and authenticated on the behalf of
// your service account.
client = conf.Client(oauth2.NoContext)
} else {
log.Printf("[INFO] Requesting Google token via GCE Service Role...")
client = &http.Client{
Transport: &oauth2.Transport{
// Fetch from Google Compute Engine's metadata server to retrieve
// an access token for the provided account.
// If no account is specified, "default" is used.
Source: google.ComputeTokenSource(""),
},
}
}
if err != nil {
return nil, err
}
log.Printf("[INFO] Instantiating GCE client...")
service, err := compute.New(client)
// Set UserAgent
versionString := "0.0.0"
// TODO(dcunnin): Use Packer's version code from version.go
// versionString := main.Version
// if main.VersionPrerelease != "" {
// versionString = fmt.Sprintf("%s-%s", versionString, main.VersionPrerelease)
// }
service.UserAgent = fmt.Sprintf(
"(%s %s) Packer/%s", runtime.GOOS, runtime.GOARCH, versionString)
if err != nil {
return nil, err
}
......@@ -64,15 +87,19 @@ func NewDriverGCE(ui packer.Ui, p string, a *accountFile, c *clientSecretsFile)
}, nil
}
func (d *driverGCE) CreateImage(name, description, url string) <-chan error {
func (d *driverGCE) ImageExists(name string) bool {
_, err := d.service.Images.Get(d.projectId, name).Do()
// The API may return an error for reasons other than the image not
// existing, but this heuristic is sufficient for now.
return err == nil
}
func (d *driverGCE) CreateImage(name, description, zone, disk string) <-chan error {
image := &compute.Image{
Description: description,
Name: name,
RawDisk: &compute.ImageRawDisk{
ContainerType: "TAR",
Source: url,
},
SourceType: "RAW",
SourceDisk: fmt.Sprintf("%s%s/zones/%s/disks/%s", d.service.BasePath, d.projectId, zone, disk),
SourceType: "RAW",
}
errCh := make(chan error, 1)
......@@ -109,6 +136,17 @@ func (d *driverGCE) DeleteInstance(zone, name string) (<-chan error, error) {
return errCh, nil
}
func (d *driverGCE) DeleteDisk(zone, name string) (<-chan error, error) {
op, err := d.service.Disks.Delete(d.projectId, zone, name).Do()
if err != nil {
return nil, err
}
errCh := make(chan error, 1)
go waitForState(errCh, "DONE", d.refreshZoneOp(zone, op))
return errCh, nil
}
func (d *driverGCE) GetNatIP(zone, name string) (string, error) {
instance, err := d.service.Instances.Get(d.projectId, zone, name).Do()
if err != nil {
......@@ -179,7 +217,7 @@ func (d *driverGCE) RunInstance(c *InstanceConfig) (<-chan error, error) {
Mode: "READ_WRITE",
Kind: "compute#attachedDisk",
Boot: true,
AutoDelete: false,
InitializeParams: &compute.AttachedDiskInitializeParams{
SourceImage: image.SelfLink,
DiskSizeGb: c.DiskSizeGb,
......@@ -235,7 +273,7 @@ func (d *driverGCE) WaitForInstance(state, zone, name string) <-chan error {
}
func (d *driverGCE) getImage(img Image) (image *compute.Image, err error) {
projects := []string{img.ProjectId, "centos-cloud", "coreos-cloud", "debian-cloud", "google-containers", "opensuse-cloud", "rhel-cloud", "suse-cloud", "windows-cloud"}
projects := []string{img.ProjectId, "centos-cloud", "coreos-cloud", "debian-cloud", "google-containers", "opensuse-cloud", "rhel-cloud", "suse-cloud", "ubuntu-os-cloud", "windows-cloud"}
for _, project := range projects {
image, err = d.service.Images.Get(project, img.Name).Do()
if err == nil && image != nil && image.SelfLink != "" {
......
......@@ -3,9 +3,13 @@ package googlecompute
// DriverMock is a Driver implementation that is a mocked out so that
// it can be used for tests.
type DriverMock struct {
ImageExistsName string
ImageExistsResult bool
CreateImageName string
CreateImageDesc string
CreateImageZone string
CreateImageDisk string
CreateImageErrCh <-chan error
DeleteImageName string
......@@ -16,6 +20,11 @@ type DriverMock struct {
DeleteInstanceErrCh <-chan error
DeleteInstanceErr error
DeleteDiskZone string
DeleteDiskName string
DeleteDiskErrCh <-chan error
DeleteDiskErr error
GetNatIPZone string
GetNatIPName string
GetNatIPResult string
......@@ -31,10 +40,16 @@ type DriverMock struct {
WaitForInstanceErrCh <-chan error
}
func (d *DriverMock) CreateImage(name, description, url string) <-chan error {
func (d *DriverMock) ImageExists(name string) bool {
d.ImageExistsName = name
return d.ImageExistsResult
}
func (d *DriverMock) CreateImage(name, description, zone, disk string) <-chan error {
d.CreateImageName = name
d.CreateImageDesc = description
d.CreateImageZone = zone
d.CreateImageDisk = disk
resultCh := d.CreateImageErrCh
if resultCh == nil {
......@@ -73,6 +88,20 @@ func (d *DriverMock) DeleteInstance(zone, name string) (<-chan error, error) {
return resultCh, d.DeleteInstanceErr
}
func (d *DriverMock) DeleteDisk(zone, name string) (<-chan error, error) {
d.DeleteDiskZone = zone
d.DeleteDiskName = name
resultCh := d.DeleteDiskErrCh
if resultCh == nil {
ch := make(chan error)
close(ch)
resultCh = ch
}
return resultCh, d.DeleteDiskErr
}
func (d *DriverMock) GetNatIP(zone, name string) (string, error) {
d.GetNatIPZone = zone
d.GetNatIPName = name
......
package googlecompute
import (
"fmt"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
// StepCheckExistingImage represents a Packer build step that checks if the
// target image already exists, and aborts immediately if so.
type StepCheckExistingImage int
// Run executes the Packer build step that checks if the image already exists.
func (s *StepCheckExistingImage) Run(state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
driver := state.Get("driver").(Driver)
ui := state.Get("ui").(packer.Ui)
ui.Say("Checking image does not exist...")
exists := driver.ImageExists(config.ImageName)
if exists {
err := fmt.Errorf("Image %s already exists", config.ImageName)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
return multistep.ActionContinue
}
// Cleanup.
func (s *StepCheckExistingImage) Cleanup(state multistep.StateBag) {}
package googlecompute
import (
"github.com/mitchellh/multistep"
"testing"
)
func TestStepCheckExistingImage_impl(t *testing.T) {
var _ multistep.Step = new(StepCheckExistingImage)
}
func TestStepCheckExistingImage(t *testing.T) {
state := testState(t)
step := new(StepCheckExistingImage)
defer step.Cleanup(state)
state.Put("instance_name", "foo")
config := state.Get("config").(*Config)
driver := state.Get("driver").(*DriverMock)
driver.ImageExistsResult = true
// run the step
if action := step.Run(state); action != multistep.ActionHalt {
t.Fatalf("bad action: %#v", action)
}
// Verify state
if driver.ImageExistsName != config.ImageName {
t.Fatalf("bad: %#v", driver.ImageExistsName)
}
}
package googlecompute
import (
"errors"
"fmt"
"path/filepath"
"time"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
......@@ -14,39 +15,32 @@ type StepCreateImage int
// Run executes the Packer build step that creates a GCE machine image.
//
// The image is created from the persistent disk used by the instance. The
// instance must be deleted and the disk retained before doing this step.
func (s *StepCreateImage) Run(state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
comm := state.Get("communicator").(packer.Communicator)
driver := state.Get("driver").(Driver)
ui := state.Get("ui").(packer.Ui)
sudoPrefix := ""
if config.SSHUsername != "root" {
sudoPrefix = "sudo "
}
imageFilename := fmt.Sprintf("%s.tar.gz", config.ImageName)
imageBundleCmd := "/usr/bin/gcimagebundle -d /dev/sda -o /tmp/"
ui.Say("Creating image...")
errCh := driver.CreateImage(config.ImageName, config.ImageDescription, config.Zone, config.DiskName)
var err error
select {
case err = <-errCh:
case <-time.After(config.stateTimeout):
err = errors.New("time out while waiting for image to register")
}
if err != nil {
err := fmt.Errorf("Error creating image: %s", err)
err := fmt.Errorf("Error waiting for image: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
state.Put("image_file_name", filepath.Join("/tmp", imageFilename))
state.Put("image_name", config.ImageName)
return multistep.ActionContinue
}
// Cleanup.
func (s *StepCreateImage) Cleanup(state multistep.StateBag) {}
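The rewritten step no longer shells into the instance to run `gcimagebundle`; it asks the driver to create the image from the retained boot disk, then waits on the returned error channel, racing it against `config.stateTimeout` with the `select`/`time.After` idiom that the teardown and disk-deletion steps below also use. A self-contained sketch of that idiom:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitWithTimeout returns the first value received on errCh, or a timeout
// error if nothing arrives within d; this is the select idiom these steps share.
func waitWithTimeout(errCh <-chan error, d time.Duration) error {
	select {
	case err := <-errCh:
		return err
	case <-time.After(d):
		return errors.New("timed out waiting for operation")
	}
}

func main() {
	errCh := make(chan error, 1)
	go func() {
		time.Sleep(10 * time.Millisecond)
		errCh <- nil // the long-running operation finished cleanly
	}()
	fmt.Println(waitWithTimeout(errCh, time.Second)) // <nil>
}
```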
package googlecompute
import (
"strings"
"errors"
"testing"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
func TestStepCreateImage_impl(t *testing.T) {
......@@ -17,38 +16,49 @@ func TestStepCreateImage(t *testing.T) {
step := new(StepCreateImage)
defer step.Cleanup(state)
config := state.Get("config").(*Config)
driver := state.Get("driver").(*DriverMock)
// run the step
if action := step.Run(state); action != multistep.ActionContinue {
t.Fatalf("bad action: %#v", action)
}
// Verify state
if driver.CreateImageName != config.ImageName {
t.Fatalf("bad: %#v", driver.CreateImageName)
}
if driver.CreateImageDesc != config.ImageDescription {
t.Fatalf("bad: %#v", driver.CreateImageDesc)
}
if strings.HasPrefix(comm.StartCmd.Command, "sudo") {
t.Fatal("should not sudo")
if driver.CreateImageZone != config.Zone {
t.Fatalf("bad: %#v", driver.CreateImageZone)
}
if !strings.Contains(comm.StartCmd.Command, "gcimagebundle") {
t.Fatalf("bad command: %#v", comm.StartCmd.Command)
if driver.CreateImageDisk != config.DiskName {
t.Fatalf("bad: %#v", driver.CreateImageDisk)
}
if _, ok := state.GetOk("image_file_name"); !ok {
t.Fatal("should have image")
nameRaw, ok := state.GetOk("image_name")
if !ok {
t.Fatal("should have name")
}
if name, ok := nameRaw.(string); !ok {
t.Fatal("name is not a string")
} else if name != config.ImageName {
t.Fatalf("bad name: %s", name)
}
}
func TestStepCreateImage_errorOnChannel(t *testing.T) {
state := testState(t)
step := new(StepCreateImage)
defer step.Cleanup(state)
errCh := make(chan error, 1)
errCh <- errors.New("error")
driver := state.Get("driver").(*DriverMock)
driver.CreateImageErrCh = errCh
// run the step
if action := step.Run(state); action != multistep.ActionHalt {
......@@ -58,39 +68,7 @@ func TestStepCreateImage_badExitStatus(t *testing.T) {
if _, ok := state.GetOk("error"); !ok {
t.Fatal("should have error")
}
if _, ok := state.GetOk("image_file_name"); ok {
if _, ok := state.GetOk("image_name"); ok {
t.Fatal("should NOT have image")
}
}
......@@ -12,8 +12,6 @@ import (
// StepCreateInstance represents a Packer build step that creates GCE instances.
type StepCreateInstance struct {
Debug bool
}
func (config *Config) getImage() Image {
......@@ -91,14 +89,18 @@ func (s *StepCreateInstance) Run(state multistep.StateBag) multistep.StepAction
// Things succeeded, store the name so we can remove it later
state.Put("instance_name", name)
return multistep.ActionContinue
}
// Cleanup destroys the GCE instance created during the image creation process.
func (s *StepCreateInstance) Cleanup(state multistep.StateBag) {
if s.instanceName == "" {
nameRaw, ok := state.GetOk("instance_name")
if !ok {
return
}
name := nameRaw.(string)
if name == "" {
return
}
......@@ -107,7 +109,7 @@ func (s *StepCreateInstance) Cleanup(state multistep.StateBag) {
ui := state.Get("ui").(packer.Ui)
ui.Say("Deleting instance...")
errCh, err := driver.DeleteInstance(config.Zone, name)
if err == nil {
select {
case err = <-errCh:
......@@ -120,9 +122,32 @@ func (s *StepCreateInstance) Cleanup(state multistep.StateBag) {
ui.Error(fmt.Sprintf(
"Error deleting instance. Please delete it manually.\n\n"+
"Name: %s\n"+
"Error: %s", s.instanceName, err))
"Error: %s", name, err))
}
s.instanceName = ""
ui.Message("Instance has been deleted!")
state.Put("instance_name", "")
// Deleting the instance does not remove the boot disk. This cleanup removes
// the disk.
ui.Say("Deleting disk...")
errCh, err = driver.DeleteDisk(config.Zone, config.DiskName)
if err == nil {
select {
case err = <-errCh:
case <-time.After(config.stateTimeout):
err = errors.New("time out while waiting for disk to delete")
}
}
if err != nil {
ui.Error(fmt.Sprintf(
"Error deleting disk. Please delete it manually.\n\n"+
"Name: %s\n"+
"Error: %s", config.InstanceName, err))
}
ui.Message("Disk has been deleted!")
return
}
......@@ -39,7 +39,14 @@ func TestStepCreateInstance(t *testing.T) {
t.Fatal("should've deleted instance")
}
if driver.DeleteInstanceZone != config.Zone {
t.Fatal("bad zone: %#v", driver.DeleteInstanceZone)
t.Fatalf("bad instance zone: %#v", driver.DeleteInstanceZone)
}
if driver.DeleteDiskName != config.InstanceName {
t.Fatal("should've deleted disk")
}
if driver.DeleteDiskZone != config.Zone {
t.Fatalf("bad disk zone: %#v", driver.DeleteDiskZone)
}
}
......
package googlecompute
import (
"errors"
"fmt"
"time"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
// StepRegisterImage represents a Packer build step that registers GCE machine images.
type StepRegisterImage int
// Run executes the Packer build step that registers a GCE machine image.
func (s *StepRegisterImage) Run(state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
driver := state.Get("driver").(Driver)
ui := state.Get("ui").(packer.Ui)
var err error
imageURL := fmt.Sprintf(
"https://storage.cloud.google.com/%s/%s.tar.gz",
config.BucketName, config.ImageName)
ui.Say("Registering image...")
errCh := driver.CreateImage(config.ImageName, config.ImageDescription, imageURL)
select {
case err = <-errCh:
case <-time.After(config.stateTimeout):
err = errors.New("time out while waiting for image to register")
}
if err != nil {
err := fmt.Errorf("Error waiting for image: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
state.Put("image_name", config.ImageName)
return multistep.ActionContinue
}
// Cleanup.
func (s *StepRegisterImage) Cleanup(state multistep.StateBag) {}
package googlecompute
import (
"errors"
"github.com/mitchellh/multistep"
"testing"
"time"
)
func TestStepRegisterImage_impl(t *testing.T) {
var _ multistep.Step = new(StepRegisterImage)
}
func TestStepRegisterImage(t *testing.T) {
state := testState(t)
step := new(StepRegisterImage)
defer step.Cleanup(state)
config := state.Get("config").(*Config)
driver := state.Get("driver").(*DriverMock)
// run the step
if action := step.Run(state); action != multistep.ActionContinue {
t.Fatalf("bad action: %#v", action)
}
// Verify state
if driver.CreateImageName != config.ImageName {
t.Fatalf("bad: %#v", driver.CreateImageName)
}
if driver.CreateImageDesc != config.ImageDescription {
t.Fatalf("bad: %#v", driver.CreateImageDesc)
}
nameRaw, ok := state.GetOk("image_name")
if !ok {
t.Fatal("should have name")
}
if name, ok := nameRaw.(string); !ok {
t.Fatal("name is not a string")
} else if name != config.ImageName {
t.Fatalf("bad name: %s", name)
}
}
func TestStepRegisterImage_waitError(t *testing.T) {
state := testState(t)
step := new(StepRegisterImage)
defer step.Cleanup(state)
errCh := make(chan error, 1)
errCh <- errors.New("error")
driver := state.Get("driver").(*DriverMock)
driver.CreateImageErrCh = errCh
// run the step
if action := step.Run(state); action != multistep.ActionHalt {
t.Fatalf("bad action: %#v", action)
}
// Verify state
if _, ok := state.GetOk("error"); !ok {
t.Fatal("should have error")
}
if _, ok := state.GetOk("image_name"); ok {
t.Fatal("should NOT have image_name")
}
}
func TestStepRegisterImage_errorTimeout(t *testing.T) {
state := testState(t)
step := new(StepRegisterImage)
defer step.Cleanup(state)
errCh := make(chan error, 1)
go func() {
<-time.After(10 * time.Millisecond)
errCh <- nil
}()
config := state.Get("config").(*Config)
config.stateTimeout = 1 * time.Microsecond
driver := state.Get("driver").(*DriverMock)
driver.CreateImageErrCh = errCh
// run the step
if action := step.Run(state); action != multistep.ActionHalt {
t.Fatalf("bad action: %#v", action)
}
// Verify state
if _, ok := state.GetOk("error"); !ok {
t.Fatal("should have error")
}
if _, ok := state.GetOk("image_name"); ok {
t.Fatal("should NOT have image name")
}
}
package googlecompute
import (
"errors"
"fmt"
"time"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
// StepTeardownInstance represents a Packer build step that tears down GCE
// instances.
type StepTeardownInstance struct {
Debug bool
}
// Run executes the Packer build step that tears down a GCE instance.
func (s *StepTeardownInstance) Run(state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
driver := state.Get("driver").(Driver)
ui := state.Get("ui").(packer.Ui)
name := config.InstanceName
if name == "" {
return multistep.ActionHalt
}
ui.Say("Deleting instance...")
errCh, err := driver.DeleteInstance(config.Zone, name)
if err == nil {
select {
case err = <-errCh:
case <-time.After(config.stateTimeout):
err = errors.New("time out while waiting for instance to delete")
}
}
if err != nil {
ui.Error(fmt.Sprintf(
"Error deleting instance. Please delete it manually.\n\n"+
"Name: %s\n"+
"Error: %s", name, err))
return multistep.ActionHalt
}
ui.Message("Instance has been deleted!")
state.Put("instance_name", "")
return multistep.ActionContinue
}
// Deleting the instance does not remove the boot disk. This cleanup removes
// the disk.
func (s *StepTeardownInstance) Cleanup(state multistep.StateBag) {
config := state.Get("config").(*Config)
driver := state.Get("driver").(Driver)
ui := state.Get("ui").(packer.Ui)
ui.Say("Deleting disk...")
errCh, err := driver.DeleteDisk(config.Zone, config.DiskName)
if err == nil {
select {
case err = <-errCh:
case <-time.After(config.stateTimeout):
err = errors.New("time out while waiting for disk to delete")
}
}
if err != nil {
ui.Error(fmt.Sprintf(
"Error deleting disk. Please delete it manually.\n\n"+
"Name: %s\n"+
"Error: %s", config.InstanceName, err))
}
ui.Message("Disk has been deleted!")
return
}
package googlecompute
import (
"github.com/mitchellh/multistep"
"testing"
)
func TestStepTeardownInstance_impl(t *testing.T) {
var _ multistep.Step = new(StepTeardownInstance)
}
func TestStepTeardownInstance(t *testing.T) {
state := testState(t)
step := new(StepTeardownInstance)
defer step.Cleanup(state)
config := state.Get("config").(*Config)
driver := state.Get("driver").(*DriverMock)
// run the step
if action := step.Run(state); action != multistep.ActionContinue {
t.Fatalf("bad action: %#v", action)
}
if driver.DeleteInstanceName != config.InstanceName {
t.Fatal("should've deleted instance")
}
if driver.DeleteInstanceZone != config.Zone {
t.Fatalf("bad zone: %#v", driver.DeleteInstanceZone)
}
// cleanup
step.Cleanup(state)
if driver.DeleteDiskName != config.InstanceName {
t.Fatal("should've deleted disk")
}
if driver.DeleteDiskZone != config.Zone {
t.Fatalf("bad zone: %#v", driver.DeleteDiskZone)
}
}
package googlecompute
import (
"fmt"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
// StepUpdateGcloud represents a Packer build step that updates the gcloud
// components to the latest version available.
type StepUpdateGcloud int
// Run executes the Packer build step that updates the gcloud components to
// the latest version available.
//
// This step is required to prevent the image creation process from hanging;
// the image creation process utilizes the gcimagebundle cli tool which will
// prompt to update gsutil if a newer version is available.
func (s *StepUpdateGcloud) Run(state multistep.StateBag) multistep.StepAction {
comm := state.Get("communicator").(packer.Communicator)
config := state.Get("config").(*Config)
ui := state.Get("ui").(packer.Ui)
sudoPrefix := ""
if config.SSHUsername != "root" {
sudoPrefix = "sudo "
}
gsutilUpdateCmd := "/usr/local/bin/gcloud -q components update"
cmd := new(packer.RemoteCmd)
cmd.Command = fmt.Sprintf("%s%s", sudoPrefix, gsutilUpdateCmd)
ui.Say("Updating gcloud components...")
err := cmd.StartWithUi(comm, ui)
if err == nil && cmd.ExitStatus != 0 {
err = fmt.Errorf(
"gcloud components update exited with non-zero exit status: %d", cmd.ExitStatus)
}
if err != nil {
err := fmt.Errorf("Error updating gcloud components: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
return multistep.ActionContinue
}
// Cleanup.
func (s *StepUpdateGcloud) Cleanup(state multistep.StateBag) {}
package googlecompute
import (
"strings"
"testing"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
func TestStepUpdateGcloud_impl(t *testing.T) {
var _ multistep.Step = new(StepUpdateGcloud)
}
func TestStepUpdateGcloud(t *testing.T) {
state := testState(t)
step := new(StepUpdateGcloud)
defer step.Cleanup(state)
comm := new(packer.MockCommunicator)
state.Put("communicator", comm)
// run the step
if action := step.Run(state); action != multistep.ActionContinue {
t.Fatalf("bad action: %#v", action)
}
// Verify
if !comm.StartCalled {
t.Fatal("start should be called")
}
if strings.HasPrefix(comm.StartCmd.Command, "sudo") {
t.Fatal("should not sudo")
}
if !strings.Contains(comm.StartCmd.Command, "gcloud -q components update") {
t.Fatalf("bad command: %#v", comm.StartCmd.Command)
}
}
func TestStepUpdateGcloud_badExitStatus(t *testing.T) {
state := testState(t)
step := new(StepUpdateGcloud)
defer step.Cleanup(state)
comm := new(packer.MockCommunicator)
comm.StartExitStatus = 12
state.Put("communicator", comm)
// run the step
if action := step.Run(state); action != multistep.ActionHalt {
t.Fatalf("bad action: %#v", action)
}
if _, ok := state.GetOk("error"); !ok {
t.Fatal("should have error")
}
}
func TestStepUpdateGcloud_nonRoot(t *testing.T) {
state := testState(t)
step := new(StepUpdateGcloud)
defer step.Cleanup(state)
comm := new(packer.MockCommunicator)
state.Put("communicator", comm)
config := state.Get("config").(*Config)
config.SSHUsername = "bob"
// run the step
if action := step.Run(state); action != multistep.ActionContinue {
t.Fatalf("bad action: %#v", action)
}
// Verify
if !comm.StartCalled {
t.Fatal("start should be called")
}
if !strings.HasPrefix(comm.StartCmd.Command, "sudo") {
t.Fatal("should sudo")
}
if !strings.Contains(comm.StartCmd.Command, "gcloud -q components update") {
t.Fatalf("bad command: %#v", comm.StartCmd.Command)
}
}
package googlecompute
import (
"fmt"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
// StepUploadImage represents a Packer build step that uploads GCE machine images.
type StepUploadImage int
// Run executes the Packer build step that uploads a GCE machine image.
func (s *StepUploadImage) Run(state multistep.StateBag) multistep.StepAction {
comm := state.Get("communicator").(packer.Communicator)
config := state.Get("config").(*Config)
imageFilename := state.Get("image_file_name").(string)
ui := state.Get("ui").(packer.Ui)
sudoPrefix := ""
if config.SSHUsername != "root" {
sudoPrefix = "sudo "
}
ui.Say("Uploading image...")
cmd := new(packer.RemoteCmd)
cmd.Command = fmt.Sprintf("%s/usr/local/bin/gsutil cp %s gs://%s",
sudoPrefix, imageFilename, config.BucketName)
err := cmd.StartWithUi(comm, ui)
if err == nil && cmd.ExitStatus != 0 {
err = fmt.Errorf(
"gsutil exited with non-zero exit status: %d", cmd.ExitStatus)
}
if err != nil {
err := fmt.Errorf("Error uploading image: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
return multistep.ActionContinue
}
// Cleanup.
func (s *StepUploadImage) Cleanup(state multistep.StateBag) {}
package googlecompute
import (
"strings"
"testing"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
func TestStepUploadImage_impl(t *testing.T) {
var _ multistep.Step = new(StepUploadImage)
}
func TestStepUploadImage(t *testing.T) {
state := testState(t)
step := new(StepUploadImage)
defer step.Cleanup(state)
comm := new(packer.MockCommunicator)
state.Put("communicator", comm)
state.Put("image_file_name", "foo")
// run the step
if action := step.Run(state); action != multistep.ActionContinue {
t.Fatalf("bad action: %#v", action)
}
// Verify
if !comm.StartCalled {
t.Fatal("start should be called")
}
if strings.HasPrefix(comm.StartCmd.Command, "sudo") {
t.Fatal("should not sudo")
}
if !strings.Contains(comm.StartCmd.Command, "gsutil cp") {
t.Fatalf("bad command: %#v", comm.StartCmd.Command)
}
}
func TestStepUploadImage_badExitStatus(t *testing.T) {
state := testState(t)
step := new(StepUploadImage)
defer step.Cleanup(state)
comm := new(packer.MockCommunicator)
comm.StartExitStatus = 12
state.Put("communicator", comm)
state.Put("image_file_name", "foo")
// run the step
if action := step.Run(state); action != multistep.ActionHalt {
t.Fatalf("bad action: %#v", action)
}
if _, ok := state.GetOk("error"); !ok {
t.Fatal("should have error")
}
}
func TestStepUploadImage_nonRoot(t *testing.T) {
state := testState(t)
step := new(StepUploadImage)
defer step.Cleanup(state)
comm := new(packer.MockCommunicator)
state.Put("communicator", comm)
state.Put("image_file_name", "foo")
config := state.Get("config").(*Config)
config.SSHUsername = "bob"
// run the step
if action := step.Run(state); action != multistep.ActionContinue {
t.Fatalf("bad action: %#v", action)
}
// Verify
if !comm.StartCalled {
t.Fatal("start should be called")
}
if !strings.HasPrefix(comm.StartCmd.Command, "sudo") {
t.Fatal("should sudo")
}
if !strings.Contains(comm.StartCmd.Command, "gsutil cp") {
t.Fatalf("bad command: %#v", comm.StartCmd.Command)
}
}
......@@ -41,6 +41,6 @@ func (a *Artifact) State(name string) interface{} {
}
func (a *Artifact) Destroy() error {
log.Printf("Destroying image: %d", a.ImageId)
log.Printf("Destroying image: %s", a.ImageId)
return a.Conn.DeleteImageById(a.ImageId)
}
......@@ -95,12 +95,15 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
SecurityGroups: b.config.SecurityGroups,
Networks: b.config.Networks,
},
&StepWaitForRackConnect{
Wait: b.config.RackconnectWait,
},
&StepAllocateIp{
FloatingIpPool: b.config.FloatingIpPool,
FloatingIp: b.config.FloatingIp,
},
&common.StepConnectSSH{
SSHAddress: SSHAddress(csp, b.config.SSHInterface, b.config.SSHPort),
SSHConfig: SSHConfig(b.config.SSHUsername),
SSHWaitTimeout: b.config.SSHTimeout(),
},
......
......@@ -15,8 +15,10 @@ type RunConfig struct {
RawSSHTimeout string `mapstructure:"ssh_timeout"`
SSHUsername string `mapstructure:"ssh_username"`
SSHPort int `mapstructure:"ssh_port"`
SSHInterface string `mapstructure:"ssh_interface"`
OpenstackProvider string `mapstructure:"openstack_provider"`
UseFloatingIp bool `mapstructure:"use_floating_ip"`
RackconnectWait bool `mapstructure:"rackconnect_wait"`
FloatingIpPool string `mapstructure:"floating_ip_pool"`
FloatingIp string `mapstructure:"floating_ip"`
SecurityGroups []string `mapstructure:"security_groups"`
......@@ -68,10 +70,14 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error {
}
templates := map[string]*string{
"flavor": &c.Flavor,
"ssh_timeout": &c.RawSSHTimeout,
"ssh_username": &c.SSHUsername,
"source_image": &c.SourceImage,
"flavor": &c.Flavor,
"ssh_timeout": &c.RawSSHTimeout,
"ssh_username": &c.SSHUsername,
"ssh_interface": &c.SSHInterface,
"source_image": &c.SourceImage,
"openstack_provider": &c.OpenstackProvider,
"floating_ip_pool": &c.FloatingIpPool,
"floating_ip": &c.FloatingIp,
}
for n, ptr := range templates {
......
......@@ -12,7 +12,7 @@ import (
// SSHAddress returns a function that can be given to the SSH communicator
// for determining the SSH address based on the server AccessIPv4 setting..
func SSHAddress(csp gophercloud.CloudServersProvider, sshinterface string, port int) func(multistep.StateBag) (string, error) {
return func(state multistep.StateBag) (string, error) {
s := state.Get("server").(*gophercloud.Server)
......@@ -25,6 +25,11 @@ func SSHAddress(csp gophercloud.CloudServersProvider, port int) func(multistep.S
return "", errors.New("Error parsing SSH addresses")
}
for pool, addresses := range ip_pools {
if sshinterface != "" {
if pool != sshinterface {
continue
}
}
if pool != "" {
for _, address := range addresses {
if address.Addr != "" && address.Version == 4 {
......
package openstack
import (
"fmt"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
"time"
"github.com/mitchellh/gophercloud-fork-40444fb"
)
type StepWaitForRackConnect struct {
Wait bool
}
func (s *StepWaitForRackConnect) Run(state multistep.StateBag) multistep.StepAction {
if !s.Wait {
return multistep.ActionContinue
}
csp := state.Get("csp").(gophercloud.CloudServersProvider)
server := state.Get("server").(*gophercloud.Server)
ui := state.Get("ui").(packer.Ui)
ui.Say(fmt.Sprintf("Waiting for server (%s) to become RackConnect ready...", server.Id))
for {
server, err := csp.ServerById(server.Id)
if err != nil {
return multistep.ActionHalt
}
if server.Metadata["rackconnect_automation_status"] == "DEPLOYED" {
break
}
time.Sleep(2 * time.Second)
}
return multistep.ActionContinue
}
func (s *StepWaitForRackConnect) Cleanup(state multistep.StateBag) {
}
......@@ -55,6 +55,7 @@ func NewDriver() (Driver, error) {
var drivers map[string]Driver
var prlctlPath string
var supportedVersions []string
dhcp_lease_file := "/Library/Preferences/Parallels/parallels_dhcp_leases"
if runtime.GOOS != "darwin" {
return nil, fmt.Errorf(
......@@ -74,11 +75,13 @@ func NewDriver() (Driver, error) {
drivers = map[string]Driver{
"10": &Parallels10Driver{
Parallels9Driver: Parallels9Driver{
PrlctlPath: prlctlPath,
dhcp_lease_file: dhcp_lease_file,
},
},
"9": &Parallels9Driver{
PrlctlPath: prlctlPath,
dhcp_lease_file: dhcp_lease_file,
},
}
......
......@@ -9,6 +9,7 @@ import (
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
......@@ -18,6 +19,8 @@ import (
type Parallels9Driver struct {
// This is the path to the "prlctl" application.
PrlctlPath string
// The path to the parallels_dhcp_leases file
dhcp_lease_file string
}
func (d *Parallels9Driver) Import(name, srcPath, dstDir string, reassignMac bool) error {
......@@ -276,31 +279,43 @@ func (d *Parallels9Driver) Mac(vmName string) (string, error) {
}
// Finds the IP address of a VM connected that uses DHCP by its MAC address
//
// Parses the /Library/Preferences/Parallels/parallels_dhcp_leases file,
// which contains the list of DHCP leases handed out by Parallels Desktop.
// Example line:
// 10.211.55.181="1418921112,1800,001c42f593fb,ff42f593fb000100011c25b9ff001c42f593fb"
// IP Address ="Lease expiry, Lease time, MAC, MAC or DUID"
func (d *Parallels9Driver) IpAddress(mac string) (string, error) {
if len(mac) != 12 {
return "", fmt.Errorf("Not a valid MAC address: %s. It should be exactly 12 digits.", mac)
}
cmd := exec.Command("grep", "-i", mac, dhcp_lease_file)
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
leases, err := ioutil.ReadFile(d.dhcp_lease_file)
if err != nil {
return "", err
}
stdoutString := strings.TrimSpace(stdout.String())
re := regexp.MustCompile("(.*)=.*")
ipMatch := re.FindAllStringSubmatch(stdoutString, 1)
re := regexp.MustCompile("(.*)=\"(.*),(.*)," + strings.ToLower(mac) + ",.*\"")
mostRecentIp := ""
mostRecentLease := uint64(0)
for _, l := range re.FindAllStringSubmatch(string(leases), -1) {
ip := l[1]
expiry, _ := strconv.ParseUint(l[2], 10, 64)
leaseTime, _ := strconv.ParseUint(l[3], 10, 32)
log.Printf("Found lease: %s for MAC: %s, expiring at %d, leased for %d s.\n", ip, mac, expiry, leaseTime)
if mostRecentLease <= expiry-leaseTime {
mostRecentIp = ip
mostRecentLease = expiry - leaseTime
}
}
if len(ipMatch) != 1 {
return "", fmt.Errorf("IP lease not found for MAC address %s in: %s\n", mac, dhcp_lease_file)
if len(mostRecentIp) == 0 {
return "", fmt.Errorf("IP lease not found for MAC address %s in: %s\n", mac, d.dhcp_lease_file)
}
ip := ipMatch[0][1]
log.Printf("Found IP lease: %s for MAC address %s\n", ip, mac)
return ip, nil
log.Printf("Found IP lease: %s for MAC address %s\n", mostRecentIp, mac)
return mostRecentIp, nil
}
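A minimal standalone sketch of the lease-line matching above, using the same regexp shape as the driver; the values come from the example line in the comment:

package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

func main() {
	// One lease line in the documented format:
	// IP Address="Lease expiry,Lease time,MAC,MAC or DUID"
	line := `10.211.55.181="1418921112,1800,001c42f593fb,ff42f593fb000100011c25b9ff001c42f593fb"`
	mac := "001C42F593FB" // matching is case-insensitive via ToLower

	re := regexp.MustCompile(`(.*)="(.*),(.*),` + strings.ToLower(mac) + `,.*"`)
	m := re.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no lease for that MAC")
		return
	}
	expiry, _ := strconv.ParseUint(m[2], 10, 64)
	leaseTime, _ := strconv.ParseUint(m[3], 10, 32)
	// expiry-leaseTime is the lease start time; when a MAC appears more
	// than once, the driver keeps the lease that started most recently.
	fmt.Printf("ip=%s start=%d\n", m[1], expiry-leaseTime)
}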
func (d *Parallels9Driver) ToolsIsoPath(k string) (string, error) {
......
package common
import (
"io/ioutil"
"os"
"testing"
)
func TestParallels9Driver_impl(t *testing.T) {
var _ Driver = new(Parallels9Driver)
}
func TestIpAddress(t *testing.T) {
tf, err := ioutil.TempFile("", "packer")
if err != nil {
t.Fatalf("err: %s", err)
}
defer os.Remove(tf.Name())
d := Parallels9Driver{
dhcp_lease_file: tf.Name(),
}
// No lease should be found in an empty file
ip, err := d.IpAddress("123456789012")
if err == nil {
t.Fatalf("Found IP: \"%v\". No IP should be found!\n", ip)
}
// The most recent lease, 10.211.55.126 should be found
c := []byte(`
[vnic0]
10.211.55.125="1418288000,1800,001c4235240c,ff4235240c000100011c1c10e7001c4235240c"
10.211.55.126="1418288969,1800,001c4235240c,ff4235240c000100011c1c11ad001c4235240c"
10.211.55.254="1411712008,1800,001c42a51419,01001c42a51419"
`)
ioutil.WriteFile(tf.Name(), c, 0666)
ip, err = d.IpAddress("001C4235240c")
if err != nil {
t.Fatalf("Error: %v\n", err)
}
if ip != "10.211.55.126" {
t.Fatalf("Should have found 10.211.55.126, not %s!\n", ip)
}
// The most recent lease, 10.211.55.124 should be found
c = []byte(`[vnic0]
10.211.55.124="1418288969,1800,001c4235240c,ff4235240c000100011c1c11ad001c4235240c"
10.211.55.125="1418288000,1800,001c4235240c,ff4235240c000100011c1c10e7001c4235240c"
10.211.55.254="1411712008,1800,001c42a51419,01001c42a51419"
`)
ioutil.WriteFile(tf.Name(), c, 0666)
ip, err = d.IpAddress("001c4235240c")
if err != nil {
t.Fatalf("Error: %v\n", err)
}
if ip != "10.211.55.124" {
t.Fatalf("Should have found 10.211.55.124, not %s!\n", ip)
}
}
......@@ -256,6 +256,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
new(stepHTTPServer),
new(stepCreateVM),
new(stepCreateDisk),
new(stepSetBootOrder),
new(stepAttachISO),
&parallelscommon.StepAttachParallelsTools{
ParallelsToolsMode: b.config.ParallelsToolsMode,
......
......@@ -75,7 +75,7 @@ func TestBuilderPrepare_DiskSize(t *testing.T) {
}
if b.config.DiskSize != 60000 {
t.Fatalf("bad size: %s", b.config.DiskSize)
t.Fatalf("bad size: %d", b.config.DiskSize)
}
}
......
......@@ -17,9 +17,8 @@ import (
// vmName string
//
// Produces:
type stepAttachISO struct {
cdromDevice string
}
// attachedIso bool
type stepAttachISO struct{}
func (s *stepAttachISO) Run(state multistep.StateBag) multistep.StepAction {
driver := state.Get("driver").(parallelscommon.Driver)
......@@ -27,76 +26,42 @@ func (s *stepAttachISO) Run(state multistep.StateBag) multistep.StepAction {
ui := state.Get("ui").(packer.Ui)
vmName := state.Get("vmName").(string)
// Attach the disk to the controller
ui.Say("Attaching ISO to the new CD/DVD drive...")
cdrom, err := driver.DeviceAddCdRom(vmName, isoPath)
if err != nil {
err := fmt.Errorf("Error attaching ISO: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
// Set new boot order
ui.Say("Setting the boot order...")
	// Attach the ISO to the built-in cdrom0 device. We can't use a separate device because the VM then fails to boot in PD9 [GH-1667]
ui.Say("Attaching ISO to the default CD/DVD ROM device...")
command := []string{
"set", vmName,
"--device-bootorder", fmt.Sprintf("hdd0 %s cdrom0 net0", cdrom),
"--device-set", "cdrom0",
"--image", isoPath,
"--enable", "--connect",
}
if err := driver.Prlctl(command...); err != nil {
err := fmt.Errorf("Error setting the boot order: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
// Disable 'cdrom0' device
ui.Say("Disabling default CD/DVD drive...")
command = []string{
"set", vmName,
"--device-set", "cdrom0", "--disable",
}
if err := driver.Prlctl(command...); err != nil {
err := fmt.Errorf("Error disabling default CD/DVD drive: %s", err)
err := fmt.Errorf("Error attaching ISO: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
	// Track the device name so that we can delete it later
s.cdromDevice = cdrom
	// Set some state so we know to detach the ISO later
state.Put("attachedIso", true)
return multistep.ActionContinue
}
func (s *stepAttachISO) Cleanup(state multistep.StateBag) {
if _, ok := state.GetOk("attachedIso"); !ok {
return
}
driver := state.Get("driver").(parallelscommon.Driver)
ui := state.Get("ui").(packer.Ui)
vmName := state.Get("vmName").(string)
// Enable 'cdrom0' device back
log.Println("Enabling default CD/DVD drive...")
// Detach ISO by setting an empty string image.
log.Println("Detaching ISO from the default CD/DVD ROM device...")
command := []string{
"set", vmName,
"--device-set", "cdrom0", "--enable", "--disconnect",
}
if err := driver.Prlctl(command...); err != nil {
ui.Error(fmt.Sprintf("Error enabling default CD/DVD drive: %s", err))
}
// Detach ISO
if s.cdromDevice == "" {
return
}
log.Println("Detaching ISO...")
command = []string{
"set", vmName,
"--device-del", s.cdromDevice,
"--device-set", "cdrom0",
"--image", "", "--disconnect", "--enable",
}
if err := driver.Prlctl(command...); err != nil {
......
package iso
import (
"fmt"
"github.com/mitchellh/multistep"
parallelscommon "github.com/mitchellh/packer/builder/parallels/common"
"github.com/mitchellh/packer/packer"
)
// This step sets the device boot order for the virtual machine.
//
// Uses:
// driver Driver
// ui packer.Ui
// vmName string
//
// Produces:
type stepSetBootOrder struct{}
func (s *stepSetBootOrder) Run(state multistep.StateBag) multistep.StepAction {
driver := state.Get("driver").(parallelscommon.Driver)
ui := state.Get("ui").(packer.Ui)
vmName := state.Get("vmName").(string)
// Set new boot order
ui.Say("Setting the boot order...")
command := []string{
"set", vmName,
"--device-bootorder", fmt.Sprintf("hdd0 cdrom0 net0"),
}
if err := driver.Prlctl(command...); err != nil {
err := fmt.Errorf("Error setting the boot order: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
return multistep.ActionContinue
}
func (s *stepSetBootOrder) Cleanup(state multistep.StateBag) {}
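For reference, this step is the CLI equivalent of running prlctl set <vmName> --device-bootorder "hdd0 cdrom0 net0" by hand, keeping the hard disk ahead of the pre-attached cdrom0 device in the boot order.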
......@@ -122,6 +122,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
if err != nil {
return nil, err
}
warnings := make([]string, 0)
b.config.tpl, err = packer.NewConfigTemplate()
if err != nil {
......@@ -304,22 +305,24 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
errs, errors.New("http_port_min must be less than http_port_max"))
}
if b.config.ISOChecksum == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("Due to large file sizes, an iso_checksum is required"))
} else {
b.config.ISOChecksum = strings.ToLower(b.config.ISOChecksum)
}
if b.config.ISOChecksumType == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("The iso_checksum_type must be specified."))
} else {
b.config.ISOChecksumType = strings.ToLower(b.config.ISOChecksumType)
if h := common.HashForType(b.config.ISOChecksumType); h == nil {
errs = packer.MultiErrorAppend(
errs,
fmt.Errorf("Unsupported checksum type: %s", b.config.ISOChecksumType))
if b.config.ISOChecksumType != "none" {
if b.config.ISOChecksum == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("Due to large file sizes, an iso_checksum is required"))
} else {
b.config.ISOChecksum = strings.ToLower(b.config.ISOChecksum)
}
if h := common.HashForType(b.config.ISOChecksumType); h == nil {
errs = packer.MultiErrorAppend(
errs,
fmt.Errorf("Unsupported checksum type: %s", b.config.ISOChecksumType))
}
}
}
......@@ -404,11 +407,17 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
b.config.QemuArgs = make([][]string, 0)
}
if b.config.ISOChecksumType == "none" {
warnings = append(warnings,
"A checksum type of 'none' was specified. Since ISO files are so big,\n"+
"a checksum is highly recommended.")
}
if errs != nil && len(errs.Errors) > 0 {
return nil, errs
return warnings, errs
}
return nil, nil
return warnings, nil
}
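A template exercising the relaxed validation would set iso_checksum_type to "none"; a sketch shaped like the fixtures elsewhere in this commit, with illustrative field values:

package main

import "fmt"

func main() {
	// Illustrative template: checksum checks skipped, boot from a disk image.
	data := `{
	  "builders": [{
	    "type": "qemu",
	    "iso_url": "output/custom.qcow2",
	    "iso_checksum_type": "none",
	    "disk_image": true
	  }]
	}`
	fmt.Println(data) // Prepare() now returns a warning for this, not an error.
}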
func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
......@@ -418,6 +427,15 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
return nil, fmt.Errorf("Failed creating Qemu driver: %s", err)
}
steprun := &stepRun{}
if !b.config.DiskImage {
steprun.BootDrive = "once=d"
steprun.Message = "Starting VM, booting from CD-ROM"
} else {
steprun.BootDrive = "c"
steprun.Message = "Starting VM, booting disk image"
}
steps := []multistep.Step{
&common.StepDownload{
Checksum: b.config.ISOChecksum,
......@@ -436,10 +454,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
new(stepHTTPServer),
new(stepForwardSSH),
new(stepConfigureVNC),
&stepRun{
BootDrive: "once=d",
Message: "Starting VM, booting from CD-ROM",
},
steprun,
&stepBootWait{},
&stepTypeBootCommand{},
&common.StepConnectSSH{
......
......@@ -160,7 +160,7 @@ func TestBuilderPrepare_DiskSize(t *testing.T) {
}
if b.config.DiskSize != 60000 {
t.Fatalf("bad size: %s", b.config.DiskSize)
t.Fatalf("bad size: %d", b.config.DiskSize)
}
}
......
......@@ -2,10 +2,11 @@ package qemu
import (
"fmt"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
"path/filepath"
"strings"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
// This step copies the virtual disk that will be used as the
......@@ -19,6 +20,7 @@ func (s *stepCopyDisk) Run(state multistep.StateBag) multistep.StepAction {
ui := state.Get("ui").(packer.Ui)
path := filepath.Join(config.OutputDir, fmt.Sprintf("%s.%s", config.VMName,
strings.ToLower(config.Format)))
name := config.VMName + "." + strings.ToLower(config.Format)
command := []string{
"convert",
......@@ -39,6 +41,8 @@ func (s *stepCopyDisk) Run(state multistep.StateBag) multistep.StepAction {
return multistep.ActionHalt
}
state.Put("disk_filename", name)
return multistep.ActionContinue
}
......
......@@ -82,7 +82,9 @@ func getCommandArgs(bootDrive string, state multistep.StateBag) ([]string, error
defaultArgs["-netdev"] = fmt.Sprintf("user,id=user.0,hostfwd=tcp::%v-:22", sshHostPort)
defaultArgs["-device"] = fmt.Sprintf("%s,netdev=user.0", config.NetDevice)
defaultArgs["-drive"] = fmt.Sprintf("file=%s,if=%s,cache=%s", imgPath, config.DiskInterface, config.DiskCache)
defaultArgs["-cdrom"] = isoPath
if !config.DiskImage {
defaultArgs["-cdrom"] = isoPath
}
defaultArgs["-boot"] = bootDrive
defaultArgs["-m"] = "512M"
defaultArgs["-vnc"] = vnc
......
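Combined with the stepRun changes above, the two boot modes differ only in these defaults; a toy sketch with an illustrative ISO path:

package main

import "fmt"

func main() {
	for _, diskImage := range []bool{false, true} {
		args := map[string]string{"-boot": "once=d"} // ISO install default
		if !diskImage {
			args["-cdrom"] = "ubuntu.iso" // illustrative path
		} else {
			args["-boot"] = "c" // boot straight from the copied disk image; no -cdrom at all
		}
		fmt.Println(diskImage, args)
	}
}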
......@@ -19,6 +19,9 @@ type Driver interface {
// Create a SATA controller.
CreateSATAController(vm string, controller string) error
// Create a SCSI controller.
CreateSCSIController(vm string, controller string) error
// Delete a VM by name
Delete(string) error
......
......@@ -36,6 +36,18 @@ func (d *VBox42Driver) CreateSATAController(vmName string, name string) error {
return d.VBoxManage(command...)
}
func (d *VBox42Driver) CreateSCSIController(vmName string, name string) error {
command := []string{
"storagectl", vmName,
"--name", name,
"--add", "scsi",
"--controller", "LSILogic",
}
return d.VBoxManage(command...)
}
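For reference, the command assembled above is the CLI equivalent of VBoxManage storagectl <vmName> --name "SCSI Controller" --add scsi --controller LSILogic; the stepCreateDisk change later in this commit attaches the disk by targeting that same "SCSI Controller" name.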
func (d *VBox42Driver) Delete(name string) error {
return d.VBoxManage("unregistervm", name, "--delete")
}
......
......@@ -9,6 +9,10 @@ type DriverMock struct {
CreateSATAControllerController string
CreateSATAControllerErr error
CreateSCSIControllerVM string
CreateSCSIControllerController string
CreateSCSIControllerErr error
DeleteCalled bool
DeleteName string
DeleteErr error
......@@ -49,6 +53,12 @@ func (d *DriverMock) CreateSATAController(vm string, controller string) error {
return d.CreateSATAControllerErr
}
func (d *DriverMock) CreateSCSIController(vm string, controller string) error {
d.CreateSCSIControllerVM = vm
	d.CreateSCSIControllerController = controller
return d.CreateSCSIControllerErr
}
func (d *DriverMock) Delete(name string) error {
d.DeleteCalled = true
d.DeleteName = name
......
......@@ -158,9 +158,9 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
}
}
if b.config.HardDriveInterface != "ide" && b.config.HardDriveInterface != "sata" {
if b.config.HardDriveInterface != "ide" && b.config.HardDriveInterface != "sata" && b.config.HardDriveInterface != "scsi" {
errs = packer.MultiErrorAppend(
errs, errors.New("hard_drive_interface can only be ide or sata"))
errs, errors.New("hard_drive_interface can only be ide, sata, or scsi"))
}
if b.config.ISOChecksumType == "" {
......
......@@ -83,7 +83,7 @@ func TestBuilderPrepare_DiskSize(t *testing.T) {
}
if b.config.DiskSize != 60000 {
t.Fatalf("bad size: %s", b.config.DiskSize)
t.Fatalf("bad size: %d", b.config.DiskSize)
}
}
......
......@@ -63,12 +63,25 @@ func (s *stepCreateDisk) Run(state multistep.StateBag) multistep.StepAction {
}
}
if config.HardDriveInterface == "scsi" {
if err := driver.CreateSCSIController(vmName, "SCSI Controller"); err != nil {
err := fmt.Errorf("Error creating disk controller: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
}
// Attach the disk to the controller
controllerName := "IDE Controller"
if config.HardDriveInterface == "sata" {
controllerName = "SATA Controller"
}
if config.HardDriveInterface == "scsi" {
controllerName = "SCSI Controller"
}
command = []string{
"storageattach", vmName,
"--storagectl", controllerName,
......
......@@ -127,7 +127,7 @@ func TestStepShutdown_locks(t *testing.T) {
lockPath := filepath.Join(dir.dir, "nope.lck")
err := ioutil.WriteFile(lockPath, []byte("foo"), 0644)
if err != nil {
t.Fatalf("err: %s")
t.Fatalf("err: %s", err)
}
// Remove the lock file after a certain time
......
......@@ -368,6 +368,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
SkipFloppy: true,
},
&vmwcommon.StepCleanVMX{},
&StepUploadVMX{
RemoteType: b.config.RemoteType,
},
&vmwcommon.StepCompactDisk{
Skip: b.config.SkipCompaction,
},
......
......@@ -175,7 +175,7 @@ func TestBuilderPrepare_DiskSize(t *testing.T) {
}
if b.config.DiskSize != 60000 {
t.Fatalf("bad size: %s", b.config.DiskSize)
t.Fatalf("bad size: %d", b.config.DiskSize)
}
}
......
......@@ -56,6 +56,10 @@ func (d *ESX5Driver) IsRunning(string) (bool, error) {
return strings.Contains(state, "Powered on"), nil
}
func (d *ESX5Driver) ReloadVM() error {
return d.sh("vim-cmd", "vmsvc/reload", d.vmId)
}
func (d *ESX5Driver) Start(vmxPathLocal string, headless bool) error {
for i := 0; i < 20; i++ {
err := d.sh("vim-cmd", "vmsvc/power.on", d.vmId)
......
......@@ -17,4 +17,10 @@ type RemoteDriver interface {
// Removes a VM from inventory specified by the path to the VMX given.
Unregister(string) error
// Uploads a local file to remote side.
upload(dst, src string) error
// Reload VM on remote side.
ReloadVM() error
}
......@@ -19,6 +19,10 @@ type RemoteDriverMock struct {
UnregisterCalled bool
UnregisterPath string
UnregisterErr error
uploadErr error
ReloadVMErr error
}
func (d *RemoteDriverMock) UploadISO(path string, checksum string, checksumType string) (string, error) {
......@@ -38,3 +42,11 @@ func (d *RemoteDriverMock) Unregister(path string) error {
d.UnregisterPath = path
return d.UnregisterErr
}
func (d *RemoteDriverMock) upload(dst, src string) error {
return d.uploadErr
}
func (d *RemoteDriverMock) ReloadVM() error {
return d.ReloadVMErr
}
package iso
import (
"fmt"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
vmwcommon "github.com/mitchellh/packer/builder/vmware/common"
"path/filepath"
)
// This step uploads the VMX to the remote host.
//
// Uses:
// driver Driver
// ui packer.Ui
// vmx_path string
//
// Produces:
// <nothing>
type StepUploadVMX struct {
RemoteType string
}
func (c *StepUploadVMX) Run(state multistep.StateBag) multistep.StepAction {
driver := state.Get("driver").(vmwcommon.Driver)
ui := state.Get("ui").(packer.Ui)
vmxPath := state.Get("vmx_path").(string)
if c.RemoteType == "esx5" {
remoteDriver, ok := driver.(RemoteDriver)
if ok {
remoteVmxPath := filepath.ToSlash(filepath.Join(fmt.Sprintf("%s",remoteDriver), filepath.Base(vmxPath)))
if err := remoteDriver.upload(remoteVmxPath, vmxPath); err != nil {
state.Put("error", fmt.Errorf("Error writing VMX: %s", err))
return multistep.ActionHalt
}
}
if err := remoteDriver.ReloadVM(); err != nil {
ui.Error(fmt.Sprintf("Error reload VM: %s", err))
}
}
return multistep.ActionContinue
}
func (StepUploadVMX) Cleanup(multistep.StateBag) {}
......@@ -12,7 +12,7 @@ import (
"github.com/mitchellh/packer/packer"
)
// Builder implements packer.Builder and builds the actual VirtualBox
// Builder implements packer.Builder and builds the actual VMware
// images.
type Builder struct {
config *Config
......
......@@ -34,7 +34,7 @@ func runCheckpoint(c *config) {
version := Version
if VersionPrerelease != "" {
version += fmt.Sprintf(".%s", VersionPrerelease)
version += fmt.Sprintf("-%s", VersionPrerelease)
}
signaturePath := filepath.Join(configDir, "checkpoint_signature")
......
package command
import (
"path/filepath"
"testing"
"github.com/mitchellh/cli"
)
const fixturesDir = "./test-fixtures"
func fatalCommand(t *testing.T, m Meta) {
ui := m.Ui.(*cli.MockUi)
t.Fatalf(
"Bad exit code.\n\nStdout:\n\n%s\n\nStderr:\n\n%s",
ui.OutputWriter.String(),
ui.ErrorWriter.String())
}
func testFixture(n string) string {
return filepath.Join(fixturesDir, n)
}
func testMeta(t *testing.T) Meta {
return Meta{
Ui: new(cli.MockUi),
}
}
package command
import (
"flag"
"fmt"
"io"
"os"
"os/signal"
"path/filepath"
"strings"
"github.com/hashicorp/atlas-go/archive"
"github.com/hashicorp/atlas-go/v1"
"github.com/mitchellh/packer/packer"
)
// archiveTemplateEntry is the name the template always takes within the slug.
const archiveTemplateEntry = ".packer-template"
type PushCommand struct {
Meta
client *atlas.Client
// For tests:
uploadFn pushUploadFn
}
// pushUploadFn is the callback type used for tests to stub out the uploading
// logic of the push command.
type pushUploadFn func(
io.Reader, *uploadOpts) (<-chan struct{}, <-chan error, error)
func (c *PushCommand) Run(args []string) int {
var token string
var message string
var create bool
f := flag.NewFlagSet("push", flag.ContinueOnError)
f.Usage = func() { c.Ui.Error(c.Help()) }
f.StringVar(&token, "token", "", "token")
f.StringVar(&message, "m", "", "message")
f.StringVar(&message, "message", "", "message")
f.BoolVar(&create, "create", false, "create (deprecated)")
if err := f.Parse(args); err != nil {
return 1
}
args = f.Args()
if len(args) != 1 {
f.Usage()
return 1
}
// Print deprecations
if create {
c.Ui.Error(fmt.Sprintf("The '-create' option is now the default and is\n" +
"longer used. It will be removed in the next version."))
}
// Read the template
tpl, err := packer.ParseTemplateFile(args[0], nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Failed to parse template: %s", err))
return 1
}
// Validate some things
if tpl.Push.Name == "" {
c.Ui.Error(fmt.Sprintf(
"The 'push' section must be specified in the template with\n" +
"at least the 'name' option set."))
return 1
}
// Determine our token
if token == "" {
token = tpl.Push.Token
}
// Build our client
defer func() { c.client = nil }()
c.client = atlas.DefaultClient()
if tpl.Push.Address != "" {
c.client, err = atlas.NewClient(tpl.Push.Address)
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error setting up API client: %s", err))
return 1
}
}
if token != "" {
c.client.Token = token
}
// Build the archiving options
var opts archive.ArchiveOpts
opts.Include = tpl.Push.Include
opts.Exclude = tpl.Push.Exclude
opts.VCS = tpl.Push.VCS
opts.Extra = map[string]string{
archiveTemplateEntry: args[0],
}
// Determine the path we're archiving. This logic is a bit complicated
// as there are three possibilities:
//
// 1.) BaseDir is an absolute path, just use that.
//
// 2.) BaseDir is empty, so we use the directory of the template.
//
// 3.) BaseDir is relative, so we use the path relative to the directory
// of the template.
//
path := tpl.Push.BaseDir
if path == "" || !filepath.IsAbs(path) {
tplPath, err := filepath.Abs(args[0])
if err != nil {
c.Ui.Error(fmt.Sprintf("Error determining path to archive: %s", err))
return 1
}
tplPath = filepath.Dir(tplPath)
if path != "" {
tplPath = filepath.Join(tplPath, path)
}
path, err = filepath.Abs(tplPath)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error determining path to archive: %s", err))
return 1
}
}
// Find the Atlas post-processors, if possible
var atlasPPs []packer.RawPostProcessorConfig
for _, list := range tpl.PostProcessors {
for _, pp := range list {
if pp.Type == "atlas" {
atlasPPs = append(atlasPPs, pp)
}
}
}
// Build the upload options
var uploadOpts uploadOpts
uploadOpts.Slug = tpl.Push.Name
uploadOpts.Builds = make(map[string]*uploadBuildInfo)
for _, b := range tpl.Builders {
info := &uploadBuildInfo{Type: b.Type}
// Determine if we're artifacting this build
for _, pp := range atlasPPs {
if !pp.Skip(b.Name) {
info.Artifact = true
break
}
}
uploadOpts.Builds[b.Name] = info
}
// Add the upload metadata
metadata := make(map[string]interface{})
if message != "" {
metadata["message"] = message
}
metadata["template"] = tpl.RawContents
metadata["template_name"] = filepath.Base(args[0])
uploadOpts.Metadata = metadata
// Warn about builds not having post-processors.
var badBuilds []string
for name, b := range uploadOpts.Builds {
if b.Artifact {
continue
}
badBuilds = append(badBuilds, name)
}
if len(badBuilds) > 0 {
c.Ui.Error(fmt.Sprintf(
"Warning! One or more of the builds in this template does not\n"+
"have an Atlas post-processor. Artifacts from this template will\n"+
"not appear in the Atlas artifact registry.\n\n"+
"This is just a warning. Atlas will still build your template\n"+
"and assume other post-processors are sending the artifacts where\n"+
"they need to go.\n\n"+
"Builds: %s\n\n", strings.Join(badBuilds, ", ")))
}
// Start the archiving process
r, err := archive.CreateArchive(path, &opts)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error archiving: %s", err))
return 1
}
defer r.Close()
// Start the upload process
doneCh, uploadErrCh, err := c.upload(r, &uploadOpts)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error starting upload: %s", err))
return 1
}
// Make a ctrl-C channel
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, os.Interrupt)
defer signal.Stop(sigCh)
err = nil
select {
case err = <-uploadErrCh:
err = fmt.Errorf("Error uploading: %s", err)
case <-sigCh:
err = fmt.Errorf("Push cancelled from Ctrl-C")
case <-doneCh:
}
if err != nil {
c.Ui.Error(err.Error())
return 1
}
c.Ui.Output(fmt.Sprintf("Push successful to '%s'", tpl.Push.Name))
return 0
}
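The three-way BaseDir logic above can be summarized in a small helper; resolveArchivePath is a hypothetical name for illustration, not part of the command:

package main

import (
	"fmt"
	"path/filepath"
)

// resolveArchivePath mirrors the BaseDir rules: an absolute base_dir wins,
// an empty one means the template's own directory, and a relative one is
// joined onto the template's directory.
func resolveArchivePath(templatePath, baseDir string) (string, error) {
	if baseDir != "" && filepath.IsAbs(baseDir) {
		return baseDir, nil
	}
	dir := filepath.Dir(templatePath)
	if baseDir != "" {
		dir = filepath.Join(dir, baseDir)
	}
	return filepath.Abs(dir)
}

func main() {
	p, _ := resolveArchivePath("infra/template.json", "../scripts")
	fmt.Println(p) // <cwd>/scripts
}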
func (*PushCommand) Help() string {
helpText := `
Usage: packer push [options] TEMPLATE
Push the given template and supporting files to a Packer build service such as
Atlas.
If a build configuration for the given template does not exist, it will be
created automatically. If the build configuration already exists, a new
version will be created with this template and the supporting files.
Additional configuration options (such as the Atlas server URL and files to
include) may be specified in the "push" section of the Packer template. Please
see the online documentation for more information about these configurables.
Options:
-m, -message=<detail> A message to identify the purpose or changes in this
Packer template much like a VCS commit message
-token=<token> The access token to use to when uploading
`
return strings.TrimSpace(helpText)
}
func (*PushCommand) Synopsis() string {
return "push a template and supporting files to a Packer build service"
}
func (c *PushCommand) upload(
r *archive.Archive, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
if c.uploadFn != nil {
return c.uploadFn(r, opts)
}
// Separate the slug into the user and name components
user, name, err := atlas.ParseSlug(opts.Slug)
if err != nil {
return nil, nil, fmt.Errorf("upload: %s", err)
}
// Get the build configuration
bc, err := c.client.BuildConfig(user, name)
if err != nil {
if err == atlas.ErrNotFound {
// Build configuration doesn't exist, attempt to create it
bc, err = c.client.CreateBuildConfig(user, name)
}
if err != nil {
return nil, nil, fmt.Errorf("upload: %s", err)
}
}
// Build the version to send up
version := atlas.BuildConfigVersion{
User: bc.User,
Name: bc.Name,
Builds: make([]atlas.BuildConfigBuild, 0, len(opts.Builds)),
}
for name, info := range opts.Builds {
version.Builds = append(version.Builds, atlas.BuildConfigBuild{
Name: name,
Type: info.Type,
Artifact: info.Artifact,
})
}
// Start the upload
doneCh, errCh := make(chan struct{}), make(chan error)
go func() {
err := c.client.UploadBuildConfigVersion(&version, opts.Metadata, r, r.Size)
if err != nil {
errCh <- err
return
}
close(doneCh)
}()
return doneCh, errCh, nil
}
type uploadOpts struct {
URL string
Slug string
Builds map[string]*uploadBuildInfo
Metadata map[string]interface{}
}
type uploadBuildInfo struct {
Type string
Artifact bool
}
package command
import (
"archive/tar"
"bytes"
"compress/gzip"
"fmt"
"io"
"path/filepath"
"reflect"
"sort"
"testing"
)
func TestPush_noArgs(t *testing.T) {
c := &PushCommand{Meta: testMeta(t)}
code := c.Run(nil)
if code != 1 {
t.Fatalf("bad: %#v", code)
}
}
func TestPush_multiArgs(t *testing.T) {
c := &PushCommand{Meta: testMeta(t)}
code := c.Run([]string{"one", "two"})
if code != 1 {
t.Fatalf("bad: %#v", code)
}
}
func TestPush(t *testing.T) {
var actual []string
var actualOpts *uploadOpts
uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
actual = testArchive(t, r)
actualOpts = opts
doneCh := make(chan struct{})
close(doneCh)
return doneCh, nil, nil
}
c := &PushCommand{
Meta: testMeta(t),
uploadFn: uploadFn,
}
args := []string{filepath.Join(testFixture("push"), "template.json")}
if code := c.Run(args); code != 0 {
fatalCommand(t, c.Meta)
}
expected := []string{
archiveTemplateEntry,
"template.json",
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
expectedBuilds := map[string]*uploadBuildInfo{
"dummy": &uploadBuildInfo{
Type: "dummy",
},
}
if !reflect.DeepEqual(actualOpts.Builds, expectedBuilds) {
t.Fatalf("bad: %#v", actualOpts.Builds)
}
}
func TestPush_builds(t *testing.T) {
var actualOpts *uploadOpts
uploadFn := func(
r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
actualOpts = opts
doneCh := make(chan struct{})
close(doneCh)
return doneCh, nil, nil
}
c := &PushCommand{
Meta: testMeta(t),
uploadFn: uploadFn,
}
args := []string{filepath.Join(testFixture("push-builds"), "template.json")}
if code := c.Run(args); code != 0 {
fatalCommand(t, c.Meta)
}
expectedBuilds := map[string]*uploadBuildInfo{
"dummy": &uploadBuildInfo{
Type: "dummy",
Artifact: true,
},
"foo": &uploadBuildInfo{
Type: "dummy",
},
}
if !reflect.DeepEqual(actualOpts.Builds, expectedBuilds) {
t.Fatalf("bad: %#v", actualOpts.Builds)
}
}
func TestPush_noName(t *testing.T) {
uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
return nil, nil, nil
}
c := &PushCommand{
Meta: testMeta(t),
uploadFn: uploadFn,
}
args := []string{filepath.Join(testFixture("push-no-name"), "template.json")}
if code := c.Run(args); code != 1 {
fatalCommand(t, c.Meta)
}
}
func TestPush_uploadError(t *testing.T) {
uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
return nil, nil, fmt.Errorf("bad")
}
c := &PushCommand{
Meta: testMeta(t),
uploadFn: uploadFn,
}
args := []string{filepath.Join(testFixture("push"), "template.json")}
if code := c.Run(args); code != 1 {
fatalCommand(t, c.Meta)
}
}
func TestPush_uploadErrorCh(t *testing.T) {
uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
errCh := make(chan error, 1)
errCh <- fmt.Errorf("bad")
return nil, errCh, nil
}
c := &PushCommand{
Meta: testMeta(t),
uploadFn: uploadFn,
}
args := []string{filepath.Join(testFixture("push"), "template.json")}
if code := c.Run(args); code != 1 {
fatalCommand(t, c.Meta)
}
}
func testArchive(t *testing.T, r io.Reader) []string {
// Finish the archiving process in-memory
var buf bytes.Buffer
if _, err := io.Copy(&buf, r); err != nil {
t.Fatalf("err: %s", err)
}
gzipR, err := gzip.NewReader(&buf)
if err != nil {
t.Fatalf("err: %s", err)
}
tarR := tar.NewReader(gzipR)
// Read all the entries
result := make([]string, 0, 5)
for {
hdr, err := tarR.Next()
if err == io.EOF {
break
}
if err != nil {
t.Fatalf("err: %s", err)
}
result = append(result, hdr.Name)
}
sort.Strings(result)
return result
}
{
"builders": [
{"type": "dummy"},
{"type": "dummy", "name": "foo"}
],
"post-processors": [{
"type": "atlas",
"only": ["dummy"]
}],
"push": {
"name": "foo/bar"
}
}
{
"builders": [{"type": "dummy"}]
}
{
"builders": [{"type": "dummy"}],
"push": {
"name": "foo/bar"
}
}
......@@ -50,6 +50,12 @@ func init() {
}, nil
},
"push": func() (cli.Command, error) {
return &command.PushCommand{
Meta: meta,
}, nil
},
"validate": func() (cli.Command, error) {
return &command.ValidateCommand{
Meta: meta,
......
......@@ -66,6 +66,12 @@ func (f *BuildOptions) AllUserVars() (map[string]string, error) {
func (f *BuildOptions) Builds(t *packer.Template, cf *packer.ComponentFinder) ([]packer.Build, error) {
buildNames := t.BuildNames()
// Process the name
tpl, _, err := t.NewConfigTemplate()
if err != nil {
return nil, err
}
checks := make(map[string][]string)
checks["except"] = f.Except
checks["only"] = f.Only
......@@ -73,7 +79,12 @@ func (f *BuildOptions) Builds(t *packer.Template, cf *packer.ComponentFinder) ([
for _, n := range ns {
found := false
for _, actual := range buildNames {
if actual == n {
var processed string
processed, err = tpl.Process(actual, nil)
if err != nil {
return nil, err
}
if actual == n || processed == n {
found = true
break
}
......@@ -88,17 +99,22 @@ func (f *BuildOptions) Builds(t *packer.Template, cf *packer.ComponentFinder) ([
builds := make([]packer.Build, 0, len(buildNames))
for _, buildName := range buildNames {
var processedBuildName string
processedBuildName, err = tpl.Process(buildName, nil)
if err != nil {
return nil, err
}
if len(f.Except) > 0 {
found := false
for _, except := range f.Except {
if buildName == except {
if buildName == except || processedBuildName == except {
found = true
break
}
}
if found {
log.Printf("Skipping build '%s' because specified by -except.", buildName)
log.Printf("Skipping build '%s' because specified by -except.", processedBuildName)
continue
}
}
......@@ -106,19 +122,19 @@ func (f *BuildOptions) Builds(t *packer.Template, cf *packer.ComponentFinder) ([
if len(f.Only) > 0 {
found := false
for _, only := range f.Only {
if buildName == only {
if buildName == only || processedBuildName == only {
found = true
break
}
}
if !found {
log.Printf("Skipping build '%s' because not specified by -only.", buildName)
log.Printf("Skipping build '%s' because not specified by -only.", processedBuildName)
continue
}
}
log.Printf("Creating build: %s", buildName)
log.Printf("Creating build: %s", processedBuildName)
build, err := t.Build(buildName, cf)
if err != nil {
return nil, fmt.Errorf("Failed to create build '%s': \n\n%s", buildName, err)
......
......@@ -7,17 +7,23 @@ import (
func testTemplate() (*packer.Template, *packer.ComponentFinder) {
tplData := `{
"builders": [
{
"type": "foo"
},
{
"type": "bar"
}
]
}`
tpl, err := packer.ParseTemplate([]byte(tplData), nil)
"variables": {
"foo": null
},
"builders": [
{
"type": "foo"
},
{
"name": "{{user \"foo\"}}",
"type": "bar"
}
]
}
`
tpl, err := packer.ParseTemplate([]byte(tplData), map[string]string{"foo": "bar"})
if err != nil {
panic(err)
}
......@@ -59,6 +65,44 @@ func TestBuildOptionsBuilds_except(t *testing.T) {
}
}
// Test to make sure the build name pattern matches
func TestBuildOptionsBuilds_exceptConfigTemplateRaw(t *testing.T) {
opts := new(BuildOptions)
opts.Except = []string{"{{user \"foo\"}}"}
bs, err := opts.Builds(testTemplate())
if err != nil {
t.Fatalf("err: %s", err)
}
if len(bs) != 1 {
t.Fatalf("bad: %d", len(bs))
}
if bs[0].Name() != "foo" {
t.Fatalf("bad: %s", bs[0].Name())
}
}
// Test to make sure the processed build name matches
func TestBuildOptionsBuilds_exceptConfigTemplateProcessed(t *testing.T) {
opts := new(BuildOptions)
opts.Except = []string{"bar"}
bs, err := opts.Builds(testTemplate())
if err != nil {
t.Fatalf("err: %s", err)
}
if len(bs) != 1 {
t.Fatalf("bad: %d", len(bs))
}
if bs[0].Name() != "foo" {
t.Fatalf("bad: %s", bs[0].Name())
}
}
func TestBuildOptionsBuilds_only(t *testing.T) {
opts := new(BuildOptions)
opts.Only = []string{"foo"}
......@@ -77,6 +121,44 @@ func TestBuildOptionsBuilds_only(t *testing.T) {
}
}
// Test to make sure the build name pattern matches
func TestBuildOptionsBuilds_onlyConfigTemplateRaw(t *testing.T) {
opts := new(BuildOptions)
opts.Only = []string{"{{user \"foo\"}}"}
bs, err := opts.Builds(testTemplate())
if err != nil {
t.Fatalf("err: %s", err)
}
if len(bs) != 1 {
t.Fatalf("bad: %d", len(bs))
}
if bs[0].Name() != "bar" {
t.Fatalf("bad: %s", bs[0].Name())
}
}
// Test to make sure the processed build name matches
func TestBuildOptionsBuilds_onlyConfigTemplateProcessed(t *testing.T) {
opts := new(BuildOptions)
opts.Only = []string{"bar"}
bs, err := opts.Builds(testTemplate())
if err != nil {
t.Fatalf("err: %s", err)
}
if len(bs) != 1 {
t.Fatalf("bad: %d", len(bs))
}
if bs[0].Name() != "bar" {
t.Fatalf("bad: %s", bs[0].Name())
}
}
func TestBuildOptionsBuilds_exceptNonExistent(t *testing.T) {
opts := new(BuildOptions)
opts.Except = []string{"i-dont-exist"}
......
......@@ -33,7 +33,7 @@ func CheckUnusedConfig(md *mapstructure.Metadata) *packer.MultiError {
for _, unused := range md.Unused {
if unused != "type" && !strings.HasPrefix(unused, "packer_") {
errs = append(
errs, fmt.Errorf("Unknown configuration key: %s", unused))
errs, fmt.Errorf("Unknown configuration key: %q", unused))
}
}
}
......
......@@ -76,7 +76,7 @@ func TestStepCreateFloppy(t *testing.T) {
floppy_path := state.Get("floppy_path").(string)
if _, err := os.Stat(floppy_path); err != nil {
t.Fatal("file not found: %s for %v", floppy_path, step.Files)
t.Fatalf("file not found: %s for %v", floppy_path, step.Files)
}
if len(step.FilesAdded) != expected {
......@@ -86,7 +86,7 @@ func TestStepCreateFloppy(t *testing.T) {
step.Cleanup(state)
if _, err := os.Stat(floppy_path); err == nil {
t.Fatal("file found: %s for %v", floppy_path, step.Files)
t.Fatalf("file found: %s for %v", floppy_path, step.Files)
}
}
}
......@@ -177,7 +177,7 @@ func xxxTestStepCreateFloppy_notfound(t *testing.T) {
floppy_path := state.Get("floppy_path").(string)
if _, err := os.Stat(floppy_path); err != nil {
t.Fatal("file not found: %s for %v", floppy_path, step.Files)
t.Fatalf("file not found: %s for %v", floppy_path, step.Files)
}
if len(step.FilesAdded) != expected {
......@@ -187,7 +187,7 @@ func xxxTestStepCreateFloppy_notfound(t *testing.T) {
step.Cleanup(state)
if _, err := os.Stat(floppy_path); err == nil {
t.Fatal("file found: %s for %v", floppy_path, step.Files)
t.Fatalf("file found: %s for %v", floppy_path, step.Files)
}
}
}
......@@ -336,7 +336,7 @@ func scpUploadFile(dst string, src io.Reader, w io.Writer, r *bufio.Reader, fi *
var mode os.FileMode
var size int64
if fi != nil {
if fi != nil && (*fi).Mode().IsRegular() {
mode = (*fi).Mode().Perm()
size = (*fi).Size()
} else {
......
......@@ -83,10 +83,10 @@ func newMockLineServer(t *testing.T) string {
}
t.Log("Accepted channel")
go func() {
go func(channelType string) {
defer channel.Close()
conn.OpenChannel(newChannel.ChannelType(), nil)
}()
conn.OpenChannel(channelType, nil)
}(newChannel.ChannelType())
}
conn.Close()
}()
......
......@@ -49,11 +49,13 @@ func decodeConfig(r io.Reader, c *config) error {
// Discover discovers plugins.
//
// This looks in the directory of the executable and the CWD, in that
// order for priority.
// It searches the directory of the executable, then the plugins directory,
// and finally the CWD, in that order. Any conflict overwrites a previously
// discovered plugin, so the priority order is the reverse of the search
// order - i.e., the CWD has the highest priority.
func (c *config) Discover() error {
// Next, look in the same directory as the executable. Any conflicts
// will overwrite those found in our current directory.
// First, look in the same directory as the executable.
exePath, err := osext.Executable()
if err != nil {
log.Printf("[ERR] Error loading exe directory: %s", err)
......@@ -63,7 +65,7 @@ func (c *config) Discover() error {
}
}
// Look in the plugins directory
// Next, look in the plugins directory.
dir, err := ConfigDir()
if err != nil {
log.Printf("[ERR] Error loading config directory: %s", err)
......@@ -73,7 +75,7 @@ func (c *config) Discover() error {
}
}
// Look in the cwd.
// Last, look in the CWD.
if err := c.discover("."); err != nil {
return err
}
......@@ -180,7 +182,7 @@ func (c *config) discoverSingle(glob string, m *map[string]string) error {
// Look for foo-bar-baz. The plugin name is "baz"
plugin := file[len(prefix):]
log.Printf("[DEBUG] Discoverd plugin: %s = %s", plugin, match)
log.Printf("[DEBUG] Discovered plugin: %s = %s", plugin, match)
(*m)[plugin] = match
}
......
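The overwrite rule is just successive map assignment; a toy sketch of the precedence (directory names are made up):

package main

import "fmt"

func main() {
	plugins := map[string]string{}
	// Search order: executable dir, plugins dir, CWD. Each assignment
	// overwrites the last, so the CWD copy of "foo" wins conflicts.
	for _, dir := range []string{"/usr/local/bin", "/home/me/.packer.d/plugins", "."} {
		plugins["foo"] = dir + "/packer-builder-foo"
	}
	fmt.Println(plugins["foo"]) // ./packer-builder-foo
}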
......@@ -166,6 +166,7 @@ func wrappedMain() int {
Commands: Commands,
HelpFunc: cli.BasicHelpFunc("packer"),
HelpWriter: os.Stdout,
Version: Version,
}
exitCode, err := cli.Run()
......
......@@ -3,15 +3,16 @@ package packer
import (
"bytes"
"fmt"
"github.com/hashicorp/go-version"
"github.com/mitchellh/mapstructure"
jsonutil "github.com/mitchellh/packer/common/json"
"io"
"io/ioutil"
"os"
"sort"
"text/template"
"time"
"github.com/hashicorp/go-version"
"github.com/mitchellh/mapstructure"
jsonutil "github.com/mitchellh/packer/common/json"
)
// The rawTemplate struct represents the structure of a template read
......@@ -24,6 +25,7 @@ type rawTemplate struct {
Description string
Builders []map[string]interface{}
Hooks map[string][]string
Push PushConfig
PostProcessors []interface{} `mapstructure:"post-processors"`
Provisioners []map[string]interface{}
Variables map[string]interface{}
......@@ -32,14 +34,27 @@ type rawTemplate struct {
// The Template struct represents a parsed template, parsed into the most
// completed form it can be without additional processing by the caller.
type Template struct {
RawContents []byte
Description string
Variables map[string]RawVariable
Builders map[string]RawBuilderConfig
Hooks map[string][]string
Push *PushConfig
PostProcessors [][]RawPostProcessorConfig
Provisioners []RawProvisionerConfig
}
// PushConfig is the configuration structure for the push settings.
type PushConfig struct {
Name string
Address string
BaseDir string `mapstructure:"base_dir"`
Include []string
Exclude []string
Token string
VCS bool
}
// The RawBuilderConfig struct represents a raw, unprocessed builder
// configuration. It contains the name of the builder as well as the
// raw configuration. If requested, this is used to compile into a full
......@@ -150,10 +165,12 @@ func ParseTemplate(data []byte, vars map[string]string) (t *Template, err error)
}
t = &Template{}
t.RawContents = data
t.Description = rawTpl.Description
t.Variables = make(map[string]RawVariable)
t.Builders = make(map[string]RawBuilderConfig)
t.Hooks = rawTpl.Hooks
t.Push = &rawTpl.Push
t.PostProcessors = make([][]RawPostProcessorConfig, len(rawTpl.PostProcessors))
t.Provisioners = make([]RawProvisionerConfig, len(rawTpl.Provisioners))
......@@ -475,52 +492,13 @@ func (t *Template) Build(name string, components *ComponentFinder) (b Build, err
return
}
// Prepare the variable template processor, which is a bit unique
// because we don't allow user variable usage and we add a function
// to read from the environment.
varTpl, err := NewConfigTemplate()
if err != nil {
return nil, err
}
varTpl.Funcs(template.FuncMap{
"env": templateEnv,
"user": templateDisableUser,
})
// Prepare the variables
var varErrors []error
variables := make(map[string]string)
for k, v := range t.Variables {
if v.Required && !v.HasValue {
varErrors = append(varErrors,
fmt.Errorf("Required user variable '%s' not set", k))
}
var val string
if v.HasValue {
val = v.Value
} else {
val, err = varTpl.Process(v.Default, nil)
if err != nil {
varErrors = append(varErrors,
fmt.Errorf("Error processing user variable '%s': %s'", k, err))
}
}
variables[k] = val
}
if len(varErrors) > 0 {
return nil, &MultiError{varErrors}
}
// Process the name
tpl, err := NewConfigTemplate()
tpl, variables, err := t.NewConfigTemplate()
if err != nil {
return nil, err
}
tpl.UserVars = variables
rawName := name
name, err = tpl.Process(name, nil)
if err != nil {
return nil, err
......@@ -554,7 +532,7 @@ func (t *Template) Build(name string, components *ComponentFinder) (b Build, err
for _, rawPPs := range t.PostProcessors {
current := make([]coreBuildPostProcessor, 0, len(rawPPs))
for _, rawPP := range rawPPs {
if rawPP.TemplateOnlyExcept.Skip(name) {
if rawPP.TemplateOnlyExcept.Skip(rawName) {
continue
}
......@@ -587,7 +565,7 @@ func (t *Template) Build(name string, components *ComponentFinder) (b Build, err
// Prepare the provisioners
provisioners := make([]coreBuildProvisioner, 0, len(t.Provisioners))
for _, rawProvisioner := range t.Provisioners {
if rawProvisioner.TemplateOnlyExcept.Skip(name) {
if rawProvisioner.TemplateOnlyExcept.Skip(rawName) {
continue
}
......@@ -636,6 +614,59 @@ func (t *Template) Build(name string, components *ComponentFinder) (b Build, err
return
}
// NewConfigTemplate builds a ConfigTemplate object populated with the
// values from a parsed template.
func (t *Template) NewConfigTemplate() (c *ConfigTemplate, variables map[string]string, err error) {
// Prepare the variable template processor, which is a bit unique
// because we don't allow user variable usage and we add a function
// to read from the environment.
varTpl, err := NewConfigTemplate()
if err != nil {
return nil, nil, err
}
varTpl.Funcs(template.FuncMap{
"env": templateEnv,
"user": templateDisableUser,
})
// Prepare the variables
var varErrors []error
variables = make(map[string]string)
for k, v := range t.Variables {
if v.Required && !v.HasValue {
varErrors = append(varErrors,
fmt.Errorf("Required user variable '%s' not set", k))
}
var val string
if v.HasValue {
val = v.Value
} else {
val, err = varTpl.Process(v.Default, nil)
if err != nil {
varErrors = append(varErrors,
fmt.Errorf("Error processing user variable '%s': %s'", k, err))
}
}
variables[k] = val
}
if len(varErrors) > 0 {
return nil, variables, &MultiError{varErrors}
}
// Process the name
tpl, err := NewConfigTemplate()
if err != nil {
return nil, variables, err
}
tpl.UserVars = variables
return tpl, variables, nil
}
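This is roughly how callers such as BuildOptions.Builds consume the new helper; a sketch of the call sequence, with error handling reduced to the essentials:

	tpl, _, err := t.NewConfigTemplate() // user vars are already bound into tpl
	if err != nil {
		return nil, err
	}
	processed, err := tpl.Process(`test2-{{user "foo"}}`, nil)
	if err != nil {
		return nil, err
	}
	// With foo=bar, processed is now "test2-bar", so -only/-except arguments
	// can be matched in either raw or processed form.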
// TemplateOnlyExcept contains the logic required for "only" and "except"
// meta-parameters.
type TemplateOnlyExcept struct {
......
......@@ -58,6 +58,10 @@ func TestParseTemplateFile_basic(t *testing.T) {
if len(result.Builders) != 1 {
t.Fatalf("bad: %#v", result.Builders)
}
if string(result.RawContents) != data {
t.Fatalf("expected %q to be %q", result.RawContents, data)
}
}
func TestParseTemplateFile_minPackerVersionBad(t *testing.T) {
......@@ -493,7 +497,7 @@ func TestParseTemplate_Provisioners(t *testing.T) {
result, err := ParseTemplate([]byte(data), nil)
if err != nil {
t.Fatal("err: %s", err)
t.Fatalf("err: %s", err)
}
if result == nil {
t.Fatal("should have result")
......@@ -525,7 +529,7 @@ func TestParseTemplate_ProvisionerPauseBefore(t *testing.T) {
result, err := ParseTemplate([]byte(data), nil)
if err != nil {
t.Fatal("err: %s", err)
t.Fatalf("err: %s", err)
}
if result == nil {
t.Fatal("should have result")
......@@ -541,6 +545,41 @@ func TestParseTemplate_ProvisionerPauseBefore(t *testing.T) {
}
}
func TestParseTemplateFile_push(t *testing.T) {
data := `
{
"builders": [{"type": "something"}],
"push": {
"name": "hello",
"include": ["one"],
"exclude": ["two"]
}
}
`
tf, err := ioutil.TempFile("", "packer")
if err != nil {
t.Fatalf("err: %s", err)
}
tf.Write([]byte(data))
tf.Close()
result, err := ParseTemplateFile(tf.Name(), nil)
if err != nil {
t.Fatalf("err: %s", err)
}
expected := &PushConfig{
Name: "hello",
Include: []string{"one"},
Exclude: []string{"two"},
}
if !reflect.DeepEqual(result.Push, expected) {
t.Fatalf("bad: %#v", result.Push)
}
}
func TestParseTemplate_Variables(t *testing.T) {
data := `
{
......@@ -1165,7 +1204,62 @@ func TestTemplateBuild_exceptPP(t *testing.T) {
t.Fatal("should have no postProcessors")
}
// Verify test2 has no post-processors
	// Verify test2 has one post-processor
build, err = template.Build("test2", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
}
cbuild = build.(*coreBuild)
if len(cbuild.postProcessors) != 1 {
t.Fatalf("invalid: %d", len(cbuild.postProcessors))
}
}
func TestTemplateBuild_exceptPPConfigTemplateName(t *testing.T) {
data := `
{
"variables": {
"foo": null
},
"builders": [
{
"name": "test1-{{user \"foo\"}}",
"type": "test-builder"
},
{
"name": "test2",
"type": "test-builder"
}
],
"post-processors": [
{
"type": "test-pp",
"except": ["test1-{{user \"foo\"}}"]
}
]
}
`
template, err := ParseTemplate([]byte(data), map[string]string{"foo": "bar"})
if err != nil {
t.Fatalf("err: %s", err)
}
// Verify test1 has no post-processors
build, err := template.Build("test1-{{user \"foo\"}}", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
}
cbuild := build.(*coreBuild)
if len(cbuild.postProcessors) > 0 {
t.Fatal("should have no postProcessors")
}
	// Verify test2 has one post-processor
build, err = template.Build("test2", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
......@@ -1245,7 +1339,62 @@ func TestTemplateBuild_exceptProv(t *testing.T) {
t.Fatal("should have no provisioners")
}
// Verify test2 has no provisioners
	// Verify test2 has one provisioner
build, err = template.Build("test2", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
}
cbuild = build.(*coreBuild)
if len(cbuild.provisioners) != 1 {
t.Fatalf("invalid: %d", len(cbuild.provisioners))
}
}
func TestTemplateBuild_exceptProvConfigTemplateName(t *testing.T) {
data := `
{
"variables": {
"foo": null
},
"builders": [
{
"name": "test1-{{user \"foo\"}}",
"type": "test-builder"
},
{
"name": "test2",
"type": "test-builder"
}
],
"provisioners": [
{
"type": "test-prov",
"except": ["test1-{{user \"foo\"}}"]
}
]
}
`
template, err := ParseTemplate([]byte(data), map[string]string{"foo": "bar"})
if err != nil {
t.Fatalf("err: %s", err)
}
// Verify test1 has no provisioners
build, err := template.Build("test1-{{user \"foo\"}}", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
}
cbuild := build.(*coreBuild)
if len(cbuild.provisioners) > 0 {
t.Fatal("should have no provisioners")
}
	// Verify test2 has one provisioner
build, err = template.Build("test2", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
......@@ -1325,7 +1474,7 @@ func TestTemplateBuild_onlyPP(t *testing.T) {
t.Fatal("should have no postProcessors")
}
// Verify test2 has no post-processors
	// Verify test2 has one post-processor
build, err = template.Build("test2", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
......@@ -1337,6 +1486,61 @@ func TestTemplateBuild_onlyPP(t *testing.T) {
}
}
func TestTemplateBuild_onlyPPConfigTemplateName(t *testing.T) {
data := `
{
"variables": {
"foo": null
},
"builders": [
{
"name": "test1",
"type": "test-builder"
},
{
"name": "test2-{{user \"foo\"}}",
"type": "test-builder"
}
],
"post-processors": [
{
"type": "test-pp",
"only": ["test2-{{user \"foo\"}}"]
}
]
}
`
template, err := ParseTemplate([]byte(data), map[string]string{"foo": "bar"})
if err != nil {
t.Fatalf("err: %s", err)
}
// Verify test1 has no post-processors
build, err := template.Build("test1", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
}
cbuild := build.(*coreBuild)
if len(cbuild.postProcessors) > 0 {
t.Fatal("should have no postProcessors")
}
	// Verify test2 has one post-processor
build, err = template.Build("test2-{{user \"foo\"}}", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
}
cbuild = build.(*coreBuild)
if len(cbuild.postProcessors) != 1 {
t.Fatalf("invalid: %d", len(cbuild.postProcessors))
}
}
func TestTemplateBuild_onlyProvInvalid(t *testing.T) {
data := `
{
......@@ -1405,7 +1609,7 @@ func TestTemplateBuild_onlyProv(t *testing.T) {
t.Fatal("should have no provisioners")
}
// Verify test2 has no provisioners
	// Verify test2 has one provisioner
build, err = template.Build("test2", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
......@@ -1417,6 +1621,61 @@ func TestTemplateBuild_onlyProv(t *testing.T) {
}
}
func TestTemplateBuild_onlyProvConfigTemplateName(t *testing.T) {
data := `
{
"variables": {
"foo": null
},
"builders": [
{
"name": "test1",
"type": "test-builder"
},
{
"name": "test2-{{user \"foo\"}}",
"type": "test-builder"
}
],
"provisioners": [
{
"type": "test-prov",
"only": ["test2-{{user \"foo\"}}"]
}
]
}
`
template, err := ParseTemplate([]byte(data), map[string]string{"foo": "bar"})
if err != nil {
t.Fatalf("err: %s", err)
}
// Verify test1 has no provisioners
build, err := template.Build("test1", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
}
cbuild := build.(*coreBuild)
if len(cbuild.provisioners) > 0 {
t.Fatal("should have no provisioners")
}
	// Verify test2 has one provisioner
build, err = template.Build("test2-{{user \"foo\"}}", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
}
cbuild = build.(*coreBuild)
if len(cbuild.provisioners) != 1 {
t.Fatalf("invalid: %d", len(cbuild.provisioners))
}
}
func TestTemplate_Build_ProvisionerOverride(t *testing.T) {
data := `
{
......
package main
import (
"github.com/mitchellh/packer/packer/plugin"
"github.com/mitchellh/packer/post-processor/atlas"
)
func main() {
server, err := plugin.Server()
if err != nil {
panic(err)
}
server.RegisterPostProcessor(new(atlas.PostProcessor))
server.Serve()
}
package atlas
import (
"fmt"
)
const BuilderId = "packer.post-processor.atlas"
type Artifact struct {
Name string
Type string
Version int
}
func (*Artifact) BuilderId() string {
return BuilderId
}
func (a *Artifact) Files() []string {
return nil
}
func (a *Artifact) Id() string {
return fmt.Sprintf("%s/%s/%d", a.Name, a.Type, a.Version)
}
func (a *Artifact) String() string {
return fmt.Sprintf("%s/%s (v%d)", a.Name, a.Type, a.Version)
}
func (*Artifact) State(name string) interface{} {
return nil
}
func (a *Artifact) Destroy() error {
return nil
}
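Concretely, given the two formatting methods above, an artifact renders like this (values are illustrative):

	a := &Artifact{Name: "mitchellh/test", Type: "amazon.image", Version: 3}
	fmt.Println(a.Id())     // mitchellh/test/amazon.image/3
	fmt.Println(a.String()) // mitchellh/test/amazon.image (v3)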
package atlas
import (
"fmt"
"os"
"strconv"
"strings"
"github.com/hashicorp/atlas-go/archive"
"github.com/hashicorp/atlas-go/v1"
"github.com/mitchellh/mapstructure"
"github.com/mitchellh/packer/common"
"github.com/mitchellh/packer/packer"
)
const BuildEnvKey = "ATLAS_BUILD_ID"
// Artifacts can return a string for this state key and the post-processor
// will automatically use this as the type. The user's value overrides
// this if `artifact_type_override` is set to true.
const ArtifactStateType = "atlas.artifact.type"
// Artifacts can return a map[string]string for this state key and this
// post-processor will automatically merge it into the metadata for any
// uploaded artifact versions.
const ArtifactStateMetadata = "atlas.artifact.metadata"
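A builder's artifact feeds these keys through its State method; the tests below do exactly this with packer.MockArtifact. A sketch, with illustrative values:

	artifact := new(packer.MockArtifact)
	artifact.StateValues = map[string]interface{}{
		// Used as the type unless artifact_type_override is set to true.
		ArtifactStateType: "amazon.image",
		// Merged into the metadata of any uploaded artifact version.
		ArtifactStateMetadata: map[interface{}]interface{}{"region": "us-east-1"},
	}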
type Config struct {
common.PackerConfig `mapstructure:",squash"`
Artifact string
Type string `mapstructure:"artifact_type"`
TypeOverride bool `mapstructure:"artifact_type_override"`
Metadata map[string]string
ServerAddr string `mapstructure:"server_address"`
Token string
// This shouldn't ever be set outside of unit tests.
Test bool `mapstructure:"test"`
tpl *packer.ConfigTemplate
user, name string
buildId int
}
type PostProcessor struct {
config Config
client *atlas.Client
}
func (p *PostProcessor) Configure(raws ...interface{}) error {
_, err := common.DecodeConfig(&p.config, raws...)
if err != nil {
return err
}
p.config.tpl, err = packer.NewConfigTemplate()
if err != nil {
return err
}
p.config.tpl.UserVars = p.config.PackerUserVars
templates := map[string]*string{
"artifact": &p.config.Artifact,
"type": &p.config.Type,
"server_address": &p.config.ServerAddr,
"token": &p.config.Token,
}
errs := new(packer.MultiError)
for key, ptr := range templates {
*ptr, err = p.config.tpl.Process(*ptr, nil)
if err != nil {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Error processing %s: %s", key, err))
}
}
required := map[string]*string{
"artifact": &p.config.Artifact,
"artifact_type": &p.config.Type,
}
for key, ptr := range required {
if *ptr == "" {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("%s must be set", key))
}
}
if len(errs.Errors) > 0 {
return errs
}
p.config.user, p.config.name, err = atlas.ParseSlug(p.config.Artifact)
if err != nil {
return err
}
// If we have a build ID, save it
if v := os.Getenv(BuildEnvKey); v != "" {
raw, err := strconv.ParseInt(v, 0, 0)
if err != nil {
return fmt.Errorf(
"Error parsing build ID: %s", err)
}
p.config.buildId = int(raw)
}
// Build the client
p.client = atlas.DefaultClient()
if p.config.ServerAddr != "" {
p.client, err = atlas.NewClient(p.config.ServerAddr)
if err != nil {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Error initializing atlas client: %s", err))
return errs
}
}
if p.config.Token != "" {
p.client.Token = p.config.Token
}
if !p.config.Test {
// Verify the client
if err := p.client.Verify(); err != nil {
if err == atlas.ErrAuth {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Error connecting to atlas server, please check your ATLAS_TOKEN env: %s", err))
} else {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Error initializing atlas client: %s", err))
}
return errs
}
}
return nil
}
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
if _, err := p.client.Artifact(p.config.user, p.config.name); err != nil {
if err != atlas.ErrNotFound {
return nil, false, fmt.Errorf(
"Error finding artifact: %s", err)
}
// Artifact doesn't exist, create it
ui.Message(fmt.Sprintf("Creating artifact: %s", p.config.Artifact))
_, err = p.client.CreateArtifact(p.config.user, p.config.name)
if err != nil {
return nil, false, fmt.Errorf(
"Error creating artifact: %s", err)
}
}
opts := &atlas.UploadArtifactOpts{
User: p.config.user,
Name: p.config.name,
Type: p.config.Type,
ID: artifact.Id(),
Metadata: p.metadata(artifact),
BuildID: p.config.buildId,
}
if fs := artifact.Files(); len(fs) > 0 {
var archiveOpts archive.ArchiveOpts
// We have files. We want to compress/upload them. If we have just
// one file, then we use it as-is. Otherwise, we compress all of
// them into a single file.
var path string
if len(fs) == 1 {
path = fs[0]
} else {
path = longestCommonPrefix(fs)
if path == "" {
return nil, false, fmt.Errorf(
"No common prefix for achiving files: %v", fs)
}
// Modify the archive options to only include the files
// that are in our file list.
			// Allocate with length len(fs), not zero length: the loop below
			// assigns by index, which would panic on an empty slice.
			include := make([]string, len(fs))
for i, f := range fs {
include[i] = strings.Replace(f, path, "", 1)
}
archiveOpts.Include = include
}
r, err := archive.CreateArchive(path, &archiveOpts)
if err != nil {
return nil, false, fmt.Errorf(
"Error archiving artifact: %s", err)
}
defer r.Close()
opts.File = r
opts.FileSize = r.Size
}
ui.Message("Uploading artifact version...")
var av *atlas.ArtifactVersion
doneCh := make(chan struct{})
errCh := make(chan error, 1)
go func() {
var err error
av, err = p.client.UploadArtifact(opts)
if err != nil {
errCh <- err
return
}
close(doneCh)
}()
select {
case err := <-errCh:
return nil, false, fmt.Errorf("Error uploading: %s", err)
case <-doneCh:
}
return &Artifact{
Name: p.config.Artifact,
Type: p.config.Type,
Version: av.Version,
}, true, nil
}
func (p *PostProcessor) metadata(artifact packer.Artifact) map[string]string {
var metadata map[string]string
metadataRaw := artifact.State(ArtifactStateMetadata)
if metadataRaw != nil {
if err := mapstructure.Decode(metadataRaw, &metadata); err != nil {
panic(err)
}
}
if p.config.Metadata != nil {
// If we have no extra metadata, just return as-is
if metadata == nil {
return p.config.Metadata
}
// Merge the metadata
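		// Values from the post-processor config win on key conflicts.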
for k, v := range p.config.Metadata {
metadata[k] = v
}
}
return metadata
}
func (p *PostProcessor) artifactType(artifact packer.Artifact) string {
if !p.config.TypeOverride {
if v := artifact.State(ArtifactStateType); v != nil {
return v.(string)
}
}
return p.config.Type
}
package atlas
import (
"os"
"reflect"
"testing"
"github.com/mitchellh/packer/packer"
)
func TestPostProcessorConfigure(t *testing.T) {
currentEnv := os.Getenv("ATLAS_TOKEN")
os.Setenv("ATLAS_TOKEN", "")
defer os.Setenv("ATLAS_TOKEN", currentEnv)
var p PostProcessor
if err := p.Configure(validDefaults()); err != nil {
t.Fatalf("err: %s", err)
}
if p.client == nil {
t.Fatal("should have client")
}
if p.client.Token != "" {
t.Fatal("should not have token")
}
}
func TestPostProcessorConfigure_buildId(t *testing.T) {
defer os.Setenv(BuildEnvKey, os.Getenv(BuildEnvKey))
os.Setenv(BuildEnvKey, "5")
var p PostProcessor
if err := p.Configure(validDefaults()); err != nil {
t.Fatalf("err: %s", err)
}
if p.config.buildId != 5 {
t.Fatalf("bad: %#v", p.config.buildId)
}
}
func TestPostProcessorMetadata(t *testing.T) {
var p PostProcessor
if err := p.Configure(validDefaults()); err != nil {
t.Fatalf("err: %s", err)
}
artifact := new(packer.MockArtifact)
metadata := p.metadata(artifact)
if len(metadata) > 0 {
t.Fatalf("bad: %#v", metadata)
}
}
func TestPostProcessorMetadata_artifact(t *testing.T) {
config := validDefaults()
config["metadata"] = map[string]string{
"foo": "bar",
}
var p PostProcessor
if err := p.Configure(config); err != nil {
t.Fatalf("err: %s", err)
}
artifact := new(packer.MockArtifact)
artifact.StateValues = map[string]interface{}{
ArtifactStateMetadata: map[interface{}]interface{}{
"bar": "baz",
},
}
metadata := p.metadata(artifact)
expected := map[string]string{
"foo": "bar",
"bar": "baz",
}
if !reflect.DeepEqual(metadata, expected) {
t.Fatalf("bad: %#v", metadata)
}
}
func TestPostProcessorMetadata_config(t *testing.T) {
config := validDefaults()
config["metadata"] = map[string]string{
"foo": "bar",
}
var p PostProcessor
if err := p.Configure(config); err != nil {
t.Fatalf("err: %s", err)
}
artifact := new(packer.MockArtifact)
metadata := p.metadata(artifact)
expected := map[string]string{
"foo": "bar",
}
if !reflect.DeepEqual(metadata, expected) {
t.Fatalf("bad: %#v", metadata)
}
}
func TestPostProcessorType(t *testing.T) {
var p PostProcessor
if err := p.Configure(validDefaults()); err != nil {
t.Fatalf("err: %s", err)
}
artifact := new(packer.MockArtifact)
actual := p.artifactType(artifact)
if actual != "foo" {
t.Fatalf("bad: %#v", actual)
}
}
func TestPostProcessorType_artifact(t *testing.T) {
var p PostProcessor
if err := p.Configure(validDefaults()); err != nil {
t.Fatalf("err: %s", err)
}
artifact := new(packer.MockArtifact)
artifact.StateValues = map[string]interface{}{
ArtifactStateType: "bar",
}
actual := p.artifactType(artifact)
if actual != "bar" {
t.Fatalf("bad: %#v", actual)
}
}
func validDefaults() map[string]interface{} {
return map[string]interface{}{
"artifact": "mitchellh/test",
"artifact_type": "foo",
"test": true,
}
}
package atlas
import (
"math"
"strings"
)
// longestCommonPrefix finds the longest common prefix for all the strings
// given as an argument, or returns the empty string if a prefix can't be
// found.
//
// This function just uses brute force instead of a more optimized algorithm.
func longestCommonPrefix(vs []string) string {
	// Find the shortest string
	var shortest string
	length := int64(math.MaxUint32)
for _, v := range vs {
if int64(len(v)) < length {
shortest = v
length = int64(len(v))
}
}
	// Now go through and find a prefix common to all the strings using this
	// shortest string, which must itself contain the prefix.
for i := len(shortest); i > 0; i-- {
// We only care about prefixes with path seps
if shortest[i-1] != '/' {
continue
}
bad := false
		prefix := shortest[:i]
for _, v := range vs {
if !strings.HasPrefix(v, prefix) {
bad = true
break
}
}
if !bad {
return prefix
}
}
return ""
}
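A testable example makes the path-separator rule concrete (a sketch that would sit in the same atlas package as the unit tests below; the helper itself is defined above):

```go
package atlas

import "fmt"

func Example_longestCommonPrefix() {
	// Only prefixes that end in a path separator qualify, so e.g.
	// "foo" and "foobar" would yield no usable prefix.
	paths := []string{"/build/output/box.ovf", "/build/output/box.mf"}
	fmt.Println(longestCommonPrefix(paths))
	// Output: /build/output/
}
```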
package atlas
import (
"testing"
)
func TestLongestCommonPrefix(t *testing.T) {
cases := []struct {
Input []string
Output string
}{
{
[]string{"foo", "bar"},
"",
},
{
[]string{"foo", "foobar"},
"",
},
{
[]string{"foo/", "foo/bar"},
"foo/",
},
{
[]string{"/foo/", "/bar"},
"/",
},
}
for _, tc := range cases {
actual := longestCommonPrefix(tc.Input)
if actual != tc.Output {
t.Fatalf("bad: %#v\n\n%#v", actual, tc.Input)
}
}
}
......@@ -78,7 +78,7 @@ func (v VagrantCloudClient) Get(path string) (*http.Response, error) {
req.Header.Add("Content-Type", "application/json")
resp, err := v.client.Do(req)
log.Printf("Post-Processor Vagrant Cloud API Response: \n\n%s", resp)
log.Printf("Post-Processor Vagrant Cloud API Response: \n\n%+v", resp)
return resp, err
}
......@@ -96,7 +96,7 @@ func (v VagrantCloudClient) Delete(path string) (*http.Response, error) {
req.Header.Add("Content-Type", "application/json")
resp, err := v.client.Do(req)
log.Printf("Post-Processor Vagrant Cloud API Response: \n\n%s", resp)
log.Printf("Post-Processor Vagrant Cloud API Response: \n\n%+v", resp)
return resp, err
}
......@@ -128,7 +128,7 @@ func (v VagrantCloudClient) Upload(path string, url string) (*http.Response, err
resp, err := v.client.Do(request)
log.Printf("Post-Processor Vagrant Cloud Upload Response: \n\n%s", resp)
log.Printf("Post-Processor Vagrant Cloud Upload Response: \n\n%+v", resp)
return resp, err
}
......@@ -153,7 +153,7 @@ func (v VagrantCloudClient) Post(path string, body interface{}) (*http.Response,
resp, err := v.client.Do(req)
log.Printf("Post-Processor Vagrant Cloud API Response: \n\n%s", resp)
log.Printf("Post-Processor Vagrant Cloud API Response: \n\n%+v", resp)
return resp, err
}
......@@ -172,7 +172,7 @@ func (v VagrantCloudClient) Put(path string) (*http.Response, error) {
resp, err := v.client.Do(req)
log.Printf("Post-Processor Vagrant Cloud API Response: \n\n%s", resp)
log.Printf("Post-Processor Vagrant Cloud API Response: \n\n%+v", resp)
return resp, err
}
......@@ -25,7 +25,7 @@ func (s *stepCreateProvider) Run(state multistep.StateBag) multistep.StepAction
providerName := state.Get("providerName").(string)
downloadUrl := state.Get("boxDownloadUrl").(string)
path := fmt.Sprintf("box/%s/version/%v/providers", box.Tag, version.Number)
path := fmt.Sprintf("box/%s/version/%v/providers", box.Tag, version.Version)
provider := &Provider{Name: providerName}
......@@ -86,7 +86,7 @@ func (s *stepCreateProvider) Cleanup(state multistep.StateBag) {
ui.Say("Cleaning up provider")
ui.Message(fmt.Sprintf("Deleting provider: %s", s.name))
path := fmt.Sprintf("box/%s/version/%v/provider/%s", box.Tag, version.Number, s.name)
path := fmt.Sprintf("box/%s/version/%v/provider/%s", box.Tag, version.Version, s.name)
// No need for resp from the cleanup DELETE
_, err := client.Delete(path)
......
......@@ -9,11 +9,9 @@ import (
type Version struct {
Version string `json:"version"`
Description string `json:"description,omitempty"`
Number uint `json:"number,omitempty"`
}
type stepCreateVersion struct {
number uint // number of the version, if needed in cleanup
}
func (s *stepCreateVersion) Run(state multistep.StateBag) multistep.StepAction {
......@@ -52,9 +50,6 @@ func (s *stepCreateVersion) Run(state multistep.StateBag) multistep.StepAction {
return multistep.ActionHalt
}
// Save the number for cleanup
s.number = version.Number
state.Put("version", version)
return multistep.ActionContinue
......@@ -63,15 +58,8 @@ func (s *stepCreateVersion) Run(state multistep.StateBag) multistep.StepAction {
func (s *stepCreateVersion) Cleanup(state multistep.StateBag) {
client := state.Get("client").(*VagrantCloudClient)
ui := state.Get("ui").(packer.Ui)
config := state.Get("config").(Config)
box := state.Get("box").(*Box)
// If we didn't save the version number, it likely doesn't exist or
// already existed
if s.number == 0 {
ui.Message("Version was not created or previously existed, not deleting")
return
}
version := state.Get("version").(*Version)
_, cancelled := state.GetOk(multistep.StateCancelled)
_, halted := state.GetOk(multistep.StateHalted)
......@@ -82,10 +70,10 @@ func (s *stepCreateVersion) Cleanup(state multistep.StateBag) {
return
}
path := fmt.Sprintf("box/%s/version/%v", box.Tag, s.number)
path := fmt.Sprintf("box/%s/version/%v", box.Tag, version.Version)
ui.Say("Cleaning up version")
ui.Message(fmt.Sprintf("Deleting version: %s", config.Version))
ui.Message(fmt.Sprintf("Deleting version: %s", version.Version))
// No need for resp from the cleanup DELETE
_, err := client.Delete(path)
......
......@@ -22,7 +22,7 @@ func (s *stepPrepareUpload) Run(state multistep.StateBag) multistep.StepAction {
provider := state.Get("provider").(*Provider)
artifactFilePath := state.Get("artifactFilePath").(string)
path := fmt.Sprintf("box/%s/version/%v/provider/%s/upload", box.Tag, version.Number, provider.Name)
path := fmt.Sprintf("box/%s/version/%v/provider/%s/upload", box.Tag, version.Version, provider.Name)
upload := &Upload{}
ui.Say(fmt.Sprintf("Preparing upload of box: %s", artifactFilePath))
......
......@@ -24,7 +24,7 @@ func (s *stepReleaseVersion) Run(state multistep.StateBag) multistep.StepAction
return multistep.ActionContinue
}
path := fmt.Sprintf("box/%s/version/%v/release", box.Tag, version.Number)
path := fmt.Sprintf("box/%s/version/%v/release", box.Tag, version.Version)
resp, err := client.Put(path)
......
......@@ -19,7 +19,7 @@ func (s *stepVerifyUpload) Run(state multistep.StateBag) multistep.StepAction {
upload := state.Get("upload").(*Upload)
provider := state.Get("provider").(*Provider)
path := fmt.Sprintf("box/%s/version/%v/provider/%s", box.Tag, version.Number, provider.Name)
path := fmt.Sprintf("box/%s/version/%v/provider/%s", box.Tag, version.Version, provider.Name)
providerCheck := &Provider{}
......
......@@ -10,7 +10,7 @@ import (
// These are the extensions of files and directories that are unnecessary for the function
// of a Parallels virtual machine.
var UnnecessaryFilesPatterns = []string{"\\.log$", "\\.backup$", "\\.Backup$", "\\.app/"}
var UnnecessaryFilesPatterns = []string{"\\.log$", "\\.backup$", "\\.Backup$", "\\.app/", "/Windows Disks/"}
type ParallelsProvider struct{}
......
......@@ -113,14 +113,8 @@ func (p *PostProcessor) PostProcessProvider(name string, provider Provider, ui p
// Write our Vagrantfile
var customVagrantfile string
if config.VagrantfileTemplate != "" {
vagrantfilePath, err := config.tpl.Process(config.VagrantfileTemplate, nil)
if err != nil {
return nil, false, err
}
ui.Message(fmt.Sprintf(
"Using custom Vagrantfile: %s", vagrantfilePath))
customBytes, err := ioutil.ReadFile(vagrantfilePath)
ui.Message(fmt.Sprintf("Using custom Vagrantfile: %s", config.VagrantfileTemplate))
customBytes, err := ioutil.ReadFile(config.VagrantfileTemplate)
if err != nil {
return nil, false, err
}
......@@ -200,11 +194,29 @@ func (p *PostProcessor) configureSingle(config *Config, raws ...interface{}) err
// Accumulate any errors
errs := common.CheckUnusedConfig(md)
templates := map[string]*string{
"vagrantfile_template": &config.VagrantfileTemplate,
}
for key, ptr := range templates {
*ptr, err = config.tpl.Process(*ptr, nil)
if err != nil {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing %s: %s", key, err))
}
}
validates := map[string]*string{
"output": &config.OutputPath,
"vagrantfile_template": &config.VagrantfileTemplate,
}
if config.VagrantfileTemplate != "" {
_, err := os.Stat(config.VagrantfileTemplate)
if err != nil {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("vagrantfile_template '%s' does not exist", config.VagrantfileTemplate))
}
}
for n, ptr := range validates {
if err := config.tpl.Validate(*ptr); err != nil {
errs = packer.MultiErrorAppend(
......
......@@ -4,6 +4,8 @@ import (
"bytes"
"compress/flate"
"github.com/mitchellh/packer/packer"
"io/ioutil"
"os"
"strings"
"testing"
)
......@@ -82,16 +84,22 @@ func TestPostProcessorPrepare_outputPath(t *testing.T) {
func TestPostProcessorPrepare_subConfigs(t *testing.T) {
var p PostProcessor
f, err := ioutil.TempFile("", "packer")
if err != nil {
t.Fatalf("err: %s", err)
}
defer os.Remove(f.Name())
// Default
c := testConfig()
c["compression_level"] = 42
c["vagrantfile_template"] = "foo"
c["vagrantfile_template"] = f.Name()
c["override"] = map[string]interface{}{
"aws": map[string]interface{}{
"compression_level": 7,
},
}
err := p.Configure(c)
err = p.Configure(c)
if err != nil {
t.Fatalf("err: %s", err)
}
......@@ -100,7 +108,7 @@ func TestPostProcessorPrepare_subConfigs(t *testing.T) {
t.Fatalf("bad: %#v", p.configs[""].CompressionLevel)
}
if p.configs[""].VagrantfileTemplate != "foo" {
if p.configs[""].VagrantfileTemplate != f.Name() {
t.Fatalf("bad: %#v", p.configs[""].VagrantfileTemplate)
}
......@@ -108,11 +116,35 @@ func TestPostProcessorPrepare_subConfigs(t *testing.T) {
t.Fatalf("bad: %#v", p.configs["aws"].CompressionLevel)
}
if p.configs["aws"].VagrantfileTemplate != "foo" {
if p.configs["aws"].VagrantfileTemplate != f.Name() {
t.Fatalf("bad: %#v", p.configs["aws"].VagrantfileTemplate)
}
}
func TestPostProcessorPrepare_vagrantfileTemplateExists(t *testing.T) {
f, err := ioutil.TempFile("", "packer")
if err != nil {
t.Fatalf("err: %s", err)
}
name := f.Name()
c := testConfig()
c["vagrantfile_template"] = name
if err := f.Close(); err != nil {
t.Fatalf("err: %s", err)
}
if err := os.Remove(name); err != nil {
t.Fatalf("err: %s", err)
}
var p PostProcessor
if err := p.Configure(c); err == nil {
t.Fatal("expected an error since vagrantfile_template does not exist")
}
}
func TestPostProcessorPostProcess_badId(t *testing.T) {
artifact := &packer.MockArtifact{
BuilderIdValue: "invalid.packer",
......@@ -124,6 +156,41 @@ func TestPostProcessorPostProcess_badId(t *testing.T) {
}
}
func TestPostProcessorPostProcess_vagrantfileUserVariable(t *testing.T) {
var p PostProcessor
f, err := ioutil.TempFile("", "packer")
if err != nil {
t.Fatalf("err: %s", err)
}
defer os.Remove(f.Name())
c := map[string]interface{}{
"packer_user_variables": map[string]string{
"foo": f.Name(),
},
"vagrantfile_template": "{{user `foo`}}",
}
err = p.Configure(c)
if err != nil {
t.Fatalf("err: %s", err)
}
a := &packer.MockArtifact{
BuilderIdValue: "packer.parallels",
}
a2, _, err := p.PostProcess(testUi(), a)
if a2 != nil {
for _, fn := range a2.Files() {
defer os.Remove(fn)
}
}
if err != nil {
t.Fatalf("err: %s", err)
}
}
func TestProviderForName(t *testing.T) {
if v, ok := providerForName("virtualbox").(*VBoxProvider); !ok {
t.Fatalf("bad: %#v", v)
......
......@@ -130,7 +130,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac
fmt.Sprintf("%s", vmx),
fmt.Sprintf("vi://%s:%s@%s/%s/host/%s/Resources/%s/",
url.QueryEscape(p.config.Username),
p.config.Password,
url.QueryEscape(p.config.Password),
p.config.Host,
p.config.Datacenter,
p.config.Cluster,
......
......@@ -14,6 +14,7 @@ import (
"strings"
"github.com/mitchellh/packer/common"
"github.com/mitchellh/packer/common/uuid"
"github.com/mitchellh/packer/packer"
)
......@@ -21,6 +22,7 @@ type Config struct {
common.PackerConfig `mapstructure:",squash"`
ChefEnvironment string `mapstructure:"chef_environment"`
SslVerifyMode string `mapstructure:"ssl_verify_mode"`
ConfigTemplate string `mapstructure:"config_template"`
ExecuteCommand string `mapstructure:"execute_command"`
InstallCommand string `mapstructure:"install_command"`
......@@ -49,6 +51,7 @@ type ConfigTemplate struct {
ValidationKeyPath string
ValidationClientName string
ChefEnvironment string
SslVerifyMode string
}
type ExecuteTemplate struct {
......@@ -78,6 +81,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error {
templates := map[string]*string{
"chef_environment": &p.config.ChefEnvironment,
"ssl_verify_mode": &p.config.SslVerifyMode,
"config_template": &p.config.ConfigTemplate,
"node_name": &p.config.NodeName,
"staging_dir": &p.config.StagingDir,
......@@ -187,7 +191,11 @@ func (p *Provisioner) Prepare(raws ...interface{}) error {
}
func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
nodeName := p.config.NodeName
if nodeName == "" {
nodeName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID())
}
remoteValidationKeyPath := ""
serverUrl := p.config.ServerUrl
......@@ -209,7 +217,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
}
configPath, err := p.createConfig(
ui, comm, nodeName, serverUrl, remoteValidationKeyPath, p.config.ValidationClientName, p.config.ChefEnvironment)
ui, comm, nodeName, serverUrl, remoteValidationKeyPath, p.config.ValidationClientName, p.config.ChefEnvironment, p.config.SslVerifyMode)
if err != nil {
return fmt.Errorf("Error creating Chef config file: %s", err)
}
......@@ -263,7 +271,7 @@ func (p *Provisioner) uploadDirectory(ui packer.Ui, comm packer.Communicator, ds
return comm.UploadDir(dst, src, nil)
}
func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, remoteKeyPath string, validationClientName string, chefEnvironment string) (string, error) {
func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, remoteKeyPath string, validationClientName string, chefEnvironment string, sslVerifyMode string) (string, error) {
ui.Message("Creating configuration file 'client.rb'")
// Read the template
......@@ -289,6 +297,7 @@ func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeN
ValidationKeyPath: remoteKeyPath,
ValidationClientName: validationClientName,
ChefEnvironment: chefEnvironment,
SslVerifyMode: sslVerifyMode,
})
if err != nil {
return "", err
......@@ -333,8 +342,14 @@ func (p *Provisioner) createJson(ui packer.Ui, comm packer.Communicator) (string
func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir string) error {
ui.Message(fmt.Sprintf("Creating directory: %s", dir))
mkdirCmd := fmt.Sprintf("mkdir -p '%s'", dir)
if !p.config.PreventSudo {
mkdirCmd = "sudo " + mkdirCmd
}
cmd := &packer.RemoteCmd{
Command: fmt.Sprintf("sudo mkdir -p '%s'", dir),
Command: mkdirCmd,
}
if err := cmd.StartWithUi(comm, ui); err != nil {
......@@ -382,8 +397,14 @@ func (p *Provisioner) cleanClient(ui packer.Ui, comm packer.Communicator, node s
func (p *Provisioner) removeDir(ui packer.Ui, comm packer.Communicator, dir string) error {
ui.Message(fmt.Sprintf("Removing directory: %s", dir))
rmCmd := fmt.Sprintf("rm -rf '%s'", dir)
if !p.config.PreventSudo {
rmCmd = "sudo " + rmCmd
}
cmd := &packer.RemoteCmd{
Command: fmt.Sprintf("sudo rm -rf %s", dir),
Command: rmCmd,
}
if err := cmd.StartWithUi(comm, ui); err != nil {
......@@ -553,10 +574,11 @@ validation_client_name "chef-validator"
{{if ne .ValidationKeyPath ""}}
validation_key "{{.ValidationKeyPath}}"
{{end}}
{{if ne .NodeName ""}}
node_name "{{.NodeName}}"
{{end}}
{{if ne .ChefEnvironment ""}}
environment "{{.ChefEnvironment}}"
{{end}}
{{if ne .SslVerifyMode ""}}
ssl_verify_mode :{{.SslVerifyMode}}
{{end}}
`
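For instance, configuring `ssl_verify_mode` as `verify_peer` would render a line like the following into the generated client.rb (illustrative value; Chef accepts `:verify_peer` and `:verify_none`):

```text
ssl_verify_mode :verify_peer
```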
package chefclient
import (
"bytes"
"io/ioutil"
"os"
"strings"
"testing"
"github.com/mitchellh/packer/packer"
......@@ -135,3 +137,55 @@ func TestProvisionerPrepare_serverUrl(t *testing.T) {
t.Fatalf("err: %s", err)
}
}
func TestProvisioner_createDir(t *testing.T) {
p1 := &Provisioner{config: Config{PreventSudo: true}}
p2 := &Provisioner{config: Config{PreventSudo: false}}
comm := &packer.MockCommunicator{}
ui := &packer.BasicUi{
Reader: new(bytes.Buffer),
Writer: new(bytes.Buffer),
}
if err := p1.createDir(ui, comm, "/tmp/foo"); err != nil {
t.Fatalf("err: %s", err)
}
if strings.HasPrefix(comm.StartCmd.Command, "sudo") {
t.Fatalf("createDir should not use sudo, got: \"%s\"", comm.StartCmd.Command)
}
if err := p2.createDir(ui, comm, "/tmp/foo"); err != nil {
t.Fatalf("err: %s", err)
}
if !strings.HasPrefix(comm.StartCmd.Command, "sudo") {
t.Fatalf("createDir should use sudo, got: \"%s\"", comm.StartCmd.Command)
}
}
func TestProvisioner_removeDir(t *testing.T) {
p1 := &Provisioner{config: Config{PreventSudo: true}}
p2 := &Provisioner{config: Config{PreventSudo: false}}
comm := &packer.MockCommunicator{}
ui := &packer.BasicUi{
Reader: new(bytes.Buffer),
Writer: new(bytes.Buffer),
}
if err := p1.removeDir(ui, comm, "/tmp/foo"); err != nil {
t.Fatalf("err: %s", err)
}
if strings.HasPrefix(comm.StartCmd.Command, "sudo") {
t.Fatalf("removeDir should not use sudo, got: \"%s\"", comm.StartCmd.Command)
}
if err := p2.removeDir(ui, comm, "/tmp/foo"); err != nil {
t.Fatalf("err: %s", err)
}
if !strings.HasPrefix(comm.StartCmd.Command, "sudo") {
t.Fatalf("removeDir should use sudo, got: \"%s\"", comm.StartCmd.Command)
}
}
......@@ -176,13 +176,10 @@ func (p *Provisioner) Prepare(raws ...interface{}) error {
errs = packer.MultiErrorAppend(errs,
fmt.Errorf("A manifest_file must be specified."))
} else {
info, err := os.Stat(p.config.ManifestFile)
_, err := os.Stat(p.config.ManifestFile)
if err != nil {
errs = packer.MultiErrorAppend(errs,
fmt.Errorf("manifest_file is invalid: %s", err))
} else if info.IsDir() {
errs = packer.MultiErrorAppend(errs,
fmt.Errorf("manifest_file must point to a file"))
}
}
......@@ -193,7 +190,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error {
fmt.Errorf("module_path[%d] is invalid: %s", i, err))
} else if !info.IsDir() {
errs = packer.MultiErrorAppend(errs,
fmt.Errorf("module_path[%d] must point to a directory"))
fmt.Errorf("module_path[%d] must point to a directory", i))
}
}
......
......@@ -116,11 +116,18 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
ui.Say("Provisioning with Salt...")
if !p.config.SkipBootstrap {
cmd := &packer.RemoteCmd{
Command: fmt.Sprintf("wget -O - https://bootstrap.saltstack.org | %s %s", p.sudo("sh -s"), p.config.BootstrapArgs),
Command: fmt.Sprintf("curl -L https://bootstrap.saltstack.com -o /tmp/install_salt.sh"),
}
ui.Message(fmt.Sprintf("Installing Salt with command %s", cmd))
ui.Message(fmt.Sprintf("Downloading saltstack bootstrap to /tmp/install_salt.sh"))
if err = cmd.StartWithUi(comm, ui); err != nil {
return fmt.Errorf("Unable to install Salt: %d", err)
return fmt.Errorf("Unable to download Salt: %s", err)
}
cmd = &packer.RemoteCmd{
Command: fmt.Sprintf("%s /tmp/install_salt.sh %s", p.sudo("sh"), p.config.BootstrapArgs),
}
ui.Message(fmt.Sprintf("Installing Salt with command %s", cmd.Command))
if err = cmd.StartWithUi(comm, ui); err != nil {
return fmt.Errorf("Unable to install Salt: %s", err)
}
}
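Taken together, the bootstrap now amounts to the following shell sequence on the target machine (a sketch; the `sudo` prefix depends on `p.sudo`, and the configured `BootstrapArgs` are appended verbatim):

```sh
curl -L https://bootstrap.saltstack.com -o /tmp/install_salt.sh
sudo sh /tmp/install_salt.sh ${BOOTSTRAP_ARGS}
```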
......@@ -141,7 +148,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
src = filepath.ToSlash(filepath.Join(p.config.TempConfigDir, "minion"))
dst = "/etc/salt/minion"
if err = p.moveFile(ui, comm, dst, src); err != nil {
return fmt.Errorf("Unable to move %s/minion to /etc/salt/minion: %d", p.config.TempConfigDir, err)
return fmt.Errorf("Unable to move %s/minion to /etc/salt/minion: %s", p.config.TempConfigDir, err)
}
}
......@@ -156,7 +163,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
src = filepath.ToSlash(filepath.Join(p.config.TempConfigDir, "states"))
dst = "/srv/salt"
if err = p.moveFile(ui, comm, dst, src); err != nil {
return fmt.Errorf("Unable to move %s/states to /srv/salt: %d", p.config.TempConfigDir, err)
return fmt.Errorf("Unable to move %s/states to /srv/salt: %s", p.config.TempConfigDir, err)
}
if p.config.LocalPillarRoots != "" {
......@@ -171,12 +178,12 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
src = filepath.ToSlash(filepath.Join(p.config.TempConfigDir, "pillar"))
dst = "/srv/pillar"
if err = p.moveFile(ui, comm, dst, src); err != nil {
return fmt.Errorf("Unable to move %s/pillar to /srv/pillar: %d", p.config.TempConfigDir, err)
return fmt.Errorf("Unable to move %s/pillar to /srv/pillar: %s", p.config.TempConfigDir, err)
}
}
ui.Message("Running highstate")
cmd := &packer.RemoteCmd{Command: p.sudo("salt-call --local state.highstate -l info")}
cmd := &packer.RemoteCmd{Command: p.sudo("salt-call --local state.highstate -l info --retcode-passthrough")}
if err = cmd.StartWithUi(comm, ui); err != nil || cmd.ExitStatus != 0 {
if err == nil {
err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus)
......@@ -224,7 +231,7 @@ func (p *Provisioner) moveFile(ui packer.Ui, comm packer.Communicator, dst, src
err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus)
}
return fmt.Errorf("Unable to move %s/minion to /etc/salt/minion: %d", p.config.TempConfigDir, err)
return fmt.Errorf("Unable to move %s/minion to /etc/salt/minion: %s", p.config.TempConfigDir, err)
}
return nil
}
......
......@@ -41,8 +41,7 @@ Set the following self-explanatory environmental variables:
Set the following environmental variables:
* `GC_BUCKET_NAME`
* `GC_CLIENT_SECRETS_FILE`
* `GC_PRIVATE_KEY_FILE`
* `GC_ACCOUNT_FILE`
* `GC_PROJECT_ID`
### Running
......
......@@ -8,8 +8,7 @@ fixtures builder-googlecompute
# Required parameters
: ${GC_BUCKET_NAME:?}
: ${GC_CLIENT_SECRETS_FILE:?}
: ${GC_PRIVATE_KEY_FILE:?}
: ${GC_ACCOUNT_FILE:?}
: ${GC_PROJECT_ID:?}
command -v gcutil >/dev/null 2>&1 || {
echo "'gcutil' must be installed" >&2
......@@ -17,8 +16,7 @@ command -v gcutil >/dev/null 2>&1 || {
}
USER_VARS="-var bucket_name=${GC_BUCKET_NAME}"
USER_VARS="${USER_VARS} -var client_secrets_file=${GC_CLIENT_SECRETS_FILE}"
USER_VARS="${USER_VARS} -var private_key_file=${GC_PRIVATE_KEY_FILE}"
USER_VARS="${USER_VARS} -var account_file=${GC_ACCOUNT_FILE}"
USER_VARS="${USER_VARS} -var project_id=${GC_PROJECT_ID}"
# This tests if GCE has an image that contains the given parameter.
......@@ -30,7 +28,7 @@ gc_has_image() {
teardown() {
gcutil --format=names --project=${GC_PROJECT_ID} listimages \
| grep packerbats \
| xargs -n1 gcutil --project=${GC_PROJECT_ID} --force deleteimage
| xargs -n1 gcutil --project=${GC_PROJECT_ID} deleteimage --force
}
@test "googlecompute: build minimal.json" {
......
......@@ -17,20 +17,20 @@ load test_helper
[[ "$output" == *"Packer v"* ]]
run packer -v
[ "$status" -eq 0 ]
[[ "$output" == *"Packer v"* ]]
[ "$status" -eq 1 ]
[[ "$output" =~ ([0-9]+\.[0-9]+) ]]
run packer --version
[ "$status" -eq 0 ]
[[ "$output" == *"Packer v"* ]]
[ "$status" -eq 1 ]
[[ "$output" =~ ([0-9]+\.[0-9]+) ]]
}
@test "cli: packer version show help" {
run packer version -h
[ "$status" -eq 0 ]
[[ "$output" == *"usage: packer version"* ]]
[[ "$output" == *"Packer v"* ]]
run packer version --help
[ "$status" -eq 0 ]
[[ "$output" == *"usage: packer version"* ]]
[[ "$output" == *"Packer v"* ]]
}
{
"variables": {
"bucket_name": null,
"client_secrets_file": null,
"private_key_file": null,
"account_file": null,
"project_id": null
},
"builders": [{
"type": "googlecompute",
"bucket_name": "{{user `bucket_name`}}",
"client_secrets_file": "{{user `client_secrets_file`}}",
"private_key_file": "{{user `private_key_file`}}",
"account_file": "{{user `account_file`}}",
"project_id": "{{user `project_id`}}",
"image_name": "packerbats-minimal-{{timestamp}}",
"source_image": "debian-7-wheezy-v20131120",
"source_image": "debian-7-wheezy-v20141108",
"zone": "us-central1-a"
}]
}
......@@ -4,9 +4,9 @@ package main
var GitCommit string
// The main version number that is being run at the moment.
const Version = "0.8.0"
const Version = "0.7.5"
// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release
// such as "dev" (in development), "beta", "rc1", etc.
const VersionPrerelease = "dev"
const VersionPrerelease = ""
GIT
remote: git://github.com/hashicorp/middleman-hashicorp.git
revision: b82c2c2fdc244cd0bd529ff27cfab24e43f07708
revision: 783fe9517dd02badb85e5ddfeda4d8e35bbd05a8
specs:
middleman-hashicorp (0.1.0)
bootstrap-sass (~> 3.2)
bootstrap-sass (~> 3.3)
builder (~> 3.2)
less (~> 2.6)
middleman (~> 3.3)
middleman-livereload (~> 3.3)
middleman-livereload (~> 3.4)
middleman-minify-html (~> 3.4)
middleman-syntax (~> 2.0)
rack-contrib (~> 1.1)
rack-contrib (~> 1.2)
rack-rewrite (~> 1.5)
rack-ssl-enforcer (~> 0.2)
redcarpet (~> 3.1)
redcarpet (~> 3.2)
therubyracer (~> 0.12)
thin (~> 1.6)
GEM
remote: https://rubygems.org/
specs:
activesupport (4.1.6)
activesupport (4.1.9)
i18n (~> 0.6, >= 0.6.9)
json (~> 1.7, >= 1.7.7)
minitest (~> 5.1)
thread_safe (~> 0.1)
tzinfo (~> 1.1)
bootstrap-sass (3.2.0.2)
sass (~> 3.2)
autoprefixer-rails (5.1.7.1)
execjs
json
bootstrap-sass (3.3.4.1)
autoprefixer-rails (>= 5.0.0.1)
sass (>= 3.2.19)
builder (3.2.2)
celluloid (0.16.0)
timers (~> 4.0.0)
chunky_png (1.3.3)
chunky_png (1.3.4)
coffee-script (2.3.0)
coffee-script-source
execjs
coffee-script-source (1.8.0)
coffee-script-source (1.9.1)
commonjs (0.2.7)
compass (1.0.1)
compass (1.0.3)
chunky_png (~> 1.2)
compass-core (~> 1.0.1)
compass-core (~> 1.0.2)
compass-import-once (~> 1.0.5)
rb-fsevent (>= 0.9.3)
rb-inotify (>= 0.9)
sass (>= 3.3.13, < 3.5)
compass-core (1.0.1)
compass-core (1.0.3)
multi_json (~> 1.0)
sass (>= 3.3.0, < 3.5)
compass-import-once (1.0.5)
sass (>= 3.2, < 3.5)
daemons (1.1.9)
daemons (1.2.2)
em-websocket (0.5.1)
eventmachine (>= 0.12.9)
http_parser.rb (~> 0.6.0)
erubis (2.7.0)
eventmachine (1.0.3)
execjs (2.2.2)
ffi (1.9.6)
haml (4.0.5)
eventmachine (1.0.7)
execjs (2.4.0)
ffi (1.9.8)
haml (4.0.6)
tilt
hike (1.2.3)
hitimes (1.2.2)
......@@ -65,86 +69,86 @@ GEM
uber (~> 0.0.4)
htmlcompressor (0.1.2)
http_parser.rb (0.6.0)
i18n (0.6.11)
json (1.8.1)
kramdown (1.5.0)
i18n (0.7.0)
json (1.8.2)
kramdown (1.6.0)
less (2.6.0)
commonjs (~> 0.2.7)
libv8 (3.16.14.7)
listen (2.7.11)
listen (2.9.0)
celluloid (>= 0.15.2)
rb-fsevent (>= 0.9.3)
rb-inotify (>= 0.9)
middleman (3.3.6)
middleman (3.3.10)
coffee-script (~> 2.2)
compass (>= 1.0.0, < 2.0.0)
compass-import-once (= 1.0.5)
execjs (~> 2.0)
haml (>= 4.0.5)
kramdown (~> 1.2)
middleman-core (= 3.3.6)
middleman-core (= 3.3.10)
middleman-sprockets (>= 3.1.2)
sass (>= 3.4.0, < 4.0)
uglifier (~> 2.5)
middleman-core (3.3.6)
middleman-core (3.3.10)
activesupport (~> 4.1.0)
bundler (~> 1.1)
erubis
hooks (~> 0.3)
i18n (~> 0.6.9)
i18n (~> 0.7.0)
listen (>= 2.7.9, < 3.0)
padrino-helpers (~> 0.12.3)
rack (>= 1.4.5, < 2.0)
rack-test (~> 0.6.2)
thor (>= 0.15.2, < 2.0)
tilt (~> 1.4.1, < 2.0)
middleman-livereload (3.3.4)
middleman-livereload (3.4.2)
em-websocket (~> 0.5.1)
middleman-core (~> 3.2)
middleman-core (>= 3.3)
rack-livereload (~> 0.3.15)
middleman-minify-html (3.4.0)
htmlcompressor (~> 0.1.0)
middleman-core (>= 3.2)
middleman-sprockets (3.3.10)
middleman-core (~> 3.3)
middleman-sprockets (3.4.2)
middleman-core (>= 3.3)
sprockets (~> 2.12.1)
sprockets-helpers (~> 1.1.0)
sprockets-sass (~> 1.2.0)
sprockets-sass (~> 1.3.0)
middleman-syntax (2.0.0)
middleman-core (~> 3.2)
rouge (~> 1.0)
minitest (5.4.2)
multi_json (1.10.1)
padrino-helpers (0.12.4)
minitest (5.5.1)
multi_json (1.11.0)
padrino-helpers (0.12.5)
i18n (~> 0.6, >= 0.6.7)
padrino-support (= 0.12.4)
padrino-support (= 0.12.5)
tilt (~> 1.4.1)
padrino-support (0.12.4)
padrino-support (0.12.5)
activesupport (>= 3.1)
rack (1.5.2)
rack-contrib (1.1.0)
rack (1.6.0)
rack-contrib (1.2.0)
rack (>= 0.9.1)
rack-livereload (0.3.15)
rack
rack-rewrite (1.5.0)
rack-rewrite (1.5.1)
rack-ssl-enforcer (0.2.8)
rack-test (0.6.2)
rack-test (0.6.3)
rack (>= 1.0)
rb-fsevent (0.9.4)
rb-inotify (0.9.5)
ffi (>= 0.5.0)
redcarpet (3.2.0)
redcarpet (3.2.2)
ref (1.0.5)
rouge (1.7.2)
sass (3.4.6)
sprockets (2.12.2)
rouge (1.8.0)
sass (3.4.13)
sprockets (2.12.3)
hike (~> 1.2)
multi_json (~> 1.0)
rack (~> 1.0)
tilt (~> 1.1, != 1.3.0)
sprockets-helpers (1.1.0)
sprockets (~> 2.0)
sprockets-sass (1.2.0)
sprockets-sass (1.3.1)
sprockets (~> 2.0)
tilt (~> 1.1)
therubyracer (0.12.1)
......@@ -155,14 +159,14 @@ GEM
eventmachine (~> 1.0)
rack (~> 1.0)
thor (0.19.1)
thread_safe (0.3.4)
thread_safe (0.3.5)
tilt (1.4.1)
timers (4.0.1)
hitimes
tzinfo (1.2.2)
thread_safe (~> 0.1)
uber (0.0.10)
uglifier (2.5.3)
uber (0.0.13)
uglifier (2.7.1)
execjs (>= 0.3.0)
json (>= 1.8.0)
......
......@@ -98,6 +98,10 @@ p {
color: darken($green, 50%);
}
}
img {
width: 100%;
}
}
a {
......
@import "bootstrap-sprockets";
@import "bootstrap";
@import url("http://fonts.googleapis.com/css?family=Inconsolata");
@import url("//fonts.googleapis.com/css?family=Inconsolata");
@import "_helpers";
@import "_reset";
......
......@@ -69,7 +69,7 @@ list as contributors come and go.
<h3>Ross Smith II (<a href="https://github.com/rasa" target="_blank">@rasa</a>)</h3>
<p>
<a href="http://smithii.com/" target="_blank">Ross Smith</a> maintains our VMware builder on Windows, and provides other valuable assistance.
Ross is an open source enthusist, published author, and freelance consultant.</p>
Ross is an open source enthusiast, published author, and freelance consultant.</p>
</div>
</div>
......
......@@ -30,7 +30,7 @@ in your account, it is up to you to use, delete, etc. the AMI.
## How Does it Work?
This builder works by creating a new EBS volume from an existing source AMI
and attaching it into an already-running EC2 instance. One attached, a
and attaching it into an already-running EC2 instance. Once attached, a
[chroot](http://en.wikipedia.org/wiki/Chroot) is used to provision the
system within that volume. After provisioning, the volume is detached,
snapshotted, and an AMI is made.
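For reference, a minimal `amazon-chroot` template looks like this (a sketch; the keys and AMI ID are placeholders, and the build must run from an already-running EC2 instance as described above):

```javascript
{
  "type": "amazon-chroot",
  "access_key": "YOUR KEY HERE",
  "secret_key": "YOUR SECRET KEY HERE",
  "source_ami": "ami-e81d5881",
  "ami_name": "packer-amazon-chroot {{timestamp}}"
}
```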
......@@ -54,8 +54,8 @@ each category, the available configuration keys are alphabetized.
### Required:
* `access_key` (string) - The access key used to communicate with AWS.
If not specified, Packer will use the environment variables
`AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set.
If not specified, Packer will use the key from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file
or fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set.
  If the environment variables aren't set and Packer is running on
an EC2 instance, Packer will check the instance metadata for IAM role
keys.
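The credentials file mentioned above uses the standard AWS CLI format (a sketch with AWS's documented example keys; the default location is `~/.aws/credentials`):

```text
[default]
aws_access_key_id     = AKIAIOSFODNN7EXAMPLE
aws_secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRiCYEXAMPLEKEY
```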
......@@ -66,8 +66,8 @@ each category, the available configuration keys are alphabetized.
[configuration templates](/docs/templates/configuration-templates.html) for more info)
* `secret_key` (string) - The secret key used to communicate with AWS.
If not specified, Packer will use the environment variables
`AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set.
If not specified, Packer will use the secret from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file
or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set.
  If the environment variables aren't set and Packer is running on
an EC2 instance, Packer will check the instance metadata for IAM role
keys.
......@@ -122,7 +122,7 @@ each category, the available configuration keys are alphabetized.
which forces Packer to find an open device automatically.
* `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) on
HVM-compatible AMIs.
HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS IAM policy.
* `mount_path` (string) - The path where the volume will be mounted. This is
where the chroot environment will be. This defaults to
......
......@@ -2,7 +2,7 @@
layout: "docs"
page_title: "Amazon AMI Builder (EBS backed)"
description: |-
The `amazon-ebs` Packer builder is able to create Amazon AMIs backed by EBS volumes for use in EC2. For more information on the difference betwen EBS-backed instances and instance-store backed instances, see the storage for the root device section in the EC2 documentation.
The `amazon-ebs` Packer builder is able to create Amazon AMIs backed by EBS volumes for use in EC2. For more information on the difference between EBS-backed instances and instance-store backed instances, see the storage for the root device section in the EC2 documentation.
---
# AMI Builder (EBS backed)
......@@ -11,7 +11,7 @@ Type: `amazon-ebs`
The `amazon-ebs` Packer builder is able to create Amazon AMIs backed by EBS
volumes for use in [EC2](http://aws.amazon.com/ec2/). For more information
on the difference betwen EBS-backed instances and instance-store backed
on the difference between EBS-backed instances and instance-store backed
instances, see the
["storage for the root device" section in the EC2 documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device).
......@@ -34,8 +34,8 @@ each category, the available configuration keys are alphabetized.
### Required:
* `access_key` (string) - The access key used to communicate with AWS.
If not specified, Packer will use the environment variables
`AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set.
If not specified, Packer will use the key from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file
or fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set.
* `ami_name` (string) - The name of the resulting AMI that will appear
when managing AMIs in the AWS console or via APIs. This must be unique.
......@@ -49,8 +49,8 @@ each category, the available configuration keys are alphabetized.
to launch the EC2 instance to create the AMI.
* `secret_key` (string) - The secret key used to communicate with AWS.
If not specified, Packer will use the environment variables
`AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set.
If not specified, Packer will use the secret from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file
or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set.
* `source_ami` (string) - The initial AMI used as a base for the newly
created machine.
......@@ -94,7 +94,7 @@ each category, the available configuration keys are alphabetized.
Leave this empty to allow Amazon to auto-assign.
* `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) on
HVM-compatible AMIs.
HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS IAM policy.
* `iam_instance_profile` (string) - The name of an
[IAM instance profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html)
......@@ -118,11 +118,11 @@ each category, the available configuration keys are alphabetized.
described above. Note that if this is specified, you must omit the
`security_group_id`.
* `spot_price` (string) - The maximum hourly price to launch a spot instance
to create the AMI. It is a type of instances that EC2 starts when the maximum
price that you specify exceeds the current spot price. Spot price will be
updated based on available spot instance capacity and current spot Instance
requests. It may save you some costs. You can set this to "auto" for
* `spot_price` (string) - The maximum hourly price to pay for a spot instance
to create the AMI. Spot instances are a type of instance that EC2 starts when
the current spot price is less than the maximum price you specify. Spot price
will be updated based on available spot instance capacity and current spot
instance requests. It may save you some costs. You can set this to "auto" for
Packer to automatically discover the best spot price.
* `spot_price_auto_product` (string) - Required if `spot_price` is set to
......
......@@ -39,8 +39,8 @@ each category, the available configuration keys are alphabetized.
### Required:
* `access_key` (string) - The access key used to communicate with AWS.
If not specified, Packer will use the environment variables
`AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set.
If not specified, Packer will use the key from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file
or fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set.
* `account_id` (string) - Your AWS account ID. This is required for bundling
the AMI. This is _not the same_ as the access key. You can find your
......@@ -61,8 +61,8 @@ each category, the available configuration keys are alphabetized.
This bucket will be created if it doesn't exist.
* `secret_key` (string) - The secret key used to communicate with AWS.
If not specified, Packer will use the environment variables
`AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set.
If not specified, Packer will use the secret from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file
or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set.
* `source_ami` (string) - The initial AMI used as a base for the newly
created machine.
......@@ -134,7 +134,7 @@ each category, the available configuration keys are alphabetized.
See the "custom bundle commands" section below for more information.
* `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) on
HVM-compatible AMIs.
HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS IAM policy.
* `iam_instance_profile` (string) - The name of an
[IAM instance profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html)
......
......@@ -33,7 +33,7 @@ much easier to use and Amazon generally recommends EBS-backed images nowadays.
## Using an IAM Instance Profile
If AWS keys are not specified in the template or through environment variables
If AWS keys are not specified in the template, in a [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file, or through environment variables,
Packer will use credentials provided by the instance's IAM profile, if it has one.
The following policy document provides the minimal set of permissions necessary for Packer to work:
......
......@@ -55,7 +55,7 @@ each category, the available configuration keys are alphabetized.
* `image` (string) - The name (or slug) of the base image to use. This is the
image that will be used to launch a new droplet and provision it. This
defaults to 'ubuntu-12-04-x64' which is the slug for "Ubuntu 12.04.4 x64".
See https://developers.digitalocean.com/#list-all-images for details on how to get a list of the the accepted image names/slugs.
  See https://developers.digitalocean.com/documentation/v2/#list-all-images for details on how to get a list of the accepted image names/slugs.
* `image_id` (integer) - The ID of the base image to use. This is the image that
will be used to launch a new droplet and provision it.
......@@ -66,8 +66,8 @@ each category, the available configuration keys are alphabetized.
* `region` (string) - The name (or slug) of the region to launch the droplet in.
Consequently, this is the region where the snapshot will be available.
This defaults to "nyc1", which is the slug for "New York 1".
See https://developers.digitalocean.com/regions/ for the accepted region names/slugs.
This defaults to "nyc3", which is the slug for "New York 3".
See https://developers.digitalocean.com/documentation/v2/#list-all-regions for the accepted region names/slugs.
* `region_id` (integer) - The ID of the region to launch the droplet in. Consequently,
this is the region where the snapshot will be available.
......@@ -75,7 +75,7 @@ each category, the available configuration keys are alphabetized.
* `size` (string) - The name (or slug) of the droplet size to use.
This defaults to "512mb", which is the slug for "512MB".
See https://developers.digitalocean.com/sizes/ for the accepted size names/slugs.
See https://developers.digitalocean.com/documentation/v2/#list-all-sizes for the accepted size names/slugs.
* `size_id` (integer) - The ID of the droplet size to use.
This setting is deprecated. Use `size` instead.
......
......@@ -9,19 +9,49 @@ description: |-
Type: `googlecompute`
The `googlecompute` Packer builder is able to create
[images](https://developers.google.com/compute/docs/images)
for use with [Google Compute Engine](https://cloud.google.com/products/compute-engine)
(GCE) based on existing images. Google Compute Engine doesn't allow the creation
of images from scratch.
The `googlecompute` Packer builder is able to create [images](https://developers.google.com/compute/docs/images) for use with
[Google Compute Engine](https://cloud.google.com/products/compute-engine) (GCE) based on existing images. Google
Compute Engine doesn't allow the creation of images from scratch.
## Authentication
Authenticating with Google Cloud services requires two separate JSON
files: one which we call the _account file_ and the _client secrets file_.
Authenticating with Google Cloud services requires at most one JSON file,
called the _account file_. The _account file_ is **not** required if you are running
the `googlecompute` Packer builder from a GCE instance with a properly-configured
[Compute Engine Service Account](https://cloud.google.com/compute/docs/authentication).
Both of these files are downloaded directly from the
[Google Developers Console](https://console.developers.google.com). To make
### Running With a Compute Engine Service Account
If you run the `googlecompute` Packer builder from a GCE instance, you can configure that
instance to use a [Compute Engine Service Account](https://cloud.google.com/compute/docs/authentication). This will allow Packer to authenticate
to Google Cloud without having to bake in a separate credential/authentication file.
To create a GCE instance that uses a service account, provide the required scopes when
launching the instance.
For `gcloud`, do this via the `--scopes` parameter:
```sh
gcloud compute --project YOUR_PROJECT instances create "INSTANCE-NAME" ... \
--scopes "https://www.googleapis.com/auth/compute" \
"https://www.googleapis.com/auth/devstorage.full_control" \
...
```
For the [Google Developers Console](https://console.developers.google.com):
1. Choose "Show advanced options"
2. Tick "Enable Compute Engine service account"
3. Choose "Read Write" for Compute
4. Chose "Full" for "Storage"
**The service account will be used automatically by Packer as long as there is
no _account file_ specified in the Packer configuration file.**
### Running Without a Compute Engine Service Account
The [Google Developers Console](https://console.developers.google.com) allows you to
create and download a credential file that will let you use the `googlecompute` Packer
builder anywhere. To make
the process more straightforward, it is documented here.
1. Log into the [Google Developers Console](https://console.developers.google.com)
......@@ -29,29 +59,23 @@ the process more straightforwarded, it is documented here.
2. Under the "APIs & Auth" section, click "Credentials."
3. Click the "Download JSON" button under the "Compute Engine and App Engine"
account in the OAuth section. The file should start with "client\_secrets".
This is your _client secrets file_.
3. Click the "Create new Client ID" button, select "Service account", and click "Create Client ID"
4. Create a new OAuth client ID and select "Service Account" as the type
of account. Once created, a JSON file should be downloaded. This is your
4. Click "Generate new JSON key" for the Service Account you just created. A JSON file will be downloaded automatically. This is your
_account file_.
## Basic Example
Below is a fully functioning example. It doesn't do anything useful,
since no provisioners are defined, but it will effectively repackage an
existing GCE image. The client secrets file and private key file are the
files obtained in the previous section.
existing GCE image. The account file is obtained in the previous section.
```javascript
{
"type": "googlecompute",
"bucket_name": "my-project-packer-images",
"account_file": "account.json",
"client_secrets_file": "client_secret.json",
"project_id": "my-project",
"source_image": "debian-7-wheezy-v20140718",
"source_image": "debian-7-wheezy-v20150127",
"zone": "us-central1-a"
}
```
......@@ -63,56 +87,48 @@ each category, the available options are alphabetized and described.
### Required:
* `account_file` (string) - The JSON file containing your account credentials.
Instructions for how to retrieve these are above.
* `bucket_name` (string) - The Google Cloud Storage bucket to store the
images that are created. The bucket must already exist in your project.
* `client_secrets_file` (string) - The client secrets JSON file that
was set up in the section above.
* `private_key_file` (string) - The client private key file that was
generated in the section above.
* `project_id` (string) - The project ID that will be used to launch instances
and store images.
* `source_image` (string) - The source image to use to create the new image
from. Example: "debian-7"
from. Example: `"debian-7-wheezy-v20150127"`
* `zone` (string) - The zone in which to launch the instance used to create
the image. Example: "us-central1-a"
the image. Example: `"us-central1-a"`
### Optional:
* `account_file` (string) - The JSON file containing your account credentials.
Not required if you run Packer on a GCE instance with a service account.
   Not required if you run Packer on a GCE instance with a service account.
   Instructions for creating the file or using service accounts are above.
* `disk_size` (integer) - The size of the disk in GB.
This defaults to 10, which is 10GB.
This defaults to `10`, which is 10GB.
* `image_name` (string) - The unique name of the resulting image.
Defaults to `packer-{{timestamp}}`.
Defaults to `"packer-{{timestamp}}"`.
* `image_description` (string) - The description of the resulting image.
* `instance_name` (string) - A name to give the launched instance. Beware
that this must be unique. Defaults to "packer-{{uuid}}".
that this must be unique. Defaults to `"packer-{{uuid}}"`.
* `machine_type` (string) - The machine type. Defaults to `n1-standard-1`.
* `machine_type` (string) - The machine type. Defaults to `"n1-standard-1"`.
* `metadata` (object of key/value strings)
* `network` (string) - The Google Compute network to use for the launched
instance. Defaults to `default`.
instance. Defaults to `"default"`.
* `ssh_port` (integer) - The SSH port. Defaults to 22.
* `ssh_port` (integer) - The SSH port. Defaults to `22`.
* `ssh_timeout` (string) - The time to wait for SSH to become available.
Defaults to "1m".
Defaults to `"1m"`.
* `ssh_username` (string) - The SSH username. Defaults to "root".
* `ssh_username` (string) - The SSH username. Defaults to `"root"`.
* `state_timeout` (string) - The time to wait for instance state changes.
Defaults to "5m".
Defaults to `"5m"`.
* `tags` (array of strings)
......
......@@ -38,11 +38,6 @@ each category, the available configuration keys are alphabetized.
If not specified, Packer will use the environment variables
`SDK_PASSWORD` or `OS_PASSWORD` (in that order), if set.
* `provider` (string) - The provider used to connect to the OpenStack service.
If not specified, Packer will use the environment variable
`SDK_PROVIDER`, if set.
For Rackspace this should be `rackspace-us` or `rackspace-uk`.
* `source_image` (string) - The ID or full URL to the base image to use.
This is the image that will be used to launch a new server and provision it.
......@@ -70,18 +65,19 @@ each category, the available configuration keys are alphabetized.
* `networks` (array of strings) - A list of networks by UUID to attach
to this instance.
* `openstack_provider` (string)
* `openstack_provider` (string) - A name of a provider that has a slightly
different API model. Currently supported values are "openstack" (default),
and "rackspace".
* `project` (string) - The project name to boot the instance into. Some
OpenStack installations require this.
If not specified, Packer will use the environment variables
`SDK_PROJECT` or `OS_TENANT_NAME` (in that order), if set.
* `provider` (string) - A name of a provider that has a slightly
different API model. Currently supported values are "openstack" (default),
and "rackspace".
If not specified, Packer will use the environment variables
`SDK_PROVIDER` or `OS_AUTH_URL` (in that order), if set.
* `provider` (string) - The provider used to connect to the OpenStack service.
If not specified, Packer will use the environment variables `SDK_PROVIDER`
or `OS_AUTH_URL` (in that order), if set.
For Rackspace this should be `rackspace-us` or `rackspace-uk`.
* `proxy_url` (string)
......@@ -106,12 +102,20 @@ each category, the available configuration keys are alphabetized.
* `ssh_username` (string) - The username to use in order to communicate
over SSH to the running server. The default is "root".
* `ssh_interface` (string) - The type of interface to connect via SSH. Values
useful for Rackspace are "public" or "private", and the default behavior is
to connect via whichever is returned first from the OpenStack API.
* `tenant_id` (string) - Tenant ID for accessing OpenStack if your
installation requires this.
* `use_floating_ip` (boolean) - Whether or not to use a floating IP for
the instance. Defaults to false.
* `rackconnect_wait` (boolean) - For Rackspace, whether or not to wait for
Rackconnect to assign the machine an IP address before connecting via SSH.
Defaults to false.
## Basic Example: Rackspace public cloud
Here is a basic example. This is a working example to build a
......@@ -124,7 +128,6 @@ Ubuntu 12.04 LTS (Precise Pangolin) on Rackspace OpenStack cloud offering.
"api_key": "",
"openstack_provider": "rackspace",
"provider": "rackspace-us",
"openstack_provider":"rackspace",
"region": "DFW",
"ssh_username": "root",
"image_name": "Test image",
......
......@@ -73,7 +73,7 @@ each category, the available options are alphabetized and described.
* `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to
install into the VM. Valid values are "win", "lin", "mac", "os2" and "other".
This can be ommited only if `parallels_tools_mode` is "disable".
This can be omitted only if `parallels_tools_mode` is "disable".
### Optional:
......@@ -257,9 +257,9 @@ The available variables are:
Example boot command. This is actually a working boot command used to start
an Ubuntu 12.04 installer:
```javascript
```text
[
"&lt;esc&gt;&lt;esc&gt;&lt;enter&gt;&lt;wait&gt;",
"<esc><esc><enter><wait>",
"/install/vmlinuz noapic ",
"preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg ",
"debian-installer=en_US auto locale=en_US kbd-chooser/method=us ",
......@@ -267,7 +267,7 @@ an Ubuntu 12.04 installer:
"fb=false debconf/frontend=noninteractive ",
"keyboard-configuration/modelcode=SKIP keyboard-configuration/layout=USA ",
"keyboard-configuration/variant=USA console-setup/ask_detect=false ",
"initrd=/install/initrd.gz -- &lt;enter&gt;"
"initrd=/install/initrd.gz -- <enter>;"
]
```
......
......@@ -56,7 +56,7 @@ each category, the available options are alphabetized and described.
* `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to
install into the VM. Valid values are "win", "lin", "mac", "os2" and "other".
This can be ommited only if `parallels_tools_mode` is "disable".
This can be omitted only if `parallels_tools_mode` is "disable".
### Optional:
......
......@@ -111,17 +111,22 @@ each category, the available options are alphabetized and described.
five seconds and one minute 30 seconds, respectively. If this isn't specified,
the default is 10 seconds.
* `disk_size` (integer) - The size, in megabytes, of the hard disk to create
for the VM. By default, this is 40000 (about 40 GB).
* `disk_cache` (string) - The cache mode to use for disk. Allowed values
values include any of "writethrough", "writeback", "none", "unsafe" or
"directsync".
* `disk_image` (boolean) - Packer defaults to building from an ISO file,
this parameter controls whether the ISO URL supplied is actually a bootable
QEMU image. When this value is set to true, the machine will clone the
source, resize it according to `disk_size` and boot the image.
* `disk_interface` (string) - The interface to use for the disk. Allowed
values include any of "ide," "scsi" or "virtio." Note also that any boot
commands or kickstart type scripts must have proper adjustments for
resulting device names. The Qemu builder uses "virtio" by default.
* `disk_cache` (string) - The cache mode to use for disk. Allowed values
values include any of "writethrough", "writeback", "none", "unsafe" or
"directsync".
* `disk_size` (integer) - The size, in megabytes, of the hard disk to create
for the VM. By default, this is 40000 (about 40 GB).
* `floppy_files` (array of strings) - A list of files to place onto a floppy
disk that is attached when the VM is booted. This is most useful
......@@ -180,7 +185,7 @@ each category, the available options are alphabetized and described.
the qemu command line (though not, at this time, qemu-img). Each array
of strings makes up a command line switch that overrides matching default
switch/value pairs. Any value specified as an empty string is ignored.
All values after the switch are concatenated with no separater.
All values after the switch are concatenated with no separator.
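For instance, a hypothetical sketch that overrides the default memory size and CPU count (the switch values here are illustrative, not Packer's defaults):

```javascript
"qemuargs": [
  [ "-m", "1024M" ],
  [ "-smp", "2" ]
]
```

The first element of each inner array is the switch itself; everything after it is concatenated to form the switch's value.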
~> **Warning:** The qemu command line allows extreme flexibility, so beware of
conflicting arguments causing failures of your run. For instance, using
......@@ -260,11 +265,6 @@ qemu-system-x86 command. The arguments are all printed for review.
Packer will choose a randomly available port in this range to use as the
host port.
* `disk_image` (boolean) - Packer defaults to building from an ISO file,
this parameter controls whether the ISO URL supplied is actually a bootable
QEMU image. When this value is set to true, the machine will clone the
source, resize it according to `disk_size` and boot the image.
## Boot Command
The `boot_command` configuration is very important: it specifies the keys
......
......@@ -136,7 +136,8 @@ each category, the available options are alphabetized and described.
* `hard_drive_interface` (string) - The type of controller that the primary
hard drive is attached to, defaults to "ide". When set to "sata", the
drive is attached to an AHCI SATA controller.
drive is attached to an AHCI SATA controller. When set to "scsi", the drive
is attached to an LsiLogic SCSI controller.
* `headless` (boolean) - Packer defaults to building VirtualBox
virtual machines by launching a GUI that shows the console of the
......@@ -282,9 +283,9 @@ The available variables are:
Example boot command. This is actually a working boot command used to start
an Ubuntu 12.04 installer:
```javascript
```text
[
"&lt;esc&gt;&lt;esc&gt;&lt;enter&gt;&lt;wait&gt;",
"<esc><esc><enter><wait>",
"/install/vmlinuz noapic ",
"preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg ",
"debian-installer=en_US auto locale=en_US kbd-chooser/method=us ",
......@@ -292,7 +293,7 @@ an Ubuntu 12.04 installer:
"fb=false debconf/frontend=noninteractive ",
"keyboard-configuration/modelcode=SKIP keyboard-configuration/layout=USA ",
"keyboard-configuration/variant=USA console-setup/ask_detect=false ",
"initrd=/install/initrd.gz -- &lt;enter&gt;"
"initrd=/install/initrd.gz -- <enter>"
]
```
......
......@@ -13,6 +13,16 @@ This VirtualBox Packer builder is able to create [VirtualBox](https://www.virtualbox.org)
virtual machines and export them in the OVF format, starting from an
existing OVF/OVA (exported virtual machine image).
When exporting from VirtualBox, make sure to choose OVF Version 2, since Version 1 is not compatible and will generate errors like this:
```
==> virtualbox-ovf: Progress state: VBOX_E_FILE_ERROR
==> virtualbox-ovf: VBoxManage: error: Appliance read failed
==> virtualbox-ovf: VBoxManage: error: Error reading "source.ova": element "Section" has no "type" attribute, line 21
==> virtualbox-ovf: VBoxManage: error: Details: code VBOX_E_FILE_ERROR (0x80bb0004), component Appliance, interface IAppliance
==> virtualbox-ovf: VBoxManage: error: Context: "int handleImportAppliance(HandlerArg*)" at line 304 of file VBoxManageAppliance.cpp
```
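If you export from the VirtualBox command line rather than the GUI, the OVF version can be chosen explicitly; a sketch, with an illustrative VM name and output path:

```text
$ VBoxManage export my-vm --output source.ova --ovf20
```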
The builder builds a virtual machine by importing an existing OVF or OVA
file. It then boots this image, runs provisioners on this new VM, and
exports that VM to create the image. The imported machine is deleted prior
......
......@@ -73,7 +73,7 @@ each category, the available options are alphabetized and described.
### Optional:
* `boot_command` (array of strings) - This is an array of commands to type
when the virtual machine is firsted booted. The goal of these commands should
when the virtual machine is first booted. The goal of these commands should
be to type just enough to initialize the operating system installer. Special
keys can be typed as well, and are covered in the section below on the boot
command. If this is not specified, it is assumed the installer will start
......@@ -226,9 +226,9 @@ each category, the available options are alphabetized and described.
This is a [configuration template](/docs/templates/configuration-templates.html)
that has a single valid variable: `Flavor`, which will be the value of
`tools_upload_flavor`. By default the upload path is set to
`{{.Flavor}}.iso`.
`{{.Flavor}}.iso`. This setting is not used when `remote_type` is "esx5".
* `version` (string) - The [vmx hardware version](http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=1003746) for the new virtual machine. Only the default value has been tested, any other value is expiermental. Default value is '9'.
* `version` (string) - The [vmx hardware version](http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=1003746) for the new virtual machine. Only the default value has been tested, any other value is experimental. Default value is '9'.
* `vm_name` (string) - This is the name of the VMX file for the new virtual
machine, without the file extension. By default this is "packer-BUILDNAME",
......@@ -311,9 +311,9 @@ The available variables are:
Example boot command. This is actually a working boot command used to start
an Ubuntu 12.04 installer:
```javascript
```text
[
"&lt;esc&gt;&lt;esc&gt;&lt;enter&gt;&lt;wait&gt;",
"<esc><esc><enter><wait>",
"/install/vmlinuz noapic ",
"preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg ",
"debian-installer=en_US auto locale=en_US kbd-chooser/method=us ",
......@@ -321,7 +321,7 @@ an Ubuntu 12.04 installer:
"fb=false debconf/frontend=noninteractive ",
"keyboard-configuration/modelcode=SKIP keyboard-configuration/layout=USA ",
"keyboard-configuration/variant=USA console-setup/ask_detect=false ",
"initrd=/install/initrd.gz -- &lt;enter&gt;"
"initrd=/install/initrd.gz -- <enter>"
]
```
......
......@@ -54,7 +54,7 @@ each category, the available options are alphabetized and described.
### Optional:
* `boot_command` (array of strings) - This is an array of commands to type
when the virtual machine is firsted booted. The goal of these commands should
when the virtual machine is first booted. The goal of these commands should
be to type just enough to initialize the operating system installer. Special
keys can be typed as well, and are covered in the section below on the boot
command. If this is not specified, it is assumed the installer will start
......
---
layout: "docs"
page_title: "Push - Command-Line"
description: |-
The `packer push` Packer command takes a template and pushes it to a build service that will automatically build this Packer template.
---
# Command-Line: Push
The `packer push` Packer command takes a template and pushes it to a Packer
build service such as [HashiCorp's Atlas](https://atlas.hashicorp.com). The
build service will automatically build your Packer template and expose the
artifacts.
External build services such as HashiCorp's Atlas make it easy to iterate on
Packer templates, especially when the builder you are running may not be easily
accessible (such as developing `qemu` builders on Mac or Windows).
!> The Packer build service will receive the raw copy of your Packer template
when you push. **If you have sensitive data in your Packer template, you should
move that data into Packer variables or environment variables!**
For the `push` command to work, the [push configuration](/docs/templates/push.html)
must be completed within the template.
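A minimal push section within a template looks like the following sketch; the full set of options is described in the push configuration docs linked above:

```javascript
{
  "push": {
    "name": "hashicorp/precise64"
  }
}
```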
## Options
* `-message` - A message to identify the purpose or changes in this Packer
template much like a VCS commit message. This message will be passed to the
Packer build service. This option is also available as a short option `-m`.
* `-token` - An access token for authenticating the push to the Packer build
service such as Atlas. This can also be specified within the push
configuration in the template.
## Examples
Push a Packer template:
```shell
$ packer push -m "Updating the apache version" template.json
```
Push a Packer template with a custom token:
```shell
$ packer push -token ABCD1234 template.json
```
......@@ -73,7 +73,7 @@ passed directly into something like the standard Go `flag` package for
command-line flag parsing.
The return value of `Run` is the exit status for the command. If everything
ran successfully, this should be 0. If any errors occured, it should be any
ran successfully, this should be 0. If any errors occurred, it should be any
positive integer.
### The "Synopsis" Method
......
......@@ -34,7 +34,7 @@ uses, because they're completely isolated into the process space of the
plugin itself.
And, thanks to Go's [interfaces](http://golang.org/doc/effective_go.html#interfaces_and_types),
it doesn't even look like inter-process communication is occuring. You just
it doesn't even look like inter-process communication is occurring. You just
use the interfaces like normal, but in fact they're being executed in
a remote process. Pretty cool.
......@@ -93,7 +93,7 @@ in the relevant subsections available in the navigation to the left.
~> **Lock your dependencies!** Unfortunately, Go's dependency
management story is fairly sad. There are various unofficial methods out
there for locking dependencies, and using one of them is highly recomended
there for locking dependencies, and using one of them is highly recommended
since the Packer codebase will continue to improve, potentially breaking
APIs along the way until there is a stable release. By locking your dependencies,
your plugins will continue to work with the version of Packer you lock to.
......
......@@ -50,7 +50,7 @@ found later, it will take precedence over one found earlier.
1. The directory where `packer` is, or the executable directory.
2. `~/.packer.d/plugins` on Unix systems or `%APPDATA%/packer.d` on
2. `~/.packer.d/plugins` on Unix systems or `%APPDATA%/packer.d/plugins` on
Windows.
3. The current working directory.
......
......@@ -73,3 +73,13 @@ it may not be the latest available version.
$ brew tap homebrew/binary
$ brew install packer
```
### Chocolatey
If you're using Windows and [Chocolatey](http://chocolatey.org), you can install Packer from
the Windows command line (cmd). Remember that this package is maintained by a third party, so
it may not be the latest available version.
```text
$ choco install packer
```
......@@ -14,7 +14,7 @@ of `packer build`.
<dt>artifact (>= 2)</dt>
<dd>
<p>
Information about an artifact of the targetted item. This is a
Information about an artifact of the targeted item. This is a
fairly complex (but uniform!) machine-readable type that contains
subtypes. The subtypes are documented within this page in the
syntax of "artifact subtype: SUBTYPE". The number of arguments within
......@@ -119,7 +119,7 @@ of `packer build`.
<dt>artifact subtype: nil (0)</dt>
<dd>
<p>
If present, this means that the artifact was nil, or that the targetted
If present, this means that the artifact was nil, or that the targeted
build completed successfully but no artifact was created.
</p>
</dd>
......
---
layout: "docs"
page_title: "Atlas Post-Processor"
description: |-
The Atlas post-processor for Packer receives an artifact from a Packer build and uploads it to Atlas. Atlas hosts and serves artifacts, allowing you to version and distribute them in a simple way.
---
# Atlas Post-Processor
Type: `atlas`
The Atlas post-processor for Packer receives an artifact from a Packer build and uploads it to Atlas. [Atlas](https://atlas.hashicorp.com) hosts and serves artifacts, allowing you to version and distribute them in a simple way.
## Workflow
To take full advantage of Packer and Atlas, it's important to understand the
workflow for creating artifacts with Packer and storing them in Atlas using this post-processor. The goal of the Atlas post-processor is to streamline the distribution of public or private artifacts by hosting them in a central location in Atlas.
Here is an example workflow:
1. Packer builds an AMI with the [Amazon AMI builder](/docs/builders/amazon.html)
2. The `atlas` post-processor takes the resulting AMI and uploads it to Atlas. The `atlas` post-processor is configured with the name of the AMI, for example `hashicorp/foobar`, to create the artifact in Atlas or update the version if the artifact already exists
3. The new version is ready and available to be used in deployments with a tool like [Terraform](https://terraform.io)
## Configuration
The configuration allows you to specify and access the artifact in Atlas.
### Required:
* `token` (string) - Your access token for the Atlas API.
This can be generated on your [tokens page](https://atlas.hashicorp.com/settings/tokens). Alternatively, you can export your Atlas token as an environment variable and remove it from the configuration (see the sketch after this list).
* `artifact` (string) - The shorthand tag for your artifact that maps to
Atlas, i.e. `hashicorp/foobar` for `atlas.hashicorp.com/hashicorp/foobar`. You must
have access to the organization, hashicorp in this example, in order to add an artifact to
the organization in Atlas.
* `artifact_type` (string) - For uploading AMIs to Atlas, `artifact_type` will always be `aws.ami`.
This field must be defined because Atlas can host other artifact types, such as Vagrant boxes.
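As mentioned for `token` above, a sketch of supplying the token via the environment instead of the template (this assumes the post-processor reads the `ATLAS_TOKEN` environment variable):

```shell
$ export ATLAS_TOKEN=ATLAS_TOKEN_HERE
```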
-> **Note:** If you want to upload Vagrant boxes to Atlas, for now use the [Vagrant Cloud post-processor](/docs/post-processors/vagrant-cloud.html).
### Optional:
* `atlas_url` (string) - Override the base URL for Atlas. This
is useful if you're using Atlas Enterprise in your own network. Defaults
to `https://atlas.hashicorp.com/api/v1`.
* `metadata` (map) - Send metadata about the artifact.
### Example Configuration
```javascript
{
"variables": {
"aws_access_key": "ACCESS_KEY_HERE",
"aws_secret_key": "SECRET_KEY_HERE",
"atlas_token": "ATLAS_TOKEN_HERE"
},
"builders": [{
"type": "amazon-ebs",
"access_key": "{{user `aws_access_key`}}",
"secret_key": "{{user `aws_secret_key`}}",
"region": "us-east-1",
"source_ami": "ami-de0d9eb7",
"instance_type": "t1.micro",
"ssh_username": "ubuntu",
"ami_name": "atlas-example {{timestamp}}"
}],
"provisioners": [
{
"type": "shell",
"inline": [
"sleep 30",
"sudo apt-get update",
"sudo apt-get install apache2 -y"
]
}],
"post-processors": [
{
"type": "atlas",
"token": "{{user `atlas_token`}}",
"artifact": "hashicorp/foobar",
"artifact_type": "aws.ami",
"metadata": {
"created_at": "{{timestamp}}"
}
}
]
}
```
......@@ -24,7 +24,7 @@ It's important to understand the workflow that using this post-processor
enforces in order to take full advantage of Vagrant and Vagrant Cloud.
The use of this processor assumes that you currently distribute, or plan
to distrubute, boxes via Vagrant Cloud. It also assumes you create Vagrant
to distribute, boxes via Vagrant Cloud. It also assumes you create Vagrant
Boxes and deliver them to your team in some fashion.
Here is an example workflow:
......
......@@ -51,7 +51,7 @@ However, if you want to configure things a bit more, the post-processor
does expose some configuration options. The available options are listed
below, with more details about certain options in following sections.
* `compression_level` (integer) - An integer repesenting the
* `compression_level` (integer) - An integer representing the
compression level to use when creating the Vagrant box. Valid
values range from 0 to 9, with 0 being no compression and 9 being
the best compression. By default, compression is enabled at level 6.
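For example, a minimal sketch of a `post-processors` block requesting the best (and slowest) compression:

```javascript
"post-processors": [
  {
    "type": "vagrant",
    "compression_level": 9
  }
]
```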
......
......@@ -31,7 +31,8 @@ Required:
* `password` (string) - Password to use to authenticate to the vSphere
endpoint.
* `resource_pool` (string) - The resource pool to upload the VM to.
* `resource_pool` (string) - The resource pool to upload the VM to. This can be
" " if you do not have resource pools configured
* `username` (string) - The username to use to authenticate to the vSphere
endpoint.
......
......@@ -29,14 +29,14 @@ remote machine and run Chef client.
}
```
Note: to properly clean up the Chef node and client, you must have
`knife` on your path and properly configured.
Note: to properly clean up the Chef node and client, the machine on which
Packer is running must have `knife` on the path and configured globally,
i.e. `~/.chef/knife.rb` must be present and configured for the target Chef server.
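A minimal `~/.chef/knife.rb` for this purpose might look like the following sketch; the node name, key path, and server URL are illustrative:

```text
# Illustrative values only -- point these at your own Chef server.
node_name        "admin"
client_key       "~/.chef/admin.pem"
chef_server_url  "https://chef.example.com/organizations/example"
```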
## Configuration Reference
The reference of available configuration options is listed below. No
configuration is actually required, but `node_name` is recommended
since it will allow the provisioner to clean up the node/client.
configuration is actually required.
* `chef_environment` (string) - The name of the chef_environment sent to the
Chef server. By default this is empty and will not use an environment.
......@@ -60,8 +60,7 @@ since it will allow the provisioner to clean up the node/client.
node attributes while running Chef.
* `node_name` (string) - The name of the node to register with the Chef
Server. This is optional and by defalt is empty. If you don't set this,
Packer can't clean up the node from the Chef Server using knife.
Server. This is optional and by default is `packer-{{uuid}}`.
* `prevent_sudo` (boolean) - By default, the configured commands that are
executed to install and run Chef are executed with `sudo`. If this is true,
......@@ -78,7 +77,6 @@ since it will allow the provisioner to clean up the node/client.
* `skip_clean_node` (boolean) - If true, Packer won't remove the node
from the Chef server after it is done running. By default, this is false.
This will be true by default if `node_name` is not set.
* `skip_install` (boolean) - If true, Chef will not automatically be installed
on the machine using the Opscode omnibus installers.
......@@ -116,9 +114,7 @@ validation_client_name "chef-validator"
{{if ne .ValidationKeyPath ""}}
validation_key "{{.ValidationKeyPath}}"
{{end}}
{{if ne .NodeName ""}}
node_name "{{.NodeName}}"
{{end}}
```
This template is a [configuration template](/docs/templates/configuration-templates.html)
......
......@@ -54,6 +54,10 @@ configuration is actually required, but at least `run_list` is recommended.
the secret for encrypted data bags. By default, this is empty, so no
secret will be available.
* `environments_path` (string) - The path to the "environments" directory on your local filesystem.
These will be uploaded to the remote machine in the directory specified by the
`staging_directory`. By default, this is empty.
* `execute_command` (string) - The command used to execute Chef. This has
various [configuration template variables](/docs/templates/configuration-templates.html)
available. See below for more information.
......
......@@ -19,7 +19,7 @@ master.
-> **Note:** Puppet will _not_ be installed automatically
by this provisioner. This provisioner expects that Puppet is already
installed on the machine. It is common practice to use the
[shell provisioner[(/docs/provisioners/shell.html) before the
[shell provisioner](/docs/provisioners/shell.html) before the
Puppet provisioner to do this.
## Basic Example
......@@ -50,7 +50,7 @@ Optional parameters:
various [configuration template variables](/docs/templates/configuration-templates.html)
available. See below for more information.
* `facter` (object, string keys and values) - Additonal
* `facter` (object, string keys and values) - Additional
[facts](http://puppetlabs.com/puppet/related-projects/facter) to make
available when Puppet is running.
......
......@@ -136,7 +136,7 @@ sleep 60
```
Some OS configurations don't properly kill all network connections on
reboot, causing the provisioner to hang despite a reboot occuring.
reboot, causing the provisioner to hang despite a reboot occurring.
In this case, make sure you shut down the network interfaces
on reboot or in your shell script. For example, on Gentoo:
......@@ -148,20 +148,20 @@ on reboot or in your shell script. For example, on Gentoo:
*My shell script doesn't work correctly on Ubuntu*
* On Ubuntu the /bin/sh shell is
* On Ubuntu, the `/bin/sh` shell is
[dash](http://en.wikipedia.org/wiki/Debian_Almquist_shell). If your script has
[bash](http://en.wikipedia.org/wiki/Bash_(Unix_shell\)) specific commands in it
[bash](http://en.wikipedia.org/wiki/Bash_(Unix_shell))-specific commands in it,
then put `#!/bin/bash` at the top of your script. Differences
between dash and bash can be found on the [DashAsBinSh](https://wiki.ubuntu.com/DashAsBinSh) Ubuntu wiki page.
*My shell works when I login but fails with the shell provisioner*
* See the above tip. More than likely your login shell is using /bin/bash
while the provisioner is using /bin/sh.
* See the above tip. More than likely, your login shell is using `/bin/bash`
while the provisioner is using `/bin/sh`.
*My installs hang when using `apt-get` or `yum`*
* Make sure you add a "-y" to the command to prevent it from requiring
* Make sure you add a `-y` to the command to prevent it from requiring
user input before proceeding.
*How do I tell what my shell script is doing?*
......@@ -172,7 +172,7 @@ will echo the script statements as it is executing.
*My builds don't always work the same*
* Some distributions start the SSH daemon before other core services which
can create race conditions. Your first provisoner can tell the machine to
can create race conditions. Your first provisioner can tell the machine to
wait until it completely boots.
```javascript
......
......@@ -121,6 +121,25 @@ isotime = June 7, 7:22:43pm 2014
{{isotime "Hour15Year200603"}} = Hour19Year201407
```
Please note that double quote characters need escaping inside of templates:
```javascript
{
"builders": [
{
"type": "amazon-ebs",
"access_key": "...",
"secret_key": "...",
"region": "us-east-1",
"source_ami": "ami-de0d9eb7",
"instance_type": "t1.micro",
"ssh_username": "ubuntu",
"ami_name": "packer {{isotime \"2006-01-02\"}}"
}
]
}
```
## Amazon Specific Functions
Specific to Amazon builders:
......
---
layout: "docs"
page_title: "Templates: Push"
description: |-
Within the template, the push section configures how a template can be
pushed to a remote build service.
---
# Templates: Push
Within the template, the push section configures how a template can be
[pushed](/docs/command-line/push.html) to a remote build service.
Push configuration is responsible for defining what files are required
to build this template, what the name of the build configuration is in the
build service, etc.
The only build service that Packer can currently push to is
[Atlas](https://atlas.hashicorp.com) by HashiCorp. Support for other build
services will come in the form of plugins in the future.
Within a template, a push configuration section looks like this:
```javascript
{
"push": {
// ... push configuration here
}
}
```
## Configuration Reference
There are many configuration options available for the push section. They are
segmented below into two categories: required and optional parameters. Within
each category, the available configuration keys are alphabetized.
### Required
* `name` (string) - Name of the build configuration in the build service.
If this doesn't exist, it will be created (by default).
### Optional
* `address` (string) - The address of the build service to use. By default
this is `https://atlas.hashicorp.com`.
* `base_dir` (string) - The base directory of the files to upload. This
will be the current working directory when the build service executes your
template. This path is relative to the template.
* `include` (array of strings) - Glob patterns to include relative to
the `base_dir`. If this is specified, only files that match the include
pattern are included.
* `exclude` (array of strings) - Glob patterns to exclude relative to
the `base_dir`.
* `token` (string) - An access token to use to authenticate to the build
service.
* `vcs` (bool) - If true, Packer will detect your VCS (if there is one)
and only upload the files that are tracked by the VCS. This is useful
for automatically excluding ignored files. This defaults to false.
## Examples
A push configuration section with minimal options:
```javascript
{
"push": {
"name": "hashicorp/precise64"
}
}
```
A push configuration that tells Packer to inspect the VCS and additionally lists
individual files to include:
```javascript
{
"push": {
"name": "hashicorp/precise64",
"vcs": true,
"include": [
"other_file/outside_of.vcs"
]
}
}
```
~> **Variable interpolation** is not currently possible in Packer push
configurations. This will be fixed in an upcoming release.
......@@ -2,7 +2,7 @@
layout: "docs"
page_title: "User Variables in Templates"
description: |-
User variables allow your templates to be further configured with variables from the command-line, environmental variables, or files. This lets you parameterize your templates so that you can keep secret tokens, environment-specific data, and other types of information out of your templates. This maximizes the portablility and shareability of the template.
User variables allow your templates to be further configured with variables from the command-line, environmental variables, or files. This lets you parameterize your templates so that you can keep secret tokens, environment-specific data, and other types of information out of your templates. This maximizes the portability and shareability of the template.
---
# User Variables
......
......@@ -16,7 +16,7 @@ with Redis pre-installed. This is just an example. Packer can create images
for [many platforms](/intro/platforms.html) with anything pre-installed.
If you don't have an AWS account, [create one now](http://aws.amazon.com/free/).
For the example, we'll use a "t1.micro" instance to build our image, which
For the example, we'll use a "t2.micro" instance to build our image, which
qualifies under the AWS [free-tier](http://aws.amazon.com/free/), meaning
it will be free. If you already have an AWS account, you may be charged some
amount of money, but it shouldn't be more than a few cents.
......@@ -54,8 +54,8 @@ briefly. Create a file `example.json` and fill it with the following contents:
"access_key": "{{user `aws_access_key`}}",
"secret_key": "{{user `aws_secret_key`}}",
"region": "us-east-1",
"source_ami": "ami-de0d9eb7",
"instance_type": "t1.micro",
"source_ami": "ami-9eaa1cf6",
"instance_type": "t2.micro",
"ssh_username": "ubuntu",
"ami_name": "packer-example {{timestamp}}"
}]
......@@ -157,10 +157,13 @@ the Packer output.
Packer only builds images. It does not attempt to manage them in any way.
After they're built, it is up to you to launch or destroy them as you see
fit. As a result of this, after running the above example, your AWS account
now has an AMI associated with it.
fit. If you want to store and namespace images for easy reference, you
can use [Atlas by HashiCorp](https://atlas.hashicorp.com). We'll cover
remotely building and storing images at the end of this getting started guide.
AMIs are stored in S3 by Amazon, so unless you want to be charged about $0.01
After running the above example, your AWS account
now has an AMI associated with it. AMIs are stored in S3 by Amazon,
so unless you want to be charged about $0.01
per month, you'll probably want to remove it. Remove the AMI by
first deregistering it on the [AWS AMI management page](https://console.aws.amazon.com/ec2/home?region=us-east-1#s=Images).
Next, delete the associated snapshot on the
......
......@@ -16,6 +16,9 @@ From this point forward, the most important reference for you will be
the [documentation](/docs). The documentation is less of a guide and
more of a reference of all the overall features and options of Packer.
If you're interested in learning more about how Packer fits into the
HashiCorp ecosystem of tools, read our [Atlas getting started overview](https://atlas.hashicorp.com/help/getting-started/getting-started-overview).
As you use Packer more, please voice your comments and concerns on
the [mailing list or IRC](/community). Additionally, Packer is
[open source](https://github.com/mitchellh/packer) so please contribute
......
......@@ -83,7 +83,7 @@ The entire template should now [look like this](https://gist.github.com/pearkes/
Additional builders are simply added to the `builders` array in the template.
This tells Packer to build multiple images. The builder `type` values don't
even need to be different! In fact, if you wanted to build multiple AMIs,
you can do that as well.
you can do that as long as you specify a unique `name` for each build.
Validate the template with `packer validate`. This is always a good practice.
......
---
layout: "intro"
page_title: "Remote Builds and Storage"
prev_url: "/intro/getting-started/vagrant.html"
next_url: "/intro/getting-started/next.html"
next_title: "Next Steps"
description: |-
Up to this point in the guide, you have been running Packer on your local machine to build and provision images on AWS and DigitalOcean. However, you can use Atlas by HashiCorp to both run Packer builds remotely and store the output of builds.
---
# Remote Builds and Storage
Up to this point in the guide, you have been running Packer on your local machine to build and provision images on AWS and DigitalOcean. However, you can use [Atlas by HashiCorp](https://atlas.hashicorp.com) to run Packer builds remotely and store the output of builds.
## Why Build Remotely?
By building remotely, you can move access credentials off developer machines, release local machines from long-running Packer processes, and automatically start Packer builds from trigger sources such as `vagrant push`, a version control system, or a CI tool.
## Run Packer Builds Remotely
To run Packer remotely, there are two changes that must be made to the Packer template. The first is the addition of the `push` [configuration](https://www.packer.io/docs/templates/push.html), which sends the Packer template to Atlas so it can run Packer remotely. The second modification is updating the variables section to read variables from the Atlas environment rather than the local environment. Remove the `post-processors` section for now if it is still in your template.
```javascript
{
"variables": {
"aws_access_key": "{{env `aws_access_key`}}",
"aws_secret_key": "{{env `aws_secret_key`}}"
},
"builders": [{
"type": "amazon-ebs",
"access_key": "{{user `aws_access_key`}}",
"secret_key": "{{user `aws_secret_key`}}",
"region": "us-east-1",
"source_ami": "ami-9eaa1cf6",
"instance_type": "t2.micro",
"ssh_username": "ubuntu",
"ami_name": "packer-example {{timestamp}}"
}],
"provisioners": [{
"type": "shell",
"inline": [
"sleep 30",
"sudo apt-get update",
"sudo apt-get install -y redis-server"
]
}],
"push": {
"name": "ATLAS_USERNAME/packer-tutorial"
}
}
```
To get an Atlas username, [create an account here](https://atlas.hashicorp.com/account/new?utm_source=oss&utm_medium=getting-started&utm_campaign=packer). Replace "ATLAS_USERNAME" with your username, then run `packer push -create example.json` to send the configuration to Atlas, which automatically starts the build.
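For example:

```text
$ packer push -create example.json
```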
This build will fail since neither `aws_access_key` nor `aws_secret_key` is set in the Atlas environment. To set environment variables in Atlas, navigate to the [operations tab](https://atlas.hashicorp.com/operations), click the "packer-tutorial" build configuration that was just created, and then click 'variables' in the left navigation. Set `aws_access_key` and `aws_secret_key` with their respective values. Now restart the Packer build by either clicking 'rebuild' in the Atlas UI or by running `packer push example.json` again. Then, when you click on the active build, you can view the logs in real time.
-> **Note:** Whenever a change is made to the Packer template, you must `packer push` to update the configuration in Atlas.
## Store Packer Outputs
Now we have Atlas building an AMI with Redis pre-configured. This is great, but it's even better to store and version the AMI output so it can be easily deployed by a tool like [Terraform](https://terraform.io). The `atlas` [post-processor](/docs/post-processors/atlas.html) makes this process simple:
```javascript
{
"variables": ["..."],
"builders": ["..."],
"provisioners": ["..."],
"push": ["..."]
"post-processors": [
{
"type": "atlas",
"artifact": "ATLAS_USERNAME/packer-tutorial",
"artifact_type": "aws.ami"
}
]
}
```
Update the `post-processors` block with your Atlas username, then run `packer push example.json` and watch the build kick off in Atlas! When the build completes, the resulting artifact will be saved and stored in Atlas.
\ No newline at end of file
......@@ -46,10 +46,12 @@ $ packer
usage: packer [--version] [--help] <command> [<args>]
Available commands are:
build build image(s) from template
fix fixes templates from old versions of packer
inspect see components of a template
validate check that a template is valid
build build image(s) from template
fix fixes templates from old versions of packer
inspect see components of a template
push push template files to a Packer build service
validate check that a template is valid
version Prints the Packer version
```
If you get an error that `packer` could not be found, then your PATH
......
......@@ -2,8 +2,8 @@
layout: "intro"
page_title: "Vagrant Boxes"
prev_url: "/intro/getting-started/parallel-builds.html"
next_url: "/intro/getting-started/next.html"
next_title: "Next Steps"
next_url: "/intro/getting-started/remote-builds.html"
next_title: "Remote Builds and Storage"
description: |-
Packer also has the ability to take the results of a builder (such as an AMI or plain VMware image) and turn it into a Vagrant box.
---
......
---
layout: "intro"
page_title: "Packer and the HashiCorp Ecosystem"
prev_url: "/intro/platforms.html"
next_url: "/intro/getting-started/setup.html"
next_title: "Getting Started: Install Packer"
description: |-
Learn how Packer fits in with the rest of the HashiCorp ecosystem of tools
---
# Packer and the HashiCorp Ecosystem
HashiCorp is the creator of the open source projects Vagrant, Packer, Terraform, Serf, and Consul, and the commercial product Atlas. Packer is just one piece of the ecosystem HashiCorp has built to make application delivery a versioned, auditable, repeatable, and collaborative process. To learn more about our beliefs on the qualities of the modern datacenter and responsible application delivery, read [The Atlas Mindset: Version Control for Infrastructure](https://hashicorp.com/blog/atlas-mindset.html/?utm_source=packer&utm_campaign=HashicorpEcosystem).
If you are using Packer to build machine images and deployable artifacts, it’s likely that you need a solution for deploying those artifacts. Terraform is our tool for creating, combining, and modifying infrastructure.
Below are summaries of HashiCorp’s open source projects and a graphic showing how Atlas connects them to create a full application delivery workflow.
# HashiCorp Ecosystem
![Atlas Workflow](docs/atlas-workflow.png)
[Atlas](https://atlas.hashicorp.com/?utm_source=packer&utm_campaign=HashicorpEcosystem) is HashiCorp's only commercial product. It unites Packer, Terraform, and Consul to make application delivery a versioned, auditable, repeatable, and collaborative process.
[Packer](https://packer.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for creating machine images and deployable artifacts such as AMIs, OpenStack images, Docker containers, etc.
[Terraform](https://terraform.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for creating, combining, and modifying infrastructure. In the Atlas workflow Terraform reads from the artifact registry and provisions infrastructure.
[Consul](https://consul.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for service discovery, service registry, and health checks. In the Atlas workflow Consul is configured at the Packer build stage and identifies the service(s) contained in each artifact. Since Consul is configured at the build phase with Packer, when the artifact is deployed with Terraform, it is fully configured with dependencies and service discovery pre-baked. This greatly reduces the risk of an unhealthy node in production due to configuration failure at runtime.
[Serf](https://serfdom.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for cluster membership and failure detection. Consul uses Serf’s gossip protocol as the foundation for service discovery.
[Vagrant](https://www.vagrantup.com/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for managing development environments that mirror production. Vagrant environments reduce the friction of developing a project and reduce the risk of unexpected behavior appearing after deployment. Vagrant boxes can be built in parallel with production artifacts with Packer to maintain parity between development and production.
......@@ -2,8 +2,8 @@
layout: "intro"
page_title: "Supported Platforms"
prev_url: "/intro/use-cases.html"
next_url: "/intro/getting-started/setup.html"
next_title: "Getting Started: Install Packer"
next_url: "/intro/hashicorp-ecosystem.html"
next_title: "Packer & the HashiCorp Ecosystem"
description: |-
Packer can create machine images for any platform. Packer ships with support for a set of platforms, but can be extended through plugins to support any platform. This page documents the list of supported image types that Packer supports creating.
---
......
......@@ -13,6 +13,7 @@
<li><a href="/docs/command-line/build.html">Build</a></li>
<li><a href="/docs/command-line/fix.html">Fix</a></li>
<li><a href="/docs/command-line/inspect.html">Inspect</a></li>
<li><a href="/docs/command-line/push.html">Push</a></li>
<li><a href="/docs/command-line/validate.html">Validate</a></li>
<li><a href="/docs/command-line/machine-readable.html">Machine-Readable Output</a></li>
</ul>
......@@ -23,6 +24,7 @@
<li><a href="/docs/templates/builders.html">Builders</a></li>
<li><a href="/docs/templates/provisioners.html">Provisioners</a></li>
<li><a href="/docs/templates/post-processors.html">Post-Processors</a></li>
<li><a href="/docs/templates/push.html">Push</a></li>
<li><a href="/docs/templates/configuration-templates.html">Configuration Templates</a></li>
<li><a href="/docs/templates/user-variables.html">User Variables</a></li>
<li><a href="/docs/templates/veewee-to-packer.html">Veewee-to-Packer</a></li>
......@@ -58,6 +60,7 @@
<ul>
<li><h4>Post-Processors</h4></li>
<li><a href="/docs/post-processors/atlas.html">Atlas</a></li>
<li><a href="/docs/post-processors/compress.html">compress</a></li>
<li><a href="/docs/post-processors/docker-import.html">docker-import</a></li>
<li><a href="/docs/post-processors/docker-push.html">docker-push</a></li>
......
......@@ -8,6 +8,7 @@
<li><a href="/intro/why.html">Why Use Packer?</a></li>
<li><a href="/intro/use-cases.html">Use Cases</a></li>
<li><a href="/intro/platforms.html">Supported Platforms</a></li>
<li><a href="/intro/hashicorp-ecosystem.html">Packer & the HashiCorp Ecosystem</a></li>
</ul>
<ul>
......@@ -17,6 +18,7 @@
<li><a href="/intro/getting-started/provision.html">Provision</a></li>
<li><a href="/intro/getting-started/parallel-builds.html">Parallel Builds</a></li>
<li><a href="/intro/getting-started/vagrant.html">Vagrant Boxes</a></li>
<li><a href="/intro/getting-started/remote-builds.html">Remote Builds</a></li>
<li><a href="/intro/getting-started/next.html">Next Steps</a></li>
</ul>
<% end %>
......
......@@ -6,6 +6,16 @@
<%= stylesheet_link_tag "application" %>
<meta name="HandheldFriendly" content="True" />
<meta name="MobileOptimized" content="320" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta name="apple-mobile-web-app-capable" content="yes">
<meta name="apple-mobile-web-app-status-bar-style" content="black">
<link rel="shortcut icon" href="<%= image_path("favicon.ico") %>" type="image/x-icon">
<link rel="icon" href="<%= image_path("favicon.ico") %>" type="image/x-icon">
<script type="text/javascript" src="//use.typekit.net/apr3jjs.js"></script>
<script type="text/javascript">try{Typekit.load();}catch(e){}</script>
</head>
......