Commit d0d3229f authored by Nathan Hartwell's avatar Nathan Hartwell

Merge branch 'master' of https://github.com/mitchellh/packer

Conflicts:
	provisioner/salt-masterless/provisioner.go
parents e5c6f1a7 c8b3dfff
+sudo: false
+
 language: go

 go:
 - 1.2
 - 1.3
+- 1.4
 - tip

 install: make updatedeps

 script:
 - GOMAXPROCS=2 make test
 #- go test -race ./...

+branches:
+  only:
+  - master
+
+notifications:
+  irc:
+    channels:
+    - "irc.freenode.org#packer-tool"
+    skip_join: true
+    use_notice: true
+
 matrix:
+  fast_finish: true
   allow_failures:
   - go: tip
## 0.8.0 (unreleased)

FEATURES:

IMPROVEMENTS:

* builder/openstack: Add rackconnect_wait for Rackspace customers to wait for
  RackConnect data to appear
* builder/openstack: Add ssh_interface option for rackconnect for users that
  have prohibitive firewalls

BUG FIXES:

* builder/amazon: Remove deprecated ec2-upload-bundle parameter [GH-1931]
* builder/digitalocean: Ignore invalid fields from the ever-changing v2 API
* builder/docker: Fixed hang on prompt while copying script
* builder/virtualbox: Added SCSI support
* postprocessor/vagrant-cloud: Fixed failing on response
* provisioner/puppet-masterless: Allow manifest_file to be a directory
* provisioner/salt-masterless: Add `--retcode-passthrough` to salt-call
## 0.7.5 (December 9, 2014)

FEATURES:

* **New command: `packer push`**: Push template and files to HashiCorp's
  Atlas for building your templates automatically.
* **New post-processor: `atlas`**: Send artifact to HashiCorp's Atlas for
  versioning and storing artifacts. These artifacts can then be queried
  using the API, Terraform, etc.

IMPROVEMENTS:

* builder/googlecompute: Support for ubuntu-os-cloud project
* builder/googlecompute: Support for OAuth2 to avoid client secrets file
* builder/googlecompute: GCE image from persistent disk instead of tarball
* builder/qemu: Checksum type "none" can be used
* provisioner/chef: Generate a node name if none available
* provisioner/chef: Added ssl_verify_mode configuration

BUG FIXES:

* builder/parallels: Fixed attachment of ISO to cdrom device
* builder/parallels: Fixed boot load ordering
* builder/digitalocean: Fixed decoding of size
* builder/digitalocean: Fixed missing content-type header in request
* builder/digitalocean: Fixed use of private IP
* builder/digitalocean: Fixed the artifact ID generation
* builder/vsphere: Fixed credential escaping
* builder/qemu: Fixed use of CDROM with disk_image
* builder/aws: Fixed IP address for SSH in VPC
* builder/aws: Fixed issue with multiple block devices
* builder/vmware: Upload VMX to ESX5 after editing
* communicator/docker: Fix handling of symlinks during upload
* provisioner/chef: Fixed use of sudo in some cases
* core: Fixed build name interpolation
* postprocessor/vagrant: Fixed check for Vagrantfile template

## 0.7.2 (October 28, 2014)
......
 TEST?=./...
+VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods \
+	-nilfunc -printf -rangeloops -shift -structtags -unsafeptr

 default: test

@@ -10,6 +12,7 @@ dev:
 test:
 	go test $(TEST) $(TESTARGS) -timeout=10s
+	@$(MAKE) vet

 testrace:
 	go test -race $(TEST) $(TESTARGS)

@@ -17,4 +20,14 @@ testrace:
 updatedeps:
 	go get -d -v -p 2 ./...

-.PHONY: bin default test updatedeps
+vet:
+	@go tool vet 2>/dev/null ; if [ $$? -eq 3 ]; then \
+		go get golang.org/x/tools/cmd/vet; \
+	fi
+	@go tool vet $(VETARGS) . ; if [ $$? -eq 1 ]; then \
+		echo ""; \
+		echo "Vet found suspicious constructs. Please check the reported constructs"; \
+		echo "and fix them if necessary before submitting the code for reviewal."; \
+	fi
+
+.PHONY: bin default test updatedeps vet
 # Packer

+[![Build Status](https://travis-ci.org/mitchellh/packer.svg?branch=master)](https://travis-ci.org/mitchellh/packer)
+
 * Website: http://www.packer.io
 * IRC: `#packer-tool` on Freenode
 * Mailing list: [Google Groups](http://groups.google.com/group/packer-tool)
......
@@ -2,12 +2,13 @@ package common

 import (
 	"fmt"
-	"github.com/mitchellh/goamz/aws"
-	"github.com/mitchellh/goamz/ec2"
-	"github.com/mitchellh/packer/packer"
 	"log"
 	"sort"
 	"strings"
+
+	"github.com/mitchellh/goamz/aws"
+	"github.com/mitchellh/goamz/ec2"
+	"github.com/mitchellh/packer/packer"
 )

 // Artifact is an artifact implementation that contains built AMIs.

@@ -53,7 +54,12 @@ func (a *Artifact) String() string {
 }

 func (a *Artifact) State(name string) interface{} {
-	return nil
+	switch name {
+	case "atlas.artifact.metadata":
+		return a.stateAtlasMetadata()
+	default:
+		return nil
+	}
 }

 func (a *Artifact) Destroy() error {

@@ -79,3 +85,13 @@ func (a *Artifact) Destroy() error {
 	return nil
 }
+
+func (a *Artifact) stateAtlasMetadata() interface{} {
+	metadata := make(map[string]string)
+	for region, imageId := range a.Amis {
+		k := fmt.Sprintf("region.%s", region)
+		metadata[k] = imageId
+	}
+
+	return metadata
+}
 package common

 import (
-	"github.com/mitchellh/packer/packer"
+	"reflect"
 	"testing"
+
+	"github.com/mitchellh/packer/packer"
 )

 func TestArtifact_Impl(t *testing.T) {

@@ -26,6 +28,24 @@ func TestArtifactId(t *testing.T) {
 	}
 }

+func TestArtifactState_atlasMetadata(t *testing.T) {
+	a := &Artifact{
+		Amis: map[string]string{
+			"east": "foo",
+			"west": "bar",
+		},
+	}
+
+	actual := a.State("atlas.artifact.metadata")
+	expected := map[string]string{
+		"region.east": "foo",
+		"region.west": "bar",
+	}
+	if !reflect.DeepEqual(actual, expected) {
+		t.Fatalf("bad: %#v", actual)
+	}
+}
+
 func TestArtifactString(t *testing.T) {
 	expected := `AMIs were created:
......
@@ -60,12 +60,12 @@ func (b *BlockDevices) Prepare(t *packer.ConfigTemplate) []error {
 	var errs []error
 	for outer, bds := range lists {
-		for i, bd := range bds {
+		for i := 0; i < len(bds); i++ {
 			templates := map[string]*string{
-				"device_name":  &bd.DeviceName,
-				"snapshot_id":  &bd.SnapshotId,
-				"virtual_name": &bd.VirtualName,
-				"volume_type":  &bd.VolumeType,
+				"device_name":  &bds[i].DeviceName,
+				"snapshot_id":  &bds[i].SnapshotId,
+				"virtual_name": &bds[i].VirtualName,
+				"volume_type":  &bds[i].VolumeType,
 			}

 			errs := make([]error, 0)
......
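The loop rewrite above works around a Go range gotcha: the loop variable is a copy of the slice element, so storing `&bd.DeviceName` in the template map and interpolating into it never updates the block device that is actually used. Indexing into the slice keeps the pointers aimed at the real elements. A self-contained sketch of the difference, using a throwaway struct rather than Packer's types:

package main

import "fmt"

type device struct{ Name string }

func main() {
	devs := []device{{"a"}, {"b"}}

	// Taking the address of the range variable mutates a copy, not the slice.
	for _, d := range devs {
		p := &d.Name
		*p = "changed"
	}
	fmt.Println(devs) // [{a} {b}] -- still the original values

	// Indexing into the slice mutates the real elements.
	for i := 0; i < len(devs); i++ {
		p := &devs[i].Name
		*p = "changed"
	}
	fmt.Println(devs) // [{changed} {changed}]
}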
@@ -16,14 +16,14 @@ func SSHAddress(e *ec2.EC2, port int, private bool) func(multistep.StateBag) (st
 		for j := 0; j < 2; j++ {
 			var host string
 			i := state.Get("instance").(*ec2.Instance)
-			if i.DNSName != "" {
-				host = i.DNSName
-			} else if i.VpcId != "" {
+			if i.VpcId != "" {
 				if i.PublicIpAddress != "" && !private {
 					host = i.PublicIpAddress
 				} else {
 					host = i.PrivateIpAddress
 				}
+			} else if i.DNSName != "" {
+				host = i.DNSName
 			}

 			if host != "" {
......
@@ -79,7 +79,7 @@ func amiRegionCopy(state multistep.StateBag, auth aws.Auth, imageId string,
 	if err != nil {
 		return "", fmt.Errorf("Error Copying AMI (%s) to region (%s): %s",
-			imageId, target, err)
+			imageId, target.Name, err)
 	}

 	stateChange := StateChangeConf{

@@ -91,7 +91,7 @@ func amiRegionCopy(state multistep.StateBag, auth aws.Auth, imageId string,
 	if _, err := WaitForState(&stateChange); err != nil {
 		return "", fmt.Errorf("Error waiting for AMI (%s) in region (%s): %s",
-			resp.ImageId, target, err)
+			resp.ImageId, target.Name, err)
 	}

 	return resp.ImageId, nil
......
@@ -87,7 +87,7 @@ func (s *stepCreateAMI) Cleanup(state multistep.StateBag) {
 		ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %s", err))
 		return
 	} else if resp.Return == false {
-		ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %s", resp.Return))
+		ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %t", resp.Return))
 		return
 	}
 }
@@ -75,7 +75,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
 			"-s {{.SecretKey}} " +
 			"-d {{.BundleDirectory}} " +
 			"--batch " +
-			"--region {{.Region}} " +
+			"--location {{.Region}} " +
 			"--retry"
 	}
......
@@ -5,12 +5,16 @@
 package digitalocean

 type Region struct {
-	Id        uint     `json:"id,omitempty"`        //only in v1 api
-	Slug      string   `json:"slug"`                //presen in both api
-	Name      string   `json:"name"`                //presen in both api
-	Sizes     []string `json:"sizes,omitempty"`     //only in v2 api
-	Available bool     `json:"available,omitempty"` //only in v2 api
-	Features  []string `json:"features,omitempty"`  //only in v2 api
+	Slug string `json:"slug"`
+	Name string `json:"name"`
+
+	// v1 only
+	Id uint `json:"id,omitempty"`
+
+	// v2 only
+	Sizes     []string `json:"sizes,omitempty"`
+	Available bool     `json:"available,omitempty"`
+	Features  []string `json:"features,omitempty"`
 }

 type RegionsResp struct {

@@ -18,16 +22,19 @@ type RegionsResp struct {
 }

 type Size struct {
-	Id           uint     `json:"id,omitempty"`            //only in v1 api
-	Name         string   `json:"name,omitempty"`          //only in v1 api
-	Slug         string   `json:"slug"`                    //presen in both api
-	Memory       uint     `json:"memory,omitempty"`        //only in v2 api
-	VCPUS        uint     `json:"vcpus,omitempty"`         //only in v2 api
-	Disk         uint     `json:"disk,omitempty"`          //only in v2 api
-	Transfer     float64  `json:"transfer,omitempty"`      //only in v2 api
-	PriceMonthly float64  `json:"price_monthly,omitempty"` //only in v2 api
-	PriceHourly  float64  `json:"price_hourly,omitempty"`  //only in v2 api
-	Regions      []string `json:"regions,omitempty"`       //only in v2 api
+	Slug string `json:"slug"`
+
+	// v1 only
+	Id   uint   `json:"id,omitempty"`
+	Name string `json:"name,omitempty"`
+
+	// v2 only
+	Memory       uint    `json:"memory,omitempty"`
+	VCPUS        uint    `json:"vcpus,omitempty"`
+	Disk         uint    `json:"disk,omitempty"`
+	Transfer     float64 `json:"transfer,omitempty"`
+	PriceMonthly float64 `json:"price_monthly,omitempty"`
+	PriceHourly  float64 `json:"price_hourly,omitempty"`
 }

 type SizesResp struct {

@@ -35,14 +42,15 @@ type SizesResp struct {
 }

 type Image struct {
-	Id           uint     `json:"id"`                   //presen in both api
-	Name         string   `json:"name"`                 //presen in both api
-	Slug         string   `json:"slug"`                 //presen in both api
-	Distribution string   `json:"distribution"`         //presen in both api
-	Public       bool     `json:"public,omitempty"`     //only in v2 api
-	Regions      []string `json:"regions,omitempty"`    //only in v2 api
-	ActionIds    []string `json:"action_ids,omitempty"` //only in v2 api
-	CreatedAt    string   `json:"created_at,omitempty"` //only in v2 api
+	Id           uint   `json:"id"`
+	Name         string `json:"name"`
+	Slug         string `json:"slug"`
+	Distribution string `json:"distribution"`
+
+	// v2 only
+	Public    bool     `json:"public,omitempty"`
+	ActionIds []string `json:"action_ids,omitempty"`
+	CreatedAt string   `json:"created_at,omitempty"`
 }

 type ImagesResp struct {
......
@@ -262,8 +262,10 @@ func (d DigitalOceanClientV2) DropletStatus(id uint) (string, string, error) {
 	}

 	var ip string
-	if len(res.Droplet.Networks.V4) > 0 {
-		ip = res.Droplet.Networks.V4[0].IPAddr
+	for _, n := range res.Droplet.Networks.V4 {
+		if n.Type == "public" {
+			ip = n.IPAddr
+		}
 	}

 	return ip, res.Droplet.Status, err

@@ -285,17 +287,21 @@ func NewRequestV2(d DigitalOceanClientV2, path string, method string, req interf
 		enc.Encode(req)
 		defer buf.Reset()
 		request, err = http.NewRequest(method, url, buf)
+		request.Header.Add("Content-Type", "application/json")
 	} else {
 		request, err = http.NewRequest(method, url, nil)
 	}
 	if err != nil {
 		return err
 	}

 	// Add the authentication parameters
 	request.Header.Add("Authorization", "Bearer "+d.APIToken)

-	log.Printf("sending new request to digitalocean: %s", url)
+	if buf != nil {
+		log.Printf("sending new request to digitalocean: %s buffer: %s", url, buf)
+	} else {
+		log.Printf("sending new request to digitalocean: %s", url)
+	}

 	resp, err := client.Do(request)
 	if err != nil {
 		return err

@@ -325,7 +331,10 @@ func NewRequestV2(d DigitalOceanClientV2, path string, method string, req interf
 		return errors.New(fmt.Sprintf("Failed to decode JSON response %s (HTTP %v) from DigitalOcean: %s", err.Error(),
 			resp.StatusCode, body))
 	}
+
+	switch resp.StatusCode {
+	case 403, 401, 429, 422, 404, 503, 500:
+		return errors.New(fmt.Sprintf("digitalocean request error: %+v", res))
+	}

 	return nil
 }
......
@@ -3,6 +3,7 @@ package digitalocean

 import (
 	"fmt"
 	"log"
+	"strconv"
 )

 type Artifact struct {

@@ -29,8 +30,7 @@ func (*Artifact) Files() []string {
 }

 func (a *Artifact) Id() string {
-	// mimicing the aws builder
-	return fmt.Sprintf("%s:%s", a.regionName, a.snapshotName)
+	return strconv.FormatUint(uint64(a.snapshotId), 10)
 }

 func (a *Artifact) String() string {
......
 package digitalocean

 import (
-	"github.com/mitchellh/packer/packer"
 	"testing"
+
+	"github.com/mitchellh/packer/packer"
 )

 func TestArtifact_Impl(t *testing.T) {

@@ -13,6 +14,15 @@ func TestArtifact_Impl(t *testing.T) {
 	}
 }

+func TestArtifactId(t *testing.T) {
+	a := &Artifact{"packer-foobar", 42, "San Francisco", nil}
+
+	expected := "42"
+
+	if a.Id() != expected {
+		t.Fatalf("artifact ID should match: %v", expected)
+	}
+}
+
 func TestArtifactString(t *testing.T) {
 	a := &Artifact{"packer-foobar", 42, "San Francisco", nil}
 	expected := "A snapshot was created: 'packer-foobar' in region 'San Francisco'"
......
@@ -17,12 +17,12 @@ import (
 )

 // see https://api.digitalocean.com/images/?client_id=[client_id]&api_key=[api_key]
-// name="Ubuntu 12.04.4 x64", id=3101045,
+// name="Ubuntu 12.04.4 x64", id=6374128,
 const DefaultImage = "ubuntu-12-04-x64"

 // see https://api.digitalocean.com/regions/?client_id=[client_id]&api_key=[api_key]
-// name="New York", id=1
-const DefaultRegion = "nyc1"
+// name="New York 3", id=8
+const DefaultRegion = "nyc3"

 // see https://api.digitalocean.com/sizes/?client_id=[client_id]&api_key=[api_key]
 // name="512MB", id=66 (the smallest droplet size)
......
@@ -264,7 +264,7 @@ func TestBuilderPrepare_SSHUsername(t *testing.T) {
 	}

 	if b.config.SSHUsername != "root" {
-		t.Errorf("invalid: %d", b.config.SSHUsername)
+		t.Errorf("invalid: %s", b.config.SSHUsername)
 	}

 	// Test set

@@ -297,7 +297,7 @@ func TestBuilderPrepare_SSHTimeout(t *testing.T) {
 	}

 	if b.config.RawSSHTimeout != "1m" {
-		t.Errorf("invalid: %d", b.config.RawSSHTimeout)
+		t.Errorf("invalid: %s", b.config.RawSSHTimeout)
 	}

 	// Test set

@@ -338,7 +338,7 @@ func TestBuilderPrepare_StateTimeout(t *testing.T) {
 	}

 	if b.config.RawStateTimeout != "6m" {
-		t.Errorf("invalid: %d", b.config.RawStateTimeout)
+		t.Errorf("invalid: %s", b.config.RawStateTimeout)
 	}

 	// Test set

@@ -379,7 +379,7 @@ func TestBuilderPrepare_PrivateNetworking(t *testing.T) {
 	}

 	if b.config.PrivateNetworking != false {
-		t.Errorf("invalid: %s", b.config.PrivateNetworking)
+		t.Errorf("invalid: %t", b.config.PrivateNetworking)
 	}

 	// Test set

@@ -394,7 +394,7 @@ func TestBuilderPrepare_PrivateNetworking(t *testing.T) {
 	}

 	if b.config.PrivateNetworking != true {
-		t.Errorf("invalid: %s", b.config.PrivateNetworking)
+		t.Errorf("invalid: %t", b.config.PrivateNetworking)
 	}
 }
......
@@ -75,7 +75,7 @@ func (c *Communicator) Upload(dst string, src io.Reader, fi *os.FileInfo) error
 	// Copy the file into place by copying the temporary file we put
 	// into the shared folder into the proper location in the container
 	cmd := &packer.RemoteCmd{
-		Command: fmt.Sprintf("cp %s/%s %s", c.ContainerDir,
+		Command: fmt.Sprintf("command cp %s/%s %s", c.ContainerDir,
 			filepath.Base(tempfile.Name()), dst),
 	}

@@ -117,6 +117,16 @@ func (c *Communicator) UploadDir(dst string, src string, exclude []string) error
 			return os.MkdirAll(hostpath, info.Mode())
 		}

+		if info.Mode()&os.ModeSymlink == os.ModeSymlink {
+			dest, err := os.Readlink(path)
+
+			if err != nil {
+				return err
+			}
+
+			return os.Symlink(dest, hostpath)
+		}
+
 		// It is a file, copy it over, including mode.
 		src, err := os.Open(path)
 		if err != nil {

@@ -156,7 +166,7 @@ func (c *Communicator) UploadDir(dst string, src string, exclude []string) error
 		// Make the directory, then copy into it
 		cmd := &packer.RemoteCmd{
-			Command: fmt.Sprintf("set -e; mkdir -p %s; cp -R %s/* %s",
+			Command: fmt.Sprintf("set -e; mkdir -p %s; command cp -R %s/* %s",
 				containerDst, containerSrc, containerDst),
 		}

 		if err := c.Start(cmd); err != nil {
......
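The `command cp` change above is what the changelog entry "builder/docker: Fixed hang on prompt while copying script" refers to: `command` is a shell builtin that skips aliases and functions, so an image whose shell aliases `cp` to `cp -i` can no longer stall the upload waiting for interactive confirmation. A rough standalone sketch of the same idea; the container ID and paths are placeholders, not values from the communicator:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// "command cp" resolves to the real cp binary even if the image's shell
	// profile defines `alias cp='cp -i'`, so the copy cannot block on a
	// confirmation prompt. Container ID and paths are hypothetical.
	script := "command cp /packer-files/script.sh /tmp/script.sh"
	out, err := exec.Command("docker", "exec", "example-container", "/bin/sh", "-c", script).CombinedOutput()
	if err != nil {
		fmt.Printf("copy failed: %v\n%s\n", err, out)
	}
}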
@@ -13,16 +13,6 @@ type accountFile struct {
 	ClientId    string `json:"client_id"`
 }

-// clientSecretsFile represents the structure of the client secrets JSON file.
-type clientSecretsFile struct {
-	Web struct {
-		AuthURI     string `json:"auth_uri"`
-		ClientEmail string `json:"client_email"`
-		ClientId    string `json:"client_id"`
-		TokenURI    string `json:"token_uri"`
-	}
-}
-
 func loadJSON(result interface{}, path string) error {
 	f, err := os.Open(path)
 	if err != nil {
......
@@ -35,7 +35,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
 // representing a GCE machine image.
 func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
 	driver, err := NewDriverGCE(
-		ui, b.config.ProjectId, &b.config.account, &b.config.clientSecrets)
+		ui, b.config.ProjectId, &b.config.account)
 	if err != nil {
 		return nil, err
 	}

@@ -49,6 +49,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
 	// Build the steps.
 	steps := []multistep.Step{
+		new(StepCheckExistingImage),
 		&StepCreateSSHKey{
 			Debug:        b.config.PackerDebug,
 			DebugKeyPath: fmt.Sprintf("gce_%s.pem", b.config.PackerBuildName),

@@ -65,10 +66,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
 			SSHWaitTimeout: 5 * time.Minute,
 		},
 		new(common.StepProvision),
-		new(StepUpdateGcloud),
+		new(StepTeardownInstance),
 		new(StepCreateImage),
-		new(StepUploadImage),
-		new(StepRegisterImage),
 	}

 	// Run the steps.
......
@@ -16,11 +16,10 @@ import (
 type Config struct {
 	common.PackerConfig `mapstructure:",squash"`

 	AccountFile       string `mapstructure:"account_file"`
-	ClientSecretsFile string `mapstructure:"client_secrets_file"`
 	ProjectId         string `mapstructure:"project_id"`

-	BucketName       string `mapstructure:"bucket_name"`
+	DiskName         string `mapstructure:"disk_name"`
 	DiskSizeGb       int64  `mapstructure:"disk_size"`
 	ImageName        string `mapstructure:"image_name"`
 	ImageDescription string `mapstructure:"image_description"`

@@ -38,8 +37,6 @@ type Config struct {
 	Zone string `mapstructure:"zone"`

 	account         accountFile
-	clientSecrets   clientSecretsFile
-	instanceName    string
 	privateKeyBytes []byte
 	sshTimeout      time.Duration
 	stateTimeout    time.Duration

@@ -83,6 +80,10 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
 		c.InstanceName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID())
 	}

+	if c.DiskName == "" {
+		c.DiskName = c.InstanceName
+	}
+
 	if c.MachineType == "" {
 		c.MachineType = "n1-standard-1"
 	}

@@ -105,10 +106,9 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
 	// Process Templates
 	templates := map[string]*string{
 		"account_file":        &c.AccountFile,
-		"client_secrets_file": &c.ClientSecretsFile,

-		"bucket_name":       &c.BucketName,
+		"disk_name":         &c.DiskName,
 		"image_name":        &c.ImageName,
 		"image_description": &c.ImageDescription,
 		"instance_name":     &c.InstanceName,

@@ -133,21 +133,6 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
 	}

 	// Process required parameters.
-	if c.BucketName == "" {
-		errs = packer.MultiErrorAppend(
-			errs, errors.New("a bucket_name must be specified"))
-	}
-
-	if c.AccountFile == "" {
-		errs = packer.MultiErrorAppend(
-			errs, errors.New("an account_file must be specified"))
-	}
-
-	if c.ClientSecretsFile == "" {
-		errs = packer.MultiErrorAppend(
-			errs, errors.New("a client_secrets_file must be specified"))
-	}
-
 	if c.ProjectId == "" {
 		errs = packer.MultiErrorAppend(
 			errs, errors.New("a project_id must be specified"))

@@ -185,13 +170,6 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
 		}
 	}

-	if c.ClientSecretsFile != "" {
-		if err := loadJSON(&c.clientSecrets, c.ClientSecretsFile); err != nil {
-			errs = packer.MultiErrorAppend(
-				errs, fmt.Errorf("Failed parsing client secrets file: %s", err))
-		}
-	}
-
 	// Check for any errors.
 	if errs != nil && len(errs.Errors) > 0 {
 		return nil, nil, errs
......
@@ -7,12 +7,10 @@ import (
 func testConfig(t *testing.T) map[string]interface{} {
 	return map[string]interface{}{
 		"account_file": testAccountFile(t),
-		"bucket_name":  "foo",
-		"client_secrets_file": testClientSecretsFile(t),
 		"project_id":   "hashicorp",
 		"source_image": "foo",
 		"zone":         "us-east-1a",
 	}
 }

@@ -58,33 +56,6 @@ func TestConfigPrepare(t *testing.T) {
 			true,
 		},

-		{
-			"bucket_name",
-			nil,
-			true,
-		},
-		{
-			"bucket_name",
-			"good",
-			false,
-		},
-
-		{
-			"client_secrets_file",
-			nil,
-			true,
-		},
-		{
-			"client_secrets_file",
-			testClientSecretsFile(t),
-			false,
-		},
-		{
-			"client_secrets_file",
-			"/tmp/i/should/not/exist",
-			true,
-		},
-
 		{
 			"private_key_file",
 			"/tmp/i/should/not/exist",

@@ -180,22 +151,6 @@ func testAccountFile(t *testing.T) string {
 	return tf.Name()
 }

-func testClientSecretsFile(t *testing.T) string {
-	tf, err := ioutil.TempFile("", "packer")
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
-	defer tf.Close()
-
-	if _, err := tf.Write([]byte(testClientSecretsContent)); err != nil {
-		t.Fatalf("err: %s", err)
-	}
-
-	return tf.Name()
-}
-
 // This is just some dummy data that doesn't actually work (it was revoked
 // a long time ago).
 const testAccountContent = `{}`
-const testClientSecretsContent = `{"web":{"auth_uri":"https://accounts.google.com/o/oauth2/auth","token_uri":"https://accounts.google.com/o/oauth2/token","client_email":"774313886706-eorlsj0r4eqkh5e7nvea5fuf59ifr873@developer.gserviceaccount.com","client_x509_cert_url":"https://www.googleapis.com/robot/v1/metadata/x509/774313886706-eorlsj0r4eqkh5e7nvea5fuf59ifr873@developer.gserviceaccount.com","client_id":"774313886706-eorlsj0r4eqkh5e7nvea5fuf59ifr873.apps.googleusercontent.com","auth_provider_x509_cert_url":"https://www.googleapis.com/oauth2/v1/certs"}}`
@@ -4,15 +4,23 @@ package googlecompute
 // with GCE. The Driver interface exists mostly to allow a mock implementation
 // to be used to test the steps.
 type Driver interface {
-	// CreateImage creates an image with the given URL in Google Storage.
-	CreateImage(name, description, url string) <-chan error
+	// ImageExists returns true if the specified image exists. If an error
+	// occurs calling the API, this method returns false.
+	ImageExists(name string) bool
+
+	// CreateImage creates an image from the given disk in Google Compute
+	// Engine.
+	CreateImage(name, description, zone, disk string) <-chan error

 	// DeleteImage deletes the image with the given name.
 	DeleteImage(name string) <-chan error

-	// DeleteInstance deletes the given instance.
+	// DeleteInstance deletes the given instance, keeping the boot disk.
 	DeleteInstance(zone, name string) (<-chan error, error)

+	// DeleteDisk deletes the disk with the given name.
+	DeleteDisk(zone, name string) (<-chan error, error)
+
 	// GetNatIP gets the NAT IP address for the instance.
 	GetNatIP(zone, name string) (string, error)
......
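Taken together, the reshaped Driver interface supports the new build flow: boot an instance whose boot disk is not auto-deleted, provision it, tear the instance down, create the image directly from the retained disk, and finally delete the disk. A minimal sketch of how those methods chain, with abbreviated error handling and placeholder names rather than the builder's real step code:

package googlecompute

import "fmt"

// buildImage is an illustrative chaining of the Driver methods above.
// Error handling is abbreviated and the arguments are placeholders.
func buildImage(d Driver, zone, instance, disk, image string) error {
	if d.ImageExists(image) {
		return fmt.Errorf("image %q already exists", image)
	}

	// ... run the instance and the provisioners here ...

	// Tear down the instance but keep its boot disk.
	errCh, err := d.DeleteInstance(zone, instance)
	if err != nil {
		return err
	}
	if err := <-errCh; err != nil {
		return err
	}

	// Register the image straight from the retained disk, then remove the disk.
	if err := <-d.CreateImage(image, "created by packer", zone, disk); err != nil {
		return err
	}
	errCh, err = d.DeleteDisk(zone, disk)
	if err != nil {
		return err
	}
	return <-errCh
}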
@@ -4,12 +4,15 @@ import (
 	"fmt"
 	"log"
 	"net/http"
+	"runtime"
 	"time"

-	"code.google.com/p/goauth2/oauth"
-	"code.google.com/p/goauth2/oauth/jwt"
-	"code.google.com/p/google-api-go-client/compute/v1"
 	"github.com/mitchellh/packer/packer"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+	"golang.org/x/oauth2/jwt"
+	"google.golang.org/api/compute/v1"
 )

 // driverGCE is a Driver implementation that actually talks to GCE.

@@ -20,39 +23,59 @@ type driverGCE struct {
 	ui packer.Ui
 }

-const DriverScopes string = "https://www.googleapis.com/auth/compute " +
-	"https://www.googleapis.com/auth/devstorage.full_control"
+var DriverScopes = []string{"https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/devstorage.full_control"}

-func NewDriverGCE(ui packer.Ui, p string, a *accountFile, c *clientSecretsFile) (Driver, error) {
-	// Get the token for use in our requests
-	log.Printf("[INFO] Requesting Google token...")
-	log.Printf("[INFO] -- Email: %s", a.ClientEmail)
-	log.Printf("[INFO] -- Scopes: %s", DriverScopes)
-	log.Printf("[INFO] -- Private Key Length: %d", len(a.PrivateKey))
-	log.Printf("[INFO] -- Token URL: %s", c.Web.TokenURI)
-	jwtTok := jwt.NewToken(
-		a.ClientEmail,
-		DriverScopes,
-		[]byte(a.PrivateKey))
-	jwtTok.ClaimSet.Aud = c.Web.TokenURI
-	token, err := jwtTok.Assert(new(http.Client))
-	if err != nil {
-		return nil, fmt.Errorf("Error retrieving auth token: %s", err)
-	}
-
-	// Instantiate the transport to communicate to Google
-	transport := &oauth.Transport{
-		Config: &oauth.Config{
-			ClientId: a.ClientId,
-			Scope:    DriverScopes,
-			TokenURL: c.Web.TokenURI,
-			AuthURL:  c.Web.AuthURI,
-		},
-		Token: token,
-	}
-
-	log.Printf("[INFO] Instantiating GCE client...")
-	service, err := compute.New(transport.Client())
+func NewDriverGCE(ui packer.Ui, p string, a *accountFile) (Driver, error) {
+	var err error
+
+	var client *http.Client
+
+	// Auth with AccountFile first if provided
+	if a.PrivateKey != "" {
+		log.Printf("[INFO] Requesting Google token via AccountFile...")
+		log.Printf("[INFO] -- Email: %s", a.ClientEmail)
+		log.Printf("[INFO] -- Scopes: %s", DriverScopes)
+		log.Printf("[INFO] -- Private Key Length: %d", len(a.PrivateKey))
+
+		conf := jwt.Config{
+			Email:      a.ClientEmail,
+			PrivateKey: []byte(a.PrivateKey),
+			Scopes:     DriverScopes,
+			TokenURL:   "https://accounts.google.com/o/oauth2/token",
+		}
+
+		// Initiate an http.Client. The following GET request will be
+		// authorized and authenticated on the behalf of
+		// your service account.
+		client = conf.Client(oauth2.NoContext)
+	} else {
+		log.Printf("[INFO] Requesting Google token via GCE Service Role...")
+		client = &http.Client{
+			Transport: &oauth2.Transport{
+				// Fetch from Google Compute Engine's metadata server to retrieve
+				// an access token for the provided account.
+				// If no account is specified, "default" is used.
+				Source: google.ComputeTokenSource(""),
+			},
+		}
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	log.Printf("[INFO] Instantiating GCE client...")
+	service, err := compute.New(client)
+
+	// Set UserAgent
+	versionString := "0.0.0"
+	// TODO(dcunnin): Use Packer's version code from version.go
+	// versionString := main.Version
+	// if main.VersionPrerelease != "" {
+	//	versionString = fmt.Sprintf("%s-%s", versionString, main.VersionPrerelease)
+	// }
+	service.UserAgent = fmt.Sprintf(
+		"(%s %s) Packer/%s", runtime.GOOS, runtime.GOARCH, versionString)
+
 	if err != nil {
 		return nil, err
 	}

@@ -64,15 +87,19 @@ func NewDriverGCE(ui packer.Ui, p string, a *accountFile, c *clientSecretsFile)
 	}, nil
 }

-func (d *driverGCE) CreateImage(name, description, url string) <-chan error {
+func (d *driverGCE) ImageExists(name string) bool {
+	_, err := d.service.Images.Get(d.projectId, name).Do()
+	// The API may return an error for reasons other than the image not
+	// existing, but this heuristic is sufficient for now.
+	return err == nil
+}
+
+func (d *driverGCE) CreateImage(name, description, zone, disk string) <-chan error {
 	image := &compute.Image{
 		Description: description,
 		Name:        name,
-		RawDisk: &compute.ImageRawDisk{
-			ContainerType: "TAR",
-			Source:        url,
-		},
-		SourceType: "RAW",
+		SourceDisk:  fmt.Sprintf("%s%s/zones/%s/disks/%s", d.service.BasePath, d.projectId, zone, disk),
+		SourceType:  "RAW",
 	}

 	errCh := make(chan error, 1)

@@ -109,6 +136,17 @@ func (d *driverGCE) DeleteInstance(zone, name string) (<-chan error, error) {
 	return errCh, nil
 }

+func (d *driverGCE) DeleteDisk(zone, name string) (<-chan error, error) {
+	op, err := d.service.Disks.Delete(d.projectId, zone, name).Do()
+	if err != nil {
+		return nil, err
+	}
+
+	errCh := make(chan error, 1)
+	go waitForState(errCh, "DONE", d.refreshZoneOp(zone, op))
+
+	return errCh, nil
+}
+
 func (d *driverGCE) GetNatIP(zone, name string) (string, error) {
 	instance, err := d.service.Instances.Get(d.projectId, zone, name).Do()
 	if err != nil {

@@ -179,7 +217,7 @@ func (d *driverGCE) RunInstance(c *InstanceConfig) (<-chan error, error) {
 				Mode:       "READ_WRITE",
 				Kind:       "compute#attachedDisk",
 				Boot:       true,
-				AutoDelete: true,
+				AutoDelete: false,
 				InitializeParams: &compute.AttachedDiskInitializeParams{
 					SourceImage: image.SelfLink,
 					DiskSizeGb:  c.DiskSizeGb,

@@ -235,7 +273,7 @@ func (d *driverGCE) WaitForInstance(state, zone, name string) <-chan error {
 }

 func (d *driverGCE) getImage(img Image) (image *compute.Image, err error) {
-	projects := []string{img.ProjectId, "centos-cloud", "coreos-cloud", "debian-cloud", "google-containers", "opensuse-cloud", "rhel-cloud", "suse-cloud", "windows-cloud"}
+	projects := []string{img.ProjectId, "centos-cloud", "coreos-cloud", "debian-cloud", "google-containers", "opensuse-cloud", "rhel-cloud", "suse-cloud", "ubuntu-os-cloud", "windows-cloud"}
 	for _, project := range projects {
 		image, err = d.service.Images.Get(project, img.Name).Do()
 		if err == nil && image != nil && image.SelfLink != "" {
......
@@ -3,9 +3,13 @@ package googlecompute
 // DriverMock is a Driver implementation that is a mocked out so that
 // it can be used for tests.
 type DriverMock struct {
+	ImageExistsName   string
+	ImageExistsResult bool
+
 	CreateImageName  string
 	CreateImageDesc  string
-	CreateImageURL   string
+	CreateImageZone  string
+	CreateImageDisk  string
 	CreateImageErrCh <-chan error

 	DeleteImageName string

@@ -16,6 +20,11 @@ type DriverMock struct {
 	DeleteInstanceErrCh <-chan error
 	DeleteInstanceErr   error

+	DeleteDiskZone  string
+	DeleteDiskName  string
+	DeleteDiskErrCh <-chan error
+	DeleteDiskErr   error
+
 	GetNatIPZone   string
 	GetNatIPName   string
 	GetNatIPResult string

@@ -31,10 +40,16 @@ type DriverMock struct {
 	WaitForInstanceErrCh <-chan error
 }

-func (d *DriverMock) CreateImage(name, description, url string) <-chan error {
+func (d *DriverMock) ImageExists(name string) bool {
+	d.ImageExistsName = name
+	return d.ImageExistsResult
+}
+
+func (d *DriverMock) CreateImage(name, description, zone, disk string) <-chan error {
 	d.CreateImageName = name
 	d.CreateImageDesc = description
-	d.CreateImageURL = url
+	d.CreateImageZone = zone
+	d.CreateImageDisk = disk

 	resultCh := d.CreateImageErrCh
 	if resultCh == nil {

@@ -73,6 +88,20 @@ func (d *DriverMock) DeleteInstance(zone, name string) (<-chan error, error) {
 	return resultCh, d.DeleteInstanceErr
 }

+func (d *DriverMock) DeleteDisk(zone, name string) (<-chan error, error) {
+	d.DeleteDiskZone = zone
+	d.DeleteDiskName = name
+
+	resultCh := d.DeleteDiskErrCh
+	if resultCh == nil {
+		ch := make(chan error)
+		close(ch)
+		resultCh = ch
+	}
+
+	return resultCh, d.DeleteDiskErr
+}
+
 func (d *DriverMock) GetNatIP(zone, name string) (string, error) {
 	d.GetNatIPZone = zone
 	d.GetNatIPName = name
......
package googlecompute
import (
"fmt"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
// StepCheckExistingImage represents a Packer build step that checks if the
// target image already exists, and aborts immediately if so.
type StepCheckExistingImage int
// Run executes the Packer build step that checks if the image already exists.
func (s *StepCheckExistingImage) Run(state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
driver := state.Get("driver").(Driver)
ui := state.Get("ui").(packer.Ui)
ui.Say("Checking image does not exist...")
exists := driver.ImageExists(config.ImageName)
if exists {
err := fmt.Errorf("Image %s already exists", config.ImageName)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
return multistep.ActionContinue
}
// Cleanup.
func (s *StepCheckExistingImage) Cleanup(state multistep.StateBag) {}
package googlecompute
import (
"github.com/mitchellh/multistep"
"testing"
)
func TestStepCheckExistingImage_impl(t *testing.T) {
var _ multistep.Step = new(StepCheckExistingImage)
}
func TestStepCheckExistingImage(t *testing.T) {
state := testState(t)
step := new(StepCheckExistingImage)
defer step.Cleanup(state)
state.Put("instance_name", "foo")
config := state.Get("config").(*Config)
driver := state.Get("driver").(*DriverMock)
driver.ImageExistsResult = true
// run the step
if action := step.Run(state); action != multistep.ActionHalt {
t.Fatalf("bad action: %#v", action)
}
// Verify state
if driver.ImageExistsName != config.ImageName {
t.Fatalf("bad: %#v", driver.ImageExistsName)
}
}
 package googlecompute

 import (
+	"errors"
 	"fmt"
-	"path/filepath"
+	"time"

 	"github.com/mitchellh/multistep"
 	"github.com/mitchellh/packer/packer"

@@ -14,39 +15,32 @@ type StepCreateImage int

 // Run executes the Packer build step that creates a GCE machine image.
 //
-// Currently the only way to create a GCE image is to run the gcimagebundle
-// command on the running GCE instance.
+// The image is created from the persistent disk used by the instance. The
+// instance must be deleted and the disk retained before doing this step.
 func (s *StepCreateImage) Run(state multistep.StateBag) multistep.StepAction {
 	config := state.Get("config").(*Config)
-	comm := state.Get("communicator").(packer.Communicator)
+	driver := state.Get("driver").(Driver)
 	ui := state.Get("ui").(packer.Ui)

-	sudoPrefix := ""
-	if config.SSHUsername != "root" {
-		sudoPrefix = "sudo "
-	}
-
-	imageFilename := fmt.Sprintf("%s.tar.gz", config.ImageName)
-	imageBundleCmd := "/usr/bin/gcimagebundle -d /dev/sda -o /tmp/"
-
 	ui.Say("Creating image...")
-	cmd := new(packer.RemoteCmd)
-	cmd.Command = fmt.Sprintf("%s%s --output_file_name %s --fssize %d",
-		sudoPrefix, imageBundleCmd, imageFilename, config.DiskSizeGb*1024*1024*1024)
-	err := cmd.StartWithUi(comm, ui)
-	if err == nil && cmd.ExitStatus != 0 {
-		err = fmt.Errorf(
-			"gcimagebundle exited with non-zero exit status: %d", cmd.ExitStatus)
+	errCh := driver.CreateImage(config.ImageName, config.ImageDescription, config.Zone, config.DiskName)
+	var err error
+	select {
+	case err = <-errCh:
+	case <-time.After(config.stateTimeout):
+		err = errors.New("time out while waiting for image to register")
 	}

 	if err != nil {
-		err := fmt.Errorf("Error creating image: %s", err)
+		err := fmt.Errorf("Error waiting for image: %s", err)
 		state.Put("error", err)
 		ui.Error(err.Error())
 		return multistep.ActionHalt
 	}

-	state.Put("image_file_name", filepath.Join("/tmp", imageFilename))
+	state.Put("image_name", config.ImageName)

 	return multistep.ActionContinue
 }

+// Cleanup.
 func (s *StepCreateImage) Cleanup(state multistep.StateBag) {}

 package googlecompute

 import (
-	"strings"
+	"errors"
 	"testing"

 	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
 )

 func TestStepCreateImage_impl(t *testing.T) {

@@ -17,38 +16,49 @@ func TestStepCreateImage(t *testing.T) {
 	step := new(StepCreateImage)
 	defer step.Cleanup(state)

-	comm := new(packer.MockCommunicator)
-	state.Put("communicator", comm)
+	config := state.Get("config").(*Config)
+	driver := state.Get("driver").(*DriverMock)

 	// run the step
 	if action := step.Run(state); action != multistep.ActionContinue {
 		t.Fatalf("bad action: %#v", action)
 	}

-	// Verify
-	if !comm.StartCalled {
-		t.Fatal("start should be called")
+	// Verify state
+	if driver.CreateImageName != config.ImageName {
+		t.Fatalf("bad: %#v", driver.CreateImageName)
+	}
+	if driver.CreateImageDesc != config.ImageDescription {
+		t.Fatalf("bad: %#v", driver.CreateImageDesc)
 	}
-	if strings.HasPrefix(comm.StartCmd.Command, "sudo") {
-		t.Fatal("should not sudo")
+	if driver.CreateImageZone != config.Zone {
+		t.Fatalf("bad: %#v", driver.CreateImageZone)
 	}
-	if !strings.Contains(comm.StartCmd.Command, "gcimagebundle") {
-		t.Fatalf("bad command: %#v", comm.StartCmd.Command)
+	if driver.CreateImageDisk != config.DiskName {
+		t.Fatalf("bad: %#v", driver.CreateImageDisk)
 	}

-	if _, ok := state.GetOk("image_file_name"); !ok {
-		t.Fatal("should have image")
+	nameRaw, ok := state.GetOk("image_name")
+	if !ok {
+		t.Fatal("should have name")
+	}
+	if name, ok := nameRaw.(string); !ok {
+		t.Fatal("name is not a string")
+	} else if name != config.ImageName {
+		t.Fatalf("bad name: %s", name)
 	}
 }

-func TestStepCreateImage_badExitStatus(t *testing.T) {
+func TestStepCreateImage_errorOnChannel(t *testing.T) {
 	state := testState(t)
 	step := new(StepCreateImage)
 	defer step.Cleanup(state)

-	comm := new(packer.MockCommunicator)
-	comm.StartExitStatus = 12
-	state.Put("communicator", comm)
+	errCh := make(chan error, 1)
+	errCh <- errors.New("error")
+
+	driver := state.Get("driver").(*DriverMock)
+	driver.CreateImageErrCh = errCh

 	// run the step
 	if action := step.Run(state); action != multistep.ActionHalt {

@@ -58,39 +68,7 @@ func TestStepCreateImage_badExitStatus(t *testing.T) {
 	if _, ok := state.GetOk("error"); !ok {
 		t.Fatal("should have error")
 	}
-	if _, ok := state.GetOk("image_file_name"); ok {
+	if _, ok := state.GetOk("image_name"); ok {
 		t.Fatal("should NOT have image")
 	}
 }
-
-func TestStepCreateImage_nonRoot(t *testing.T) {
-	state := testState(t)
-	step := new(StepCreateImage)
-	defer step.Cleanup(state)
-
-	comm := new(packer.MockCommunicator)
-	state.Put("communicator", comm)
-
-	config := state.Get("config").(*Config)
-	config.SSHUsername = "bob"
-
-	// run the step
-	if action := step.Run(state); action != multistep.ActionContinue {
-		t.Fatalf("bad action: %#v", action)
-	}
-
-	// Verify
-	if !comm.StartCalled {
-		t.Fatal("start should be called")
-	}
-	if !strings.HasPrefix(comm.StartCmd.Command, "sudo") {
-		t.Fatal("should sudo")
-	}
-	if !strings.Contains(comm.StartCmd.Command, "gcimagebundle") {
-		t.Fatalf("bad command: %#v", comm.StartCmd.Command)
-	}
-	if _, ok := state.GetOk("image_file_name"); !ok {
-		t.Fatal("should have image")
-	}
-}
@@ -12,8 +12,6 @@ import (
 // StepCreateInstance represents a Packer build step that creates GCE instances.
 type StepCreateInstance struct {
 	Debug bool
-
-	instanceName string
 }

 func (config *Config) getImage() Image {

@@ -91,14 +89,18 @@ func (s *StepCreateInstance) Run(state multistep.StateBag) multistep.StepAction {
 	// Things succeeded, store the name so we can remove it later
 	state.Put("instance_name", name)
-	s.instanceName = name

 	return multistep.ActionContinue
 }

 // Cleanup destroys the GCE instance created during the image creation process.
 func (s *StepCreateInstance) Cleanup(state multistep.StateBag) {
-	if s.instanceName == "" {
+	nameRaw, ok := state.GetOk("instance_name")
+	if !ok {
+		return
+	}
+	name := nameRaw.(string)
+	if name == "" {
 		return
 	}

@@ -107,7 +109,7 @@ func (s *StepCreateInstance) Cleanup(state multistep.StateBag) {
 	ui := state.Get("ui").(packer.Ui)

 	ui.Say("Deleting instance...")
-	errCh, err := driver.DeleteInstance(config.Zone, s.instanceName)
+	errCh, err := driver.DeleteInstance(config.Zone, name)
 	if err == nil {
 		select {
 		case err = <-errCh:

@@ -120,9 +122,32 @@ func (s *StepCreateInstance) Cleanup(state multistep.StateBag) {
 		ui.Error(fmt.Sprintf(
 			"Error deleting instance. Please delete it manually.\n\n"+
 				"Name: %s\n"+
-				"Error: %s", s.instanceName, err))
+				"Error: %s", name, err))
 	}

-	s.instanceName = ""
+	ui.Message("Instance has been deleted!")
+	state.Put("instance_name", "")
+
+	// Deleting the instance does not remove the boot disk. This cleanup removes
+	// the disk.
+	ui.Say("Deleting disk...")
+	errCh, err = driver.DeleteDisk(config.Zone, config.DiskName)
+	if err == nil {
+		select {
+		case err = <-errCh:
+		case <-time.After(config.stateTimeout):
+			err = errors.New("time out while waiting for disk to delete")
+		}
+	}
+
+	if err != nil {
+		ui.Error(fmt.Sprintf(
+			"Error deleting disk. Please delete it manually.\n\n"+
+				"Name: %s\n"+
+				"Error: %s", config.InstanceName, err))
+	}
+
+	ui.Message("Disk has been deleted!")

 	return
 }

@@ -39,7 +39,14 @@ func TestStepCreateInstance(t *testing.T) {
 		t.Fatal("should've deleted instance")
 	}
 	if driver.DeleteInstanceZone != config.Zone {
-		t.Fatal("bad zone: %#v", driver.DeleteInstanceZone)
+		t.Fatalf("bad instance zone: %#v", driver.DeleteInstanceZone)
+	}
+
+	if driver.DeleteDiskName != config.InstanceName {
+		t.Fatal("should've deleted disk")
+	}
+	if driver.DeleteDiskZone != config.Zone {
+		t.Fatalf("bad disk zone: %#v", driver.DeleteDiskZone)
 	}
 }
......
package googlecompute
import (
"errors"
"fmt"
"time"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
// StepRegisterImage represents a Packer build step that registers GCE machine images.
type StepRegisterImage int
// Run executes the Packer build step that registers a GCE machine image.
func (s *StepRegisterImage) Run(state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
driver := state.Get("driver").(Driver)
ui := state.Get("ui").(packer.Ui)
var err error
imageURL := fmt.Sprintf(
"https://storage.cloud.google.com/%s/%s.tar.gz",
config.BucketName, config.ImageName)
ui.Say("Registering image...")
errCh := driver.CreateImage(config.ImageName, config.ImageDescription, imageURL)
select {
case err = <-errCh:
case <-time.After(config.stateTimeout):
err = errors.New("time out while waiting for image to register")
}
if err != nil {
err := fmt.Errorf("Error waiting for image: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
state.Put("image_name", config.ImageName)
return multistep.ActionContinue
}
// Cleanup.
func (s *StepRegisterImage) Cleanup(state multistep.StateBag) {}
package googlecompute
import (
"errors"
"github.com/mitchellh/multistep"
"testing"
"time"
)
func TestStepRegisterImage_impl(t *testing.T) {
var _ multistep.Step = new(StepRegisterImage)
}
func TestStepRegisterImage(t *testing.T) {
state := testState(t)
step := new(StepRegisterImage)
defer step.Cleanup(state)
config := state.Get("config").(*Config)
driver := state.Get("driver").(*DriverMock)
// run the step
if action := step.Run(state); action != multistep.ActionContinue {
t.Fatalf("bad action: %#v", action)
}
// Verify state
if driver.CreateImageName != config.ImageName {
t.Fatalf("bad: %#v", driver.CreateImageName)
}
if driver.CreateImageDesc != config.ImageDescription {
t.Fatalf("bad: %#v", driver.CreateImageDesc)
}
nameRaw, ok := state.GetOk("image_name")
if !ok {
t.Fatal("should have name")
}
if name, ok := nameRaw.(string); !ok {
t.Fatal("name is not a string")
} else if name != config.ImageName {
t.Fatalf("bad name: %s", name)
}
}
func TestStepRegisterImage_waitError(t *testing.T) {
state := testState(t)
step := new(StepRegisterImage)
defer step.Cleanup(state)
errCh := make(chan error, 1)
errCh <- errors.New("error")
driver := state.Get("driver").(*DriverMock)
driver.CreateImageErrCh = errCh
// run the step
if action := step.Run(state); action != multistep.ActionHalt {
t.Fatalf("bad action: %#v", action)
}
// Verify state
if _, ok := state.GetOk("error"); !ok {
t.Fatal("should have error")
}
if _, ok := state.GetOk("image_name"); ok {
t.Fatal("should NOT have image_name")
}
}
func TestStepRegisterImage_errorTimeout(t *testing.T) {
state := testState(t)
step := new(StepRegisterImage)
defer step.Cleanup(state)
errCh := make(chan error, 1)
go func() {
<-time.After(10 * time.Millisecond)
errCh <- nil
}()
config := state.Get("config").(*Config)
config.stateTimeout = 1 * time.Microsecond
driver := state.Get("driver").(*DriverMock)
driver.CreateImageErrCh = errCh
// run the step
if action := step.Run(state); action != multistep.ActionHalt {
t.Fatalf("bad action: %#v", action)
}
// Verify state
if _, ok := state.GetOk("error"); !ok {
t.Fatal("should have error")
}
if _, ok := state.GetOk("image_name"); ok {
t.Fatal("should NOT have image name")
}
}
package googlecompute
import (
"errors"
"fmt"
"time"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
// StepTeardownInstance represents a Packer build step that tears down GCE
// instances.
type StepTeardownInstance struct {
Debug bool
}
// Run executes the Packer build step that tears down a GCE instance.
func (s *StepTeardownInstance) Run(state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
driver := state.Get("driver").(Driver)
ui := state.Get("ui").(packer.Ui)
name := config.InstanceName
if name == "" {
return multistep.ActionHalt
}
ui.Say("Deleting instance...")
errCh, err := driver.DeleteInstance(config.Zone, name)
if err == nil {
select {
case err = <-errCh:
case <-time.After(config.stateTimeout):
err = errors.New("time out while waiting for instance to delete")
}
}
if err != nil {
ui.Error(fmt.Sprintf(
"Error deleting instance. Please delete it manually.\n\n"+
"Name: %s\n"+
"Error: %s", name, err))
return multistep.ActionHalt
}
ui.Message("Instance has been deleted!")
state.Put("instance_name", "")
return multistep.ActionContinue
}
// Deleting the instance does not remove the boot disk. This cleanup removes
// the disk.
func (s *StepTeardownInstance) Cleanup(state multistep.StateBag) {
config := state.Get("config").(*Config)
driver := state.Get("driver").(Driver)
ui := state.Get("ui").(packer.Ui)
ui.Say("Deleting disk...")
errCh, err := driver.DeleteDisk(config.Zone, config.DiskName)
if err == nil {
select {
case err = <-errCh:
case <-time.After(config.stateTimeout):
err = errors.New("time out while waiting for disk to delete")
}
}
if err != nil {
ui.Error(fmt.Sprintf(
"Error deleting disk. Please delete it manually.\n\n"+
"Name: %s\n"+
"Error: %s", config.DiskName, err))
return
}
ui.Message("Disk has been deleted!")
}
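// Both Run and Cleanup above use the same pattern for waiting on an
// asynchronous driver call: the driver returns an error channel immediately,
// and the step selects on it against the configured state timeout. A minimal
// sketch of that pattern in isolation (waitForOperation and its start argument
// are hypothetical helpers, not part of this package):
//
//	func waitForOperation(start func() (<-chan error, error), timeout time.Duration) error {
//		errCh, err := start()
//		if err != nil {
//			return err
//		}
//		select {
//		case err = <-errCh:
//			return err
//		case <-time.After(timeout):
//			return errors.New("timed out waiting for operation to finish")
//		}
//	}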
package googlecompute
import (
"github.com/mitchellh/multistep"
"testing"
)
func TestStepTeardownInstance_impl(t *testing.T) {
var _ multistep.Step = new(StepTeardownInstance)
}
func TestStepTeardownInstance(t *testing.T) {
state := testState(t)
step := new(StepTeardownInstance)
defer step.Cleanup(state)
config := state.Get("config").(*Config)
driver := state.Get("driver").(*DriverMock)
// run the step
if action := step.Run(state); action != multistep.ActionContinue {
t.Fatalf("bad action: %#v", action)
}
if driver.DeleteInstanceName != config.InstanceName {
t.Fatal("should've deleted instance")
}
if driver.DeleteInstanceZone != config.Zone {
t.Fatalf("bad zone: %#v", driver.DeleteInstanceZone)
}
// cleanup
step.Cleanup(state)
if driver.DeleteDiskName != config.InstanceName {
t.Fatal("should've deleted disk")
}
if driver.DeleteDiskZone != config.Zone {
t.Fatalf("bad zone: %#v", driver.DeleteDiskZone)
}
}
package googlecompute
import (
"fmt"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
// StepUpdateGcloud represents a Packer build step that updates the gcloud
// components to the latest versions available.
type StepUpdateGcloud int
// Run executes the Packer build step that updates the gcloud components to
// the latest versions available.
//
// This step is required to prevent the image creation process from hanging;
// the image creation process utilizes the gcimagebundle cli tool which will
// prompt to update gsutil if a newer version is available.
func (s *StepUpdateGcloud) Run(state multistep.StateBag) multistep.StepAction {
comm := state.Get("communicator").(packer.Communicator)
config := state.Get("config").(*Config)
ui := state.Get("ui").(packer.Ui)
sudoPrefix := ""
if config.SSHUsername != "root" {
sudoPrefix = "sudo "
}
gsutilUpdateCmd := "/usr/local/bin/gcloud -q components update"
cmd := new(packer.RemoteCmd)
cmd.Command = fmt.Sprintf("%s%s", sudoPrefix, gsutilUpdateCmd)
ui.Say("Updating gcloud components...")
err := cmd.StartWithUi(comm, ui)
if err == nil && cmd.ExitStatus != 0 {
err = fmt.Errorf(
"gcloud components update exited with non-zero exit status: %d", cmd.ExitStatus)
}
if err != nil {
err := fmt.Errorf("Error updating gcloud components: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
return multistep.ActionContinue
}
// Cleanup.
func (s *StepUpdateGcloud) Cleanup(state multistep.StateBag) {}
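// For reference, the remote command assembled by Run above looks like this
// (assuming the default gcloud install path used in the code):
//
//	/usr/local/bin/gcloud -q components update          // ssh_username == "root"
//	sudo /usr/local/bin/gcloud -q components update     // any other ssh_username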
package googlecompute
import (
"strings"
"testing"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
func TestStepUpdateGcloud_impl(t *testing.T) {
var _ multistep.Step = new(StepUpdateGcloud)
}
func TestStepUpdateGcloud(t *testing.T) {
state := testState(t)
step := new(StepUpdateGcloud)
defer step.Cleanup(state)
comm := new(packer.MockCommunicator)
state.Put("communicator", comm)
// run the step
if action := step.Run(state); action != multistep.ActionContinue {
t.Fatalf("bad action: %#v", action)
}
// Verify
if !comm.StartCalled {
t.Fatal("start should be called")
}
if strings.HasPrefix(comm.StartCmd.Command, "sudo") {
t.Fatal("should not sudo")
}
if !strings.Contains(comm.StartCmd.Command, "gcloud -q components update") {
t.Fatalf("bad command: %#v", comm.StartCmd.Command)
}
}
func TestStepUpdateGcloud_badExitStatus(t *testing.T) {
state := testState(t)
step := new(StepUpdateGcloud)
defer step.Cleanup(state)
comm := new(packer.MockCommunicator)
comm.StartExitStatus = 12
state.Put("communicator", comm)
// run the step
if action := step.Run(state); action != multistep.ActionHalt {
t.Fatalf("bad action: %#v", action)
}
if _, ok := state.GetOk("error"); !ok {
t.Fatal("should have error")
}
}
func TestStepUpdateGcloud_nonRoot(t *testing.T) {
state := testState(t)
step := new(StepUpdateGcloud)
defer step.Cleanup(state)
comm := new(packer.MockCommunicator)
state.Put("communicator", comm)
config := state.Get("config").(*Config)
config.SSHUsername = "bob"
// run the step
if action := step.Run(state); action != multistep.ActionContinue {
t.Fatalf("bad action: %#v", action)
}
// Verify
if !comm.StartCalled {
t.Fatal("start should be called")
}
if !strings.HasPrefix(comm.StartCmd.Command, "sudo") {
t.Fatal("should sudo")
}
if !strings.Contains(comm.StartCmd.Command, "gcloud -q components update") {
t.Fatalf("bad command: %#v", comm.StartCmd.Command)
}
}
package googlecompute
import (
"fmt"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
// StepUploadImage represents a Packer build step that uploads GCE machine images.
type StepUploadImage int
// Run executes the Packer build step that uploads a GCE machine image.
func (s *StepUploadImage) Run(state multistep.StateBag) multistep.StepAction {
comm := state.Get("communicator").(packer.Communicator)
config := state.Get("config").(*Config)
imageFilename := state.Get("image_file_name").(string)
ui := state.Get("ui").(packer.Ui)
sudoPrefix := ""
if config.SSHUsername != "root" {
sudoPrefix = "sudo "
}
ui.Say("Uploading image...")
cmd := new(packer.RemoteCmd)
cmd.Command = fmt.Sprintf("%s/usr/local/bin/gsutil cp %s gs://%s",
sudoPrefix, imageFilename, config.BucketName)
err := cmd.StartWithUi(comm, ui)
if err == nil && cmd.ExitStatus != 0 {
err = fmt.Errorf(
"gsutil exited with non-zero exit status: %d", cmd.ExitStatus)
}
if err != nil {
err := fmt.Errorf("Error uploading image: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
return multistep.ActionContinue
}
// Cleanup.
func (s *StepUploadImage) Cleanup(state multistep.StateBag) {}
package googlecompute
import (
"strings"
"testing"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
func TestStepUploadImage_impl(t *testing.T) {
var _ multistep.Step = new(StepUploadImage)
}
func TestStepUploadImage(t *testing.T) {
state := testState(t)
step := new(StepUploadImage)
defer step.Cleanup(state)
comm := new(packer.MockCommunicator)
state.Put("communicator", comm)
state.Put("image_file_name", "foo")
// run the step
if action := step.Run(state); action != multistep.ActionContinue {
t.Fatalf("bad action: %#v", action)
}
// Verify
if !comm.StartCalled {
t.Fatal("start should be called")
}
if strings.HasPrefix(comm.StartCmd.Command, "sudo") {
t.Fatal("should not sudo")
}
if !strings.Contains(comm.StartCmd.Command, "gsutil cp") {
t.Fatalf("bad command: %#v", comm.StartCmd.Command)
}
}
func TestStepUploadImage_badExitStatus(t *testing.T) {
state := testState(t)
step := new(StepUploadImage)
defer step.Cleanup(state)
comm := new(packer.MockCommunicator)
comm.StartExitStatus = 12
state.Put("communicator", comm)
state.Put("image_file_name", "foo")
// run the step
if action := step.Run(state); action != multistep.ActionHalt {
t.Fatalf("bad action: %#v", action)
}
if _, ok := state.GetOk("error"); !ok {
t.Fatal("should have error")
}
}
func TestStepUploadImage_nonRoot(t *testing.T) {
state := testState(t)
step := new(StepUploadImage)
defer step.Cleanup(state)
comm := new(packer.MockCommunicator)
state.Put("communicator", comm)
state.Put("image_file_name", "foo")
config := state.Get("config").(*Config)
config.SSHUsername = "bob"
// run the step
if action := step.Run(state); action != multistep.ActionContinue {
t.Fatalf("bad action: %#v", action)
}
// Verify
if !comm.StartCalled {
t.Fatal("start should be called")
}
if !strings.HasPrefix(comm.StartCmd.Command, "sudo") {
t.Fatal("should sudo")
}
if !strings.Contains(comm.StartCmd.Command, "gsutil cp") {
t.Fatalf("bad command: %#v", comm.StartCmd.Command)
}
}
...@@ -41,6 +41,6 @@ func (a *Artifact) State(name string) interface{} { ...@@ -41,6 +41,6 @@ func (a *Artifact) State(name string) interface{} {
} }
func (a *Artifact) Destroy() error { func (a *Artifact) Destroy() error {
log.Printf("Destroying image: %d", a.ImageId) log.Printf("Destroying image: %s", a.ImageId)
return a.Conn.DeleteImageById(a.ImageId) return a.Conn.DeleteImageById(a.ImageId)
} }
...@@ -95,12 +95,15 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe ...@@ -95,12 +95,15 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
SecurityGroups: b.config.SecurityGroups, SecurityGroups: b.config.SecurityGroups,
Networks: b.config.Networks, Networks: b.config.Networks,
}, },
&StepWaitForRackConnect{
Wait: b.config.RackconnectWait,
},
&StepAllocateIp{ &StepAllocateIp{
FloatingIpPool: b.config.FloatingIpPool, FloatingIpPool: b.config.FloatingIpPool,
FloatingIp: b.config.FloatingIp, FloatingIp: b.config.FloatingIp,
}, },
&common.StepConnectSSH{ &common.StepConnectSSH{
SSHAddress: SSHAddress(csp, b.config.SSHPort), SSHAddress: SSHAddress(csp, b.config.SSHInterface, b.config.SSHPort),
SSHConfig: SSHConfig(b.config.SSHUsername), SSHConfig: SSHConfig(b.config.SSHUsername),
SSHWaitTimeout: b.config.SSHTimeout(), SSHWaitTimeout: b.config.SSHTimeout(),
}, },
......
...@@ -15,8 +15,10 @@ type RunConfig struct { ...@@ -15,8 +15,10 @@ type RunConfig struct {
RawSSHTimeout string `mapstructure:"ssh_timeout"` RawSSHTimeout string `mapstructure:"ssh_timeout"`
SSHUsername string `mapstructure:"ssh_username"` SSHUsername string `mapstructure:"ssh_username"`
SSHPort int `mapstructure:"ssh_port"` SSHPort int `mapstructure:"ssh_port"`
SSHInterface string `mapstructure:"ssh_interface"`
OpenstackProvider string `mapstructure:"openstack_provider"` OpenstackProvider string `mapstructure:"openstack_provider"`
UseFloatingIp bool `mapstructure:"use_floating_ip"` UseFloatingIp bool `mapstructure:"use_floating_ip"`
RackconnectWait bool `mapstructure:"rackconnect_wait"`
FloatingIpPool string `mapstructure:"floating_ip_pool"` FloatingIpPool string `mapstructure:"floating_ip_pool"`
FloatingIp string `mapstructure:"floating_ip"` FloatingIp string `mapstructure:"floating_ip"`
SecurityGroups []string `mapstructure:"security_groups"` SecurityGroups []string `mapstructure:"security_groups"`
...@@ -68,10 +70,14 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { ...@@ -68,10 +70,14 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error {
} }
templates := map[string]*string{ templates := map[string]*string{
"flavor": &c.Flavor, "flavor": &c.Flavor,
"ssh_timeout": &c.RawSSHTimeout, "ssh_timeout": &c.RawSSHTimeout,
"ssh_username": &c.SSHUsername, "ssh_username": &c.SSHUsername,
"source_image": &c.SourceImage, "ssh_interface": &c.SSHInterface,
"source_image": &c.SourceImage,
"openstack_provider": &c.OpenstackProvider,
"floating_ip_pool": &c.FloatingIpPool,
"floating_ip": &c.FloatingIp,
} }
for n, ptr := range templates { for n, ptr := range templates {
......
...@@ -12,7 +12,7 @@ import ( ...@@ -12,7 +12,7 @@ import (
// SSHAddress returns a function that can be given to the SSH communicator // SSHAddress returns a function that can be given to the SSH communicator
// for determining the SSH address based on the server AccessIPv4 setting. // for determining the SSH address based on the server AccessIPv4 setting.
func SSHAddress(csp gophercloud.CloudServersProvider, port int) func(multistep.StateBag) (string, error) { func SSHAddress(csp gophercloud.CloudServersProvider, sshinterface string, port int) func(multistep.StateBag) (string, error) {
return func(state multistep.StateBag) (string, error) { return func(state multistep.StateBag) (string, error) {
s := state.Get("server").(*gophercloud.Server) s := state.Get("server").(*gophercloud.Server)
...@@ -25,6 +25,11 @@ func SSHAddress(csp gophercloud.CloudServersProvider, port int) func(multistep.S ...@@ -25,6 +25,11 @@ func SSHAddress(csp gophercloud.CloudServersProvider, port int) func(multistep.S
return "", errors.New("Error parsing SSH addresses") return "", errors.New("Error parsing SSH addresses")
} }
for pool, addresses := range ip_pools { for pool, addresses := range ip_pools {
if sshinterface != "" {
if pool != sshinterface {
continue
}
}
if pool != "" { if pool != "" {
for _, address := range addresses { for _, address := range addresses {
if address.Addr != "" && address.Version == 4 { if address.Addr != "" && address.Version == 4 {
......
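// A small illustration of how the new ssh_interface argument narrows the pool
// search in SSHAddress (the pool name and usage below are made up for the example):
//
//	// Only consider addresses from the "private" address pool.
//	addrFunc := SSHAddress(csp, "private", 22)
//	sshAddress, err := addrFunc(state) // picks an IPv4 address from that pool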
package openstack
import (
"fmt"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
"time"
"github.com/mitchellh/gophercloud-fork-40444fb"
)
type StepWaitForRackConnect struct {
Wait bool
}
func (s *StepWaitForRackConnect) Run(state multistep.StateBag) multistep.StepAction {
if !s.Wait {
return multistep.ActionContinue
}
csp := state.Get("csp").(gophercloud.CloudServersProvider)
server := state.Get("server").(*gophercloud.Server)
ui := state.Get("ui").(packer.Ui)
ui.Say(fmt.Sprintf("Waiting for server (%s) to become RackConnect ready...", server.Id))
for {
server, err := csp.ServerById(server.Id)
if err != nil {
return multistep.ActionHalt
}
if server.Metadata["rackconnect_automation_status"] == "DEPLOYED" {
break
}
time.Sleep(2 * time.Second)
}
return multistep.ActionContinue
}
func (s *StepWaitForRackConnect) Cleanup(state multistep.StateBag) {
}
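// Run above polls the server metadata every two seconds and only returns once
// rackconnect_automation_status reports DEPLOYED. A bounded variant of the same
// loop (a sketch only; the step as written waits indefinitely):
//
//	deadline := time.Now().Add(30 * time.Minute)
//	for time.Now().Before(deadline) {
//		server, err := csp.ServerById(server.Id)
//		if err != nil {
//			return multistep.ActionHalt
//		}
//		if server.Metadata["rackconnect_automation_status"] == "DEPLOYED" {
//			return multistep.ActionContinue
//		}
//		time.Sleep(2 * time.Second)
//	}
//	return multistep.ActionHalt // timed out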
...@@ -55,6 +55,7 @@ func NewDriver() (Driver, error) { ...@@ -55,6 +55,7 @@ func NewDriver() (Driver, error) {
var drivers map[string]Driver var drivers map[string]Driver
var prlctlPath string var prlctlPath string
var supportedVersions []string var supportedVersions []string
dhcp_lease_file := "/Library/Preferences/Parallels/parallels_dhcp_leases"
if runtime.GOOS != "darwin" { if runtime.GOOS != "darwin" {
return nil, fmt.Errorf( return nil, fmt.Errorf(
...@@ -74,11 +75,13 @@ func NewDriver() (Driver, error) { ...@@ -74,11 +75,13 @@ func NewDriver() (Driver, error) {
drivers = map[string]Driver{ drivers = map[string]Driver{
"10": &Parallels10Driver{ "10": &Parallels10Driver{
Parallels9Driver: Parallels9Driver{ Parallels9Driver: Parallels9Driver{
PrlctlPath: prlctlPath, PrlctlPath: prlctlPath,
dhcp_lease_file: dhcp_lease_file,
}, },
}, },
"9": &Parallels9Driver{ "9": &Parallels9Driver{
PrlctlPath: prlctlPath, PrlctlPath: prlctlPath,
dhcp_lease_file: dhcp_lease_file,
}, },
} }
......
...@@ -9,6 +9,7 @@ import ( ...@@ -9,6 +9,7 @@ import (
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"regexp" "regexp"
"strconv"
"strings" "strings"
"time" "time"
...@@ -18,6 +19,8 @@ import ( ...@@ -18,6 +19,8 @@ import (
type Parallels9Driver struct { type Parallels9Driver struct {
// This is the path to the "prlctl" application. // This is the path to the "prlctl" application.
PrlctlPath string PrlctlPath string
// The path to the parallels_dhcp_leases file
dhcp_lease_file string
} }
func (d *Parallels9Driver) Import(name, srcPath, dstDir string, reassignMac bool) error { func (d *Parallels9Driver) Import(name, srcPath, dstDir string, reassignMac bool) error {
...@@ -276,31 +279,43 @@ func (d *Parallels9Driver) Mac(vmName string) (string, error) { ...@@ -276,31 +279,43 @@ func (d *Parallels9Driver) Mac(vmName string) (string, error) {
} }
// Finds the IP address of a DHCP-connected VM by its MAC address // Finds the IP address of a DHCP-connected VM by its MAC address
//
// Parses the /Library/Preferences/Parallels/parallels_dhcp_leases file,
// which contains the list of DHCP leases handed out by Parallels Desktop.
// Example line:
// 10.211.55.181="1418921112,1800,001c42f593fb,ff42f593fb000100011c25b9ff001c42f593fb"
// IP Address ="Lease expiry, Lease time, MAC, MAC or DUID"
func (d *Parallels9Driver) IpAddress(mac string) (string, error) { func (d *Parallels9Driver) IpAddress(mac string) (string, error) {
var stdout bytes.Buffer
dhcp_lease_file := "/Library/Preferences/Parallels/parallels_dhcp_leases"
if len(mac) != 12 { if len(mac) != 12 {
return "", fmt.Errorf("Not a valid MAC address: %s. It should be exactly 12 digits.", mac) return "", fmt.Errorf("Not a valid MAC address: %s. It should be exactly 12 digits.", mac)
} }
cmd := exec.Command("grep", "-i", mac, dhcp_lease_file) leases, err := ioutil.ReadFile(d.dhcp_lease_file)
cmd.Stdout = &stdout if err != nil {
if err := cmd.Run(); err != nil {
return "", err return "", err
} }
stdoutString := strings.TrimSpace(stdout.String()) re := regexp.MustCompile("(.*)=\"(.*),(.*)," + strings.ToLower(mac) + ",.*\"")
re := regexp.MustCompile("(.*)=.*") mostRecentIp := ""
ipMatch := re.FindAllStringSubmatch(stdoutString, 1) mostRecentLease := uint64(0)
for _, l := range re.FindAllStringSubmatch(string(leases), -1) {
ip := l[1]
expiry, _ := strconv.ParseUint(l[2], 10, 64)
leaseTime, _ := strconv.ParseUint(l[3], 10, 32)
log.Printf("Found lease: %s for MAC: %s, expiring at %d, leased for %d s.\n", ip, mac, expiry, leaseTime)
if mostRecentLease <= expiry-leaseTime {
mostRecentIp = ip
mostRecentLease = expiry - leaseTime
}
}
if len(ipMatch) != 1 { if len(mostRecentIp) == 0 {
return "", fmt.Errorf("IP lease not found for MAC address %s in: %s\n", mac, dhcp_lease_file) return "", fmt.Errorf("IP lease not found for MAC address %s in: %s\n", mac, d.dhcp_lease_file)
} }
ip := ipMatch[0][1] log.Printf("Found IP lease: %s for MAC address %s\n", mostRecentIp, mac)
log.Printf("Found IP lease: %s for MAC address %s\n", ip, mac) return mostRecentIp, nil
return ip, nil
} }
func (d *Parallels9Driver) ToolsIsoPath(k string) (string, error) { func (d *Parallels9Driver) ToolsIsoPath(k string) (string, error) {
......
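// Worked example of the lease matching introduced above, using the sample line
// from the comment (illustrative only):
//
//	mac := "001c42f593fb"
//	re := regexp.MustCompile("(.*)=\"(.*),(.*)," + mac + ",.*\"")
//	line := `10.211.55.181="1418921112,1800,001c42f593fb,ff42f593fb000100011c25b9ff001c42f593fb"`
//	m := re.FindStringSubmatch(line)
//	// m[1] == "10.211.55.181" (IP), m[2] == "1418921112" (lease expiry), m[3] == "1800" (lease time)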
package common package common
import ( import (
"io/ioutil"
"os"
"testing" "testing"
) )
func TestParallels9Driver_impl(t *testing.T) { func TestParallels9Driver_impl(t *testing.T) {
var _ Driver = new(Parallels9Driver) var _ Driver = new(Parallels9Driver)
} }
func TestIpAddress(t *testing.T) {
tf, err := ioutil.TempFile("", "packer")
if err != nil {
t.Fatalf("err: %s", err)
}
defer os.Remove(tf.Name())
d := Parallels9Driver{
dhcp_lease_file: tf.Name(),
}
// No lease should be found in an empty file
ip, err := d.IpAddress("123456789012")
if err == nil {
t.Fatalf("Found IP: \"%v\". No IP should be found!\n", ip)
}
// The most recent lease, 10.211.55.126 should be found
c := []byte(`
[vnic0]
10.211.55.125="1418288000,1800,001c4235240c,ff4235240c000100011c1c10e7001c4235240c"
10.211.55.126="1418288969,1800,001c4235240c,ff4235240c000100011c1c11ad001c4235240c"
10.211.55.254="1411712008,1800,001c42a51419,01001c42a51419"
`)
ioutil.WriteFile(tf.Name(), c, 0666)
ip, err = d.IpAddress("001C4235240c")
if err != nil {
t.Fatalf("Error: %v\n", err)
}
if ip != "10.211.55.126" {
t.Fatalf("Should have found 10.211.55.126, not %s!\n", ip)
}
// The most recent lease, 10.211.55.124 should be found
c = []byte(`[vnic0]
10.211.55.124="1418288969,1800,001c4235240c,ff4235240c000100011c1c11ad001c4235240c"
10.211.55.125="1418288000,1800,001c4235240c,ff4235240c000100011c1c10e7001c4235240c"
10.211.55.254="1411712008,1800,001c42a51419,01001c42a51419"
`)
ioutil.WriteFile(tf.Name(), c, 0666)
ip, err = d.IpAddress("001c4235240c")
if err != nil {
t.Fatalf("Error: %v\n", err)
}
if ip != "10.211.55.124" {
t.Fatalf("Should have found 10.211.55.124, not %s!\n", ip)
}
}
...@@ -256,6 +256,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe ...@@ -256,6 +256,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
new(stepHTTPServer), new(stepHTTPServer),
new(stepCreateVM), new(stepCreateVM),
new(stepCreateDisk), new(stepCreateDisk),
new(stepSetBootOrder),
new(stepAttachISO), new(stepAttachISO),
&parallelscommon.StepAttachParallelsTools{ &parallelscommon.StepAttachParallelsTools{
ParallelsToolsMode: b.config.ParallelsToolsMode, ParallelsToolsMode: b.config.ParallelsToolsMode,
......
...@@ -75,7 +75,7 @@ func TestBuilderPrepare_DiskSize(t *testing.T) { ...@@ -75,7 +75,7 @@ func TestBuilderPrepare_DiskSize(t *testing.T) {
} }
if b.config.DiskSize != 60000 { if b.config.DiskSize != 60000 {
t.Fatalf("bad size: %s", b.config.DiskSize) t.Fatalf("bad size: %d", b.config.DiskSize)
} }
} }
......
...@@ -17,9 +17,8 @@ import ( ...@@ -17,9 +17,8 @@ import (
// vmName string // vmName string
// //
// Produces: // Produces:
type stepAttachISO struct { // attachedIso bool
cdromDevice string type stepAttachISO struct{}
}
func (s *stepAttachISO) Run(state multistep.StateBag) multistep.StepAction { func (s *stepAttachISO) Run(state multistep.StateBag) multistep.StepAction {
driver := state.Get("driver").(parallelscommon.Driver) driver := state.Get("driver").(parallelscommon.Driver)
...@@ -27,76 +26,42 @@ func (s *stepAttachISO) Run(state multistep.StateBag) multistep.StepAction { ...@@ -27,76 +26,42 @@ func (s *stepAttachISO) Run(state multistep.StateBag) multistep.StepAction {
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
vmName := state.Get("vmName").(string) vmName := state.Get("vmName").(string)
// Attach the disk to the controller // Attach the ISO to the cdrom0 device. We can't use a separate device because the VM fails to boot in PD9 [GH-1667]
ui.Say("Attaching ISO to the new CD/DVD drive...") ui.Say("Attaching ISO to the default CD/DVD ROM device...")
cdrom, err := driver.DeviceAddCdRom(vmName, isoPath)
if err != nil {
err := fmt.Errorf("Error attaching ISO: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
// Set new boot order
ui.Say("Setting the boot order...")
command := []string{ command := []string{
"set", vmName, "set", vmName,
"--device-bootorder", fmt.Sprintf("hdd0 %s cdrom0 net0", cdrom), "--device-set", "cdrom0",
"--image", isoPath,
"--enable", "--connect",
} }
if err := driver.Prlctl(command...); err != nil { if err := driver.Prlctl(command...); err != nil {
err := fmt.Errorf("Error setting the boot order: %s", err) err := fmt.Errorf("Error attaching ISO: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
// Disable 'cdrom0' device
ui.Say("Disabling default CD/DVD drive...")
command = []string{
"set", vmName,
"--device-set", "cdrom0", "--disable",
}
if err := driver.Prlctl(command...); err != nil {
err := fmt.Errorf("Error disabling default CD/DVD drive: %s", err)
state.Put("error", err) state.Put("error", err)
ui.Error(err.Error()) ui.Error(err.Error())
return multistep.ActionHalt return multistep.ActionHalt
} }
// Track the device name so that we can can delete later // Set some state so we know to remove
s.cdromDevice = cdrom state.Put("attachedIso", true)
return multistep.ActionContinue return multistep.ActionContinue
} }
func (s *stepAttachISO) Cleanup(state multistep.StateBag) { func (s *stepAttachISO) Cleanup(state multistep.StateBag) {
if _, ok := state.GetOk("attachedIso"); !ok {
return
}
driver := state.Get("driver").(parallelscommon.Driver) driver := state.Get("driver").(parallelscommon.Driver)
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
vmName := state.Get("vmName").(string) vmName := state.Get("vmName").(string)
// Enable 'cdrom0' device back // Detach ISO by setting an empty string image.
log.Println("Enabling default CD/DVD drive...") log.Println("Detaching ISO from the default CD/DVD ROM device...")
command := []string{ command := []string{
"set", vmName, "set", vmName,
"--device-set", "cdrom0", "--enable", "--disconnect", "--device-set", "cdrom0",
} "--image", "", "--disconnect", "--enable",
if err := driver.Prlctl(command...); err != nil {
ui.Error(fmt.Sprintf("Error enabling default CD/DVD drive: %s", err))
}
// Detach ISO
if s.cdromDevice == "" {
return
}
log.Println("Detaching ISO...")
command = []string{
"set", vmName,
"--device-del", s.cdromDevice,
} }
if err := driver.Prlctl(command...); err != nil { if err := driver.Prlctl(command...); err != nil {
......
package iso
import (
"fmt"
"github.com/mitchellh/multistep"
parallelscommon "github.com/mitchellh/packer/builder/parallels/common"
"github.com/mitchellh/packer/packer"
)
// This step sets the device boot order for the virtual machine.
//
// Uses:
// driver Driver
// ui packer.Ui
// vmName string
//
// Produces:
type stepSetBootOrder struct{}
func (s *stepSetBootOrder) Run(state multistep.StateBag) multistep.StepAction {
driver := state.Get("driver").(parallelscommon.Driver)
ui := state.Get("ui").(packer.Ui)
vmName := state.Get("vmName").(string)
// Set new boot order
ui.Say("Setting the boot order...")
command := []string{
"set", vmName,
"--device-bootorder", fmt.Sprintf("hdd0 cdrom0 net0"),
}
if err := driver.Prlctl(command...); err != nil {
err := fmt.Errorf("Error setting the boot order: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
return multistep.ActionContinue
}
func (s *stepSetBootOrder) Cleanup(state multistep.StateBag) {}
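// For a VM named "example-vm", the step above is roughly equivalent to running:
//
//	prlctl set example-vm --device-bootorder "hdd0 cdrom0 net0"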
...@@ -122,6 +122,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { ...@@ -122,6 +122,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
warnings := make([]string, 0)
b.config.tpl, err = packer.NewConfigTemplate() b.config.tpl, err = packer.NewConfigTemplate()
if err != nil { if err != nil {
...@@ -304,22 +305,24 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { ...@@ -304,22 +305,24 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
errs, errors.New("http_port_min must be less than http_port_max")) errs, errors.New("http_port_min must be less than http_port_max"))
} }
if b.config.ISOChecksum == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("Due to large file sizes, an iso_checksum is required"))
} else {
b.config.ISOChecksum = strings.ToLower(b.config.ISOChecksum)
}
if b.config.ISOChecksumType == "" { if b.config.ISOChecksumType == "" {
errs = packer.MultiErrorAppend( errs = packer.MultiErrorAppend(
errs, errors.New("The iso_checksum_type must be specified.")) errs, errors.New("The iso_checksum_type must be specified."))
} else { } else {
b.config.ISOChecksumType = strings.ToLower(b.config.ISOChecksumType) b.config.ISOChecksumType = strings.ToLower(b.config.ISOChecksumType)
if h := common.HashForType(b.config.ISOChecksumType); h == nil { if b.config.ISOChecksumType != "none" {
errs = packer.MultiErrorAppend( if b.config.ISOChecksum == "" {
errs, errs = packer.MultiErrorAppend(
fmt.Errorf("Unsupported checksum type: %s", b.config.ISOChecksumType)) errs, errors.New("Due to large file sizes, an iso_checksum is required"))
} else {
b.config.ISOChecksum = strings.ToLower(b.config.ISOChecksum)
}
if h := common.HashForType(b.config.ISOChecksumType); h == nil {
errs = packer.MultiErrorAppend(
errs,
fmt.Errorf("Unsupported checksum type: %s", b.config.ISOChecksumType))
}
} }
} }
...@@ -404,11 +407,17 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { ...@@ -404,11 +407,17 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
b.config.QemuArgs = make([][]string, 0) b.config.QemuArgs = make([][]string, 0)
} }
if b.config.ISOChecksumType == "none" {
warnings = append(warnings,
"A checksum type of 'none' was specified. Since ISO files are so big,\n"+
"a checksum is highly recommended.")
}
if errs != nil && len(errs.Errors) > 0 { if errs != nil && len(errs.Errors) > 0 {
return nil, errs return warnings, errs
} }
return nil, nil return warnings, nil
} }
func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
...@@ -418,6 +427,15 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe ...@@ -418,6 +427,15 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
return nil, fmt.Errorf("Failed creating Qemu driver: %s", err) return nil, fmt.Errorf("Failed creating Qemu driver: %s", err)
} }
steprun := &stepRun{}
if !b.config.DiskImage {
steprun.BootDrive = "once=d"
steprun.Message = "Starting VM, booting from CD-ROM"
} else {
steprun.BootDrive = "c"
steprun.Message = "Starting VM, booting disk image"
}
steps := []multistep.Step{ steps := []multistep.Step{
&common.StepDownload{ &common.StepDownload{
Checksum: b.config.ISOChecksum, Checksum: b.config.ISOChecksum,
...@@ -436,10 +454,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe ...@@ -436,10 +454,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
new(stepHTTPServer), new(stepHTTPServer),
new(stepForwardSSH), new(stepForwardSSH),
new(stepConfigureVNC), new(stepConfigureVNC),
&stepRun{ steprun,
BootDrive: "once=d",
Message: "Starting VM, booting from CD-ROM",
},
&stepBootWait{}, &stepBootWait{},
&stepTypeBootCommand{}, &stepTypeBootCommand{},
&common.StepConnectSSH{ &common.StepConnectSSH{
......
...@@ -160,7 +160,7 @@ func TestBuilderPrepare_DiskSize(t *testing.T) { ...@@ -160,7 +160,7 @@ func TestBuilderPrepare_DiskSize(t *testing.T) {
} }
if b.config.DiskSize != 60000 { if b.config.DiskSize != 60000 {
t.Fatalf("bad size: %s", b.config.DiskSize) t.Fatalf("bad size: %d", b.config.DiskSize)
} }
} }
......
...@@ -2,10 +2,11 @@ package qemu ...@@ -2,10 +2,11 @@ package qemu
import ( import (
"fmt" "fmt"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
) )
// This step copies the virtual disk that will be used as the // This step copies the virtual disk that will be used as the
...@@ -19,6 +20,7 @@ func (s *stepCopyDisk) Run(state multistep.StateBag) multistep.StepAction { ...@@ -19,6 +20,7 @@ func (s *stepCopyDisk) Run(state multistep.StateBag) multistep.StepAction {
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
path := filepath.Join(config.OutputDir, fmt.Sprintf("%s.%s", config.VMName, path := filepath.Join(config.OutputDir, fmt.Sprintf("%s.%s", config.VMName,
strings.ToLower(config.Format))) strings.ToLower(config.Format)))
name := config.VMName + "." + strings.ToLower(config.Format)
command := []string{ command := []string{
"convert", "convert",
...@@ -39,6 +41,8 @@ func (s *stepCopyDisk) Run(state multistep.StateBag) multistep.StepAction { ...@@ -39,6 +41,8 @@ func (s *stepCopyDisk) Run(state multistep.StateBag) multistep.StepAction {
return multistep.ActionHalt return multistep.ActionHalt
} }
state.Put("disk_filename", name)
return multistep.ActionContinue return multistep.ActionContinue
} }
......
...@@ -82,7 +82,9 @@ func getCommandArgs(bootDrive string, state multistep.StateBag) ([]string, error ...@@ -82,7 +82,9 @@ func getCommandArgs(bootDrive string, state multistep.StateBag) ([]string, error
defaultArgs["-netdev"] = fmt.Sprintf("user,id=user.0,hostfwd=tcp::%v-:22", sshHostPort) defaultArgs["-netdev"] = fmt.Sprintf("user,id=user.0,hostfwd=tcp::%v-:22", sshHostPort)
defaultArgs["-device"] = fmt.Sprintf("%s,netdev=user.0", config.NetDevice) defaultArgs["-device"] = fmt.Sprintf("%s,netdev=user.0", config.NetDevice)
defaultArgs["-drive"] = fmt.Sprintf("file=%s,if=%s,cache=%s", imgPath, config.DiskInterface, config.DiskCache) defaultArgs["-drive"] = fmt.Sprintf("file=%s,if=%s,cache=%s", imgPath, config.DiskInterface, config.DiskCache)
defaultArgs["-cdrom"] = isoPath if !config.DiskImage {
defaultArgs["-cdrom"] = isoPath
}
defaultArgs["-boot"] = bootDrive defaultArgs["-boot"] = bootDrive
defaultArgs["-m"] = "512M" defaultArgs["-m"] = "512M"
defaultArgs["-vnc"] = vnc defaultArgs["-vnc"] = vnc
......
...@@ -19,6 +19,9 @@ type Driver interface { ...@@ -19,6 +19,9 @@ type Driver interface {
// Create a SATA controller. // Create a SATA controller.
CreateSATAController(vm string, controller string) error CreateSATAController(vm string, controller string) error
// Create a SCSI controller.
CreateSCSIController(vm string, controller string) error
// Delete a VM by name // Delete a VM by name
Delete(string) error Delete(string) error
......
...@@ -36,6 +36,18 @@ func (d *VBox42Driver) CreateSATAController(vmName string, name string) error { ...@@ -36,6 +36,18 @@ func (d *VBox42Driver) CreateSATAController(vmName string, name string) error {
return d.VBoxManage(command...) return d.VBoxManage(command...)
} }
func (d *VBox42Driver) CreateSCSIController(vmName string, name string) error {
command := []string{
"storagectl", vmName,
"--name", name,
"--add", "scsi",
"--controller", "LSILogic",
}
return d.VBoxManage(command...)
}
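// For a VM named "example-vm", CreateSCSIController above is roughly equivalent
// to running:
//
//	VBoxManage storagectl example-vm --name "SCSI Controller" --add scsi --controller LSILogic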
func (d *VBox42Driver) Delete(name string) error { func (d *VBox42Driver) Delete(name string) error {
return d.VBoxManage("unregistervm", name, "--delete") return d.VBoxManage("unregistervm", name, "--delete")
} }
......
...@@ -9,6 +9,10 @@ type DriverMock struct { ...@@ -9,6 +9,10 @@ type DriverMock struct {
CreateSATAControllerController string CreateSATAControllerController string
CreateSATAControllerErr error CreateSATAControllerErr error
CreateSCSIControllerVM string
CreateSCSIControllerController string
CreateSCSIControllerErr error
DeleteCalled bool DeleteCalled bool
DeleteName string DeleteName string
DeleteErr error DeleteErr error
...@@ -49,6 +53,12 @@ func (d *DriverMock) CreateSATAController(vm string, controller string) error { ...@@ -49,6 +53,12 @@ func (d *DriverMock) CreateSATAController(vm string, controller string) error {
return d.CreateSATAControllerErr return d.CreateSATAControllerErr
} }
func (d *DriverMock) CreateSCSIController(vm string, controller string) error {
d.CreateSCSIControllerVM = vm
d.CreateSCSIControllerController = controller
return d.CreateSCSIControllerErr
}
func (d *DriverMock) Delete(name string) error { func (d *DriverMock) Delete(name string) error {
d.DeleteCalled = true d.DeleteCalled = true
d.DeleteName = name d.DeleteName = name
......
...@@ -158,9 +158,9 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { ...@@ -158,9 +158,9 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
} }
} }
if b.config.HardDriveInterface != "ide" && b.config.HardDriveInterface != "sata" { if b.config.HardDriveInterface != "ide" && b.config.HardDriveInterface != "sata" && b.config.HardDriveInterface != "scsi" {
errs = packer.MultiErrorAppend( errs = packer.MultiErrorAppend(
errs, errors.New("hard_drive_interface can only be ide or sata")) errs, errors.New("hard_drive_interface can only be ide, sata, or scsi"))
} }
if b.config.ISOChecksumType == "" { if b.config.ISOChecksumType == "" {
......
...@@ -83,7 +83,7 @@ func TestBuilderPrepare_DiskSize(t *testing.T) { ...@@ -83,7 +83,7 @@ func TestBuilderPrepare_DiskSize(t *testing.T) {
} }
if b.config.DiskSize != 60000 { if b.config.DiskSize != 60000 {
t.Fatalf("bad size: %s", b.config.DiskSize) t.Fatalf("bad size: %d", b.config.DiskSize)
} }
} }
......
...@@ -63,12 +63,25 @@ func (s *stepCreateDisk) Run(state multistep.StateBag) multistep.StepAction { ...@@ -63,12 +63,25 @@ func (s *stepCreateDisk) Run(state multistep.StateBag) multistep.StepAction {
} }
} }
if config.HardDriveInterface == "scsi" {
if err := driver.CreateSCSIController(vmName, "SCSI Controller"); err != nil {
err := fmt.Errorf("Error creating disk controller: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
}
// Attach the disk to the controller // Attach the disk to the controller
controllerName := "IDE Controller" controllerName := "IDE Controller"
if config.HardDriveInterface == "sata" { if config.HardDriveInterface == "sata" {
controllerName = "SATA Controller" controllerName = "SATA Controller"
} }
if config.HardDriveInterface == "scsi" {
controllerName = "SCSI Controller"
}
command = []string{ command = []string{
"storageattach", vmName, "storageattach", vmName,
"--storagectl", controllerName, "--storagectl", controllerName,
......
...@@ -127,7 +127,7 @@ func TestStepShutdown_locks(t *testing.T) { ...@@ -127,7 +127,7 @@ func TestStepShutdown_locks(t *testing.T) {
lockPath := filepath.Join(dir.dir, "nope.lck") lockPath := filepath.Join(dir.dir, "nope.lck")
err := ioutil.WriteFile(lockPath, []byte("foo"), 0644) err := ioutil.WriteFile(lockPath, []byte("foo"), 0644)
if err != nil { if err != nil {
t.Fatalf("err: %s") t.Fatalf("err: %s", err)
} }
// Remove the lock file after a certain time // Remove the lock file after a certain time
......
...@@ -368,6 +368,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe ...@@ -368,6 +368,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
SkipFloppy: true, SkipFloppy: true,
}, },
&vmwcommon.StepCleanVMX{}, &vmwcommon.StepCleanVMX{},
&StepUploadVMX{
RemoteType: b.config.RemoteType,
},
&vmwcommon.StepCompactDisk{ &vmwcommon.StepCompactDisk{
Skip: b.config.SkipCompaction, Skip: b.config.SkipCompaction,
}, },
......
...@@ -175,7 +175,7 @@ func TestBuilderPrepare_DiskSize(t *testing.T) { ...@@ -175,7 +175,7 @@ func TestBuilderPrepare_DiskSize(t *testing.T) {
} }
if b.config.DiskSize != 60000 { if b.config.DiskSize != 60000 {
t.Fatalf("bad size: %s", b.config.DiskSize) t.Fatalf("bad size: %d", b.config.DiskSize)
} }
} }
......
...@@ -56,6 +56,10 @@ func (d *ESX5Driver) IsRunning(string) (bool, error) { ...@@ -56,6 +56,10 @@ func (d *ESX5Driver) IsRunning(string) (bool, error) {
return strings.Contains(state, "Powered on"), nil return strings.Contains(state, "Powered on"), nil
} }
func (d *ESX5Driver) ReloadVM() error {
return d.sh("vim-cmd", "vmsvc/reload", d.vmId)
}
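// ReloadVM asks the ESXi host to re-read the VMX from disk; it amounts to
// running the following on the remote host, where vmid is the registered VM id:
//
//	vim-cmd vmsvc/reload <vmid>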
func (d *ESX5Driver) Start(vmxPathLocal string, headless bool) error { func (d *ESX5Driver) Start(vmxPathLocal string, headless bool) error {
for i := 0; i < 20; i++ { for i := 0; i < 20; i++ {
err := d.sh("vim-cmd", "vmsvc/power.on", d.vmId) err := d.sh("vim-cmd", "vmsvc/power.on", d.vmId)
......
...@@ -17,4 +17,10 @@ type RemoteDriver interface { ...@@ -17,4 +17,10 @@ type RemoteDriver interface {
// Removes a VM from inventory specified by the path to the VMX given. // Removes a VM from inventory specified by the path to the VMX given.
Unregister(string) error Unregister(string) error
// Uploads a local file to the remote side.
upload(dst, src string) error
// Reloads the VM on the remote side.
ReloadVM() error
} }
...@@ -19,6 +19,10 @@ type RemoteDriverMock struct { ...@@ -19,6 +19,10 @@ type RemoteDriverMock struct {
UnregisterCalled bool UnregisterCalled bool
UnregisterPath string UnregisterPath string
UnregisterErr error UnregisterErr error
uploadErr error
ReloadVMErr error
} }
func (d *RemoteDriverMock) UploadISO(path string, checksum string, checksumType string) (string, error) { func (d *RemoteDriverMock) UploadISO(path string, checksum string, checksumType string) (string, error) {
...@@ -38,3 +42,11 @@ func (d *RemoteDriverMock) Unregister(path string) error { ...@@ -38,3 +42,11 @@ func (d *RemoteDriverMock) Unregister(path string) error {
d.UnregisterPath = path d.UnregisterPath = path
return d.UnregisterErr return d.UnregisterErr
} }
func (d *RemoteDriverMock) upload(dst, src string) error {
return d.uploadErr
}
func (d *RemoteDriverMock) ReloadVM() error {
return d.ReloadVMErr
}
package iso
import (
"fmt"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
vmwcommon "github.com/mitchellh/packer/builder/vmware/common"
"path/filepath"
)
// This step uploads the VMX to the remote host.
//
// Uses:
// driver Driver
// ui packer.Ui
// vmx_path string
//
// Produces:
// <nothing>
type StepUploadVMX struct {
RemoteType string
}
func (c *StepUploadVMX) Run(state multistep.StateBag) multistep.StepAction {
driver := state.Get("driver").(vmwcommon.Driver)
ui := state.Get("ui").(packer.Ui)
vmxPath := state.Get("vmx_path").(string)
if c.RemoteType == "esx5" {
remoteDriver, ok := driver.(RemoteDriver)
if ok {
remoteVmxPath := filepath.ToSlash(filepath.Join(fmt.Sprintf("%s",remoteDriver), filepath.Base(vmxPath)))
if err := remoteDriver.upload(remoteVmxPath, vmxPath); err != nil {
state.Put("error", fmt.Errorf("Error writing VMX: %s", err))
return multistep.ActionHalt
}
}
if err := remoteDriver.ReloadVM(); err != nil {
ui.Error(fmt.Sprintf("Error reload VM: %s", err))
}
}
return multistep.ActionContinue
}
func (c *StepUploadVMX) Cleanup(multistep.StateBag) {}
...@@ -12,7 +12,7 @@ import ( ...@@ -12,7 +12,7 @@ import (
"github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer"
) )
// Builder implements packer.Builder and builds the actual VirtualBox // Builder implements packer.Builder and builds the actual VMware
// images. // images.
type Builder struct { type Builder struct {
config *Config config *Config
......
...@@ -34,7 +34,7 @@ func runCheckpoint(c *config) { ...@@ -34,7 +34,7 @@ func runCheckpoint(c *config) {
version := Version version := Version
if VersionPrerelease != "" { if VersionPrerelease != "" {
version += fmt.Sprintf(".%s", VersionPrerelease) version += fmt.Sprintf("-%s", VersionPrerelease)
} }
signaturePath := filepath.Join(configDir, "checkpoint_signature") signaturePath := filepath.Join(configDir, "checkpoint_signature")
......
package command
import (
"path/filepath"
"testing"
"github.com/mitchellh/cli"
)
const fixturesDir = "./test-fixtures"
func fatalCommand(t *testing.T, m Meta) {
ui := m.Ui.(*cli.MockUi)
t.Fatalf(
"Bad exit code.\n\nStdout:\n\n%s\n\nStderr:\n\n%s",
ui.OutputWriter.String(),
ui.ErrorWriter.String())
}
func testFixture(n string) string {
return filepath.Join(fixturesDir, n)
}
func testMeta(t *testing.T) Meta {
return Meta{
Ui: new(cli.MockUi),
}
}
package command
import (
"flag"
"fmt"
"io"
"os"
"os/signal"
"path/filepath"
"strings"
"github.com/hashicorp/atlas-go/archive"
"github.com/hashicorp/atlas-go/v1"
"github.com/mitchellh/packer/packer"
)
// archiveTemplateEntry is the name the template always takes within the slug.
const archiveTemplateEntry = ".packer-template"
type PushCommand struct {
Meta
client *atlas.Client
// For tests:
uploadFn pushUploadFn
}
// pushUploadFn is the callback type used for tests to stub out the uploading
// logic of the push command.
type pushUploadFn func(
io.Reader, *uploadOpts) (<-chan struct{}, <-chan error, error)
func (c *PushCommand) Run(args []string) int {
var token string
var message string
var create bool
f := flag.NewFlagSet("push", flag.ContinueOnError)
f.Usage = func() { c.Ui.Error(c.Help()) }
f.StringVar(&token, "token", "", "token")
f.StringVar(&message, "m", "", "message")
f.StringVar(&message, "message", "", "message")
f.BoolVar(&create, "create", false, "create (deprecated)")
if err := f.Parse(args); err != nil {
return 1
}
args = f.Args()
if len(args) != 1 {
f.Usage()
return 1
}
// Print deprecations
if create {
c.Ui.Error(fmt.Sprintf("The '-create' option is now the default and is\n" +
"longer used. It will be removed in the next version."))
}
// Read the template
tpl, err := packer.ParseTemplateFile(args[0], nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Failed to parse template: %s", err))
return 1
}
// Validate some things
if tpl.Push.Name == "" {
c.Ui.Error(fmt.Sprintf(
"The 'push' section must be specified in the template with\n" +
"at least the 'name' option set."))
return 1
}
// Determine our token
if token == "" {
token = tpl.Push.Token
}
// Build our client
defer func() { c.client = nil }()
c.client = atlas.DefaultClient()
if tpl.Push.Address != "" {
c.client, err = atlas.NewClient(tpl.Push.Address)
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error setting up API client: %s", err))
return 1
}
}
if token != "" {
c.client.Token = token
}
// Build the archiving options
var opts archive.ArchiveOpts
opts.Include = tpl.Push.Include
opts.Exclude = tpl.Push.Exclude
opts.VCS = tpl.Push.VCS
opts.Extra = map[string]string{
archiveTemplateEntry: args[0],
}
// Determine the path we're archiving. This logic is a bit complicated
// as there are three possibilities:
//
// 1.) BaseDir is an absolute path, just use that.
//
// 2.) BaseDir is empty, so we use the directory of the template.
//
// 3.) BaseDir is relative, so we use the path relative to the directory
// of the template.
//
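// For example (illustrative paths only): with the template at
// /home/user/project/template.json, an empty base_dir archives
// /home/user/project, base_dir "../shared" archives /home/user/shared,
// and base_dir "/opt/src" archives /opt/src unchanged.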
path := tpl.Push.BaseDir
if path == "" || !filepath.IsAbs(path) {
tplPath, err := filepath.Abs(args[0])
if err != nil {
c.Ui.Error(fmt.Sprintf("Error determining path to archive: %s", err))
return 1
}
tplPath = filepath.Dir(tplPath)
if path != "" {
tplPath = filepath.Join(tplPath, path)
}
path, err = filepath.Abs(tplPath)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error determining path to archive: %s", err))
return 1
}
}
// Find the Atlas post-processors, if possible
var atlasPPs []packer.RawPostProcessorConfig
for _, list := range tpl.PostProcessors {
for _, pp := range list {
if pp.Type == "atlas" {
atlasPPs = append(atlasPPs, pp)
}
}
}
// Build the upload options
var uploadOpts uploadOpts
uploadOpts.Slug = tpl.Push.Name
uploadOpts.Builds = make(map[string]*uploadBuildInfo)
for _, b := range tpl.Builders {
info := &uploadBuildInfo{Type: b.Type}
// Determine if we're artifacting this build
for _, pp := range atlasPPs {
if !pp.Skip(b.Name) {
info.Artifact = true
break
}
}
uploadOpts.Builds[b.Name] = info
}
// Add the upload metadata
metadata := make(map[string]interface{})
if message != "" {
metadata["message"] = message
}
metadata["template"] = tpl.RawContents
metadata["template_name"] = filepath.Base(args[0])
uploadOpts.Metadata = metadata
// Warn about builds not having post-processors.
var badBuilds []string
for name, b := range uploadOpts.Builds {
if b.Artifact {
continue
}
badBuilds = append(badBuilds, name)
}
if len(badBuilds) > 0 {
c.Ui.Error(fmt.Sprintf(
"Warning! One or more of the builds in this template does not\n"+
"have an Atlas post-processor. Artifacts from this template will\n"+
"not appear in the Atlas artifact registry.\n\n"+
"This is just a warning. Atlas will still build your template\n"+
"and assume other post-processors are sending the artifacts where\n"+
"they need to go.\n\n"+
"Builds: %s\n\n", strings.Join(badBuilds, ", ")))
}
// Start the archiving process
r, err := archive.CreateArchive(path, &opts)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error archiving: %s", err))
return 1
}
defer r.Close()
// Start the upload process
doneCh, uploadErrCh, err := c.upload(r, &uploadOpts)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error starting upload: %s", err))
return 1
}
// Make a ctrl-C channel
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, os.Interrupt)
defer signal.Stop(sigCh)
err = nil
select {
case err = <-uploadErrCh:
err = fmt.Errorf("Error uploading: %s", err)
case <-sigCh:
err = fmt.Errorf("Push cancelled from Ctrl-C")
case <-doneCh:
}
if err != nil {
c.Ui.Error(err.Error())
return 1
}
c.Ui.Output(fmt.Sprintf("Push successful to '%s'", tpl.Push.Name))
return 0
}
func (*PushCommand) Help() string {
helpText := `
Usage: packer push [options] TEMPLATE
Push the given template and supporting files to a Packer build service such as
Atlas.
If a build configuration for the given template does not exist, it will be
created automatically. If the build configuration already exists, a new
version will be created with this template and the supporting files.
Additional configuration options (such as the Atlas server URL and files to
include) may be specified in the "push" section of the Packer template. Please
see the online documentation for more information about these configurables.
Options:
-m, -message=<detail> A message to identify the purpose or changes in this
Packer template much like a VCS commit message
-token=<token> The access token to use to when uploading
`
return strings.TrimSpace(helpText)
}
func (*PushCommand) Synopsis() string {
return "push a template and supporting files to a Packer build service"
}
func (c *PushCommand) upload(
r *archive.Archive, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
if c.uploadFn != nil {
return c.uploadFn(r, opts)
}
// Separate the slug into the user and name components
user, name, err := atlas.ParseSlug(opts.Slug)
if err != nil {
return nil, nil, fmt.Errorf("upload: %s", err)
}
// Get the build configuration
bc, err := c.client.BuildConfig(user, name)
if err != nil {
if err == atlas.ErrNotFound {
// Build configuration doesn't exist, attempt to create it
bc, err = c.client.CreateBuildConfig(user, name)
}
if err != nil {
return nil, nil, fmt.Errorf("upload: %s", err)
}
}
// Build the version to send up
version := atlas.BuildConfigVersion{
User: bc.User,
Name: bc.Name,
Builds: make([]atlas.BuildConfigBuild, 0, len(opts.Builds)),
}
for name, info := range opts.Builds {
version.Builds = append(version.Builds, atlas.BuildConfigBuild{
Name: name,
Type: info.Type,
Artifact: info.Artifact,
})
}
// Start the upload
doneCh, errCh := make(chan struct{}), make(chan error)
go func() {
err := c.client.UploadBuildConfigVersion(&version, opts.Metadata, r, r.Size)
if err != nil {
errCh <- err
return
}
close(doneCh)
}()
return doneCh, errCh, nil
}
type uploadOpts struct {
URL string
Slug string
Builds map[string]*uploadBuildInfo
Metadata map[string]interface{}
}
type uploadBuildInfo struct {
Type string
Artifact bool
}
package command
import (
"archive/tar"
"bytes"
"compress/gzip"
"fmt"
"io"
"path/filepath"
"reflect"
"sort"
"testing"
)
func TestPush_noArgs(t *testing.T) {
c := &PushCommand{Meta: testMeta(t)}
code := c.Run(nil)
if code != 1 {
t.Fatalf("bad: %#v", code)
}
}
func TestPush_multiArgs(t *testing.T) {
c := &PushCommand{Meta: testMeta(t)}
code := c.Run([]string{"one", "two"})
if code != 1 {
t.Fatalf("bad: %#v", code)
}
}
func TestPush(t *testing.T) {
var actual []string
var actualOpts *uploadOpts
uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
actual = testArchive(t, r)
actualOpts = opts
doneCh := make(chan struct{})
close(doneCh)
return doneCh, nil, nil
}
c := &PushCommand{
Meta: testMeta(t),
uploadFn: uploadFn,
}
args := []string{filepath.Join(testFixture("push"), "template.json")}
if code := c.Run(args); code != 0 {
fatalCommand(t, c.Meta)
}
expected := []string{
archiveTemplateEntry,
"template.json",
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
expectedBuilds := map[string]*uploadBuildInfo{
"dummy": &uploadBuildInfo{
Type: "dummy",
},
}
if !reflect.DeepEqual(actualOpts.Builds, expectedBuilds) {
t.Fatalf("bad: %#v", actualOpts.Builds)
}
}
func TestPush_builds(t *testing.T) {
var actualOpts *uploadOpts
uploadFn := func(
r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
actualOpts = opts
doneCh := make(chan struct{})
close(doneCh)
return doneCh, nil, nil
}
c := &PushCommand{
Meta: testMeta(t),
uploadFn: uploadFn,
}
args := []string{filepath.Join(testFixture("push-builds"), "template.json")}
if code := c.Run(args); code != 0 {
fatalCommand(t, c.Meta)
}
expectedBuilds := map[string]*uploadBuildInfo{
"dummy": &uploadBuildInfo{
Type: "dummy",
Artifact: true,
},
"foo": &uploadBuildInfo{
Type: "dummy",
},
}
if !reflect.DeepEqual(actualOpts.Builds, expectedBuilds) {
t.Fatalf("bad: %#v", actualOpts.Builds)
}
}
func TestPush_noName(t *testing.T) {
uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
return nil, nil, nil
}
c := &PushCommand{
Meta: testMeta(t),
uploadFn: uploadFn,
}
args := []string{filepath.Join(testFixture("push-no-name"), "template.json")}
if code := c.Run(args); code != 1 {
fatalCommand(t, c.Meta)
}
}
func TestPush_uploadError(t *testing.T) {
uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
return nil, nil, fmt.Errorf("bad")
}
c := &PushCommand{
Meta: testMeta(t),
uploadFn: uploadFn,
}
args := []string{filepath.Join(testFixture("push"), "template.json")}
if code := c.Run(args); code != 1 {
fatalCommand(t, c.Meta)
}
}
func TestPush_uploadErrorCh(t *testing.T) {
uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
errCh := make(chan error, 1)
errCh <- fmt.Errorf("bad")
return nil, errCh, nil
}
c := &PushCommand{
Meta: testMeta(t),
uploadFn: uploadFn,
}
args := []string{filepath.Join(testFixture("push"), "template.json")}
if code := c.Run(args); code != 1 {
fatalCommand(t, c.Meta)
}
}
func testArchive(t *testing.T, r io.Reader) []string {
// Finish the archiving process in-memory
var buf bytes.Buffer
if _, err := io.Copy(&buf, r); err != nil {
t.Fatalf("err: %s", err)
}
gzipR, err := gzip.NewReader(&buf)
if err != nil {
t.Fatalf("err: %s", err)
}
tarR := tar.NewReader(gzipR)
// Read all the entries
result := make([]string, 0, 5)
for {
hdr, err := tarR.Next()
if err == io.EOF {
break
}
if err != nil {
t.Fatalf("err: %s", err)
}
result = append(result, hdr.Name)
}
sort.Strings(result)
return result
}
{
"builders": [
{"type": "dummy"},
{"type": "dummy", "name": "foo"}
],
"post-processors": [{
"type": "atlas",
"only": ["dummy"]
}],
"push": {
"name": "foo/bar"
}
}
{
"builders": [{"type": "dummy"}]
}
{
"builders": [{"type": "dummy"}],
"push": {
"name": "foo/bar"
}
}
...@@ -50,6 +50,12 @@ func init() { ...@@ -50,6 +50,12 @@ func init() {
}, nil }, nil
}, },
"push": func() (cli.Command, error) {
return &command.PushCommand{
Meta: meta,
}, nil
},
"validate": func() (cli.Command, error) { "validate": func() (cli.Command, error) {
return &command.ValidateCommand{ return &command.ValidateCommand{
Meta: meta, Meta: meta,
......
...@@ -66,6 +66,12 @@ func (f *BuildOptions) AllUserVars() (map[string]string, error) { ...@@ -66,6 +66,12 @@ func (f *BuildOptions) AllUserVars() (map[string]string, error) {
func (f *BuildOptions) Builds(t *packer.Template, cf *packer.ComponentFinder) ([]packer.Build, error) { func (f *BuildOptions) Builds(t *packer.Template, cf *packer.ComponentFinder) ([]packer.Build, error) {
buildNames := t.BuildNames() buildNames := t.BuildNames()
// Process the name
tpl, _, err := t.NewConfigTemplate()
if err != nil {
return nil, err
}
checks := make(map[string][]string) checks := make(map[string][]string)
checks["except"] = f.Except checks["except"] = f.Except
checks["only"] = f.Only checks["only"] = f.Only
...@@ -73,7 +79,12 @@ func (f *BuildOptions) Builds(t *packer.Template, cf *packer.ComponentFinder) ([ ...@@ -73,7 +79,12 @@ func (f *BuildOptions) Builds(t *packer.Template, cf *packer.ComponentFinder) ([
for _, n := range ns { for _, n := range ns {
found := false found := false
for _, actual := range buildNames { for _, actual := range buildNames {
if actual == n { var processed string
processed, err = tpl.Process(actual, nil)
if err != nil {
return nil, err
}
if actual == n || processed == n {
found = true found = true
break break
} }
...@@ -88,17 +99,22 @@ func (f *BuildOptions) Builds(t *packer.Template, cf *packer.ComponentFinder) ([ ...@@ -88,17 +99,22 @@ func (f *BuildOptions) Builds(t *packer.Template, cf *packer.ComponentFinder) ([
builds := make([]packer.Build, 0, len(buildNames)) builds := make([]packer.Build, 0, len(buildNames))
for _, buildName := range buildNames { for _, buildName := range buildNames {
var processedBuildName string
processedBuildName, err = tpl.Process(buildName, nil)
if err != nil {
return nil, err
}
if len(f.Except) > 0 { if len(f.Except) > 0 {
found := false found := false
for _, except := range f.Except { for _, except := range f.Except {
if buildName == except { if buildName == except || processedBuildName == except {
found = true found = true
break break
} }
} }
if found { if found {
log.Printf("Skipping build '%s' because specified by -except.", buildName) log.Printf("Skipping build '%s' because specified by -except.", processedBuildName)
continue continue
} }
} }
...@@ -106,19 +122,19 @@ func (f *BuildOptions) Builds(t *packer.Template, cf *packer.ComponentFinder) ([ ...@@ -106,19 +122,19 @@ func (f *BuildOptions) Builds(t *packer.Template, cf *packer.ComponentFinder) ([
if len(f.Only) > 0 { if len(f.Only) > 0 {
found := false found := false
for _, only := range f.Only { for _, only := range f.Only {
if buildName == only { if buildName == only || processedBuildName == only {
found = true found = true
break break
} }
} }
if !found { if !found {
log.Printf("Skipping build '%s' because not specified by -only.", buildName) log.Printf("Skipping build '%s' because not specified by -only.", processedBuildName)
continue continue
} }
} }
log.Printf("Creating build: %s", buildName) log.Printf("Creating build: %s", processedBuildName)
build, err := t.Build(buildName, cf) build, err := t.Build(buildName, cf)
if err != nil { if err != nil {
return nil, fmt.Errorf("Failed to create build '%s': \n\n%s", buildName, err) return nil, fmt.Errorf("Failed to create build '%s': \n\n%s", buildName, err)
......
...@@ -7,17 +7,23 @@ import ( ...@@ -7,17 +7,23 @@ import (
func testTemplate() (*packer.Template, *packer.ComponentFinder) { func testTemplate() (*packer.Template, *packer.ComponentFinder) {
tplData := `{ tplData := `{
"builders": [ "variables": {
{ "foo": null
"type": "foo" },
},
{ "builders": [
"type": "bar" {
} "type": "foo"
] },
}` {
"name": "{{user \"foo\"}}",
tpl, err := packer.ParseTemplate([]byte(tplData), nil) "type": "bar"
}
]
}
`
tpl, err := packer.ParseTemplate([]byte(tplData), map[string]string{"foo": "bar"})
if err != nil { if err != nil {
panic(err) panic(err)
} }
...@@ -59,6 +65,44 @@ func TestBuildOptionsBuilds_except(t *testing.T) { ...@@ -59,6 +65,44 @@ func TestBuildOptionsBuilds_except(t *testing.T) {
} }
} }
// Test to make sure the build name pattern matches
func TestBuildOptionsBuilds_exceptConfigTemplateRaw(t *testing.T) {
opts := new(BuildOptions)
opts.Except = []string{"{{user \"foo\"}}"}
bs, err := opts.Builds(testTemplate())
if err != nil {
t.Fatalf("err: %s", err)
}
if len(bs) != 1 {
t.Fatalf("bad: %d", len(bs))
}
if bs[0].Name() != "foo" {
t.Fatalf("bad: %s", bs[0].Name())
}
}
// Test to make sure the processed build name matches
func TestBuildOptionsBuilds_exceptConfigTemplateProcessed(t *testing.T) {
opts := new(BuildOptions)
opts.Except = []string{"bar"}
bs, err := opts.Builds(testTemplate())
if err != nil {
t.Fatalf("err: %s", err)
}
if len(bs) != 1 {
t.Fatalf("bad: %d", len(bs))
}
if bs[0].Name() != "foo" {
t.Fatalf("bad: %s", bs[0].Name())
}
}
func TestBuildOptionsBuilds_only(t *testing.T) { func TestBuildOptionsBuilds_only(t *testing.T) {
opts := new(BuildOptions) opts := new(BuildOptions)
opts.Only = []string{"foo"} opts.Only = []string{"foo"}
...@@ -77,6 +121,44 @@ func TestBuildOptionsBuilds_only(t *testing.T) { ...@@ -77,6 +121,44 @@ func TestBuildOptionsBuilds_only(t *testing.T) {
} }
} }
// Test to make sure the build name pattern matches
func TestBuildOptionsBuilds_onlyConfigTemplateRaw(t *testing.T) {
opts := new(BuildOptions)
opts.Only = []string{"{{user \"foo\"}}"}
bs, err := opts.Builds(testTemplate())
if err != nil {
t.Fatalf("err: %s", err)
}
if len(bs) != 1 {
t.Fatalf("bad: %d", len(bs))
}
if bs[0].Name() != "bar" {
t.Fatalf("bad: %s", bs[0].Name())
}
}
// Test to make sure the processed build name matches
func TestBuildOptionsBuilds_onlyConfigTemplateProcessed(t *testing.T) {
opts := new(BuildOptions)
opts.Only = []string{"bar"}
bs, err := opts.Builds(testTemplate())
if err != nil {
t.Fatalf("err: %s", err)
}
if len(bs) != 1 {
t.Fatalf("bad: %d", len(bs))
}
if bs[0].Name() != "bar" {
t.Fatalf("bad: %s", bs[0].Name())
}
}
func TestBuildOptionsBuilds_exceptNonExistent(t *testing.T) { func TestBuildOptionsBuilds_exceptNonExistent(t *testing.T) {
opts := new(BuildOptions) opts := new(BuildOptions)
opts.Except = []string{"i-dont-exist"} opts.Except = []string{"i-dont-exist"}
......
...@@ -33,7 +33,7 @@ func CheckUnusedConfig(md *mapstructure.Metadata) *packer.MultiError { ...@@ -33,7 +33,7 @@ func CheckUnusedConfig(md *mapstructure.Metadata) *packer.MultiError {
for _, unused := range md.Unused { for _, unused := range md.Unused {
if unused != "type" && !strings.HasPrefix(unused, "packer_") { if unused != "type" && !strings.HasPrefix(unused, "packer_") {
errs = append( errs = append(
errs, fmt.Errorf("Unknown configuration key: %s", unused)) errs, fmt.Errorf("Unknown configuration key: %q", unused))
} }
} }
} }
......
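For context on the %s to %q change above: quoting the key makes empty or whitespace-only configuration keys visible in the error message. A tiny standalone illustration:

package main

import "fmt"

func main() {
	unused := "  " // an unused key that is just spaces
	fmt.Println(fmt.Errorf("Unknown configuration key: %s", unused)) // ends in invisible spaces
	fmt.Println(fmt.Errorf("Unknown configuration key: %q", unused)) // shows "  " explicitly
}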
...@@ -76,7 +76,7 @@ func TestStepCreateFloppy(t *testing.T) { ...@@ -76,7 +76,7 @@ func TestStepCreateFloppy(t *testing.T) {
floppy_path := state.Get("floppy_path").(string) floppy_path := state.Get("floppy_path").(string)
if _, err := os.Stat(floppy_path); err != nil { if _, err := os.Stat(floppy_path); err != nil {
t.Fatal("file not found: %s for %v", floppy_path, step.Files) t.Fatalf("file not found: %s for %v", floppy_path, step.Files)
} }
if len(step.FilesAdded) != expected { if len(step.FilesAdded) != expected {
...@@ -86,7 +86,7 @@ func TestStepCreateFloppy(t *testing.T) { ...@@ -86,7 +86,7 @@ func TestStepCreateFloppy(t *testing.T) {
step.Cleanup(state) step.Cleanup(state)
if _, err := os.Stat(floppy_path); err == nil { if _, err := os.Stat(floppy_path); err == nil {
t.Fatal("file found: %s for %v", floppy_path, step.Files) t.Fatalf("file found: %s for %v", floppy_path, step.Files)
} }
} }
} }
...@@ -177,7 +177,7 @@ func xxxTestStepCreateFloppy_notfound(t *testing.T) { ...@@ -177,7 +177,7 @@ func xxxTestStepCreateFloppy_notfound(t *testing.T) {
floppy_path := state.Get("floppy_path").(string) floppy_path := state.Get("floppy_path").(string)
if _, err := os.Stat(floppy_path); err != nil { if _, err := os.Stat(floppy_path); err != nil {
t.Fatal("file not found: %s for %v", floppy_path, step.Files) t.Fatalf("file not found: %s for %v", floppy_path, step.Files)
} }
if len(step.FilesAdded) != expected { if len(step.FilesAdded) != expected {
...@@ -187,7 +187,7 @@ func xxxTestStepCreateFloppy_notfound(t *testing.T) { ...@@ -187,7 +187,7 @@ func xxxTestStepCreateFloppy_notfound(t *testing.T) {
step.Cleanup(state) step.Cleanup(state)
if _, err := os.Stat(floppy_path); err == nil { if _, err := os.Stat(floppy_path); err == nil {
t.Fatal("file found: %s for %v", floppy_path, step.Files) t.Fatalf("file found: %s for %v", floppy_path, step.Files)
} }
} }
} }
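The Fatal-to-Fatalf fixes above matter because t.Fatal formats its arguments like fmt.Sprintln (format verbs are printed literally), while t.Fatalf formats them like fmt.Sprintf. A small sketch of the difference using fmt directly; the path is illustrative:

package main

import "fmt"

func main() {
	path := "/tmp/packer-floppy"
	// Like t.Fatal: verbs are not interpreted, operands are space-separated.
	fmt.Print(fmt.Sprintln("file not found: %s", path)) // file not found: %s /tmp/packer-floppy
	// Like t.Fatalf: verbs are substituted.
	fmt.Println(fmt.Sprintf("file not found: %s", path)) // file not found: /tmp/packer-floppy
}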
...@@ -336,7 +336,7 @@ func scpUploadFile(dst string, src io.Reader, w io.Writer, r *bufio.Reader, fi * ...@@ -336,7 +336,7 @@ func scpUploadFile(dst string, src io.Reader, w io.Writer, r *bufio.Reader, fi *
var mode os.FileMode var mode os.FileMode
var size int64 var size int64
if fi != nil { if fi != nil && (*fi).Mode().IsRegular() {
mode = (*fi).Mode().Perm() mode = (*fi).Mode().Perm()
size = (*fi).Size() size = (*fi).Size()
} else { } else {
......
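The added IsRegular check above means the FileInfo's size and mode are only trusted for regular files; for anything else the data presumably has to be buffered first so a correct length can be written in the SCP header. A hedged, standalone sketch of that fallback; sizeAndMode and the 0644 default are illustrative, not the communicator's real code:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"strings"
)

// sizeAndMode trusts the FileInfo only for regular files; otherwise it
// buffers the source so its length is known up front.
func sizeAndMode(fi os.FileInfo, src io.Reader) (int64, os.FileMode, io.Reader, error) {
	if fi != nil && fi.Mode().IsRegular() {
		return fi.Size(), fi.Mode().Perm(), src, nil
	}
	var buf bytes.Buffer
	n, err := io.Copy(&buf, src)
	if err != nil {
		return 0, 0, nil, err
	}
	return n, 0644, &buf, nil
}

func main() {
	size, mode, _, _ := sizeAndMode(nil, strings.NewReader("hello"))
	fmt.Println(size, mode) // 5 -rw-r--r--
}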
...@@ -83,10 +83,10 @@ func newMockLineServer(t *testing.T) string { ...@@ -83,10 +83,10 @@ func newMockLineServer(t *testing.T) string {
} }
t.Log("Accepted channel") t.Log("Accepted channel")
go func() { go func(channelType string) {
defer channel.Close() defer channel.Close()
conn.OpenChannel(newChannel.ChannelType(), nil) conn.OpenChannel(channelType, nil)
}() }(newChannel.ChannelType())
} }
conn.Close() conn.Close()
}() }()
......
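The change above passes the channel type into the goroutine instead of reading newChannel from inside it; by the time the goroutine runs, the loop may already be on a later iteration. A minimal sketch of the general pattern, with illustrative values:

package main

import (
	"fmt"
	"sync"
)

func main() {
	types := []string{"session", "direct-tcpip", "x11"}
	var wg sync.WaitGroup

	for _, t := range types {
		wg.Add(1)
		// The value is copied at spawn time, like newChannel.ChannelType()
		// in the diff, so each goroutine sees its own iteration's value.
		go func(channelType string) {
			defer wg.Done()
			fmt.Println("handling", channelType)
		}(t)
	}
	wg.Wait()
}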
...@@ -49,11 +49,13 @@ func decodeConfig(r io.Reader, c *config) error { ...@@ -49,11 +49,13 @@ func decodeConfig(r io.Reader, c *config) error {
// Discover discovers plugins. // Discover discovers plugins.
// //
// This looks in the directory of the executable and the CWD, in that // Search the directory of the executable, then the plugins directory, and
// order for priority. // finally the CWD, in that order. Any conflicts will overwrite previously
// found plugins.
// Hence, the priority order is the reverse of the search order - i.e., the
// CWD has the highest priority.
func (c *config) Discover() error { func (c *config) Discover() error {
// Next, look in the same directory as the executable. Any conflicts // First, look in the same directory as the executable.
// will overwrite those found in our current directory.
exePath, err := osext.Executable() exePath, err := osext.Executable()
if err != nil { if err != nil {
log.Printf("[ERR] Error loading exe directory: %s", err) log.Printf("[ERR] Error loading exe directory: %s", err)
...@@ -63,7 +65,7 @@ func (c *config) Discover() error { ...@@ -63,7 +65,7 @@ func (c *config) Discover() error {
} }
} }
// Look in the plugins directory // Next, look in the plugins directory.
dir, err := ConfigDir() dir, err := ConfigDir()
if err != nil { if err != nil {
log.Printf("[ERR] Error loading config directory: %s", err) log.Printf("[ERR] Error loading config directory: %s", err)
...@@ -73,7 +75,7 @@ func (c *config) Discover() error { ...@@ -73,7 +75,7 @@ func (c *config) Discover() error {
} }
} }
// Look in the cwd. // Last, look in the CWD.
if err := c.discover("."); err != nil { if err := c.discover("."); err != nil {
return err return err
} }
...@@ -180,7 +182,7 @@ func (c *config) discoverSingle(glob string, m *map[string]string) error { ...@@ -180,7 +182,7 @@ func (c *config) discoverSingle(glob string, m *map[string]string) error {
// Look for foo-bar-baz. The plugin name is "baz" // Look for foo-bar-baz. The plugin name is "baz"
plugin := file[len(prefix):] plugin := file[len(prefix):]
log.Printf("[DEBUG] Discoverd plugin: %s = %s", plugin, match) log.Printf("[DEBUG] Discovered plugin: %s = %s", plugin, match)
(*m)[plugin] = match (*m)[plugin] = match
} }
......
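Because every discovery pass writes into the same map keyed by plugin name, the last pass to see a name wins, which is why the CWD ends up with the highest priority. A small sketch of that overwrite behaviour; the paths are hypothetical:

package main

import "fmt"

func main() {
	plugins := map[string]string{}
	searchOrder := []map[string]string{
		{"foo": "/usr/local/bin/packer-builder-foo"},                // exe dir
		{"foo": "/home/user/.packer.d/plugins/packer-builder-foo"}, // plugins dir
		{"foo": "./packer-builder-foo"},                             // CWD
	}
	for _, found := range searchOrder {
		for name, path := range found {
			plugins[name] = path // later passes overwrite earlier ones
		}
	}
	fmt.Println(plugins["foo"]) // "./packer-builder-foo": the CWD entry wins
}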
...@@ -166,6 +166,7 @@ func wrappedMain() int { ...@@ -166,6 +166,7 @@ func wrappedMain() int {
Commands: Commands, Commands: Commands,
HelpFunc: cli.BasicHelpFunc("packer"), HelpFunc: cli.BasicHelpFunc("packer"),
HelpWriter: os.Stdout, HelpWriter: os.Stdout,
Version: Version,
} }
exitCode, err := cli.Run() exitCode, err := cli.Run()
......
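Setting Version on the CLI above is what lets the top-level binary answer -v / --version itself via mitchellh/cli. A hedged sketch of the wiring; the version string and the empty command map are placeholders:

package main

import (
	"fmt"
	"os"

	"github.com/mitchellh/cli"
)

func main() {
	c := &cli.CLI{
		Args:       []string{"--version"},
		Commands:   map[string]cli.CommandFactory{},
		HelpFunc:   cli.BasicHelpFunc("example"),
		HelpWriter: os.Stdout,
		Version:    "0.8.0", // placeholder version string
	}
	exitCode, err := c.Run()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	os.Exit(exitCode)
}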
...@@ -3,15 +3,16 @@ package packer ...@@ -3,15 +3,16 @@ package packer
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"github.com/hashicorp/go-version"
"github.com/mitchellh/mapstructure"
jsonutil "github.com/mitchellh/packer/common/json"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
"sort" "sort"
"text/template" "text/template"
"time" "time"
"github.com/hashicorp/go-version"
"github.com/mitchellh/mapstructure"
jsonutil "github.com/mitchellh/packer/common/json"
) )
// The rawTemplate struct represents the structure of a template read // The rawTemplate struct represents the structure of a template read
...@@ -24,6 +25,7 @@ type rawTemplate struct { ...@@ -24,6 +25,7 @@ type rawTemplate struct {
Description string Description string
Builders []map[string]interface{} Builders []map[string]interface{}
Hooks map[string][]string Hooks map[string][]string
Push PushConfig
PostProcessors []interface{} `mapstructure:"post-processors"` PostProcessors []interface{} `mapstructure:"post-processors"`
Provisioners []map[string]interface{} Provisioners []map[string]interface{}
Variables map[string]interface{} Variables map[string]interface{}
...@@ -32,14 +34,27 @@ type rawTemplate struct { ...@@ -32,14 +34,27 @@ type rawTemplate struct {
// The Template struct represents a parsed template, parsed into the most // The Template struct represents a parsed template, parsed into the most
// completed form it can be without additional processing by the caller. // completed form it can be without additional processing by the caller.
type Template struct { type Template struct {
RawContents []byte
Description string Description string
Variables map[string]RawVariable Variables map[string]RawVariable
Builders map[string]RawBuilderConfig Builders map[string]RawBuilderConfig
Hooks map[string][]string Hooks map[string][]string
Push *PushConfig
PostProcessors [][]RawPostProcessorConfig PostProcessors [][]RawPostProcessorConfig
Provisioners []RawProvisionerConfig Provisioners []RawProvisionerConfig
} }
// PushConfig is the configuration structure for the push settings.
type PushConfig struct {
Name string
Address string
BaseDir string `mapstructure:"base_dir"`
Include []string
Exclude []string
Token string
VCS bool
}
// The RawBuilderConfig struct represents a raw, unprocessed builder // The RawBuilderConfig struct represents a raw, unprocessed builder
// configuration. It contains the name of the builder as well as the // configuration. It contains the name of the builder as well as the
// raw configuration. If requested, this is used to compile into a full // raw configuration. If requested, this is used to compile into a full
...@@ -150,10 +165,12 @@ func ParseTemplate(data []byte, vars map[string]string) (t *Template, err error) ...@@ -150,10 +165,12 @@ func ParseTemplate(data []byte, vars map[string]string) (t *Template, err error)
} }
t = &Template{} t = &Template{}
t.RawContents = data
t.Description = rawTpl.Description t.Description = rawTpl.Description
t.Variables = make(map[string]RawVariable) t.Variables = make(map[string]RawVariable)
t.Builders = make(map[string]RawBuilderConfig) t.Builders = make(map[string]RawBuilderConfig)
t.Hooks = rawTpl.Hooks t.Hooks = rawTpl.Hooks
t.Push = &rawTpl.Push
t.PostProcessors = make([][]RawPostProcessorConfig, len(rawTpl.PostProcessors)) t.PostProcessors = make([][]RawPostProcessorConfig, len(rawTpl.PostProcessors))
t.Provisioners = make([]RawProvisionerConfig, len(rawTpl.Provisioners)) t.Provisioners = make([]RawProvisionerConfig, len(rawTpl.Provisioners))
...@@ -475,52 +492,13 @@ func (t *Template) Build(name string, components *ComponentFinder) (b Build, err ...@@ -475,52 +492,13 @@ func (t *Template) Build(name string, components *ComponentFinder) (b Build, err
return return
} }
// Prepare the variable template processor, which is a bit unique
// because we don't allow user variable usage and we add a function
// to read from the environment.
varTpl, err := NewConfigTemplate()
if err != nil {
return nil, err
}
varTpl.Funcs(template.FuncMap{
"env": templateEnv,
"user": templateDisableUser,
})
// Prepare the variables
var varErrors []error
variables := make(map[string]string)
for k, v := range t.Variables {
if v.Required && !v.HasValue {
varErrors = append(varErrors,
fmt.Errorf("Required user variable '%s' not set", k))
}
var val string
if v.HasValue {
val = v.Value
} else {
val, err = varTpl.Process(v.Default, nil)
if err != nil {
varErrors = append(varErrors,
fmt.Errorf("Error processing user variable '%s': %s'", k, err))
}
}
variables[k] = val
}
if len(varErrors) > 0 {
return nil, &MultiError{varErrors}
}
// Process the name // Process the name
tpl, err := NewConfigTemplate() tpl, variables, err := t.NewConfigTemplate()
if err != nil { if err != nil {
return nil, err return nil, err
} }
tpl.UserVars = variables
rawName := name
name, err = tpl.Process(name, nil) name, err = tpl.Process(name, nil)
if err != nil { if err != nil {
return nil, err return nil, err
...@@ -554,7 +532,7 @@ func (t *Template) Build(name string, components *ComponentFinder) (b Build, err ...@@ -554,7 +532,7 @@ func (t *Template) Build(name string, components *ComponentFinder) (b Build, err
for _, rawPPs := range t.PostProcessors { for _, rawPPs := range t.PostProcessors {
current := make([]coreBuildPostProcessor, 0, len(rawPPs)) current := make([]coreBuildPostProcessor, 0, len(rawPPs))
for _, rawPP := range rawPPs { for _, rawPP := range rawPPs {
if rawPP.TemplateOnlyExcept.Skip(name) { if rawPP.TemplateOnlyExcept.Skip(rawName) {
continue continue
} }
...@@ -587,7 +565,7 @@ func (t *Template) Build(name string, components *ComponentFinder) (b Build, err ...@@ -587,7 +565,7 @@ func (t *Template) Build(name string, components *ComponentFinder) (b Build, err
// Prepare the provisioners // Prepare the provisioners
provisioners := make([]coreBuildProvisioner, 0, len(t.Provisioners)) provisioners := make([]coreBuildProvisioner, 0, len(t.Provisioners))
for _, rawProvisioner := range t.Provisioners { for _, rawProvisioner := range t.Provisioners {
if rawProvisioner.TemplateOnlyExcept.Skip(name) { if rawProvisioner.TemplateOnlyExcept.Skip(rawName) {
continue continue
} }
...@@ -636,6 +614,59 @@ func (t *Template) Build(name string, components *ComponentFinder) (b Build, err ...@@ -636,6 +614,59 @@ func (t *Template) Build(name string, components *ComponentFinder) (b Build, err
return return
} }
// NewConfigTemplate builds a ConfigTemplate populated with the values from a
// parsed template, along with the processed user variables.
func (t *Template) NewConfigTemplate() (c *ConfigTemplate, variables map[string]string, err error) {
// Prepare the variable template processor, which is a bit unique
// because we don't allow user variable usage and we add a function
// to read from the environment.
varTpl, err := NewConfigTemplate()
if err != nil {
return nil, nil, err
}
varTpl.Funcs(template.FuncMap{
"env": templateEnv,
"user": templateDisableUser,
})
// Prepare the variables
var varErrors []error
variables = make(map[string]string)
for k, v := range t.Variables {
if v.Required && !v.HasValue {
varErrors = append(varErrors,
fmt.Errorf("Required user variable '%s' not set", k))
}
var val string
if v.HasValue {
val = v.Value
} else {
val, err = varTpl.Process(v.Default, nil)
if err != nil {
varErrors = append(varErrors,
fmt.Errorf("Error processing user variable '%s': %s'", k, err))
}
}
variables[k] = val
}
if len(varErrors) > 0 {
return nil, variables, &MultiError{varErrors}
}
// Prepare the template processor with the user variables available
tpl, err := NewConfigTemplate()
if err != nil {
return nil, variables, err
}
tpl.UserVars = variables
return tpl, variables, nil
}
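A brief usage sketch of the new method, in the style of the tests in this commit; the template JSON and the variable value "bar" are illustrative:

package main

import (
	"fmt"

	"github.com/mitchellh/packer/packer"
)

func main() {
	data := `{
		"variables": {"foo": null},
		"builders": [{"name": "test-{{user \"foo\"}}", "type": "test-builder"}]
	}`
	t, err := packer.ParseTemplate([]byte(data), map[string]string{"foo": "bar"})
	if err != nil {
		panic(err)
	}
	tpl, _, err := t.NewConfigTemplate()
	if err != nil {
		panic(err)
	}
	name, _ := tpl.Process(`test-{{user "foo"}}`, nil)
	fmt.Println(name) // test-bar
}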
// TemplateOnlyExcept contains the logic required for "only" and "except" // TemplateOnlyExcept contains the logic required for "only" and "except"
// meta-parameters. // meta-parameters.
type TemplateOnlyExcept struct { type TemplateOnlyExcept struct {
......
...@@ -58,6 +58,10 @@ func TestParseTemplateFile_basic(t *testing.T) { ...@@ -58,6 +58,10 @@ func TestParseTemplateFile_basic(t *testing.T) {
if len(result.Builders) != 1 { if len(result.Builders) != 1 {
t.Fatalf("bad: %#v", result.Builders) t.Fatalf("bad: %#v", result.Builders)
} }
if string(result.RawContents) != data {
t.Fatalf("expected %q to be %q", result.RawContents, data)
}
} }
func TestParseTemplateFile_minPackerVersionBad(t *testing.T) { func TestParseTemplateFile_minPackerVersionBad(t *testing.T) {
...@@ -493,7 +497,7 @@ func TestParseTemplate_Provisioners(t *testing.T) { ...@@ -493,7 +497,7 @@ func TestParseTemplate_Provisioners(t *testing.T) {
result, err := ParseTemplate([]byte(data), nil) result, err := ParseTemplate([]byte(data), nil)
if err != nil { if err != nil {
t.Fatal("err: %s", err) t.Fatalf("err: %s", err)
} }
if result == nil { if result == nil {
t.Fatal("should have result") t.Fatal("should have result")
...@@ -525,7 +529,7 @@ func TestParseTemplate_ProvisionerPauseBefore(t *testing.T) { ...@@ -525,7 +529,7 @@ func TestParseTemplate_ProvisionerPauseBefore(t *testing.T) {
result, err := ParseTemplate([]byte(data), nil) result, err := ParseTemplate([]byte(data), nil)
if err != nil { if err != nil {
t.Fatal("err: %s", err) t.Fatalf("err: %s", err)
} }
if result == nil { if result == nil {
t.Fatal("should have result") t.Fatal("should have result")
...@@ -541,6 +545,41 @@ func TestParseTemplate_ProvisionerPauseBefore(t *testing.T) { ...@@ -541,6 +545,41 @@ func TestParseTemplate_ProvisionerPauseBefore(t *testing.T) {
} }
} }
func TestParseTemplateFile_push(t *testing.T) {
data := `
{
"builders": [{"type": "something"}],
"push": {
"name": "hello",
"include": ["one"],
"exclude": ["two"]
}
}
`
tf, err := ioutil.TempFile("", "packer")
if err != nil {
t.Fatalf("err: %s", err)
}
tf.Write([]byte(data))
tf.Close()
result, err := ParseTemplateFile(tf.Name(), nil)
if err != nil {
t.Fatalf("err: %s", err)
}
expected := &PushConfig{
Name: "hello",
Include: []string{"one"},
Exclude: []string{"two"},
}
if !reflect.DeepEqual(result.Push, expected) {
t.Fatalf("bad: %#v", result.Push)
}
}
func TestParseTemplate_Variables(t *testing.T) { func TestParseTemplate_Variables(t *testing.T) {
data := ` data := `
{ {
...@@ -1165,7 +1204,62 @@ func TestTemplateBuild_exceptPP(t *testing.T) { ...@@ -1165,7 +1204,62 @@ func TestTemplateBuild_exceptPP(t *testing.T) {
t.Fatal("should have no postProcessors") t.Fatal("should have no postProcessors")
} }
// Verify test2 has no post-processors // Verify test2 has one post-processor
build, err = template.Build("test2", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
}
cbuild = build.(*coreBuild)
if len(cbuild.postProcessors) != 1 {
t.Fatalf("invalid: %d", len(cbuild.postProcessors))
}
}
func TestTemplateBuild_exceptPPConfigTemplateName(t *testing.T) {
data := `
{
"variables": {
"foo": null
},
"builders": [
{
"name": "test1-{{user \"foo\"}}",
"type": "test-builder"
},
{
"name": "test2",
"type": "test-builder"
}
],
"post-processors": [
{
"type": "test-pp",
"except": ["test1-{{user \"foo\"}}"]
}
]
}
`
template, err := ParseTemplate([]byte(data), map[string]string{"foo": "bar"})
if err != nil {
t.Fatalf("err: %s", err)
}
// Verify test1 has no post-processors
build, err := template.Build("test1-{{user \"foo\"}}", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
}
cbuild := build.(*coreBuild)
if len(cbuild.postProcessors) > 0 {
t.Fatal("should have no postProcessors")
}
// Verify test2 has one post-processor
build, err = template.Build("test2", testTemplateComponentFinder()) build, err = template.Build("test2", testTemplateComponentFinder())
if err != nil { if err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
...@@ -1245,7 +1339,62 @@ func TestTemplateBuild_exceptProv(t *testing.T) { ...@@ -1245,7 +1339,62 @@ func TestTemplateBuild_exceptProv(t *testing.T) {
t.Fatal("should have no provisioners") t.Fatal("should have no provisioners")
} }
// Verify test2 has no provisioners // Verify test2 has one provisioner
build, err = template.Build("test2", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
}
cbuild = build.(*coreBuild)
if len(cbuild.provisioners) != 1 {
t.Fatalf("invalid: %d", len(cbuild.provisioners))
}
}
func TestTemplateBuild_exceptProvConfigTemplateName(t *testing.T) {
data := `
{
"variables": {
"foo": null
},
"builders": [
{
"name": "test1-{{user \"foo\"}}",
"type": "test-builder"
},
{
"name": "test2",
"type": "test-builder"
}
],
"provisioners": [
{
"type": "test-prov",
"except": ["test1-{{user \"foo\"}}"]
}
]
}
`
template, err := ParseTemplate([]byte(data), map[string]string{"foo": "bar"})
if err != nil {
t.Fatalf("err: %s", err)
}
// Verify test1 has no provisioners
build, err := template.Build("test1-{{user \"foo\"}}", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
}
cbuild := build.(*coreBuild)
if len(cbuild.provisioners) > 0 {
t.Fatal("should have no provisioners")
}
// Verify test2 has one provisioner
build, err = template.Build("test2", testTemplateComponentFinder()) build, err = template.Build("test2", testTemplateComponentFinder())
if err != nil { if err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
...@@ -1325,7 +1474,7 @@ func TestTemplateBuild_onlyPP(t *testing.T) { ...@@ -1325,7 +1474,7 @@ func TestTemplateBuild_onlyPP(t *testing.T) {
t.Fatal("should have no postProcessors") t.Fatal("should have no postProcessors")
} }
// Verify test2 has no post-processors // Verify test2 has one post-processor
build, err = template.Build("test2", testTemplateComponentFinder()) build, err = template.Build("test2", testTemplateComponentFinder())
if err != nil { if err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
...@@ -1337,6 +1486,61 @@ func TestTemplateBuild_onlyPP(t *testing.T) { ...@@ -1337,6 +1486,61 @@ func TestTemplateBuild_onlyPP(t *testing.T) {
} }
} }
func TestTemplateBuild_onlyPPConfigTemplateName(t *testing.T) {
data := `
{
"variables": {
"foo": null
},
"builders": [
{
"name": "test1",
"type": "test-builder"
},
{
"name": "test2-{{user \"foo\"}}",
"type": "test-builder"
}
],
"post-processors": [
{
"type": "test-pp",
"only": ["test2-{{user \"foo\"}}"]
}
]
}
`
template, err := ParseTemplate([]byte(data), map[string]string{"foo": "bar"})
if err != nil {
t.Fatalf("err: %s", err)
}
// Verify test1 has no post-processors
build, err := template.Build("test1", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
}
cbuild := build.(*coreBuild)
if len(cbuild.postProcessors) > 0 {
t.Fatal("should have no postProcessors")
}
// Verify test2 has one post-processor
build, err = template.Build("test2-{{user \"foo\"}}", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
}
cbuild = build.(*coreBuild)
if len(cbuild.postProcessors) != 1 {
t.Fatalf("invalid: %d", len(cbuild.postProcessors))
}
}
func TestTemplateBuild_onlyProvInvalid(t *testing.T) { func TestTemplateBuild_onlyProvInvalid(t *testing.T) {
data := ` data := `
{ {
...@@ -1405,7 +1609,7 @@ func TestTemplateBuild_onlyProv(t *testing.T) { ...@@ -1405,7 +1609,7 @@ func TestTemplateBuild_onlyProv(t *testing.T) {
t.Fatal("should have no provisioners") t.Fatal("should have no provisioners")
} }
// Verify test2 has no provisioners // Verify test2 has one provisioner
build, err = template.Build("test2", testTemplateComponentFinder()) build, err = template.Build("test2", testTemplateComponentFinder())
if err != nil { if err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
...@@ -1417,6 +1621,61 @@ func TestTemplateBuild_onlyProv(t *testing.T) { ...@@ -1417,6 +1621,61 @@ func TestTemplateBuild_onlyProv(t *testing.T) {
} }
} }
func TestTemplateBuild_onlyProvConfigTemplateName(t *testing.T) {
data := `
{
"variables": {
"foo": null
},
"builders": [
{
"name": "test1",
"type": "test-builder"
},
{
"name": "test2-{{user \"foo\"}}",
"type": "test-builder"
}
],
"provisioners": [
{
"type": "test-prov",
"only": ["test2-{{user \"foo\"}}"]
}
]
}
`
template, err := ParseTemplate([]byte(data), map[string]string{"foo": "bar"})
if err != nil {
t.Fatalf("err: %s", err)
}
// Verify test1 has no provisioners
build, err := template.Build("test1", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
}
cbuild := build.(*coreBuild)
if len(cbuild.provisioners) > 0 {
t.Fatal("should have no provisioners")
}
// Verify test2 has one provisioner
build, err = template.Build("test2-{{user \"foo\"}}", testTemplateComponentFinder())
if err != nil {
t.Fatalf("err: %s", err)
}
cbuild = build.(*coreBuild)
if len(cbuild.provisioners) != 1 {
t.Fatalf("invalid: %d", len(cbuild.provisioners))
}
}
func TestTemplate_Build_ProvisionerOverride(t *testing.T) { func TestTemplate_Build_ProvisionerOverride(t *testing.T) {
data := ` data := `
{ {
......
package main
import (
"github.com/mitchellh/packer/packer/plugin"
"github.com/mitchellh/packer/post-processor/atlas"
)
func main() {
server, err := plugin.Server()
if err != nil {
panic(err)
}
server.RegisterPostProcessor(new(atlas.PostProcessor))
server.Serve()
}
package atlas
import (
"fmt"
)
const BuilderId = "packer.post-processor.atlas"
type Artifact struct {
Name string
Type string
Version int
}
func (*Artifact) BuilderId() string {
return BuilderId
}
func (a *Artifact) Files() []string {
return nil
}
func (a *Artifact) Id() string {
return fmt.Sprintf("%s/%s/%d", a.Name, a.Type, a.Version)
}
func (a *Artifact) String() string {
return fmt.Sprintf("%s/%s (v%d)", a.Name, a.Type, a.Version)
}
func (*Artifact) State(name string) interface{} {
return nil
}
func (a *Artifact) Destroy() error {
return nil
}
package atlas
import (
"fmt"
"os"
"strconv"
"strings"
"github.com/hashicorp/atlas-go/archive"
"github.com/hashicorp/atlas-go/v1"
"github.com/mitchellh/mapstructure"
"github.com/mitchellh/packer/common"
"github.com/mitchellh/packer/packer"
)
const BuildEnvKey = "ATLAS_BUILD_ID"
// Artifacts can return a string for this state key and the post-processor
// will automatically use this as the type. The user's value overrides
// this if `artifact_type_override` is set to true.
const ArtifactStateType = "atlas.artifact.type"
// Artifacts can return a map[string]string for this state key and this
// post-processor will automatically merge it into the metadata for any
// uploaded artifact versions.
const ArtifactStateMetadata = "atlas.artifact.metadata"
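A hedged sketch of how a builder's artifact could advertise these state keys; the artifact type, the "amazon.ami" value, and the metadata are hypothetical, and the post-processor reads them through packer.Artifact.State as shown further down:

package main

import "fmt"

// exampleArtifact is a hypothetical builder artifact that tells the Atlas
// post-processor its artifact type and extra metadata via State().
type exampleArtifact struct{}

func (exampleArtifact) State(name string) interface{} {
	switch name {
	case "atlas.artifact.type": // ArtifactStateType
		return "amazon.ami" // illustrative type string
	case "atlas.artifact.metadata": // ArtifactStateMetadata
		return map[string]string{"region": "us-east-1"} // illustrative metadata
	}
	return nil
}

func main() {
	a := exampleArtifact{}
	fmt.Println(a.State("atlas.artifact.type")) // amazon.ami
}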
type Config struct {
common.PackerConfig `mapstructure:",squash"`
Artifact string
Type string `mapstructure:"artifact_type"`
TypeOverride bool `mapstructure:"artifact_type_override"`
Metadata map[string]string
ServerAddr string `mapstructure:"server_address"`
Token string
// This shouldn't ever be set outside of unit tests.
Test bool `mapstructure:"test"`
tpl *packer.ConfigTemplate
user, name string
buildId int
}
type PostProcessor struct {
config Config
client *atlas.Client
}
func (p *PostProcessor) Configure(raws ...interface{}) error {
_, err := common.DecodeConfig(&p.config, raws...)
if err != nil {
return err
}
p.config.tpl, err = packer.NewConfigTemplate()
if err != nil {
return err
}
p.config.tpl.UserVars = p.config.PackerUserVars
templates := map[string]*string{
"artifact": &p.config.Artifact,
"type": &p.config.Type,
"server_address": &p.config.ServerAddr,
"token": &p.config.Token,
}
errs := new(packer.MultiError)
for key, ptr := range templates {
*ptr, err = p.config.tpl.Process(*ptr, nil)
if err != nil {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Error processing %s: %s", key, err))
}
}
required := map[string]*string{
"artifact": &p.config.Artifact,
"artifact_type": &p.config.Type,
}
for key, ptr := range required {
if *ptr == "" {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("%s must be set", key))
}
}
if len(errs.Errors) > 0 {
return errs
}
p.config.user, p.config.name, err = atlas.ParseSlug(p.config.Artifact)
if err != nil {
return err
}
// If we have a build ID, save it
if v := os.Getenv(BuildEnvKey); v != "" {
raw, err := strconv.ParseInt(v, 0, 0)
if err != nil {
return fmt.Errorf(
"Error parsing build ID: %s", err)
}
p.config.buildId = int(raw)
}
// Build the client
p.client = atlas.DefaultClient()
if p.config.ServerAddr != "" {
p.client, err = atlas.NewClient(p.config.ServerAddr)
if err != nil {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Error initializing atlas client: %s", err))
return errs
}
}
if p.config.Token != "" {
p.client.Token = p.config.Token
}
if !p.config.Test {
// Verify the client
if err := p.client.Verify(); err != nil {
if err == atlas.ErrAuth {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Error connecting to atlas server, please check your ATLAS_TOKEN env: %s", err))
} else {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Error initializing atlas client: %s", err))
}
return errs
}
}
return nil
}
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
if _, err := p.client.Artifact(p.config.user, p.config.name); err != nil {
if err != atlas.ErrNotFound {
return nil, false, fmt.Errorf(
"Error finding artifact: %s", err)
}
// Artifact doesn't exist, create it
ui.Message(fmt.Sprintf("Creating artifact: %s", p.config.Artifact))
_, err = p.client.CreateArtifact(p.config.user, p.config.name)
if err != nil {
return nil, false, fmt.Errorf(
"Error creating artifact: %s", err)
}
}
opts := &atlas.UploadArtifactOpts{
User: p.config.user,
Name: p.config.name,
Type: p.config.Type,
ID: artifact.Id(),
Metadata: p.metadata(artifact),
BuildID: p.config.buildId,
}
if fs := artifact.Files(); len(fs) > 0 {
var archiveOpts archive.ArchiveOpts
// We have files. We want to compress/upload them. If we have just
// one file, then we use it as-is. Otherwise, we compress all of
// them into a single file.
var path string
if len(fs) == 1 {
path = fs[0]
} else {
path = longestCommonPrefix(fs)
if path == "" {
return nil, false, fmt.Errorf(
"No common prefix for achiving files: %v", fs)
}
// Modify the archive options to only include the files
// that are in our file list.
include := make([]string, len(fs))
for i, f := range fs {
include[i] = strings.Replace(f, path, "", 1)
}
archiveOpts.Include = include
}
r, err := archive.CreateArchive(path, &archiveOpts)
if err != nil {
return nil, false, fmt.Errorf(
"Error archiving artifact: %s", err)
}
defer r.Close()
opts.File = r
opts.FileSize = r.Size
}
ui.Message("Uploading artifact version...")
var av *atlas.ArtifactVersion
doneCh := make(chan struct{})
errCh := make(chan error, 1)
go func() {
var err error
av, err = p.client.UploadArtifact(opts)
if err != nil {
errCh <- err
return
}
close(doneCh)
}()
select {
case err := <-errCh:
return nil, false, fmt.Errorf("Error uploading: %s", err)
case <-doneCh:
}
return &Artifact{
Name: p.config.Artifact,
Type: p.config.Type,
Version: av.Version,
}, true, nil
}
func (p *PostProcessor) metadata(artifact packer.Artifact) map[string]string {
var metadata map[string]string
metadataRaw := artifact.State(ArtifactStateMetadata)
if metadataRaw != nil {
if err := mapstructure.Decode(metadataRaw, &metadata); err != nil {
panic(err)
}
}
if p.config.Metadata != nil {
// If we have no extra metadata, just return as-is
if metadata == nil {
return p.config.Metadata
}
// Merge the metadata
for k, v := range p.config.Metadata {
metadata[k] = v
}
}
return metadata
}
func (p *PostProcessor) artifactType(artifact packer.Artifact) string {
if !p.config.TypeOverride {
if v := artifact.State(ArtifactStateType); v != nil {
return v.(string)
}
}
return p.config.Type
}
package atlas
import (
"os"
"reflect"
"testing"
"github.com/mitchellh/packer/packer"
)
func TestPostProcessorConfigure(t *testing.T) {
currentEnv := os.Getenv("ATLAS_TOKEN")
os.Setenv("ATLAS_TOKEN", "")
defer os.Setenv("ATLAS_TOKEN", currentEnv)
var p PostProcessor
if err := p.Configure(validDefaults()); err != nil {
t.Fatalf("err: %s", err)
}
if p.client == nil {
t.Fatal("should have client")
}
if p.client.Token != "" {
t.Fatal("should not have token")
}
}
func TestPostProcessorConfigure_buildId(t *testing.T) {
defer os.Setenv(BuildEnvKey, os.Getenv(BuildEnvKey))
os.Setenv(BuildEnvKey, "5")
var p PostProcessor
if err := p.Configure(validDefaults()); err != nil {
t.Fatalf("err: %s", err)
}
if p.config.buildId != 5 {
t.Fatalf("bad: %#v", p.config.buildId)
}
}
func TestPostProcessorMetadata(t *testing.T) {
var p PostProcessor
if err := p.Configure(validDefaults()); err != nil {
t.Fatalf("err: %s", err)
}
artifact := new(packer.MockArtifact)
metadata := p.metadata(artifact)
if len(metadata) > 0 {
t.Fatalf("bad: %#v", metadata)
}
}
func TestPostProcessorMetadata_artifact(t *testing.T) {
config := validDefaults()
config["metadata"] = map[string]string{
"foo": "bar",
}
var p PostProcessor
if err := p.Configure(config); err != nil {
t.Fatalf("err: %s", err)
}
artifact := new(packer.MockArtifact)
artifact.StateValues = map[string]interface{}{
ArtifactStateMetadata: map[interface{}]interface{}{
"bar": "baz",
},
}
metadata := p.metadata(artifact)
expected := map[string]string{
"foo": "bar",
"bar": "baz",
}
if !reflect.DeepEqual(metadata, expected) {
t.Fatalf("bad: %#v", metadata)
}
}
func TestPostProcessorMetadata_config(t *testing.T) {
config := validDefaults()
config["metadata"] = map[string]string{
"foo": "bar",
}
var p PostProcessor
if err := p.Configure(config); err != nil {
t.Fatalf("err: %s", err)
}
artifact := new(packer.MockArtifact)
metadata := p.metadata(artifact)
expected := map[string]string{
"foo": "bar",
}
if !reflect.DeepEqual(metadata, expected) {
t.Fatalf("bad: %#v", metadata)
}
}
func TestPostProcessorType(t *testing.T) {
var p PostProcessor
if err := p.Configure(validDefaults()); err != nil {
t.Fatalf("err: %s", err)
}
artifact := new(packer.MockArtifact)
actual := p.artifactType(artifact)
if actual != "foo" {
t.Fatalf("bad: %#v", actual)
}
}
func TestPostProcessorType_artifact(t *testing.T) {
var p PostProcessor
if err := p.Configure(validDefaults()); err != nil {
t.Fatalf("err: %s", err)
}
artifact := new(packer.MockArtifact)
artifact.StateValues = map[string]interface{}{
ArtifactStateType: "bar",
}
actual := p.artifactType(artifact)
if actual != "bar" {
t.Fatalf("bad: %#v", actual)
}
}
func validDefaults() map[string]interface{} {
return map[string]interface{}{
"artifact": "mitchellh/test",
"artifact_type": "foo",
"test": true,
}
}
package atlas
import (
"math"
"strings"
)
// longestCommonPrefix finds the longest common prefix for all the strings
// given as an argument, or returns the empty string if a prefix can't be
// found.
//
// This function just uses brute force instead of a more optimized algorithm.
func longestCommonPrefix(vs []string) string {
var length int64
// Find the shortest string
var shortest string
length = math.MaxUint32
for _, v := range vs {
if int64(len(v)) < length {
shortest = v
length = int64(len(v))
}
}
// Now go through and find a prefix to all the strings using this
// short string, which itself must contain the prefix.
for i := len(shortest); i > 0; i-- {
// We only care about prefixes with path seps
if shortest[i-1] != '/' {
continue
}
bad := false
prefix := shortest[0 : i]
for _, v := range vs {
if !strings.HasPrefix(v, prefix) {
bad = true
break
}
}
if !bad {
return prefix
}
}
return ""
}
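A brief illustration of why only prefixes ending in a path separator are considered: the prefix found here is later stripped from each file to form the archive's relative include paths, so it has to fall on a directory boundary. The file names below are hypothetical:

package main

import (
	"fmt"
	"strings"
)

func main() {
	files := []string{"/build/output/box.ovf", "/build/output/box.vmdk"}
	prefix := "/build/output/" // what longestCommonPrefix(files) returns
	for _, f := range files {
		fmt.Println(strings.Replace(f, prefix, "", 1)) // box.ovf, then box.vmdk
	}
}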
package atlas
import (
"testing"
)
func TestLongestCommonPrefix(t *testing.T) {
cases := []struct {
Input []string
Output string
}{
{
[]string{"foo", "bar"},
"",
},
{
[]string{"foo", "foobar"},
"",
},
{
[]string{"foo/", "foo/bar"},
"foo/",
},
{
[]string{"/foo/", "/bar"},
"/",
},
}
for _, tc := range cases {
actual := longestCommonPrefix(tc.Input)
if actual != tc.Output {
t.Fatalf("bad: %#v\n\n%#v", actual, tc.Input)
}
}
}
...@@ -78,7 +78,7 @@ func (v VagrantCloudClient) Get(path string) (*http.Response, error) { ...@@ -78,7 +78,7 @@ func (v VagrantCloudClient) Get(path string) (*http.Response, error) {
req.Header.Add("Content-Type", "application/json") req.Header.Add("Content-Type", "application/json")
resp, err := v.client.Do(req) resp, err := v.client.Do(req)
log.Printf("Post-Processor Vagrant Cloud API Response: \n\n%s", resp) log.Printf("Post-Processor Vagrant Cloud API Response: \n\n%+v", resp)
return resp, err return resp, err
} }
...@@ -96,7 +96,7 @@ func (v VagrantCloudClient) Delete(path string) (*http.Response, error) { ...@@ -96,7 +96,7 @@ func (v VagrantCloudClient) Delete(path string) (*http.Response, error) {
req.Header.Add("Content-Type", "application/json") req.Header.Add("Content-Type", "application/json")
resp, err := v.client.Do(req) resp, err := v.client.Do(req)
log.Printf("Post-Processor Vagrant Cloud API Response: \n\n%s", resp) log.Printf("Post-Processor Vagrant Cloud API Response: \n\n%+v", resp)
return resp, err return resp, err
} }
...@@ -128,7 +128,7 @@ func (v VagrantCloudClient) Upload(path string, url string) (*http.Response, err ...@@ -128,7 +128,7 @@ func (v VagrantCloudClient) Upload(path string, url string) (*http.Response, err
resp, err := v.client.Do(request) resp, err := v.client.Do(request)
log.Printf("Post-Processor Vagrant Cloud Upload Response: \n\n%s", resp) log.Printf("Post-Processor Vagrant Cloud Upload Response: \n\n%+v", resp)
return resp, err return resp, err
} }
...@@ -153,7 +153,7 @@ func (v VagrantCloudClient) Post(path string, body interface{}) (*http.Response, ...@@ -153,7 +153,7 @@ func (v VagrantCloudClient) Post(path string, body interface{}) (*http.Response,
resp, err := v.client.Do(req) resp, err := v.client.Do(req)
log.Printf("Post-Processor Vagrant Cloud API Response: \n\n%s", resp) log.Printf("Post-Processor Vagrant Cloud API Response: \n\n%+v", resp)
return resp, err return resp, err
} }
...@@ -172,7 +172,7 @@ func (v VagrantCloudClient) Put(path string) (*http.Response, error) { ...@@ -172,7 +172,7 @@ func (v VagrantCloudClient) Put(path string) (*http.Response, error) {
resp, err := v.client.Do(req) resp, err := v.client.Do(req)
log.Printf("Post-Processor Vagrant Cloud API Response: \n\n%s", resp) log.Printf("Post-Processor Vagrant Cloud API Response: \n\n%+v", resp)
return resp, err return resp, err
} }
...@@ -25,7 +25,7 @@ func (s *stepCreateProvider) Run(state multistep.StateBag) multistep.StepAction ...@@ -25,7 +25,7 @@ func (s *stepCreateProvider) Run(state multistep.StateBag) multistep.StepAction
providerName := state.Get("providerName").(string) providerName := state.Get("providerName").(string)
downloadUrl := state.Get("boxDownloadUrl").(string) downloadUrl := state.Get("boxDownloadUrl").(string)
path := fmt.Sprintf("box/%s/version/%v/providers", box.Tag, version.Number) path := fmt.Sprintf("box/%s/version/%v/providers", box.Tag, version.Version)
provider := &Provider{Name: providerName} provider := &Provider{Name: providerName}
...@@ -86,7 +86,7 @@ func (s *stepCreateProvider) Cleanup(state multistep.StateBag) { ...@@ -86,7 +86,7 @@ func (s *stepCreateProvider) Cleanup(state multistep.StateBag) {
ui.Say("Cleaning up provider") ui.Say("Cleaning up provider")
ui.Message(fmt.Sprintf("Deleting provider: %s", s.name)) ui.Message(fmt.Sprintf("Deleting provider: %s", s.name))
path := fmt.Sprintf("box/%s/version/%v/provider/%s", box.Tag, version.Number, s.name) path := fmt.Sprintf("box/%s/version/%v/provider/%s", box.Tag, version.Version, s.name)
// No need for resp from the cleanup DELETE // No need for resp from the cleanup DELETE
_, err := client.Delete(path) _, err := client.Delete(path)
......
...@@ -9,11 +9,9 @@ import ( ...@@ -9,11 +9,9 @@ import (
type Version struct { type Version struct {
Version string `json:"version"` Version string `json:"version"`
Description string `json:"description,omitempty"` Description string `json:"description,omitempty"`
Number uint `json:"number,omitempty"`
} }
type stepCreateVersion struct { type stepCreateVersion struct {
number uint // number of the version, if needed in cleanup
} }
func (s *stepCreateVersion) Run(state multistep.StateBag) multistep.StepAction { func (s *stepCreateVersion) Run(state multistep.StateBag) multistep.StepAction {
...@@ -52,9 +50,6 @@ func (s *stepCreateVersion) Run(state multistep.StateBag) multistep.StepAction { ...@@ -52,9 +50,6 @@ func (s *stepCreateVersion) Run(state multistep.StateBag) multistep.StepAction {
return multistep.ActionHalt return multistep.ActionHalt
} }
// Save the number for cleanup
s.number = version.Number
state.Put("version", version) state.Put("version", version)
return multistep.ActionContinue return multistep.ActionContinue
...@@ -63,15 +58,8 @@ func (s *stepCreateVersion) Run(state multistep.StateBag) multistep.StepAction { ...@@ -63,15 +58,8 @@ func (s *stepCreateVersion) Run(state multistep.StateBag) multistep.StepAction {
func (s *stepCreateVersion) Cleanup(state multistep.StateBag) { func (s *stepCreateVersion) Cleanup(state multistep.StateBag) {
client := state.Get("client").(*VagrantCloudClient) client := state.Get("client").(*VagrantCloudClient)
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
config := state.Get("config").(Config)
box := state.Get("box").(*Box) box := state.Get("box").(*Box)
version := state.Get("version").(*Version)
// If we didn't save the version number, it likely doesn't exist or
// already existed
if s.number == 0 {
ui.Message("Version was not created or previously existed, not deleting")
return
}
_, cancelled := state.GetOk(multistep.StateCancelled) _, cancelled := state.GetOk(multistep.StateCancelled)
_, halted := state.GetOk(multistep.StateHalted) _, halted := state.GetOk(multistep.StateHalted)
...@@ -82,10 +70,10 @@ func (s *stepCreateVersion) Cleanup(state multistep.StateBag) { ...@@ -82,10 +70,10 @@ func (s *stepCreateVersion) Cleanup(state multistep.StateBag) {
return return
} }
path := fmt.Sprintf("box/%s/version/%v", box.Tag, s.number) path := fmt.Sprintf("box/%s/version/%v", box.Tag, version.Version)
ui.Say("Cleaning up version") ui.Say("Cleaning up version")
ui.Message(fmt.Sprintf("Deleting version: %s", config.Version)) ui.Message(fmt.Sprintf("Deleting version: %s", version.Version))
// No need for resp from the cleanup DELETE // No need for resp from the cleanup DELETE
_, err := client.Delete(path) _, err := client.Delete(path)
......
...@@ -22,7 +22,7 @@ func (s *stepPrepareUpload) Run(state multistep.StateBag) multistep.StepAction { ...@@ -22,7 +22,7 @@ func (s *stepPrepareUpload) Run(state multistep.StateBag) multistep.StepAction {
provider := state.Get("provider").(*Provider) provider := state.Get("provider").(*Provider)
artifactFilePath := state.Get("artifactFilePath").(string) artifactFilePath := state.Get("artifactFilePath").(string)
path := fmt.Sprintf("box/%s/version/%v/provider/%s/upload", box.Tag, version.Number, provider.Name) path := fmt.Sprintf("box/%s/version/%v/provider/%s/upload", box.Tag, version.Version, provider.Name)
upload := &Upload{} upload := &Upload{}
ui.Say(fmt.Sprintf("Preparing upload of box: %s", artifactFilePath)) ui.Say(fmt.Sprintf("Preparing upload of box: %s", artifactFilePath))
......
...@@ -24,7 +24,7 @@ func (s *stepReleaseVersion) Run(state multistep.StateBag) multistep.StepAction ...@@ -24,7 +24,7 @@ func (s *stepReleaseVersion) Run(state multistep.StateBag) multistep.StepAction
return multistep.ActionContinue return multistep.ActionContinue
} }
path := fmt.Sprintf("box/%s/version/%v/release", box.Tag, version.Number) path := fmt.Sprintf("box/%s/version/%v/release", box.Tag, version.Version)
resp, err := client.Put(path) resp, err := client.Put(path)
......
...@@ -19,7 +19,7 @@ func (s *stepVerifyUpload) Run(state multistep.StateBag) multistep.StepAction { ...@@ -19,7 +19,7 @@ func (s *stepVerifyUpload) Run(state multistep.StateBag) multistep.StepAction {
upload := state.Get("upload").(*Upload) upload := state.Get("upload").(*Upload)
provider := state.Get("provider").(*Provider) provider := state.Get("provider").(*Provider)
path := fmt.Sprintf("box/%s/version/%v/provider/%s", box.Tag, version.Number, provider.Name) path := fmt.Sprintf("box/%s/version/%v/provider/%s", box.Tag, version.Version, provider.Name)
providerCheck := &Provider{} providerCheck := &Provider{}
......