refactor: Reshuffle file structure for better code layout

This gets rid of the package called "server" and instead moves
everything into the project root, such that Go actually builds us a
binary called `nixery`.

This is the first step towards factoring out CLI-based functionality
for Nixery.
Vincent Ambo 2019-11-11 21:07:16 +00:00 committed by Vincent Ambo
parent df88da126a
commit 2b82f1b71a
21 changed files with 83 additions and 114 deletions


@@ -0,0 +1,113 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
package builder
// This file implements logic for walking through a directory and creating a
// tarball of it.
//
// The tarball is written straight to the supplied writer, which makes it
// possible to create an image layer from the specified store paths, hash it
// and upload it in one reading pass.
import (
"archive/tar"
"compress/gzip"
"crypto/sha256"
"fmt"
"io"
"os"
"path/filepath"
)
// Create a new compressed tarball from each of the paths in the list
// and write it to the supplied writer.
//
// The uncompressed tarball is hashed because image manifests must
// contain both the hashes of compressed and uncompressed layers.
func packStorePaths(l *layer, w io.Writer) (string, error) {
shasum := sha256.New()
gz := gzip.NewWriter(w)
multi := io.MultiWriter(shasum, gz)
t := tar.NewWriter(multi)
for _, path := range l.Contents {
err := filepath.Walk(path, tarStorePath(t))
if err != nil {
return "", err
}
}
if err := t.Close(); err != nil {
return "", err
}
if err := gz.Close(); err != nil {
return "", err
}
return fmt.Sprintf("sha256:%x", shasum.Sum([]byte{})), nil
}
func tarStorePath(w *tar.Writer) filepath.WalkFunc {
return func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// If the entry is not a symlink or regular file, skip it.
if info.Mode()&os.ModeSymlink == 0 && !info.Mode().IsRegular() {
return nil
}
		// The symlink target is read if this entry is a symlink, as it
		// is required when creating the file header.
var link string
if info.Mode()&os.ModeSymlink != 0 {
link, err = os.Readlink(path)
if err != nil {
return err
}
}
header, err := tar.FileInfoHeader(info, link)
if err != nil {
return err
}
// The name retrieved from os.FileInfo only contains the file's
// basename, but the full path is required within the layer
// tarball.
header.Name = path
if err = w.WriteHeader(header); err != nil {
return err
}
// At this point, return if no file content needs to be written
if !info.Mode().IsRegular() {
return nil
}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()

		if _, err := io.Copy(w, f); err != nil {
			return err
		}

		return nil
}
}
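A minimal usage sketch of the function above (hypothetical same-package code, not part of this commit; the output path and store path are made up):

func examplePackLayer() error {
	f, err := os.Create("/tmp/layer.tar.gz")
	if err != nil {
		return err
	}
	defer f.Close()

	// The layer type is defined in this package's layer-grouping code.
	l := layer{Contents: []string{"/nix/store/aaa-hello-2.10"}}

	tarhash, err := packStorePaths(&l, f)
	if err != nil {
		return err
	}

	fmt.Println(tarhash) // e.g. "sha256:ab12..."
	return nil
}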


@@ -0,0 +1,521 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
// Package builder implements the logic for assembling container
// images. It shells out to Nix to retrieve all required Nix-packages
// and assemble the symlink layer and then creates the required
// tarballs in-process.
package builder
import (
"bufio"
"bytes"
"compress/gzip"
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"sort"
"strings"
"github.com/google/nixery/config"
"github.com/google/nixery/manifest"
"github.com/google/nixery/storage"
log "github.com/sirupsen/logrus"
)
// The maximum number of layers in an image is 125. To allow for
// extensibility, the actual number of layers Nixery is "allowed" to
// use up is set at a lower point.
const LayerBudget int = 94
// State holds the runtime state that is carried around in Nixery and
// passed to builder functions.
type State struct {
Storage storage.Backend
Cache *LocalCache
Cfg config.Config
Pop Popularity
}
// Architecture represents the possible CPU architectures for which
// container images can be built.
//
// The default architecture is amd64, but support for ARM platforms is
// available within nixpkgs and can be toggled via meta-packages.
type Architecture struct {
// Name of the system tuple to pass to Nix
nixSystem string
// Name of the architecture as used in the OCI manifests
imageArch string
}
var amd64 = Architecture{"x86_64-linux", "amd64"}
var arm64 = Architecture{"aarch64-linux", "arm64"}
// Image represents the information necessary for building a container image.
// This can be either a list of package names (corresponding to keys in the
// nixpkgs set) or a Nix expression that results in a *list* of derivations.
type Image struct {
Name string
Tag string
// Names of packages to include in the image. These must correspond
// directly to top-level names of Nix packages in the nixpkgs tree.
Packages []string
// Architecture for which to build the image. Nixery defaults
// this to amd64 if not specified via meta-packages.
Arch *Architecture
}
// BuildResult represents the data returned from the server to the
// HTTP handlers. Error information is propagated straight from Nix
// for errors inside of the build that should be fed back to the
// client (such as missing packages).
type BuildResult struct {
Error string `json:"error"`
Pkgs []string `json:"pkgs"`
Manifest json.RawMessage `json:"manifest"`
}
// ImageFromName parses an image name into the corresponding structure which can
// be used to invoke Nix.
//
// It will expand convenience names under the hood (see the `metaPackages`
// function below) and append packages that are always included (cacert, iana-etc).
//
// Once assembled the image structure uses a sorted representation of
// the name. This is to avoid unnecessarily cache-busting images if
// only the order of requested packages has changed.
func ImageFromName(name string, tag string) Image {
pkgs := strings.Split(name, "/")
arch, expanded := metaPackages(pkgs)
expanded = append(expanded, "cacert", "iana-etc")
sort.Strings(pkgs)
sort.Strings(expanded)
return Image{
Name: strings.Join(pkgs, "/"),
Tag: tag,
Packages: expanded,
Arch: arch,
}
}
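// For illustration (hypothetical annotation, not part of the original
// source): ImageFromName("shell/htop", "latest") yields Name "htop/shell"
// and Packages ["bashInteractive", "cacert", "coreutils", "htop",
// "iana-etc", "moreutils", "nano"]; the tests in this commit exercise the
// same expansion.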
// ImageResult represents the output of calling the Nix derivation
// responsible for preparing an image.
type ImageResult struct {
// These fields are populated in case of an error
Error string `json:"error"`
Pkgs []string `json:"pkgs"`
// These fields are populated in case of success
Graph runtimeGraph `json:"runtimeGraph"`
SymlinkLayer struct {
Size int `json:"size"`
TarHash string `json:"tarHash"`
Path string `json:"path"`
} `json:"symlinkLayer"`
}
// metaPackages expands package names defined by Nixery which either
// include sets of packages or trigger certain image-building
// behaviour.
//
// Meta-packages must be specified as the first packages in an image
// name.
//
// Currently defined meta-packages are:
//
// * `shell`: Includes bash, coreutils and other common command-line tools
// * `arm64`: Causes Nixery to build images for the ARM64 architecture
func metaPackages(packages []string) (*Architecture, []string) {
arch := &amd64
var metapkgs []string
lastMeta := 0
for idx, p := range packages {
if p == "shell" || p == "arm64" {
metapkgs = append(metapkgs, p)
lastMeta = idx + 1
} else {
break
}
}
// Chop off the meta-packages from the front of the package
// list
packages = packages[lastMeta:]
for _, p := range metapkgs {
switch p {
case "shell":
packages = append(packages, "bashInteractive", "coreutils", "moreutils", "nano")
case "arm64":
arch = &arm64
}
}
return arch, packages
}
// logNix logs each output line from Nix. It runs in a goroutine per
// output channel that should be live-logged.
func logNix(image, cmd string, r io.ReadCloser) {
scanner := bufio.NewScanner(r)
for scanner.Scan() {
log.WithFields(log.Fields{
"image": image,
"cmd": cmd,
}).Info("[nix] " + scanner.Text())
}
}
func callNix(program, image string, args []string) ([]byte, error) {
cmd := exec.Command(program, args...)
outpipe, err := cmd.StdoutPipe()
if err != nil {
return nil, err
}
errpipe, err := cmd.StderrPipe()
if err != nil {
return nil, err
}
	go logNix(image, program, errpipe)
if err = cmd.Start(); err != nil {
log.WithError(err).WithFields(log.Fields{
"image": image,
"cmd": program,
}).Error("error invoking Nix")
return nil, err
}
log.WithFields(log.Fields{
"cmd": program,
"image": image,
}).Info("invoked Nix build")
stdout, _ := ioutil.ReadAll(outpipe)
if err = cmd.Wait(); err != nil {
log.WithError(err).WithFields(log.Fields{
"image": image,
"cmd": program,
"stdout": stdout,
}).Info("failed to invoke Nix")
return nil, err
}
resultFile := strings.TrimSpace(string(stdout))
buildOutput, err := ioutil.ReadFile(resultFile)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"image": image,
"file": resultFile,
}).Info("failed to read Nix result file")
return nil, err
}
return buildOutput, nil
}
// Call out to Nix and request metadata for the image to be built. All
// required store paths for the image will be realised, but layers
// will not yet be created from them.
//
// This function is only invoked if the manifest is not found in any
// cache.
func prepareImage(s *State, image *Image) (*ImageResult, error) {
packages, err := json.Marshal(image.Packages)
if err != nil {
return nil, err
}
srcType, srcArgs := s.Cfg.Pkgs.Render(image.Tag)
args := []string{
"--timeout", s.Cfg.Timeout,
"--argstr", "packages", string(packages),
"--argstr", "srcType", srcType,
"--argstr", "srcArgs", srcArgs,
"--argstr", "system", image.Arch.nixSystem,
}
output, err := callNix("nixery-prepare-image", image.Name, args)
if err != nil {
// granular error logging is performed in callNix already
return nil, err
}
log.WithFields(log.Fields{
"image": image.Name,
"tag": image.Tag,
}).Info("finished image preparation via Nix")
var result ImageResult
err = json.Unmarshal(output, &result)
if err != nil {
return nil, err
}
return &result, nil
}
// Groups layers and checks whether they are present in the cache
// already; missing layers are assembled in-process from the store
// paths prepared by Nix.
//
// Newly built layers are uploaded to the bucket. Cache entries are
// added only after successful uploads, which guarantees that entries
// retrieved from the cache are present in the bucket.
func prepareLayers(ctx context.Context, s *State, image *Image, result *ImageResult) ([]manifest.Entry, error) {
grouped := groupLayers(&result.Graph, &s.Pop, LayerBudget)
var entries []manifest.Entry
// Splits the layers into those which are already present in
// the cache, and those that are missing.
//
// Missing layers are built and uploaded to the storage
// bucket.
for _, l := range grouped {
if entry, cached := layerFromCache(ctx, s, l.Hash()); cached {
entries = append(entries, *entry)
} else {
lh := l.Hash()
// While packing store paths, the SHA sum of
// the uncompressed layer is computed and
// written to `tarhash`.
//
// TODO(tazjin): Refactor this to make the
// flow of data cleaner.
var tarhash string
lw := func(w io.Writer) error {
var err error
tarhash, err = packStorePaths(&l, w)
return err
}
entry, err := uploadHashLayer(ctx, s, lh, lw)
if err != nil {
return nil, err
}
entry.MergeRating = l.MergeRating
entry.TarHash = tarhash
var pkgs []string
for _, p := range l.Contents {
pkgs = append(pkgs, packageFromPath(p))
}
log.WithFields(log.Fields{
"layer": lh,
"packages": pkgs,
"tarhash": tarhash,
}).Info("created image layer")
go cacheLayer(ctx, s, l.Hash(), *entry)
entries = append(entries, *entry)
}
}
// Symlink layer (built in the first Nix build) needs to be
// included here manually:
slkey := result.SymlinkLayer.TarHash
entry, err := uploadHashLayer(ctx, s, slkey, func(w io.Writer) error {
f, err := os.Open(result.SymlinkLayer.Path)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"image": image.Name,
"tag": image.Tag,
"layer": slkey,
}).Error("failed to open symlink layer")
return err
}
defer f.Close()
gz := gzip.NewWriter(w)
_, err = io.Copy(gz, f)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"image": image.Name,
"tag": image.Tag,
"layer": slkey,
}).Error("failed to upload symlink layer")
return err
}
return gz.Close()
})
if err != nil {
return nil, err
}
entry.TarHash = "sha256:" + result.SymlinkLayer.TarHash
go cacheLayer(ctx, s, slkey, *entry)
entries = append(entries, *entry)
return entries, nil
}
// layerWriter is the type for functions that can write a layer to the
// multiwriter used for uploading & hashing.
//
// This type exists to avoid duplication between the handling of
// symlink layers and store path layers.
type layerWriter func(w io.Writer) error
// byteCounter is a special io.Writer that counts all bytes written to
// it and does nothing else.
//
// This is required because the ad-hoc writing of tarballs leaves no
// single place to count the final tarball size otherwise.
type byteCounter struct {
count int64
}
func (b *byteCounter) Write(p []byte) (n int, err error) {
b.count += int64(len(p))
return len(p), nil
}
// Upload a layer tarball to the storage bucket, while hashing it at
// the same time. The supplied function is expected to provide the
// layer data to the writer.
//
// The initial upload is performed in a 'staging' folder, as the
// SHA256-hash is not yet available when the upload is initiated.
//
// After a successful upload, the file is moved to its final location
// in the bucket and the build cache is populated.
//
// The returned manifest entry contains the layer's SHA256 digest and
// size, which are used in the image manifest.
func uploadHashLayer(ctx context.Context, s *State, key string, lw layerWriter) (*manifest.Entry, error) {
path := "staging/" + key
sha256sum, size, err := s.Storage.Persist(ctx, path, func(sw io.Writer) (string, int64, error) {
// Sets up a "multiwriter" that simultaneously runs both hash
// algorithms and uploads to the storage backend.
shasum := sha256.New()
counter := &byteCounter{}
multi := io.MultiWriter(sw, shasum, counter)
err := lw(multi)
sha256sum := fmt.Sprintf("%x", shasum.Sum([]byte{}))
return sha256sum, counter.count, err
})
if err != nil {
log.WithError(err).WithFields(log.Fields{
"layer": key,
"backend": s.Storage.Name(),
}).Error("failed to create and store layer")
return nil, err
}
// Hashes are now known and the object is in the bucket, what
// remains is to move it to the correct location and cache it.
err = s.Storage.Move(ctx, "staging/"+key, "layers/"+sha256sum)
if err != nil {
log.WithError(err).WithField("layer", key).
Error("failed to move layer from staging")
return nil, err
}
log.WithFields(log.Fields{
"layer": key,
"sha256": sha256sum,
"size": size,
}).Info("created and persisted layer")
entry := manifest.Entry{
Digest: "sha256:" + sha256sum,
Size: size,
}
return &entry, nil
}
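// Illustrative sketch (hypothetical, not part of the original source):
// the smallest useful layerWriter streams a fixed byte slice, which
// uploadHashLayer then hashes, counts and persists:
//
//	lw := func(w io.Writer) error {
//		_, err := w.Write([]byte("hello"))
//		return err
//	}
//	entry, err := uploadHashLayer(ctx, s, "some-layer-key", lw)

// BuildImage builds the requested image: it returns a cached manifest if
// one exists, otherwise it prepares the image via Nix, assembles and
// uploads its layers, uploads the image configuration and caches the
// resulting manifest.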
func BuildImage(ctx context.Context, s *State, image *Image) (*BuildResult, error) {
key := s.Cfg.Pkgs.CacheKey(image.Packages, image.Tag)
if key != "" {
if m, c := manifestFromCache(ctx, s, key); c {
return &BuildResult{
Manifest: m,
}, nil
}
}
imageResult, err := prepareImage(s, image)
if err != nil {
return nil, err
}
if imageResult.Error != "" {
return &BuildResult{
Error: imageResult.Error,
Pkgs: imageResult.Pkgs,
}, nil
}
layers, err := prepareLayers(ctx, s, image, imageResult)
if err != nil {
return nil, err
}
m, c := manifest.Manifest(image.Arch.imageArch, layers)
lw := func(w io.Writer) error {
r := bytes.NewReader(c.Config)
_, err := io.Copy(w, r)
return err
}
if _, err = uploadHashLayer(ctx, s, c.SHA256, lw); err != nil {
log.WithError(err).WithFields(log.Fields{
"image": image.Name,
"tag": image.Tag,
}).Error("failed to upload config")
return nil, err
}
if key != "" {
go cacheManifest(ctx, s, key, m)
}
result := BuildResult{
Manifest: m,
}
return &result, nil
}
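A hedged usage sketch of the entry point above (hypothetical caller; in this commit the HTTP handlers construct State from real configuration and a storage backend):

func buildExample(ctx context.Context, s *State) error {
	image := ImageFromName("shell/htop", "latest")

	result, err := BuildImage(ctx, s, &image)
	if err != nil {
		return err
	}
	if result.Error != "" {
		// Nix-level build failures (e.g. unknown packages) are
		// reported in-band rather than as Go errors.
		return fmt.Errorf("build failed: %s (%v)", result.Error, result.Pkgs)
	}

	fmt.Printf("manifest size: %d bytes\n", len(result.Manifest))
	return nil
}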


@@ -0,0 +1,123 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
package builder
import (
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"testing"
)
var ignoreArch = cmpopts.IgnoreFields(Image{}, "Arch")
func TestImageFromNameSimple(t *testing.T) {
image := ImageFromName("hello", "latest")
expected := Image{
Name: "hello",
Tag: "latest",
Packages: []string{
"cacert",
"hello",
"iana-etc",
},
}
if diff := cmp.Diff(expected, image, ignoreArch); diff != "" {
t.Fatalf("Image(\"hello\", \"latest\") mismatch:\n%s", diff)
}
}
func TestImageFromNameMultiple(t *testing.T) {
image := ImageFromName("hello/git/htop", "latest")
expected := Image{
Name: "git/hello/htop",
Tag: "latest",
Packages: []string{
"cacert",
"git",
"hello",
"htop",
"iana-etc",
},
}
if diff := cmp.Diff(expected, image, ignoreArch); diff != "" {
t.Fatalf("Image(\"hello/git/htop\", \"latest\") mismatch:\n%s", diff)
}
}
func TestImageFromNameShell(t *testing.T) {
image := ImageFromName("shell", "latest")
expected := Image{
Name: "shell",
Tag: "latest",
Packages: []string{
"bashInteractive",
"cacert",
"coreutils",
"iana-etc",
"moreutils",
"nano",
},
}
if diff := cmp.Diff(expected, image, ignoreArch); diff != "" {
t.Fatalf("Image(\"shell\", \"latest\") mismatch:\n%s", diff)
}
}
func TestImageFromNameShellMultiple(t *testing.T) {
image := ImageFromName("shell/htop", "latest")
expected := Image{
Name: "htop/shell",
Tag: "latest",
Packages: []string{
"bashInteractive",
"cacert",
"coreutils",
"htop",
"iana-etc",
"moreutils",
"nano",
},
}
if diff := cmp.Diff(expected, image, ignoreArch); diff != "" {
t.Fatalf("Image(\"shell/htop\", \"latest\") mismatch:\n%s", diff)
}
}
func TestImageFromNameShellArm64(t *testing.T) {
image := ImageFromName("shell/arm64", "latest")
expected := Image{
Name: "arm64/shell",
Tag: "latest",
Packages: []string{
"bashInteractive",
"cacert",
"coreutils",
"iana-etc",
"moreutils",
"nano",
},
}
if diff := cmp.Diff(expected, image, ignoreArch); diff != "" {
t.Fatalf("Image(\"shell/arm64\", \"latest\") mismatch:\n%s", diff)
}
if image.Arch.imageArch != "arm64" {
t.Fatal("Image(\"shell/arm64\"): Expected arch arm64")
}
}


@@ -0,0 +1,236 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
package builder
import (
"bytes"
"context"
"encoding/json"
"io"
"io/ioutil"
"os"
"sync"
"github.com/google/nixery/manifest"
log "github.com/sirupsen/logrus"
)
// LocalCache implements the structure used for local caching of
// manifests and layer uploads.
type LocalCache struct {
// Manifest cache
mmtx sync.RWMutex
mdir string
// Layer cache
lmtx sync.RWMutex
lcache map[string]manifest.Entry
}
// Creates an in-memory cache and ensures that the local file path for
// manifest caching exists.
func NewCache() (LocalCache, error) {
path := os.TempDir() + "/nixery"
err := os.MkdirAll(path, 0755)
if err != nil {
return LocalCache{}, err
}
return LocalCache{
mdir: path + "/",
lcache: make(map[string]manifest.Entry),
}, nil
}
// Retrieve a cached manifest if the build is cacheable and it exists.
func (c *LocalCache) manifestFromLocalCache(key string) (json.RawMessage, bool) {
c.mmtx.RLock()
defer c.mmtx.RUnlock()
f, err := os.Open(c.mdir + key)
if err != nil {
// This is a debug log statement because failure to
// read the manifest key is currently expected if it
// is not cached.
log.WithError(err).WithField("manifest", key).
Debug("failed to read manifest from local cache")
return nil, false
}
defer f.Close()
m, err := ioutil.ReadAll(f)
if err != nil {
log.WithError(err).WithField("manifest", key).
Error("failed to read manifest from local cache")
return nil, false
}
return json.RawMessage(m), true
}
// Adds the result of a manifest build to the local cache, if the
// manifest is considered cacheable.
//
// Manifests can be quite large and are cached on disk instead of in
// memory.
func (c *LocalCache) localCacheManifest(key string, m json.RawMessage) {
c.mmtx.Lock()
defer c.mmtx.Unlock()
err := ioutil.WriteFile(c.mdir+key, []byte(m), 0644)
if err != nil {
log.WithError(err).WithField("manifest", key).
Error("failed to locally cache manifest")
}
}
// Retrieve a layer build from the local cache.
func (c *LocalCache) layerFromLocalCache(key string) (*manifest.Entry, bool) {
c.lmtx.RLock()
e, ok := c.lcache[key]
c.lmtx.RUnlock()
return &e, ok
}
// Add a layer build result to the local cache.
func (c *LocalCache) localCacheLayer(key string, e manifest.Entry) {
c.lmtx.Lock()
c.lcache[key] = e
c.lmtx.Unlock()
}
// Retrieve a manifest from the cache(s). First the local cache is
// checked, then the storage backend.
func manifestFromCache(ctx context.Context, s *State, key string) (json.RawMessage, bool) {
if m, cached := s.Cache.manifestFromLocalCache(key); cached {
return m, true
}
r, err := s.Storage.Fetch(ctx, "manifests/"+key)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"manifest": key,
"backend": s.Storage.Name(),
}).Error("failed to fetch manifest from cache")
return nil, false
}
defer r.Close()
m, err := ioutil.ReadAll(r)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"manifest": key,
"backend": s.Storage.Name(),
}).Error("failed to read cached manifest from storage backend")
return nil, false
}
go s.Cache.localCacheManifest(key, m)
log.WithField("manifest", key).Info("retrieved manifest from GCS")
return json.RawMessage(m), true
}
// Add a manifest to the bucket & local caches
func cacheManifest(ctx context.Context, s *State, key string, m json.RawMessage) {
go s.Cache.localCacheManifest(key, m)
path := "manifests/" + key
_, size, err := s.Storage.Persist(ctx, path, func(w io.Writer) (string, int64, error) {
size, err := io.Copy(w, bytes.NewReader([]byte(m)))
return "", size, err
})
if err != nil {
log.WithError(err).WithFields(log.Fields{
"manifest": key,
"backend": s.Storage.Name(),
}).Error("failed to cache manifest to storage backend")
return
}
log.WithFields(log.Fields{
"manifest": key,
"size": size,
"backend": s.Storage.Name(),
}).Info("cached manifest to storage backend")
}
// Retrieve a layer build from the cache, first checking the local
// cache followed by the bucket cache.
func layerFromCache(ctx context.Context, s *State, key string) (*manifest.Entry, bool) {
if entry, cached := s.Cache.layerFromLocalCache(key); cached {
return entry, true
}
r, err := s.Storage.Fetch(ctx, "builds/"+key)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"layer": key,
"backend": s.Storage.Name(),
}).Debug("failed to retrieve cached layer from storage backend")
return nil, false
}
defer r.Close()
jb := bytes.NewBuffer([]byte{})
_, err = io.Copy(jb, r)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"layer": key,
"backend": s.Storage.Name(),
}).Error("failed to read cached layer from storage backend")
return nil, false
}
var entry manifest.Entry
err = json.Unmarshal(jb.Bytes(), &entry)
if err != nil {
log.WithError(err).WithField("layer", key).
Error("failed to unmarshal cached layer")
return nil, false
}
go s.Cache.localCacheLayer(key, entry)
return &entry, true
}
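// cacheLayer adds a layer build result to the local cache and writes it
// through to the storage backend.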
func cacheLayer(ctx context.Context, s *State, key string, entry manifest.Entry) {
s.Cache.localCacheLayer(key, entry)
j, _ := json.Marshal(&entry)
path := "builds/" + key
_, _, err := s.Storage.Persist(ctx, path, func(w io.Writer) (string, int64, error) {
size, err := io.Copy(w, bytes.NewReader(j))
return "", size, err
})
if err != nil {
log.WithError(err).WithFields(log.Fields{
"layer": key,
"backend": s.Storage.Name(),
}).Error("failed to cache layer")
}
}
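Taken together, the upload and cache paths above imply the following layout in the storage bucket:

    staging/<key>      in-flight layer uploads, moved once the SHA256 is known
    layers/<sha256>    content-addressed layer tarballs
    manifests/<key>    cached image manifests
    builds/<hash>      cached manifest entries (JSON), keyed by layer hash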


@@ -0,0 +1,364 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
// This package reads an export reference graph (i.e. a graph representing the
// runtime dependencies of a set of derivations) created by Nix and groups it in
// a way that is likely to match the grouping for other derivation sets with
// overlapping dependencies.
//
// This is used to determine which derivations to include in which layers of a
// container image.
//
// # Inputs
//
// * a graph of Nix runtime dependencies, generated via exportReferenceGraph
// * popularity values of each package in the Nix package set (in the form of a
// direct reference count)
// * a maximum number of layers to allocate for the image (the "layer budget")
//
// # Algorithm
//
// It works by first creating a (directed) dependency tree:
//
// img (root node)
// │
// ├───> A ─────┐
// │ v
// ├───> B ───> E
// │ ^
// ├───> C ─────┘
// │ │
// │ v
// └───> D ───> F
// │
// └────> G
//
// Each node (i.e. package) is then visited to determine how important
// it is to separate this node into its own layer, specifically:
//
// 1. Is the node within a certain threshold percentile of absolute
// popularity within all of nixpkgs? (e.g. `glibc`, `openssl`)
//
// 2. Is the node's runtime closure above a threshold size? (e.g. 100MB)
//
// In either case, a bit is flipped for this node representing each
// condition and an edge to it is inserted directly from the image
// root, if it does not already exist.
//
// For the rest of the example we assume 'G' is above the threshold
// size and 'E' is popular.
//
// This tree is then transformed into a dominator tree:
//
// img
// │
// ├───> A
// ├───> B
// ├───> C
// ├───> E
// ├───> D ───> F
// └───> G
//
// Specifically this means that the paths to A, B, C, E, G, and D
// always pass through the root (i.e. are dominated by it), whilst F
// is dominated by D (all paths go through it).
//
// The top-level subtrees are considered as the initially selected
// layers.
//
// If the list of layers fits within the layer budget, it is returned.
//
// Otherwise, a merge rating is calculated for each layer. This is the
// product of the layer's total size and its root node's popularity.
//
// Layers are then merged in ascending order of merge ratings until
// they fit into the layer budget.
//
// # Threshold values
//
// Threshold values for the partitioning conditions mentioned above
// have not yet been determined, but we will make a good first guess
// based on gut feeling and proceed to measure their impact on cache
// hits/misses.
//
// # Example
//
// Using the logic described above as well as the example presented in
// the introduction, this program would create the following layer
// groupings (assuming no additional partitioning):
//
// Layer budget: 1
// Layers: { A, B, C, D, E, F, G }
//
// Layer budget: 2
// Layers: { G }, { A, B, C, D, E, F }
//
// Layer budget: 3
// Layers: { G }, { E }, { A, B, C, D, F }
//
// Layer budget: 4
// Layers: { G }, { E }, { D, F }, { A, B, C }
//
// ...
//
// Layer budget: 10
// Layers: { E }, { D, F }, { A }, { B }, { C }
package builder
import (
"crypto/sha1"
"fmt"
"regexp"
"sort"
"strings"
log "github.com/sirupsen/logrus"
"gonum.org/v1/gonum/graph/flow"
"gonum.org/v1/gonum/graph/simple"
)
// runtimeGraph represents structured information from Nix about the runtime
// dependencies of a derivation.
//
// This is generated in Nix by using the exportReferencesGraph feature.
type runtimeGraph struct {
References struct {
Graph []string `json:"graph"`
} `json:"exportReferencesGraph"`
Graph []struct {
Size uint64 `json:"closureSize"`
Path string `json:"path"`
Refs []string `json:"references"`
} `json:"graph"`
}
// Popularity data for each Nix package that was calculated in advance.
//
// Popularity is the number of direct references to a package within
// the nixpkgs tree, as assembled by Nixery's popcount tool; more
// popular packages are more likely to be separated into their own
// layers.
type Popularity = map[string]int
// layer represents the data for each layer that should be built for
// the container image.
type layer struct {
Contents []string `json:"contents"`
MergeRating uint64
}
// Hash the contents of a layer to create a deterministic identifier that can be
// used for caching.
func (l *layer) Hash() string {
sum := sha1.Sum([]byte(strings.Join(l.Contents, ":")))
return fmt.Sprintf("%x", sum)
}
func (a layer) merge(b layer) layer {
a.Contents = append(a.Contents, b.Contents...)
a.MergeRating += b.MergeRating
return a
}
// closure represents a Nix store path and its metadata, as pointed to
// by the graph nodes.
type closure struct {
GraphID int64
Path string
Size uint64
Refs []string
Popularity int
}
func (c *closure) ID() int64 {
return c.GraphID
}
var nixRegexp = regexp.MustCompile(`^/nix/store/[a-z0-9]+-`)
// packageFromPath returns the name of a Nix package based on its
// output store path.
func packageFromPath(path string) string {
return nixRegexp.ReplaceAllString(path, "")
}
// DOTID provides a human-readable package name. The name stems from
// the dot format used by GraphViz, into which the dependency graph
// can be rendered.
func (c *closure) DOTID() string {
return packageFromPath(c.Path)
}
// bigOrPopular checks whether this closure should be considered for
// separation into its own layer, even if it would otherwise only
// appear in a subtree of the dominator tree.
func (c *closure) bigOrPopular() bool {
const sizeThreshold = 100 * 1000000 // 100MB
if c.Size > sizeThreshold {
return true
}
// Threshold value is picked arbitrarily right now. The reason
// for this is that some packages (such as `cacert`) have very
// few direct dependencies, but are required by pretty much
// everything.
if c.Popularity >= 100 {
return true
}
return false
}
func insertEdges(graph *simple.DirectedGraph, cmap map[string]*closure, node *closure) {
// Big or popular nodes get a separate edge from the top to
// flag them for their own layer.
if node.bigOrPopular() && !graph.HasEdgeFromTo(0, node.ID()) {
edge := graph.NewEdge(graph.Node(0), node)
graph.SetEdge(edge)
}
for _, c := range node.Refs {
// Nix adds a self reference to each node, which
// should not be inserted.
if c != node.Path {
			edge := graph.NewEdge(node, cmap[c])
graph.SetEdge(edge)
}
}
}
// Create a graph structure from the references supplied by Nix.
func buildGraph(refs *runtimeGraph, pop *Popularity) *simple.DirectedGraph {
cmap := make(map[string]*closure)
graph := simple.NewDirectedGraph()
// Insert all closures into the graph, as well as a fake root
// closure which serves as the top of the tree.
//
// A map from store paths to IDs is kept to actually insert
// edges below.
root := &closure{
GraphID: 0,
Path: "image_root",
}
graph.AddNode(root)
for idx, c := range refs.Graph {
node := &closure{
GraphID: int64(idx + 1), // inc because of root node
Path: c.Path,
Size: c.Size,
Refs: c.Refs,
}
// The packages `nss-cacert` and `iana-etc` are added
// by Nixery to *every single image* and should have a
// very high popularity.
//
// Other popularity values are populated from the data
// set assembled by Nixery's popcount.
id := node.DOTID()
if strings.HasPrefix(id, "nss-cacert") || strings.HasPrefix(id, "iana-etc") {
// glibc has ~300k references, these packages need *more*
node.Popularity = 500000
} else if p, ok := (*pop)[id]; ok {
node.Popularity = p
} else {
node.Popularity = 1
}
graph.AddNode(node)
cmap[c.Path] = node
}
// Insert the top-level closures with edges from the root
// node, then insert all edges for each closure.
for _, p := range refs.References.Graph {
edge := graph.NewEdge(root, cmap[p])
graph.SetEdge(edge)
}
for _, c := range cmap {
insertEdges(graph, &cmap, c)
}
return graph
}
// Extracts a subgraph starting at the specified root from the
// dominator tree. The subgraph is converted into a flat list of
// layers, each containing the store paths and merge rating.
func groupLayer(dt *flow.DominatorTree, root *closure) layer {
size := root.Size
contents := []string{root.Path}
children := dt.DominatedBy(root.ID())
// This iteration does not use 'range' because the list being
// iterated is modified during the iteration (yes, I'm sorry).
for i := 0; i < len(children); i++ {
child := children[i].(*closure)
size += child.Size
contents = append(contents, child.Path)
children = append(children, dt.DominatedBy(child.ID())...)
}
// Contents are sorted to ensure that hashing is consistent
sort.Strings(contents)
return layer{
Contents: contents,
MergeRating: uint64(root.Popularity) * size,
}
}
// Calculate the dominator tree of the entire package set and group
// each top-level subtree into a layer.
//
// Layers are merged together until they fit into the layer budget,
// based on their merge rating.
func dominate(budget int, graph *simple.DirectedGraph) []layer {
dt := flow.Dominators(graph.Node(0), graph)
var layers []layer
for _, n := range dt.DominatedBy(dt.Root().ID()) {
layers = append(layers, groupLayer(&dt, n.(*closure)))
}
sort.Slice(layers, func(i, j int) bool {
return layers[i].MergeRating < layers[j].MergeRating
})
if len(layers) > budget {
log.WithFields(log.Fields{
"layers": len(layers),
"budget": budget,
}).Info("ideal image exceeds layer budget")
}
for len(layers) > budget {
merged := layers[0].merge(layers[1])
layers[1] = merged
layers = layers[1:]
}
return layers
}
// groupLayers applies the algorithm described above to its input and returns a
// list of layers, each consisting of a list of Nix store paths that it should
// contain.
func groupLayers(refs *runtimeGraph, pop *Popularity, budget int) []layer {
graph := buildGraph(refs, pop)
return dominate(budget, graph)
}
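To close the loop, a hedged sketch of feeding a hand-written reference graph through groupLayers (hypothetical data; in Nixery the JSON below is produced by Nix's exportReferencesGraph, and encoding/json is assumed to be imported):

func exampleGrouping() error {
	data := []byte(`{
		"exportReferencesGraph": {"graph": ["/nix/store/aaa-hello-2.10"]},
		"graph": [{
			"closureSize": 1000,
			"path": "/nix/store/aaa-hello-2.10",
			"references": ["/nix/store/aaa-hello-2.10"]
		}]
	}`)

	var refs runtimeGraph
	if err := json.Unmarshal(data, &refs); err != nil {
		return err
	}

	// Popularity keys are package names as returned by packageFromPath.
	pop := Popularity{"hello-2.10": 5}

	for _, l := range groupLayers(&refs, &pop, LayerBudget) {
		fmt.Println(l.Hash(), l.Contents)
	}
	return nil
}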