feat(tvix/nar-bridge): init
This provides a Nix HTTP Binary Cache interface in front of a tvix-store that's reachable via gRPC. TODOs: - remove import command, move serve up to toplevel. We have nix-copy-closure and tvix-store commands. - loop into CI. We should be able to fetch the protos as a third-party dependency. - Check if we can test nar-bridge slightly easier in an integration test. - Ensure we support connecting to unix sockets and grpc+http at least, using the same syntax as tvix-store. - Don't buffer the entire blob when rendering NAR Co-Authored-By: Connor Brewster <cbrewster@hey.com> Co-Authored-By: Márton Boros <martonboros@gmail.com> Co-Authored-By: Vo Minh Thu <noteed@gmail.com> Change-Id: I6064474e49dfe78cea67676957462d9f28658d4a Reviewed-on: https://cl.tvl.fyi/c/depot/+/9339 Tested-by: BuildkiteCI Reviewed-by: tazjin <tazjin@tvl.su>
This commit is contained in:
parent
683d3e0d2d
commit
0ecd10bf30
27 changed files with 2663 additions and 0 deletions
66
tvix/nar-bridge/pkg/reader/hashers.go
Normal file
66
tvix/nar-bridge/pkg/reader/hashers.go
Normal file
|
|
@ -0,0 +1,66 @@
|
|||
package reader
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Compile-time check that *Hasher satisfies io.Reader.
var _ io.Reader = &Hasher{}

// Hasher wraps io.Reader.
// You can ask it for the digest of the hash function used internally, and the
// number of bytes written.
type Hasher struct {
	r io.Reader
	h hash.Hash
	// bytesRead counts the number of bytes successfully read through Read.
	// NOTE(review): uint32 overflows for streams larger than 4GiB — confirm
	// callers never feed larger NARs through this.
	bytesRead uint32
}

// NewHasher returns a Hasher reading from r, feeding everything read through
// it into the hash function h.
func NewHasher(r io.Reader, h hash.Hash) *Hasher {
	return &Hasher{
		r:         r,
		h:         h,
		bytesRead: 0,
	}
}

// Read implements io.Reader. It reads from the underlying reader, writes
// whatever was read into the internal hash function, and keeps track of the
// number of bytes seen so far.
func (h *Hasher) Read(p []byte) (int, error) {
	nRead, rdErr := h.r.Read(p)

	// write the number of bytes read from the reader to the hash.
	// We need to do this independently on whether there's been error.
	// nRead always describes the number of successfully read bytes.
	nHash, hashErr := h.h.Write(p[0:nRead])
	if hashErr != nil {
		// hash.Hash.Write is documented to never return an error,
		// but guard against it anyway.
		return nRead, fmt.Errorf("unable to write to hash: %w", hashErr)
	}

	// We assume here the hash function accepts the whole p in one Go,
	// and doesn't early-return on the Write.
	// We compare it with nRead and bail out if that was not the case.
	if nHash != nRead {
		return nRead, fmt.Errorf("hash didn't accept the full write")
	}

	// update bytesRead
	h.bytesRead += uint32(nRead)

	if rdErr != nil {
		if errors.Is(rdErr, io.EOF) {
			// return EOF unwrapped, so callers can detect end-of-stream
			// with errors.Is(err, io.EOF).
			return nRead, rdErr
		}
		return nRead, fmt.Errorf("error from underlying reader: %w", rdErr)
	}

	// hashErr is always nil at this point (checked above), so return nil
	// explicitly instead of the previously-returned (and misleading) hashErr.
	return nRead, nil
}

// BytesWritten returns the number of bytes written to the hash function,
// i.e. the number of bytes read through Read so far.
func (h *Hasher) BytesWritten() uint32 {
	return h.bytesRead
}

// Sum appends the current digest of the internal hash function to b and
// returns the resulting slice. It does not change the underlying hash state.
func (h *Hasher) Sum(b []byte) []byte {
	return h.h.Sum(b)
}
|
||||
264
tvix/nar-bridge/pkg/reader/reader.go
Normal file
264
tvix/nar-bridge/pkg/reader/reader.go
Normal file
|
|
@ -0,0 +1,264 @@
|
|||
package reader
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
storev1pb "code.tvl.fyi/tvix/store/protos"
|
||||
"github.com/nix-community/go-nix/pkg/nar"
|
||||
"lukechampine.com/blake3"
|
||||
)
|
||||
|
||||
// Reader imports a NAR stream into store nodes.
// It wraps the stream in a Hasher (sha256, set up in New), so the NAR size
// and sha256 digest can be reported once the stream has been consumed.
type Reader struct {
	// hrSha256 hashes everything read from the underlying NAR stream.
	hrSha256 *Hasher
}
|
||||
|
||||
// An item on the directories stack
type item struct {
	// path is the absolute path of the directory inside the NAR (e.g. "/share/man").
	path string
	// directory is the Directory node being populated while we're inside it.
	directory *storev1pb.Directory
}
|
||||
|
||||
func New(r io.Reader) *Reader {
|
||||
// Instead of using the underlying reader itself, wrap the reader
|
||||
// with a hasher calculating sha256 and one calculating sha512,
|
||||
// and feed that one into the NAR reader.
|
||||
hrSha256 := NewHasher(r, sha256.New())
|
||||
|
||||
return &Reader{
|
||||
hrSha256: hrSha256,
|
||||
}
|
||||
}
|
||||
|
||||
// Import reads from the internally-wrapped reader,
|
||||
// and calls the callback functions whenever regular file contents are
|
||||
// encountered, or a Directory node is about to be finished.
|
||||
func (r *Reader) Import(
|
||||
ctx context.Context,
|
||||
// callback function called with each regular file content
|
||||
fileCb func(fileReader io.Reader) error,
|
||||
// callback function called with each finalized directory node
|
||||
directoryCb func(directory *storev1pb.Directory) error,
|
||||
) (*storev1pb.PathInfo, error) {
|
||||
|
||||
// construct a NAR reader, by reading through hrSha256
|
||||
narReader, err := nar.NewReader(r.hrSha256)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to instantiate nar reader: %w", err)
|
||||
}
|
||||
defer narReader.Close()
|
||||
|
||||
// If we store a symlink or regular file at the root, these are not nil.
|
||||
// If they are nil, we instead have a stackDirectory.
|
||||
var rootSymlink *storev1pb.SymlinkNode
|
||||
var rootFile *storev1pb.FileNode
|
||||
var stackDirectory *storev1pb.Directory
|
||||
|
||||
var stack = []item{}
|
||||
|
||||
// popFromStack is used when we transition to a different directory or
|
||||
// drain the stack when we reach the end of the NAR.
|
||||
// It adds the popped element to the element underneath if any,
|
||||
// and passes it to the directoryCb callback.
|
||||
// This function may only be called if the stack is not already empty.
|
||||
popFromStack := func() error {
|
||||
// Keep the top item, and "resize" the stack slice.
|
||||
// This will only make the last element unaccessible, but chances are high
|
||||
// we're re-using that space anyways.
|
||||
toPop := stack[len(stack)-1]
|
||||
stack = stack[:len(stack)-1]
|
||||
|
||||
// if there's still a parent left on the stack, refer to it from there.
|
||||
if len(stack) > 0 {
|
||||
dgst, err := toPop.directory.Digest()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to calculate directory digest: %w", err)
|
||||
}
|
||||
|
||||
topOfStack := stack[len(stack)-1].directory
|
||||
topOfStack.Directories = append(topOfStack.Directories, &storev1pb.DirectoryNode{
|
||||
Name: []byte(path.Base(toPop.path)),
|
||||
Digest: dgst,
|
||||
Size: toPop.directory.Size(),
|
||||
})
|
||||
}
|
||||
// call the directoryCb
|
||||
if err := directoryCb(toPop.directory); err != nil {
|
||||
return fmt.Errorf("failed calling directoryCb: %w", err)
|
||||
}
|
||||
// Keep track that we have encounter at least one directory
|
||||
stackDirectory = toPop.directory
|
||||
return nil
|
||||
}
|
||||
|
||||
// Assemble a PathInfo struct, the Node is populated later.
|
||||
assemblePathInfo := func() *storev1pb.PathInfo {
|
||||
return &storev1pb.PathInfo{
|
||||
Node: nil,
|
||||
References: [][]byte{},
|
||||
Narinfo: &storev1pb.NARInfo{
|
||||
NarSize: uint64(r.hrSha256.BytesWritten()),
|
||||
NarSha256: r.hrSha256.Sum(nil),
|
||||
Signatures: []*storev1pb.NARInfo_Signature{},
|
||||
ReferenceNames: []string{},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
getBasename := func(p string) string {
|
||||
// extract the basename. In case of "/", replace with empty string.
|
||||
basename := path.Base(p)
|
||||
if basename == "/" {
|
||||
basename = ""
|
||||
}
|
||||
return basename
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
default:
|
||||
// call narReader.Next() to get the next element
|
||||
hdr, err := narReader.Next()
|
||||
|
||||
// If this returns an error, it's either EOF (when we're done reading from the NAR),
|
||||
// or another error
|
||||
if err != nil {
|
||||
// if this returns no EOF, bail out
|
||||
if !errors.Is(err, io.EOF) {
|
||||
return nil, fmt.Errorf("failed getting next nar element: %w", err)
|
||||
}
|
||||
|
||||
// The NAR has been read all the way to the end…
|
||||
// Make sure we close the nar reader, which might read some final trailers.
|
||||
if err := narReader.Close(); err != nil {
|
||||
return nil, fmt.Errorf("unable to close nar reader: %w", err)
|
||||
}
|
||||
|
||||
// Check the stack. While it's not empty, we need to pop things off the stack.
|
||||
for len(stack) > 0 {
|
||||
err := popFromStack()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to pop from stack: %w", err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Stack is empty. We now either have a regular or symlink root node, or we encountered at least one directory.
|
||||
// assemble pathInfo with these and return.
|
||||
pi := assemblePathInfo()
|
||||
if rootFile != nil {
|
||||
pi.Node = &storev1pb.Node{
|
||||
Node: &storev1pb.Node_File{
|
||||
File: rootFile,
|
||||
},
|
||||
}
|
||||
}
|
||||
if rootSymlink != nil {
|
||||
pi.Node = &storev1pb.Node{
|
||||
Node: &storev1pb.Node_Symlink{
|
||||
Symlink: rootSymlink,
|
||||
},
|
||||
}
|
||||
}
|
||||
if stackDirectory != nil {
|
||||
// calculate directory digest (i.e. after we received all its contents)
|
||||
dgst, err := stackDirectory.Digest()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to calculate root directory digest: %w", err)
|
||||
}
|
||||
|
||||
pi.Node = &storev1pb.Node{
|
||||
Node: &storev1pb.Node_Directory{
|
||||
Directory: &storev1pb.DirectoryNode{
|
||||
Name: []byte{},
|
||||
Digest: dgst,
|
||||
Size: stackDirectory.Size(),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
return pi, nil
|
||||
}
|
||||
|
||||
// Check for valid path transitions, pop from stack if needed
|
||||
// The nar reader already gives us some guarantees about ordering and illegal transitions,
|
||||
// So we really only need to check if the top-of-stack path is a prefix of the path,
|
||||
// and if it's not, pop from the stack.
|
||||
|
||||
// We don't need to worry about the root node case, because we can only finish the root "/"
|
||||
// If we're at the end of the NAR reader (covered by the EOF check)
|
||||
if len(stack) > 0 && !strings.HasPrefix(hdr.Path, stack[len(stack)-1].path) {
|
||||
err := popFromStack()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to pop from stack: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if hdr.Type == nar.TypeSymlink {
|
||||
symlinkNode := &storev1pb.SymlinkNode{
|
||||
Name: []byte(getBasename(hdr.Path)),
|
||||
Target: []byte(hdr.LinkTarget),
|
||||
}
|
||||
if len(stack) > 0 {
|
||||
topOfStack := stack[len(stack)-1].directory
|
||||
topOfStack.Symlinks = append(topOfStack.Symlinks, symlinkNode)
|
||||
} else {
|
||||
rootSymlink = symlinkNode
|
||||
}
|
||||
|
||||
}
|
||||
if hdr.Type == nar.TypeRegular {
|
||||
// wrap reader with a reader calculating the blake3 hash
|
||||
fileReader := NewHasher(narReader, blake3.New(32, nil))
|
||||
|
||||
err := fileCb(fileReader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failure from fileCb: %w", err)
|
||||
}
|
||||
|
||||
// drive the file reader to the end, in case the CB function doesn't read
|
||||
// all the way to the end on its own
|
||||
if fileReader.BytesWritten() != uint32(hdr.Size) {
|
||||
_, err := io.ReadAll(fileReader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read until the end of the file content: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// read the blake3 hash
|
||||
dgst := fileReader.Sum(nil)
|
||||
|
||||
fileNode := &storev1pb.FileNode{
|
||||
Name: []byte(getBasename(hdr.Path)),
|
||||
Digest: dgst,
|
||||
Size: uint32(hdr.Size),
|
||||
Executable: hdr.Executable,
|
||||
}
|
||||
if len(stack) > 0 {
|
||||
topOfStack := stack[len(stack)-1].directory
|
||||
topOfStack.Files = append(topOfStack.Files, fileNode)
|
||||
} else {
|
||||
rootFile = fileNode
|
||||
}
|
||||
}
|
||||
if hdr.Type == nar.TypeDirectory {
|
||||
directory := &storev1pb.Directory{
|
||||
Directories: []*storev1pb.DirectoryNode{},
|
||||
Files: []*storev1pb.FileNode{},
|
||||
Symlinks: []*storev1pb.SymlinkNode{},
|
||||
}
|
||||
stack = append(stack, item{
|
||||
directory: directory,
|
||||
path: hdr.Path,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
568
tvix/nar-bridge/pkg/reader/reader_test.go
Normal file
568
tvix/nar-bridge/pkg/reader/reader_test.go
Normal file
|
|
@ -0,0 +1,568 @@
|
|||
package reader_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"code.tvl.fyi/tvix/nar-bridge/pkg/reader"
|
||||
storev1pb "code.tvl.fyi/tvix/store/protos"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/protobuf/testing/protocmp"
|
||||
)
|
||||
|
||||
func requireProtoEq(t *testing.T, expected interface{}, actual interface{}) {
|
||||
if diff := cmp.Diff(expected, actual, protocmp.Transform()); diff != "" {
|
||||
t.Errorf("unexpected difference:\n%v", diff)
|
||||
}
|
||||
}
|
||||
|
||||
func mustDigest(d *storev1pb.Directory) []byte {
|
||||
dgst, err := d.Digest()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dgst
|
||||
}
|
||||
|
||||
// TestSymlink imports a NAR containing a single symlink at the root and
// verifies the resulting PathInfo. Neither the file nor the directory
// callback may be invoked.
func TestSymlink(t *testing.T) {
	f, err := os.Open("../../testdata/symlink.nar")
	require.NoError(t, err)

	r := reader.New(f)

	actualPathInfo, err := r.Import(
		context.Background(),
		func(fileReader io.Reader) error {
			panic("no file contents expected!")
		}, func(directory *storev1pb.Directory) error {
			panic("no directories expected!")
		},
	)
	require.NoError(t, err)

	// The root node carries an empty name.
	expectedPathInfo := &storev1pb.PathInfo{
		Node: &storev1pb.Node{
			Node: &storev1pb.Node_Symlink{
				Symlink: &storev1pb.SymlinkNode{
					Name:   []byte(""),
					Target: []byte("/nix/store/somewhereelse"),
				},
			},
		},
		References: [][]byte{},
		Narinfo: &storev1pb.NARInfo{
			NarSize: 136,
			NarSha256: []byte{
				0x09, 0x7d, 0x39, 0x7e, 0x9b, 0x58, 0x26, 0x38, 0x4e, 0xaa, 0x16, 0xc4, 0x57, 0x71, 0x5d, 0x1c, 0x1a, 0x51, 0x67, 0x03, 0x13, 0xea, 0xd0, 0xf5, 0x85, 0x66, 0xe0, 0xb2, 0x32, 0x53, 0x9c, 0xf1,
			},
			Signatures:     []*storev1pb.NARInfo_Signature{},
			ReferenceNames: []string{},
		},
	}

	requireProtoEq(t, expectedPathInfo, actualPathInfo)
}
|
||||
|
||||
// TestRegular imports a NAR containing a single regular file (one 0x01 byte)
// at the root. It verifies the contents handed to the file callback and the
// resulting PathInfo; the directory callback may not be invoked.
func TestRegular(t *testing.T) {
	f, err := os.Open("../../testdata/onebyteregular.nar")
	require.NoError(t, err)

	r := reader.New(f)

	actualPathInfo, err := r.Import(
		context.Background(),
		func(fileReader io.Reader) error {
			contents, err := io.ReadAll(fileReader)
			require.NoError(t, err, "reading fileReader should not error")
			require.Equal(t, []byte{0x01}, contents, "contents read from fileReader should match expectations")
			return nil
		}, func(directory *storev1pb.Directory) error {
			panic("no directories expected!")
		},
	)
	require.NoError(t, err)

	// The blake3 digest of the 0x01 byte.
	BLAKE3_DIGEST_0X01 := []byte{
		0x48, 0xfc, 0x72, 0x1f, 0xbb, 0xc1, 0x72, 0xe0, 0x92, 0x5f, 0xa2, 0x7a, 0xf1, 0x67, 0x1d,
		0xe2, 0x25, 0xba, 0x92, 0x71, 0x34, 0x80, 0x29, 0x98, 0xb1, 0x0a, 0x15, 0x68, 0xa1, 0x88,
		0x65, 0x2b,
	}

	expectedPathInfo := &storev1pb.PathInfo{
		Node: &storev1pb.Node{
			Node: &storev1pb.Node_File{
				File: &storev1pb.FileNode{
					Name:       []byte(""),
					Digest:     BLAKE3_DIGEST_0X01,
					Size:       1,
					Executable: false,
				},
			},
		},
		References: [][]byte{},
		Narinfo: &storev1pb.NARInfo{
			NarSize: 120,
			NarSha256: []byte{
				0x73, 0x08, 0x50, 0xa8, 0x11, 0x25, 0x9d, 0xbf, 0x3a, 0x68, 0xdc, 0x2e, 0xe8, 0x7a, 0x79, 0xaa, 0x6c, 0xae, 0x9f, 0x71, 0x37, 0x5e, 0xdf, 0x39, 0x6f, 0x9d, 0x7a, 0x91, 0xfb, 0xe9, 0x13, 0x4d,
			},
			Signatures:     []*storev1pb.NARInfo_Signature{},
			ReferenceNames: []string{},
		},
	}

	requireProtoEq(t, expectedPathInfo, actualPathInfo)
}
|
||||
|
||||
// TestEmptyDirectory imports a NAR containing a single empty directory at the
// root. The directory callback must receive exactly that (empty) directory,
// and the PathInfo root node must reference its digest and size.
func TestEmptyDirectory(t *testing.T) {
	f, err := os.Open("../../testdata/emptydirectory.nar")
	require.NoError(t, err)

	r := reader.New(f)

	expectedDirectory := &storev1pb.Directory{
		Directories: []*storev1pb.DirectoryNode{},
		Files:       []*storev1pb.FileNode{},
		Symlinks:    []*storev1pb.SymlinkNode{},
	}
	actualPathInfo, err := r.Import(
		context.Background(),
		func(fileReader io.Reader) error {
			panic("no file contents expected!")
		}, func(directory *storev1pb.Directory) error {
			requireProtoEq(t, expectedDirectory, directory)
			return nil
		},
	)
	require.NoError(t, err)

	expectedPathInfo := &storev1pb.PathInfo{
		Node: &storev1pb.Node{
			Node: &storev1pb.Node_Directory{
				Directory: &storev1pb.DirectoryNode{
					Name:   []byte(""),
					Digest: mustDigest(expectedDirectory),
					Size:   expectedDirectory.Size(),
				},
			},
		},
		References: [][]byte{},
		Narinfo: &storev1pb.NARInfo{
			NarSize: 96,
			NarSha256: []byte{
				0xa5, 0x0a, 0x5a, 0xb6, 0xd9, 0x92, 0xf5, 0x59, 0x8e, 0xdd, 0x92, 0x10, 0x50, 0x59, 0xfa, 0xe9, 0xac, 0xfc, 0x19, 0x29, 0x81, 0xe0, 0x8b, 0xd8, 0x85, 0x34, 0xc2, 0x16, 0x7e, 0x92, 0x52, 0x6a,
			},
			Signatures:     []*storev1pb.NARInfo_Signature{},
			ReferenceNames: []string{},
		},
	}
	requireProtoEq(t, expectedPathInfo, actualPathInfo)
}
|
||||
|
||||
// TestFull imports a realistic multi-level NAR (net-tools) and verifies both
// the order and the contents of every directoryCb invocation, as well as the
// resulting PathInfo for the root node.
func TestFull(t *testing.T) {
	f, err := os.Open("../../testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar")
	require.NoError(t, err)

	r := reader.New(f)

	// Directories must be finalized leaves-first, in exactly this order.
	expectedDirectoryPaths := []string{
		"/bin",
		"/share/man/man1",
		"/share/man/man5",
		"/share/man/man8",
		"/share/man",
		"/share",
		"/",
	}
	expectedDirectories := make(map[string]*storev1pb.Directory, len(expectedDirectoryPaths))

	// /bin is a leaf directory
	expectedDirectories["/bin"] = &storev1pb.Directory{
		Directories: []*storev1pb.DirectoryNode{},
		Files: []*storev1pb.FileNode{
			{
				Name: []byte("arp"),
				Digest: []byte{
					0xfb, 0xc4, 0x61, 0x4a, 0x29, 0x27, 0x11, 0xcb, 0xcc, 0xe4, 0x99, 0x81, 0x9c, 0xf0, 0xa9, 0x17, 0xf7, 0xd0, 0x91, 0xbe, 0xea, 0x08, 0xcb, 0x5b, 0xaa, 0x76, 0x76, 0xf5, 0xee, 0x4f, 0x82, 0xbb,
				},
				Size:       55288,
				Executable: true,
			},
			{
				Name: []byte("hostname"),
				Digest: []byte{
					0x9c, 0x6a, 0xe4, 0xb5, 0xe4, 0x6c, 0xb5, 0x67, 0x45, 0x0e, 0xaa, 0x2a, 0xd8, 0xdd, 0x9b, 0x38, 0xd7, 0xed, 0x01, 0x02, 0x84, 0xf7, 0x26, 0xe1, 0xc7, 0xf3, 0x1c, 0xeb, 0xaa, 0x8a, 0x01, 0x30,
				},
				Size:       17704,
				Executable: true,
			},
			{
				Name: []byte("ifconfig"),
				Digest: []byte{
					0x25, 0xbe, 0x3b, 0x1d, 0xf4, 0x1a, 0x45, 0x42, 0x79, 0x09, 0x2c, 0x2a, 0x83, 0xf0, 0x0b, 0xff, 0xe8, 0xc0, 0x9c, 0x26, 0x98, 0x70, 0x15, 0x4d, 0xa8, 0xca, 0x05, 0xfe, 0x92, 0x68, 0x35, 0x2e,
				},
				Size:       72576,
				Executable: true,
			},
			{
				Name: []byte("nameif"),
				Digest: []byte{
					0x8e, 0xaa, 0xc5, 0xdb, 0x71, 0x08, 0x8e, 0xe5, 0xe6, 0x30, 0x1f, 0x2c, 0x3a, 0xf2, 0x42, 0x39, 0x0c, 0x57, 0x15, 0xaf, 0x50, 0xaa, 0x1c, 0xdf, 0x84, 0x22, 0x08, 0x77, 0x03, 0x54, 0x62, 0xb1,
				},
				Size:       18776,
				Executable: true,
			},
			{
				Name: []byte("netstat"),
				Digest: []byte{
					0x13, 0x34, 0x7e, 0xdd, 0x2a, 0x9a, 0x17, 0x0b, 0x3f, 0xc7, 0x0a, 0xe4, 0x92, 0x89, 0x25, 0x9f, 0xaa, 0xb5, 0x05, 0x6b, 0x24, 0xa7, 0x91, 0xeb, 0xaf, 0xf9, 0xe9, 0x35, 0x56, 0xaa, 0x2f, 0xb2,
				},
				Size:       131784,
				Executable: true,
			},
			{
				Name: []byte("plipconfig"),
				Digest: []byte{
					0x19, 0x7c, 0x80, 0xdc, 0x81, 0xdc, 0xb4, 0xc0, 0x45, 0xe1, 0xf9, 0x76, 0x51, 0x4f, 0x50, 0xbf, 0xa4, 0x69, 0x51, 0x9a, 0xd4, 0xa9, 0xe7, 0xaa, 0xe7, 0x0d, 0x53, 0x32, 0xff, 0x28, 0x40, 0x60,
				},
				Size:       13160,
				Executable: true,
			},
			{
				Name: []byte("rarp"),
				Digest: []byte{
					0x08, 0x85, 0xb4, 0x85, 0x03, 0x2b, 0x3c, 0x7a, 0x3e, 0x24, 0x4c, 0xf8, 0xcc, 0x45, 0x01, 0x9e, 0x79, 0x43, 0x8c, 0x6f, 0x5e, 0x32, 0x46, 0x54, 0xb6, 0x68, 0x91, 0x8e, 0xa0, 0xcb, 0x6e, 0x0d,
				},
				Size:       30384,
				Executable: true,
			},
			{
				Name: []byte("route"),
				Digest: []byte{
					0x4d, 0x14, 0x20, 0x89, 0x9e, 0x76, 0xf4, 0xe2, 0x92, 0x53, 0xee, 0x9b, 0x78, 0x7d, 0x23, 0x80, 0x6c, 0xff, 0xe6, 0x33, 0xdc, 0x4a, 0x10, 0x29, 0x39, 0x02, 0xa0, 0x60, 0xff, 0xe2, 0xbb, 0xd7,
				},
				Size:       61928,
				Executable: true,
			},
			{
				Name: []byte("slattach"),
				Digest: []byte{
					0xfb, 0x25, 0xc3, 0x73, 0xb7, 0xb1, 0x0b, 0x25, 0xcd, 0x7b, 0x62, 0xf6, 0x71, 0x83, 0xfe, 0x36, 0x80, 0xf6, 0x48, 0xc3, 0xdb, 0xd8, 0x0c, 0xfe, 0xb8, 0xd3, 0xda, 0x32, 0x9b, 0x47, 0x4b, 0x05,
				},
				Size:       35672,
				Executable: true,
			},
		},
		Symlinks: []*storev1pb.SymlinkNode{
			{
				Name:   []byte("dnsdomainname"),
				Target: []byte("hostname"),
			},
			{
				Name:   []byte("domainname"),
				Target: []byte("hostname"),
			},
			{
				Name:   []byte("nisdomainname"),
				Target: []byte("hostname"),
			},
			{
				Name:   []byte("ypdomainname"),
				Target: []byte("hostname"),
			},
		},
	}

	// /share/man/man1 is a leaf directory.
	// The parser traversed over /sbin, but only added it to / which is still on the stack.
	expectedDirectories["/share/man/man1"] = &storev1pb.Directory{
		Directories: []*storev1pb.DirectoryNode{},
		Files: []*storev1pb.FileNode{
			{
				Name: []byte("dnsdomainname.1.gz"),
				Digest: []byte{
					0x98, 0x8a, 0xbd, 0xfa, 0x64, 0xd5, 0xb9, 0x27, 0xfe, 0x37, 0x43, 0x56, 0xb3, 0x18, 0xc7, 0x2b, 0xcb, 0xe3, 0x17, 0x1c, 0x17, 0xf4, 0x17, 0xeb, 0x4a, 0xa4, 0x99, 0x64, 0x39, 0xca, 0x2d, 0xee,
				},
				Size:       40,
				Executable: false,
			},
			{
				Name: []byte("domainname.1.gz"),
				Digest: []byte{
					0x98, 0x8a, 0xbd, 0xfa, 0x64, 0xd5, 0xb9, 0x27, 0xfe, 0x37, 0x43, 0x56, 0xb3, 0x18, 0xc7, 0x2b, 0xcb, 0xe3, 0x17, 0x1c, 0x17, 0xf4, 0x17, 0xeb, 0x4a, 0xa4, 0x99, 0x64, 0x39, 0xca, 0x2d, 0xee,
				},
				Size:       40,
				Executable: false,
			},
			{
				Name: []byte("hostname.1.gz"),
				Digest: []byte{
					0xbf, 0x89, 0xe6, 0x28, 0x00, 0x24, 0x66, 0x79, 0x70, 0x04, 0x38, 0xd6, 0xdd, 0x9d, 0xf6, 0x0e, 0x0d, 0xee, 0x00, 0xf7, 0x64, 0x4f, 0x05, 0x08, 0x9d, 0xf0, 0x36, 0xde, 0x85, 0xf4, 0x75, 0xdb,
				},
				Size:       1660,
				Executable: false,
			},
			{
				Name: []byte("nisdomainname.1.gz"),
				Digest: []byte{
					0x98, 0x8a, 0xbd, 0xfa, 0x64, 0xd5, 0xb9, 0x27, 0xfe, 0x37, 0x43, 0x56, 0xb3, 0x18, 0xc7, 0x2b, 0xcb, 0xe3, 0x17, 0x1c, 0x17, 0xf4, 0x17, 0xeb, 0x4a, 0xa4, 0x99, 0x64, 0x39, 0xca, 0x2d, 0xee,
				},
				Size:       40,
				Executable: false,
			},
			{
				Name: []byte("ypdomainname.1.gz"),
				Digest: []byte{
					0x98, 0x8a, 0xbd, 0xfa, 0x64, 0xd5, 0xb9, 0x27, 0xfe, 0x37, 0x43, 0x56, 0xb3, 0x18, 0xc7, 0x2b, 0xcb, 0xe3, 0x17, 0x1c, 0x17, 0xf4, 0x17, 0xeb, 0x4a, 0xa4, 0x99, 0x64, 0x39, 0xca, 0x2d, 0xee,
				},
				Size:       40,
				Executable: false,
			},
		},
		Symlinks: []*storev1pb.SymlinkNode{},
	}

	// /share/man/man5 is a leaf directory
	expectedDirectories["/share/man/man5"] = &storev1pb.Directory{
		Directories: []*storev1pb.DirectoryNode{},
		Files: []*storev1pb.FileNode{
			{
				Name: []byte("ethers.5.gz"),
				Digest: []byte{
					0x42, 0x63, 0x8c, 0xc4, 0x18, 0x93, 0xcf, 0x60, 0xd6, 0xff, 0x43, 0xbc, 0x16, 0xb4, 0xfd, 0x22, 0xd2, 0xf2, 0x05, 0x0b, 0x52, 0xdc, 0x6a, 0x6b, 0xff, 0x34, 0xe2, 0x6a, 0x38, 0x3a, 0x07, 0xe3,
				},
				Size:       563,
				Executable: false,
			},
		},
		Symlinks: []*storev1pb.SymlinkNode{},
	}

	// /share/man/man8 is a leaf directory
	expectedDirectories["/share/man/man8"] = &storev1pb.Directory{
		Directories: []*storev1pb.DirectoryNode{},
		Files: []*storev1pb.FileNode{
			{
				Name: []byte("arp.8.gz"),
				Digest: []byte{
					0xf5, 0x35, 0x4e, 0xf5, 0xf6, 0x44, 0xf7, 0x52, 0x0f, 0x42, 0xa0, 0x26, 0x51, 0xd9, 0x89, 0xf9, 0x68, 0xf2, 0xef, 0xeb, 0xba, 0xe1, 0xf4, 0x55, 0x01, 0x57, 0x77, 0xb7, 0x68, 0x55, 0x92, 0xef,
				},
				Size:       2464,
				Executable: false,
			},
			{
				Name: []byte("ifconfig.8.gz"),
				Digest: []byte{
					0x18, 0x65, 0x25, 0x11, 0x32, 0xee, 0x77, 0x91, 0x35, 0x4c, 0x3c, 0x24, 0xdb, 0xaf, 0x66, 0xdb, 0xfc, 0x17, 0x7b, 0xba, 0xe1, 0x3d, 0x05, 0xd2, 0xca, 0x6e, 0x2c, 0xe4, 0xef, 0xb8, 0xa8, 0xbe,
				},
				Size:       3382,
				Executable: false,
			},
			{
				Name: []byte("nameif.8.gz"),
				Digest: []byte{
					0x73, 0xc1, 0x27, 0xe8, 0x3b, 0xa8, 0x49, 0xdc, 0x0e, 0xdf, 0x70, 0x5f, 0xaf, 0x06, 0x01, 0x2c, 0x62, 0xe9, 0x18, 0x67, 0x01, 0x94, 0x64, 0x26, 0xca, 0x95, 0x22, 0xc0, 0xdc, 0xe4, 0x42, 0xb6,
				},
				Size:       523,
				Executable: false,
			},
			{
				Name: []byte("netstat.8.gz"),
				Digest: []byte{
					0xc0, 0x86, 0x43, 0x4a, 0x43, 0x57, 0xaa, 0x84, 0xa7, 0x24, 0xa0, 0x7c, 0x65, 0x38, 0x46, 0x1c, 0xf2, 0x45, 0xa2, 0xef, 0x12, 0x44, 0x18, 0xba, 0x52, 0x56, 0xe9, 0x8e, 0x6a, 0x0f, 0x70, 0x63,
				},
				Size:       4284,
				Executable: false,
			},
			{
				Name: []byte("plipconfig.8.gz"),
				Digest: []byte{
					0x2a, 0xd9, 0x1d, 0xa8, 0x9e, 0x0d, 0x05, 0xd0, 0xb0, 0x49, 0xaa, 0x64, 0xba, 0x29, 0x28, 0xc6, 0x45, 0xe1, 0xbb, 0x5e, 0x72, 0x8d, 0x48, 0x7b, 0x09, 0x4f, 0x0a, 0x82, 0x1e, 0x26, 0x83, 0xab,
				},
				Size:       889,
				Executable: false,
			},
			{
				Name: []byte("rarp.8.gz"),
				Digest: []byte{
					0x3d, 0x51, 0xc1, 0xd0, 0x6a, 0x59, 0x1e, 0x6d, 0x9a, 0xf5, 0x06, 0xd2, 0xe7, 0x7d, 0x7d, 0xd0, 0x70, 0x3d, 0x84, 0x64, 0xc3, 0x7d, 0xfb, 0x10, 0x84, 0x3b, 0xe1, 0xa9, 0xdf, 0x46, 0xee, 0x9f,
				},
				Size:       1198,
				Executable: false,
			},
			{
				Name: []byte("route.8.gz"),
				Digest: []byte{
					0x2a, 0x5a, 0x4b, 0x4f, 0x91, 0xf2, 0x78, 0xe4, 0xa9, 0x25, 0xb2, 0x7f, 0xa7, 0x2a, 0xc0, 0x8a, 0x4a, 0x65, 0xc9, 0x5f, 0x07, 0xa0, 0x48, 0x44, 0xeb, 0x46, 0xf9, 0xc9, 0xe1, 0x17, 0x96, 0x21,
				},
				Size:       3525,
				Executable: false,
			},
			{
				Name: []byte("slattach.8.gz"),
				Digest: []byte{
					0x3f, 0x05, 0x6b, 0x20, 0xe1, 0xe4, 0xf0, 0xba, 0x16, 0x15, 0x66, 0x6b, 0x57, 0x96, 0xe9, 0x9d, 0x83, 0xa8, 0x20, 0xaf, 0x8a, 0xca, 0x16, 0x4d, 0xa2, 0x6d, 0x94, 0x8e, 0xca, 0x91, 0x8f, 0xd4,
				},
				Size:       1441,
				Executable: false,
			},
		},
		Symlinks: []*storev1pb.SymlinkNode{},
	}

	// /share/man holds /share/man/man{1,5,8}.
	expectedDirectories["/share/man"] = &storev1pb.Directory{
		Directories: []*storev1pb.DirectoryNode{
			{
				Name:   []byte("man1"),
				Digest: mustDigest(expectedDirectories["/share/man/man1"]),
				Size:   expectedDirectories["/share/man/man1"].Size(),
			},
			{
				Name:   []byte("man5"),
				Digest: mustDigest(expectedDirectories["/share/man/man5"]),
				Size:   expectedDirectories["/share/man/man5"].Size(),
			},
			{
				Name:   []byte("man8"),
				Digest: mustDigest(expectedDirectories["/share/man/man8"]),
				Size:   expectedDirectories["/share/man/man8"].Size(),
			},
		},
		Files:    []*storev1pb.FileNode{},
		Symlinks: []*storev1pb.SymlinkNode{},
	}

	// /share holds /share/man.
	expectedDirectories["/share"] = &storev1pb.Directory{
		Directories: []*storev1pb.DirectoryNode{
			{
				Name:   []byte("man"),
				Digest: mustDigest(expectedDirectories["/share/man"]),
				Size:   expectedDirectories["/share/man"].Size(),
			},
		},
		Files:    []*storev1pb.FileNode{},
		Symlinks: []*storev1pb.SymlinkNode{},
	}

	// / holds /bin, /share, and a /sbin symlink.
	expectedDirectories["/"] = &storev1pb.Directory{
		Directories: []*storev1pb.DirectoryNode{
			{
				Name:   []byte("bin"),
				Digest: mustDigest(expectedDirectories["/bin"]),
				Size:   expectedDirectories["/bin"].Size(),
			},
			{
				Name:   []byte("share"),
				Digest: mustDigest(expectedDirectories["/share"]),
				Size:   expectedDirectories["/share"].Size(),
			},
		},
		Files: []*storev1pb.FileNode{},
		Symlinks: []*storev1pb.SymlinkNode{
			{
				Name:   []byte("sbin"),
				Target: []byte("bin"),
			},
		},
	}
	// assert we populated the two fixtures properly
	require.Equal(t, len(expectedDirectoryPaths), len(expectedDirectories))

	numDirectoriesReceived := 0

	actualPathInfo, err := r.Import(
		context.Background(),
		func(fileReader io.Reader) error {
			// Don't really bother reading and comparing the contents here,
			// We already verify the right digests are produced by comparing the
			// directoryCb calls, and TestRegular ensures the reader works.
			// This also covers the case when the client doesn't read from the reader, and that the
			// importer will take care of reading all the way to the end no matter what.
			return nil
		}, func(directory *storev1pb.Directory) error {
			// use actualDirectoryOrder to look up the Directory object we expect at this specific invocation.
			currentDirectoryPath := expectedDirectoryPaths[numDirectoriesReceived]

			expectedDirectory, found := expectedDirectories[currentDirectoryPath]
			require.True(t, found, "must find the current directory")

			requireProtoEq(t, expectedDirectory, directory)

			numDirectoriesReceived += 1
			return nil
		},
	)
	require.NoError(t, err)

	expectedPathInfo := &storev1pb.PathInfo{
		Node: &storev1pb.Node{
			Node: &storev1pb.Node_Directory{
				Directory: &storev1pb.DirectoryNode{
					Name:   []byte(""),
					Digest: mustDigest(expectedDirectories["/"]),
					Size:   expectedDirectories["/"].Size(),
				},
			},
		},
		References: [][]byte{},
		Narinfo: &storev1pb.NARInfo{
			NarSize: 464152,
			NarSha256: []byte{
				0xc6, 0xe1, 0x55, 0xb3, 0x45, 0x6e, 0x30, 0xb7, 0x61, 0x22, 0x63, 0xec, 0x09, 0x50, 0x70, 0x81, 0x1c, 0xaf, 0x8a, 0xbf, 0xd5, 0x9f, 0xaa, 0x72, 0xab, 0x82, 0xa5, 0x92, 0xef, 0xde, 0xb2, 0x53,
			},
			Signatures:     []*storev1pb.NARInfo_Signature{},
			ReferenceNames: []string{},
		},
	}
	requireProtoEq(t, expectedPathInfo, actualPathInfo)
}
|
||||
|
||||
// TestCallbackErrors ensures that errors returned from the callback function
// bubble up to the importer process, and are not ignored.
func TestCallbackErrors(t *testing.T) {
	t.Run("callback file", func(t *testing.T) {
		// Pick an example NAR with a regular file.
		f, err := os.Open("../../testdata/onebyteregular.nar")
		require.NoError(t, err)

		r := reader.New(f)

		// sentinel error the file callback will return
		targetErr := errors.New("expected error")

		_, err = r.Import(
			context.Background(),
			func(fileReader io.Reader) error {
				return targetErr
			}, func(directory *storev1pb.Directory) error {
				panic("no directories expected!")
			},
		)
		// the sentinel must survive the import unwrapped enough for errors.Is
		require.ErrorIs(t, err, targetErr)
	})
	t.Run("callback directory", func(t *testing.T) {
		// Pick an example NAR with a directory node
		f, err := os.Open("../../testdata/emptydirectory.nar")
		require.NoError(t, err)

		r := reader.New(f)

		// sentinel error the directory callback will return
		targetErr := errors.New("expected error")

		_, err = r.Import(
			context.Background(),
			func(fileReader io.Reader) error {
				panic("no file contents expected!")
			}, func(directory *storev1pb.Directory) error {
				return targetErr
			},
		)
		require.ErrorIs(t, err, targetErr)
	})
}
|
||||
48
tvix/nar-bridge/pkg/server/blob_upload.go
Normal file
48
tvix/nar-bridge/pkg/server/blob_upload.go
Normal file
|
|
@ -0,0 +1,48 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
storev1pb "code.tvl.fyi/tvix/store/protos"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"io"
|
||||
)
|
||||
|
||||
// genBlobServiceWriteCb returns a callback function that can be used as fileCb
// for the reader.Import function call.
// The returned callback uploads the contents of one file as a single blob to
// the given BlobServiceClient, using ctx for the gRPC calls.
func genBlobServiceWriteCb(ctx context.Context, blobServiceClient storev1pb.BlobServiceClient) func(io.Reader) error {
	return func(fileReader io.Reader) error {
		// Read from fileReader into a buffer.
		// We currently buffer all contents and send them to blobServiceClient at once,
		// but that's about to change.
		contents, err := io.ReadAll(fileReader)
		if err != nil {
			return fmt.Errorf("unable to read all contents from file reader: %w", err)
		}

		// shadow log with an entry carrying the blob size for this upload
		log := log.WithField("blob_size", len(contents))

		log.Infof("about to upload blob")

		// open a client-streaming Put call to the blob service
		putter, err := blobServiceClient.Put(ctx)
		if err != nil {
			// return error to the importer
			return fmt.Errorf("error from blob service: %w", err)
		}
		// send the whole blob as one chunk
		err = putter.Send(&storev1pb.BlobChunk{
			Data: contents,
		})
		if err != nil {
			return fmt.Errorf("putting blob chunk: %w", err)
		}
		// close the stream and receive the server-computed digest
		resp, err := putter.CloseAndRecv()
		if err != nil {
			return fmt.Errorf("close blob putter: %w", err)
		}

		log.WithField("digest", base64.StdEncoding.EncodeToString(resp.GetDigest())).Info("uploaded blob")

		return nil
	}
}
|
||||
66
tvix/nar-bridge/pkg/server/directory_upload.go
Normal file
66
tvix/nar-bridge/pkg/server/directory_upload.go
Normal file
|
|
@ -0,0 +1,66 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
|
||||
storev1pb "code.tvl.fyi/tvix/store/protos"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// DirectoriesUploader sends Directory messages to a DirectoryServiceClient
// over a lazily-opened Put stream.
// NOTE(review): storing a context in a struct is discouraged by Go
// convention; it is kept here so the stream can be opened on first Put.
type DirectoriesUploader struct {
	ctx                    context.Context
	directoryServiceClient storev1pb.DirectoryServiceClient
	// nil until the first Put; reset to nil again by Done.
	directoryServicePutStream storev1pb.DirectoryService_PutClient
}
|
||||
|
||||
func NewDirectoriesUploader(ctx context.Context, directoryServiceClient storev1pb.DirectoryServiceClient) *DirectoriesUploader {
|
||||
return &DirectoriesUploader{
|
||||
ctx: ctx,
|
||||
directoryServiceClient: directoryServiceClient,
|
||||
directoryServicePutStream: nil,
|
||||
}
|
||||
}
|
||||
|
||||
func (du *DirectoriesUploader) Put(directory *storev1pb.Directory) error {
|
||||
directoryDgst, err := directory.Digest()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed calculating directory digest: %w", err)
|
||||
}
|
||||
|
||||
// Send the directory to the directory service
|
||||
// If the stream hasn't been initialized yet, do it first
|
||||
if du.directoryServicePutStream == nil {
|
||||
directoryServicePutStream, err := du.directoryServiceClient.Put(du.ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to initialize directory service put stream: %v", err)
|
||||
}
|
||||
du.directoryServicePutStream = directoryServicePutStream
|
||||
}
|
||||
|
||||
// send the directory out
|
||||
err = du.directoryServicePutStream.Send(directory)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error sending directory: %w", err)
|
||||
}
|
||||
log.WithField("digest", base64.StdEncoding.EncodeToString(directoryDgst)).Info("uploaded directory")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Done is called whenever we're
|
||||
func (du *DirectoriesUploader) Done() (*storev1pb.PutDirectoryResponse, error) {
|
||||
// only close once, and only if we opened.
|
||||
if du.directoryServicePutStream == nil {
|
||||
return nil, nil
|
||||
}
|
||||
putDirectoryResponse, err := du.directoryServicePutStream.CloseAndRecv()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to close directory service put stream: %v", err)
|
||||
}
|
||||
|
||||
du.directoryServicePutStream = nil
|
||||
|
||||
return putDirectoryResponse, nil
|
||||
}
|
||||
212
tvix/nar-bridge/pkg/server/nar_get.go
Normal file
212
tvix/nar-bridge/pkg/server/nar_get.go
Normal file
|
|
@ -0,0 +1,212 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"code.tvl.fyi/tvix/nar-bridge/pkg/writer"
|
||||
storev1pb "code.tvl.fyi/tvix/store/protos"
|
||||
"github.com/go-chi/chi/v5"
|
||||
nixhash "github.com/nix-community/go-nix/pkg/hash"
|
||||
"github.com/nix-community/go-nix/pkg/nixbase32"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
narUrl = "/nar/{narhash:^([" + nixbase32.Alphabet + "]{52})$}.nar"
|
||||
)
|
||||
|
||||
// renderNar writes the NAR file identified by narHash to w.
// It looks the hash up in the narHashToPathInfo map (guarded by
// narHashToPathInfoMu); if it is absent, an error wrapping fs.ErrNotExist is
// returned so callers can map it to a 404.
// If headOnly is true, only existence is checked and nothing is written.
// For directory root nodes, all referenced directories are fetched up front
// from the directory service into a local map keyed by hex digest; blobs are
// fetched (and currently fully buffered) on demand while exporting.
func renderNar(
	ctx context.Context,
	log *log.Entry,
	directoryServiceClient storev1pb.DirectoryServiceClient,
	blobServiceClient storev1pb.BlobServiceClient,
	narHashToPathInfoMu *sync.Mutex,
	narHashToPathInfo map[string]*storev1pb.PathInfo,
	w io.Writer,
	narHash *nixhash.Hash,
	headOnly bool,
) error {
	// look in the lookup table
	narHashToPathInfoMu.Lock()
	pathInfo, found := narHashToPathInfo[narHash.SRIString()]
	narHashToPathInfoMu.Unlock()

	// if we didn't find anything, return 404.
	if !found {
		return fmt.Errorf("narHash not found: %w", fs.ErrNotExist)
	}

	// if this was only a head request, we're done.
	if headOnly {
		return nil
	}

	// hex(digest) -> Directory, filled below for directory roots
	directories := make(map[string]*storev1pb.Directory)

	// If the root node is a directory, ask the directory service for all directories
	if pathInfoDirectory := pathInfo.GetNode().GetDirectory(); pathInfoDirectory != nil {
		rootDirectoryDigest := pathInfoDirectory.GetDigest()
		log = log.WithField("root_directory", base64.StdEncoding.EncodeToString(rootDirectoryDigest))

		directoryStream, err := directoryServiceClient.Get(ctx, &storev1pb.GetDirectoryRequest{
			ByWhat: &storev1pb.GetDirectoryRequest_Digest{
				Digest: rootDirectoryDigest,
			},
			Recursive: true,
		})
		if err != nil {
			return fmt.Errorf("unable to query directory stream: %w", err)
		}

		// For now, we just stream all of these locally and put them into a hashmap,
		// which is used in the lookup function below.
		for {
			directory, err := directoryStream.Recv()
			if err != nil {
				if err == io.EOF {
					break
				}
				return fmt.Errorf("unable to receive from directory stream: %w", err)
			}

			// calculate directory digest
			// TODO: do we need to do any more validation?
			directoryDgst, err := directory.Digest()
			if err != nil {
				return fmt.Errorf("unable to calculate directory digest: %w", err)
			}

			// TODO: debug level
			log.WithField("directory", base64.StdEncoding.EncodeToString(directoryDgst)).Info("received directory node")

			directories[hex.EncodeToString(directoryDgst)] = directory
		}

	}

	// render the NAR file
	err := writer.Export(
		w,
		pathInfo,
		// directory lookup: serve from the map populated above
		func(directoryDigest []byte) (*storev1pb.Directory, error) {
			// TODO: debug level
			log.WithField("directory", base64.StdEncoding.EncodeToString(directoryDigest)).Info("Get directory")
			directoryRefStr := hex.EncodeToString(directoryDigest)
			directory, found := directories[directoryRefStr]
			if !found {
				return nil, fmt.Errorf(
					"directory with hash %v does not exist: %w",
					directoryDigest,
					fs.ErrNotExist,
				)
			}

			return directory, nil
		},
		// blob lookup: read the full blob from the blob service into memory
		func(blobDigest []byte) (io.ReadCloser, error) {
			// TODO: debug level
			log.WithField("blob", base64.StdEncoding.EncodeToString(blobDigest)).Info("Get blob")
			resp, err := blobServiceClient.Read(ctx, &storev1pb.ReadBlobRequest{
				Digest: blobDigest,
			})
			if err != nil {
				return nil, fmt.Errorf("unable to get blob: %w", err)

			}

			// TODO: spin up a goroutine producing this.
			data := &bytes.Buffer{}
			for {
				chunk, err := resp.Recv()
				if errors.Is(err, io.EOF) {
					break
				}
				if err != nil {
					return nil, fmt.Errorf("read chunk: %w", err)
				}
				_, err = data.Write(chunk.GetData())
				if err != nil {
					return nil, fmt.Errorf("buffer chunk: %w", err)
				}
			}
			return io.NopCloser(data), nil
		},
	)
	if err != nil {
		return fmt.Errorf("unable to export nar: %w", err)
	}
	return nil
}
|
||||
|
||||
func registerNarGet(s *Server) {
|
||||
// TODO: properly compose this
|
||||
s.handler.Head(narUrl, func(w http.ResponseWriter, r *http.Request) {
|
||||
defer r.Body.Close()
|
||||
|
||||
ctx := r.Context()
|
||||
|
||||
// parse the narhash sent in the request URL
|
||||
narHash, err := parseNarHashFromUrl(chi.URLParamFromCtx(ctx, "narhash"))
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("url", r.URL).Error("unable to decode nar hash from url")
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
_, err := w.Write([]byte("unable to decode nar hash from url"))
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("unable to write error message to client")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
log := log.WithField("narhash_url", narHash.SRIString())
|
||||
|
||||
err = renderNar(ctx, log, s.directoryServiceClient, s.blobServiceClient, &s.narHashToPathInfoMu, s.narHashToPathInfo, w, narHash, true)
|
||||
if err != nil {
|
||||
log.WithError(err).Info("unable to render nar")
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
} else {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
}
|
||||
}
|
||||
|
||||
})
|
||||
s.handler.Get(narUrl, func(w http.ResponseWriter, r *http.Request) {
|
||||
defer r.Body.Close()
|
||||
|
||||
ctx := r.Context()
|
||||
|
||||
// parse the narhash sent in the request URL
|
||||
narHash, err := parseNarHashFromUrl(chi.URLParamFromCtx(ctx, "narhash"))
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("url", r.URL).Error("unable to decode nar hash from url")
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
_, err := w.Write([]byte("unable to decode nar hash from url"))
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("unable to write error message to client")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
log := log.WithField("narhash_url", narHash.SRIString())
|
||||
|
||||
err = renderNar(ctx, log, s.directoryServiceClient, s.blobServiceClient, &s.narHashToPathInfoMu, s.narHashToPathInfo, w, narHash, false)
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
} else {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
140
tvix/nar-bridge/pkg/server/nar_put.go
Normal file
140
tvix/nar-bridge/pkg/server/nar_put.go
Normal file
|
|
@ -0,0 +1,140 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"code.tvl.fyi/tvix/nar-bridge/pkg/reader"
|
||||
storev1pb "code.tvl.fyi/tvix/store/protos"
|
||||
"github.com/go-chi/chi/v5"
|
||||
nixhash "github.com/nix-community/go-nix/pkg/hash"
|
||||
"github.com/nix-community/go-nix/pkg/nixbase32"
|
||||
"github.com/sirupsen/logrus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// registerNarPut registers the PUT handler for /nar/$narhash.nar.
// It imports the uploaded NAR into the blob and directory services,
// cross-checks the server-computed root directory digest and the narhash
// from the URL against what the import produced, and finally stores the
// resulting partial PathInfo in the in-memory lookup map so a subsequent
// .narinfo PUT can complete and upload it.
func registerNarPut(s *Server) {
	s.handler.Put(narUrl, func(w http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()

		ctx := r.Context()

		// parse the narhash sent in the request URL
		narHashFromUrl, err := parseNarHashFromUrl(chi.URLParamFromCtx(ctx, "narhash"))
		if err != nil {
			log.WithError(err).WithField("url", r.URL).Error("unable to decode nar hash from url")
			w.WriteHeader(http.StatusBadRequest)
			_, err := w.Write([]byte("unable to decode nar hash from url"))
			if err != nil {
				log.WithError(err).Error("unable to write error message to client")
			}

			return
		}

		log := log.WithField("narhash_url", narHashFromUrl.SRIString())

		directoriesUploader := NewDirectoriesUploader(ctx, s.directoryServiceClient)
		// tear the stream down on early return; the happy path calls Done()
		// explicitly below and checks its error (second Done is a no-op).
		defer directoriesUploader.Done() //nolint:errcheck

		rd := reader.New(bufio.NewReader(r.Body))
		pathInfo, err := rd.Import(
			ctx,
			genBlobServiceWriteCb(ctx, s.blobServiceClient),
			func(directory *storev1pb.Directory) error {
				return directoriesUploader.Put(directory)
			},
		)

		if err != nil {
			log.Errorf("error during NAR import: %v", err)
			w.WriteHeader(http.StatusInternalServerError)
			_, err := w.Write([]byte(fmt.Sprintf("error during NAR import: %v", err)))
			if err != nil {
				log.WithError(err).Errorf("unable to write error message to client")
			}

			return
		}

		log.Infof("closing the stream")

		// Close the directories uploader
		directoriesPutResponse, err := directoriesUploader.Done()
		if err != nil {
			log.WithError(err).Error("error during directory upload")
			w.WriteHeader(http.StatusBadRequest)
			_, err := w.Write([]byte("error during directory upload"))
			if err != nil {
				log.WithError(err).Errorf("unable to write error message to client")
			}

			return
		}
		// If we uploaded directories (so directoriesPutResponse doesn't return null),
		// the RootDigest field in directoriesPutResponse should match the digest
		// returned in the PathInfo struct returned by the `Import` call.
		// This check ensures the server-side came up with the same root hash.

		if directoriesPutResponse != nil {
			rootDigestPathInfo := pathInfo.GetNode().GetDirectory().GetDigest()
			rootDigestDirectoriesPutResponse := directoriesPutResponse.GetRootDigest()

			log := log.WithFields(logrus.Fields{
				"root_digest_pathinfo":             rootDigestPathInfo,
				"root_digest_directories_put_resp": rootDigestDirectoriesPutResponse,
			})
			if !bytes.Equal(rootDigestPathInfo, rootDigestDirectoriesPutResponse) {
				log.Errorf("returned root digest doesn't match what's calculated")

				w.WriteHeader(http.StatusBadRequest)
				_, err := w.Write([]byte("error in root digest calculation"))
				if err != nil {
					log.WithError(err).Error("unable to write error message to client")
				}

				return
			}
		}

		// Compare the nar hash specified in the URL with the one that has been
		// calculated while processing the NAR file
		piNarHash, err := nixhash.ParseNixBase32(
			"sha256:" + nixbase32.EncodeToString(pathInfo.GetNarinfo().NarSha256),
		)
		if err != nil {
			// NarSha256 was produced by our own importer, so this is a bug
			panic("must parse nixbase32")
		}

		if !bytes.Equal(narHashFromUrl.Digest(), piNarHash.Digest()) {
			log := log.WithFields(logrus.Fields{
				"narhash_received_sha256": piNarHash.SRIString(),
				"narsize":                 pathInfo.GetNarinfo().GetNarSize(),
			})
			log.Error("received bytes don't match narhash from URL")

			w.WriteHeader(http.StatusBadRequest)
			_, err := w.Write([]byte("received bytes don't match narHash specified in URL"))
			if err != nil {
				log.WithError(err).Errorf("unable to write error message to client")
			}

			return

		}

		// Insert the partial pathinfo structs into our lookup map,
		// so requesting the NAR file will be possible.
		// The same might exist already, but it'll have the same contents (so
		// replacing will be a no-op), except maybe the root node Name field value, which
		// is safe to ignore (as not part of the NAR).
		s.narHashToPathInfoMu.Lock()
		s.narHashToPathInfo[piNarHash.SRIString()] = pathInfo
		s.narHashToPathInfoMu.Unlock()

		// Done!
	})

}
|
||||
146
tvix/nar-bridge/pkg/server/narinfo_get.go
Normal file
146
tvix/nar-bridge/pkg/server/narinfo_get.go
Normal file
|
|
@ -0,0 +1,146 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
storev1pb "code.tvl.fyi/tvix/store/protos"
|
||||
"github.com/go-chi/chi/v5"
|
||||
nixhash "github.com/nix-community/go-nix/pkg/hash"
|
||||
"github.com/nix-community/go-nix/pkg/narinfo"
|
||||
"github.com/nix-community/go-nix/pkg/narinfo/signature"
|
||||
"github.com/nix-community/go-nix/pkg/nixbase32"
|
||||
"github.com/nix-community/go-nix/pkg/nixpath"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// renderNarinfo writes narinfo contents to a passes io.Writer, or a returns a
|
||||
// (wrapped) io.ErrNoExist error if something doesn't exist.
|
||||
// if headOnly is set to true, only the existence is checked, but no content is
|
||||
// actually written.
|
||||
func renderNarinfo(
|
||||
ctx context.Context,
|
||||
log *log.Entry,
|
||||
pathInfoServiceClient storev1pb.PathInfoServiceClient,
|
||||
narHashToPathInfoMu *sync.Mutex,
|
||||
narHashToPathInfo map[string]*storev1pb.PathInfo,
|
||||
outputHash []byte,
|
||||
w io.Writer,
|
||||
headOnly bool,
|
||||
) error {
|
||||
pathInfo, err := pathInfoServiceClient.Get(ctx, &storev1pb.GetPathInfoRequest{
|
||||
ByWhat: &storev1pb.GetPathInfoRequest_ByOutputHash{
|
||||
ByOutputHash: outputHash,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
st, ok := status.FromError(err)
|
||||
if ok {
|
||||
if st.Code() == codes.NotFound {
|
||||
return fmt.Errorf("output hash %v not found: %w", base64.StdEncoding.EncodeToString(outputHash), fs.ErrNotExist)
|
||||
}
|
||||
return fmt.Errorf("unable to get pathinfo, code %v: %w", st.Code(), err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("unable to get pathinfo: %w", err)
|
||||
}
|
||||
|
||||
narHash, err := nixhash.ParseNixBase32("sha256:" + nixbase32.EncodeToString(pathInfo.GetNarinfo().GetNarSha256()))
|
||||
if err != nil {
|
||||
// TODO: return proper error
|
||||
return fmt.Errorf("No usable NarHash found in PathInfo")
|
||||
}
|
||||
|
||||
// add things to the lookup table, in case the same process didn't handle the NAR hash yet.
|
||||
narHashToPathInfoMu.Lock()
|
||||
narHashToPathInfo[narHash.SRIString()] = pathInfo
|
||||
narHashToPathInfoMu.Unlock()
|
||||
|
||||
if headOnly {
|
||||
return nil
|
||||
}
|
||||
|
||||
// convert the signatures from storev1pb signatures to narinfo signatures
|
||||
narinfoSignatures := make([]signature.Signature, 0)
|
||||
for _, pathInfoSignature := range pathInfo.Narinfo.Signatures {
|
||||
narinfoSignatures = append(narinfoSignatures, signature.Signature{
|
||||
Name: pathInfoSignature.GetName(),
|
||||
Data: pathInfoSignature.GetData(),
|
||||
})
|
||||
}
|
||||
|
||||
// extract the name of the node in the pathInfo structure, which will become the output path
|
||||
var nodeName []byte
|
||||
switch v := (pathInfo.GetNode().GetNode()).(type) {
|
||||
case *storev1pb.Node_File:
|
||||
nodeName = v.File.GetName()
|
||||
case *storev1pb.Node_Symlink:
|
||||
nodeName = v.Symlink.GetName()
|
||||
case *storev1pb.Node_Directory:
|
||||
nodeName = v.Directory.GetName()
|
||||
}
|
||||
|
||||
narInfo := narinfo.NarInfo{
|
||||
StorePath: path.Join(nixpath.StoreDir, string(nodeName)),
|
||||
URL: "nar/" + nixbase32.EncodeToString(narHash.Digest()) + ".nar",
|
||||
Compression: "none", // TODO: implement zstd compression
|
||||
NarHash: narHash,
|
||||
NarSize: uint64(pathInfo.Narinfo.NarSize),
|
||||
References: pathInfo.Narinfo.GetReferenceNames(),
|
||||
Signatures: narinfoSignatures,
|
||||
}
|
||||
|
||||
// render .narinfo from pathInfo
|
||||
_, err = io.Copy(w, strings.NewReader(narInfo.String()))
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to write narinfo to client: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// registerNarinfoGet registers the GET handler for $outHash.narinfo.
func registerNarinfoGet(s *Server) {
	// GET $outHash.narinfo looks up the PathInfo from the tvix-store,
	// and then render a .narinfo file to the client.
	// It will keep the PathInfo in the lookup map,
	// so a subsequent GET /nar/ $narhash.nar request can find it.
	s.handler.Get("/{outputhash:^["+nixbase32.Alphabet+"]{32}}.narinfo", func(w http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()

		ctx := r.Context()
		log := log.WithField("outputhash", chi.URLParamFromCtx(ctx, "outputhash"))

		// parse the output hash sent in the request URL
		outputHash, err := nixbase32.DecodeString(chi.URLParamFromCtx(ctx, "outputhash"))
		if err != nil {
			log.WithError(err).Error("unable to decode output hash from url")
			w.WriteHeader(http.StatusBadRequest)
			_, err := w.Write([]byte("unable to decode output hash from url"))
			if err != nil {
				log.WithError(err).Errorf("unable to write error message to client")
			}

			return
		}

		err = renderNarinfo(ctx, log, s.pathInfoServiceClient, &s.narHashToPathInfoMu, s.narHashToPathInfo, outputHash, w, false)
		if err != nil {
			log.WithError(err).Info("unable to render narinfo")
			// a wrapped fs.ErrNotExist means the path is unknown -> 404
			if errors.Is(err, fs.ErrNotExist) {
				w.WriteHeader(http.StatusNotFound)
			} else {
				w.WriteHeader(http.StatusInternalServerError)
			}
		}
	})
}
|
||||
174
tvix/nar-bridge/pkg/server/narinfo_put.go
Normal file
174
tvix/nar-bridge/pkg/server/narinfo_put.go
Normal file
|
|
@ -0,0 +1,174 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"path"
|
||||
|
||||
storev1pb "code.tvl.fyi/tvix/store/protos"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/nix-community/go-nix/pkg/narinfo"
|
||||
"github.com/nix-community/go-nix/pkg/nixbase32"
|
||||
"github.com/nix-community/go-nix/pkg/nixpath"
|
||||
"github.com/sirupsen/logrus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func registerNarinfoPut(s *Server) {
|
||||
s.handler.Put("/{outputhash:^["+nixbase32.Alphabet+"]{32}}.narinfo", func(w http.ResponseWriter, r *http.Request) {
|
||||
defer r.Body.Close()
|
||||
|
||||
ctx := r.Context()
|
||||
log := log.WithField("outputhash", chi.URLParamFromCtx(ctx, "outputhash"))
|
||||
|
||||
// TODO: decide on merging behaviour.
|
||||
// Maybe it's fine to add if contents are the same, but more sigs can be added?
|
||||
// Right now, just replace a .narinfo for a path that already exists.
|
||||
|
||||
// read and parse the .narinfo file
|
||||
narInfo, err := narinfo.Parse(r.Body)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("unable to parse narinfo")
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
_, err := w.Write([]byte("unable to parse narinfo"))
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("unable to write error message to client")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
log = log.WithFields(logrus.Fields{
|
||||
"narhash": narInfo.NarHash.SRIString(),
|
||||
"output_path": narInfo.StorePath,
|
||||
})
|
||||
|
||||
var pathInfo *storev1pb.PathInfo
|
||||
|
||||
// look up the narHash in our temporary map
|
||||
s.narHashToPathInfoMu.Lock()
|
||||
pathInfo, found := s.narHashToPathInfo[narInfo.NarHash.SRIString()]
|
||||
s.narHashToPathInfoMu.Unlock()
|
||||
if !found {
|
||||
log.Error("unable to find referred NAR")
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
_, err := w.Write([]byte("unable to find referred NAR"))
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("unable to write error message to client")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// compare fields with what we computed while receiving the NAR file
|
||||
|
||||
// NarSize needs to match
|
||||
if pathInfo.Narinfo.NarSize != narInfo.NarSize {
|
||||
log.Error("narsize mismatch")
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
_, err := w.Write([]byte("unable to parse narinfo"))
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("unable to write error message to client")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
// We know the narhash in the .narinfo matches one of the two narhashes in the partial pathInfo,
|
||||
// because that's how we found it.
|
||||
|
||||
// FUTUREWORK: We can't compare References yet, but it'd be a good idea to
|
||||
// do reference checking on .nar files server-side during upload.
|
||||
// We however still need to be parse them, because we store
|
||||
// the bytes in pathInfo.References, and the full strings in pathInfo.Narinfo.ReferenceNames.
|
||||
referencesBytes := make([][]byte, 0)
|
||||
for _, reference := range narInfo.References {
|
||||
np, err := nixpath.FromString(path.Join(nixpath.StoreDir, reference))
|
||||
if err != nil {
|
||||
log.WithField("reference", reference).WithError(err).Error("unable to parse reference")
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
_, err := w.Write([]byte("unable to parse reference"))
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("unable to write error message to client")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
referencesBytes = append(referencesBytes, np.Digest)
|
||||
}
|
||||
|
||||
// assemble the []*storev1pb.NARInfo_Signature{} from narinfo.Signatures.
|
||||
pbNarinfoSignatures := make([]*storev1pb.NARInfo_Signature, 0)
|
||||
for _, narinfoSig := range narInfo.Signatures {
|
||||
|
||||
pbNarinfoSignatures = append(pbNarinfoSignatures, &storev1pb.NARInfo_Signature{
|
||||
Name: narinfoSig.Name,
|
||||
Data: narinfoSig.Data,
|
||||
})
|
||||
}
|
||||
|
||||
// If everything matches, We will add References, NAR signatures and the
|
||||
// output path name, and then upload to the pathinfo service.
|
||||
// We want a copy here, because we don't want to mutate the contents in the lookup table
|
||||
// until we get things back from the remote store.
|
||||
pathInfoToUpload := &storev1pb.PathInfo{
|
||||
Node: nil, // set below
|
||||
References: referencesBytes,
|
||||
Narinfo: &storev1pb.NARInfo{
|
||||
NarSize: pathInfo.Narinfo.NarSize,
|
||||
NarSha256: pathInfo.Narinfo.NarSha256,
|
||||
Signatures: pbNarinfoSignatures,
|
||||
ReferenceNames: narInfo.References,
|
||||
},
|
||||
}
|
||||
|
||||
// We need to add the basename of the storepath from the .narinfo
|
||||
// to the pathInfo to be sent.
|
||||
switch v := (pathInfo.GetNode().GetNode()).(type) {
|
||||
case *storev1pb.Node_File:
|
||||
pathInfoToUpload.Node = &storev1pb.Node{
|
||||
Node: &storev1pb.Node_File{
|
||||
File: &storev1pb.FileNode{
|
||||
Name: []byte(path.Base(narInfo.StorePath)),
|
||||
Digest: v.File.Digest,
|
||||
Size: v.File.Size,
|
||||
Executable: v.File.Executable,
|
||||
},
|
||||
},
|
||||
}
|
||||
case *storev1pb.Node_Symlink:
|
||||
pathInfoToUpload.Node = &storev1pb.Node{
|
||||
Node: &storev1pb.Node_Symlink{
|
||||
Symlink: &storev1pb.SymlinkNode{
|
||||
Name: []byte(path.Base(narInfo.StorePath)),
|
||||
Target: v.Symlink.Target,
|
||||
},
|
||||
},
|
||||
}
|
||||
case *storev1pb.Node_Directory:
|
||||
pathInfoToUpload.Node = &storev1pb.Node{
|
||||
Node: &storev1pb.Node_Directory{
|
||||
Directory: &storev1pb.DirectoryNode{
|
||||
Name: []byte(path.Base(narInfo.StorePath)),
|
||||
Digest: v.Directory.Digest,
|
||||
Size: v.Directory.Size,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
receivedPathInfo, err := s.pathInfoServiceClient.Put(ctx, pathInfoToUpload)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("unable to upload pathinfo to service")
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
_, err := w.Write([]byte("unable to upload pathinfo to server"))
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("unable to write error message to client")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("received new pathInfo: %v+", receivedPathInfo)
|
||||
|
||||
// TODO: update the local temporary pathinfo with this?
|
||||
})
|
||||
}
|
||||
86
tvix/nar-bridge/pkg/server/server.go
Normal file
86
tvix/nar-bridge/pkg/server/server.go
Normal file
|
|
@ -0,0 +1,86 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
storev1pb "code.tvl.fyi/tvix/store/protos"
|
||||
"github.com/go-chi/chi/middleware"
|
||||
"github.com/go-chi/chi/v5"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Server exposes a Nix HTTP Binary Cache interface (narinfo/nar endpoints)
// backed by gRPC clients for the tvix-store directory, blob and pathinfo
// services.
type Server struct {
	handler chi.Router

	directoryServiceClient storev1pb.DirectoryServiceClient
	blobServiceClient      storev1pb.BlobServiceClient
	pathInfoServiceClient  storev1pb.PathInfoServiceClient

	// When uploading NAR files to a HTTP binary cache, the .nar
	// files are uploaded before the .narinfo files.
	// We need *both* to be able to fully construct a PathInfo object.
	// Keep a in-memory map of narhash(es) (in SRI) to sparse PathInfo.
	// This is necessary until we can ask a PathInfoService for a node with a given
	// narSha256.
	narHashToPathInfoMu sync.Mutex // guards narHashToPathInfo
	narHashToPathInfo   map[string]*storev1pb.PathInfo
}
|
||||
|
||||
// New constructs a Server wiring up the given gRPC service clients,
// registers all narinfo/nar HTTP routes plus the "/" and "/nix-cache-info"
// endpoints, and returns it ready for ListenAndServe.
// enableAccessLog toggles chi's request-logging middleware; priority is the
// value advertised in nix-cache-info.
func New(
	directoryServiceClient storev1pb.DirectoryServiceClient,
	blobServiceClient storev1pb.BlobServiceClient,
	pathInfoServiceClient storev1pb.PathInfoServiceClient,
	enableAccessLog bool,
	priority int,
) *Server {
	r := chi.NewRouter()

	if enableAccessLog {
		r.Use(middleware.Logger)
	}

	// simple identification endpoint
	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		_, err := w.Write([]byte("nar-bridge"))
		if err != nil {
			log.Errorf("Unable to write response: %v", err)
		}
	})

	// advertise binary-cache capabilities to Nix clients
	r.Get("/nix-cache-info", func(w http.ResponseWriter, r *http.Request) {
		_, err := w.Write([]byte(fmt.Sprintf("StoreDir: /nix/store\nWantMassQuery: 1\nPriority: %d\n", priority)))
		if err != nil {
			log.Errorf("Unable to write response: %v", err)
		}
	})

	s := &Server{
		handler:                r,
		directoryServiceClient: directoryServiceClient,
		blobServiceClient:      blobServiceClient,
		pathInfoServiceClient:  pathInfoServiceClient,
		narHashToPathInfo:      make(map[string]*storev1pb.PathInfo),
	}

	registerNarPut(s)
	registerNarinfoPut(s)

	registerNarinfoGet(s)
	registerNarGet(s)

	return s
}
|
||||
|
||||
func (s *Server) ListenAndServe(addr string) error {
|
||||
srv := &http.Server{
|
||||
Addr: addr,
|
||||
Handler: s.handler,
|
||||
ReadTimeout: 50 * time.Second,
|
||||
WriteTimeout: 100 * time.Second,
|
||||
IdleTimeout: 150 * time.Second,
|
||||
}
|
||||
|
||||
return srv.ListenAndServe()
|
||||
}
|
||||
24
tvix/nar-bridge/pkg/server/util.go
Normal file
24
tvix/nar-bridge/pkg/server/util.go
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
nixhash "github.com/nix-community/go-nix/pkg/hash"
|
||||
)
|
||||
|
||||
// parseNarHashFromUrl parses a nixbase32 string representing a sha256 NarHash
|
||||
// and returns a nixhash.Hash when it was able to parse, or an error.
|
||||
func parseNarHashFromUrl(narHashFromUrl string) (*nixhash.Hash, error) {
|
||||
// peek at the length. If it's 52 characters, assume sha256,
|
||||
// if it's something else, this is an error.
|
||||
l := len(narHashFromUrl)
|
||||
if l != 52 {
|
||||
return nil, fmt.Errorf("invalid length of narHash: %v", l)
|
||||
}
|
||||
|
||||
nixHash, err := nixhash.ParseNixBase32("sha256:" + narHashFromUrl)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to parse nixbase32 hash: %w", err)
|
||||
}
|
||||
|
||||
return nixHash, nil
|
||||
}
|
||||
278
tvix/nar-bridge/pkg/writer/writer.go
Normal file
278
tvix/nar-bridge/pkg/writer/writer.go
Normal file
|
|
@ -0,0 +1,278 @@
|
|||
package writer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
|
||||
storev1pb "code.tvl.fyi/tvix/store/protos"
|
||||
"github.com/nix-community/go-nix/pkg/nar"
|
||||
)
|
||||
|
||||
// DirectoryLookupFn resolves a directory digest to the Directory message it
// identifies, or returns an error if it cannot be retrieved.
type DirectoryLookupFn func([]byte) (*storev1pb.Directory, error)

// BlobLookupFn resolves a blob digest to a reader over its contents.
// The caller is responsible for closing the returned ReadCloser.
type BlobLookupFn func([]byte) (io.ReadCloser, error)
|
||||
|
||||
// Export will traverse a given pathInfo structure, and write the contents
|
||||
// in NAR format to the passed Writer.
|
||||
// It uses directoryLookupFn and blobLookupFn to resolve references.
|
||||
func Export(
|
||||
w io.Writer,
|
||||
pathInfo *storev1pb.PathInfo,
|
||||
directoryLookupFn DirectoryLookupFn,
|
||||
blobLookupFn BlobLookupFn,
|
||||
) error {
|
||||
// initialize a NAR writer
|
||||
narWriter, err := nar.NewWriter(w)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to initialize nar writer: %w", err)
|
||||
}
|
||||
defer narWriter.Close()
|
||||
|
||||
// populate rootHeader
|
||||
rootHeader := &nar.Header{
|
||||
Path: "/",
|
||||
}
|
||||
|
||||
// populate a stack
|
||||
// we will push paths and directories to it when entering a directory,
|
||||
// and emit individual elements to the NAR writer, draining the Directory object.
|
||||
// once it's empty, we can pop it off the stack.
|
||||
var stackPaths = []string{}
|
||||
var stackDirectories = []*storev1pb.Directory{}
|
||||
|
||||
// peek at the pathInfo root and assemble the root node and write to writer
|
||||
// in the case of a regular file, we retrieve and write the contents, close and exit
|
||||
// in the case of a symlink, we write the symlink, close and exit
|
||||
switch v := (pathInfo.GetNode().GetNode()).(type) {
|
||||
case *storev1pb.Node_File:
|
||||
rootHeader.Type = nar.TypeRegular
|
||||
rootHeader.Size = int64(v.File.GetSize())
|
||||
rootHeader.Executable = v.File.GetExecutable()
|
||||
err := narWriter.WriteHeader(rootHeader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to write root header: %w", err)
|
||||
}
|
||||
|
||||
// if it's a regular file, retrieve and write the contents
|
||||
contentReader, err := blobLookupFn(v.File.GetDigest())
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to lookup blob: %w", err)
|
||||
}
|
||||
defer contentReader.Close()
|
||||
|
||||
_, err = io.Copy(narWriter, contentReader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to copy contents from contentReader: %w", err)
|
||||
}
|
||||
|
||||
err = contentReader.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to close content reader: %w", err)
|
||||
}
|
||||
|
||||
err = narWriter.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to close nar reader: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
case *storev1pb.Node_Symlink:
|
||||
rootHeader.Type = nar.TypeSymlink
|
||||
rootHeader.LinkTarget = string(v.Symlink.GetTarget())
|
||||
err := narWriter.WriteHeader(rootHeader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to write root header: %w", err)
|
||||
}
|
||||
|
||||
err = narWriter.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to close nar reader: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
case *storev1pb.Node_Directory:
|
||||
// We have a directory at the root, look it up and put in on the stack.
|
||||
directory, err := directoryLookupFn(v.Directory.Digest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to lookup directory: %w", err)
|
||||
}
|
||||
stackDirectories = append(stackDirectories, directory)
|
||||
stackPaths = append(stackPaths, "/")
|
||||
|
||||
err = narWriter.WriteHeader(&nar.Header{
|
||||
Path: "/",
|
||||
Type: nar.TypeDirectory,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("error writing header: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// as long as the stack is not empty, we keep running.
|
||||
for {
|
||||
if len(stackDirectories) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Peek at the current top of the stack.
|
||||
topOfStack := stackDirectories[len(stackDirectories)-1]
|
||||
topOfStackPath := stackPaths[len(stackPaths)-1]
|
||||
|
||||
// get the next element that's lexicographically smallest, and drain it from
|
||||
// the current directory on top of the stack.
|
||||
nextNode := drainNextNode(topOfStack)
|
||||
|
||||
// If nextNode returns nil, there's nothing left in the directory node, so we
|
||||
// can emit it from the stack.
|
||||
// Contrary to the import case, we don't emit the node popping from the stack, but when pushing.
|
||||
if nextNode == nil {
|
||||
// pop off stack
|
||||
stackDirectories = stackDirectories[:len(stackDirectories)-1]
|
||||
stackPaths = stackPaths[:len(stackPaths)-1]
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
switch n := (nextNode).(type) {
|
||||
case *storev1pb.DirectoryNode:
|
||||
err := narWriter.WriteHeader(&nar.Header{
|
||||
Path: path.Join(topOfStackPath, string(n.GetName())),
|
||||
Type: nar.TypeDirectory,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to write nar header: %w", err)
|
||||
}
|
||||
|
||||
d, err := directoryLookupFn(n.GetDigest())
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to lookup directory: %w", err)
|
||||
}
|
||||
|
||||
// add to stack
|
||||
stackDirectories = append(stackDirectories, d)
|
||||
stackPaths = append(stackPaths, path.Join(topOfStackPath, string(n.GetName())))
|
||||
case *storev1pb.FileNode:
|
||||
err := narWriter.WriteHeader(&nar.Header{
|
||||
Path: path.Join(topOfStackPath, string(n.GetName())),
|
||||
Type: nar.TypeRegular,
|
||||
Size: int64(n.GetSize()),
|
||||
Executable: n.GetExecutable(),
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to write nar header: %w", err)
|
||||
}
|
||||
|
||||
// copy file contents
|
||||
contentReader, err := blobLookupFn(n.GetDigest())
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to get blob: %w", err)
|
||||
}
|
||||
defer contentReader.Close()
|
||||
|
||||
_, err = io.Copy(narWriter, contentReader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to copy contents from contentReader: %w", err)
|
||||
}
|
||||
|
||||
err = contentReader.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to close content reader: %w", err)
|
||||
}
|
||||
case *storev1pb.SymlinkNode:
|
||||
err := narWriter.WriteHeader(&nar.Header{
|
||||
Path: path.Join(topOfStackPath, string(n.GetName())),
|
||||
Type: nar.TypeSymlink,
|
||||
LinkTarget: string(n.GetTarget()),
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to write nar header: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: add validation functions to Directory in both rust and golang, to
|
||||
// validate the keys in directories, files and symlinks are sorted.
|
||||
|
||||
// drainNextNode will drain a directory message with one of its child nodes,
|
||||
// whichever comes first alphabetically.
|
||||
func drainNextNode(d *storev1pb.Directory) interface{} {
|
||||
switch v := (smallestNode(d)).(type) {
|
||||
case *storev1pb.DirectoryNode:
|
||||
d.Directories = d.Directories[1:]
|
||||
return v
|
||||
case *storev1pb.FileNode:
|
||||
d.Files = d.Files[1:]
|
||||
return v
|
||||
case *storev1pb.SymlinkNode:
|
||||
d.Symlinks = d.Symlinks[1:]
|
||||
return v
|
||||
case nil:
|
||||
return nil
|
||||
default:
|
||||
panic("invalid type encountered")
|
||||
}
|
||||
}
|
||||
|
||||
// smallestNode will return the node from a directory message,
|
||||
// whichever comes first alphabetically.
|
||||
func smallestNode(d *storev1pb.Directory) interface{} {
|
||||
childDirectories := d.GetDirectories()
|
||||
childFiles := d.GetFiles()
|
||||
childSymlinks := d.GetSymlinks()
|
||||
|
||||
if len(childDirectories) > 0 {
|
||||
if len(childFiles) > 0 {
|
||||
if len(childSymlinks) > 0 {
|
||||
// directories,files,symlinks
|
||||
return smallerNode(smallerNode(childDirectories[0], childFiles[0]), childSymlinks[0])
|
||||
} else {
|
||||
// directories,files,!symlinks
|
||||
return smallerNode(childDirectories[0], childFiles[0])
|
||||
}
|
||||
} else {
|
||||
// directories,!files
|
||||
if len(childSymlinks) > 0 {
|
||||
// directories,!files,symlinks
|
||||
return smallerNode(childDirectories[0], childSymlinks[0])
|
||||
} else {
|
||||
// directories,!files,!symlinks
|
||||
return childDirectories[0]
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// !directories
|
||||
if len(childFiles) > 0 {
|
||||
// !directories,files
|
||||
if len(childSymlinks) > 0 {
|
||||
// !directories,files,symlinks
|
||||
return smallerNode(childFiles[0], childSymlinks[0])
|
||||
} else {
|
||||
// !directories,files,!symlinks
|
||||
return childFiles[0]
|
||||
}
|
||||
} else {
|
||||
//!directories,!files
|
||||
if len(childSymlinks) > 0 {
|
||||
//!directories,!files,symlinks
|
||||
return childSymlinks[0]
|
||||
} else {
|
||||
//!directories,!files,!symlinks
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// smallerNode compares two nodes by their name,
// and returns the one with the smaller name.
// both nodes may not be nil, we do check for these cases in smallestNode.
func smallerNode(a interface{ GetName() []byte }, b interface{ GetName() []byte }) interface{ GetName() []byte } {
	// Byte-wise lexicographic comparison; on equal names, b wins.
	if string(b.GetName()) <= string(a.GetName()) {
		return b
	}

	return a
}
|
||||
51
tvix/nar-bridge/pkg/writer/writer_pick_next_node_test.go
Normal file
51
tvix/nar-bridge/pkg/writer/writer_pick_next_node_test.go
Normal file
|
|
@ -0,0 +1,51 @@
|
|||
package writer
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
storev1pb "code.tvl.fyi/tvix/store/protos"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/protobuf/testing/protocmp"
|
||||
)
|
||||
|
||||
func requireProtoEq(t *testing.T, expected interface{}, actual interface{}) {
|
||||
if diff := cmp.Diff(expected, actual, protocmp.Transform()); diff != "" {
|
||||
t.Errorf("unexpected difference:\n%v", diff)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPopNextNode(t *testing.T) {
|
||||
t.Run("empty directory", func(t *testing.T) {
|
||||
d := &storev1pb.Directory{
|
||||
Directories: []*storev1pb.DirectoryNode{},
|
||||
Files: []*storev1pb.FileNode{},
|
||||
Symlinks: []*storev1pb.SymlinkNode{},
|
||||
}
|
||||
|
||||
n := drainNextNode(d)
|
||||
require.Equal(t, nil, n)
|
||||
})
|
||||
t.Run("only directories", func(t *testing.T) {
|
||||
ds := &storev1pb.Directory{
|
||||
Directories: []*storev1pb.DirectoryNode{{
|
||||
Name: []byte("a"),
|
||||
Digest: []byte{},
|
||||
Size: 0,
|
||||
}, {
|
||||
Name: []byte("b"),
|
||||
Digest: []byte{},
|
||||
Size: 0,
|
||||
}},
|
||||
Files: []*storev1pb.FileNode{},
|
||||
Symlinks: []*storev1pb.SymlinkNode{},
|
||||
}
|
||||
|
||||
n := drainNextNode(ds)
|
||||
requireProtoEq(t, &storev1pb.DirectoryNode{
|
||||
Name: []byte("a"),
|
||||
Digest: []byte{},
|
||||
Size: 0,
|
||||
}, n)
|
||||
})
|
||||
}
|
||||
211
tvix/nar-bridge/pkg/writer/writer_test.go
Normal file
211
tvix/nar-bridge/pkg/writer/writer_test.go
Normal file
|
|
@ -0,0 +1,211 @@
|
|||
package writer_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"code.tvl.fyi/tvix/nar-bridge/pkg/reader"
|
||||
"code.tvl.fyi/tvix/nar-bridge/pkg/writer"
|
||||
storev1pb "code.tvl.fyi/tvix/store/protos"
|
||||
"github.com/stretchr/testify/require"
|
||||
"lukechampine.com/blake3"
|
||||
)
|
||||
|
||||
func mustDigest(d *storev1pb.Directory) []byte {
|
||||
dgst, err := d.Digest()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dgst
|
||||
}
|
||||
|
||||
func TestSymlink(t *testing.T) {
|
||||
pathInfo := &storev1pb.PathInfo{
|
||||
|
||||
Node: &storev1pb.Node{
|
||||
Node: &storev1pb.Node_Symlink{
|
||||
Symlink: &storev1pb.SymlinkNode{
|
||||
Name: []byte("doesntmatter"),
|
||||
Target: []byte("/nix/store/somewhereelse"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
|
||||
err := writer.Export(&buf, pathInfo, func([]byte) (*storev1pb.Directory, error) {
|
||||
panic("no directories expected")
|
||||
}, func([]byte) (io.ReadCloser, error) {
|
||||
panic("no files expected")
|
||||
})
|
||||
require.NoError(t, err, "exporter shouldn't fail")
|
||||
|
||||
f, err := os.Open("../../testdata/symlink.nar")
|
||||
require.NoError(t, err)
|
||||
|
||||
bytesExpected, err := io.ReadAll(f)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
require.Equal(t, bytesExpected, buf.Bytes(), "expected nar contents to match")
|
||||
}
|
||||
|
||||
func TestRegular(t *testing.T) {
|
||||
// The blake3 digest of the 0x01 byte.
|
||||
BLAKE3_DIGEST_0X01 := []byte{
|
||||
0x48, 0xfc, 0x72, 0x1f, 0xbb, 0xc1, 0x72, 0xe0, 0x92, 0x5f, 0xa2, 0x7a, 0xf1, 0x67, 0x1d,
|
||||
0xe2, 0x25, 0xba, 0x92, 0x71, 0x34, 0x80, 0x29, 0x98, 0xb1, 0x0a, 0x15, 0x68, 0xa1, 0x88,
|
||||
0x65, 0x2b,
|
||||
}
|
||||
|
||||
pathInfo := &storev1pb.PathInfo{
|
||||
Node: &storev1pb.Node{
|
||||
Node: &storev1pb.Node_File{
|
||||
File: &storev1pb.FileNode{
|
||||
Name: []byte("doesntmatter"),
|
||||
Digest: BLAKE3_DIGEST_0X01,
|
||||
Size: 1,
|
||||
Executable: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
|
||||
err := writer.Export(&buf, pathInfo, func([]byte) (*storev1pb.Directory, error) {
|
||||
panic("no directories expected")
|
||||
}, func(blobRef []byte) (io.ReadCloser, error) {
|
||||
if !bytes.Equal(blobRef, BLAKE3_DIGEST_0X01) {
|
||||
panic("unexpected blobref")
|
||||
}
|
||||
return io.NopCloser(bytes.NewBuffer([]byte{0x01})), nil
|
||||
})
|
||||
require.NoError(t, err, "exporter shouldn't fail")
|
||||
|
||||
f, err := os.Open("../../testdata/onebyteregular.nar")
|
||||
require.NoError(t, err)
|
||||
|
||||
bytesExpected, err := io.ReadAll(f)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
require.Equal(t, bytesExpected, buf.Bytes(), "expected nar contents to match")
|
||||
}
|
||||
|
||||
func TestEmptyDirectory(t *testing.T) {
|
||||
// construct empty directory node this refers to
|
||||
emptyDirectory := &storev1pb.Directory{
|
||||
Directories: []*storev1pb.DirectoryNode{},
|
||||
Files: []*storev1pb.FileNode{},
|
||||
Symlinks: []*storev1pb.SymlinkNode{},
|
||||
}
|
||||
emptyDirectoryDigest := mustDigest(emptyDirectory)
|
||||
|
||||
pathInfo := &storev1pb.PathInfo{
|
||||
Node: &storev1pb.Node{
|
||||
Node: &storev1pb.Node_Directory{
|
||||
Directory: &storev1pb.DirectoryNode{
|
||||
Name: []byte("doesntmatter"),
|
||||
Digest: emptyDirectoryDigest,
|
||||
Size: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
|
||||
err := writer.Export(&buf, pathInfo, func(directoryRef []byte) (*storev1pb.Directory, error) {
|
||||
if !bytes.Equal(directoryRef, emptyDirectoryDigest) {
|
||||
panic("unexpected directoryRef")
|
||||
}
|
||||
return emptyDirectory, nil
|
||||
}, func([]byte) (io.ReadCloser, error) {
|
||||
panic("no files expected")
|
||||
})
|
||||
require.NoError(t, err, "exporter shouldn't fail")
|
||||
|
||||
f, err := os.Open("../../testdata/emptydirectory.nar")
|
||||
require.NoError(t, err)
|
||||
|
||||
bytesExpected, err := io.ReadAll(f)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
require.Equal(t, bytesExpected, buf.Bytes(), "expected nar contents to match")
|
||||
}
|
||||
|
||||
func TestFull(t *testing.T) {
|
||||
// We pipe nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar to the exporter,
|
||||
// and store all the file contents and directory objects received in two hashmaps.
|
||||
// We then feed it to the writer, and test we come up with the same NAR file.
|
||||
|
||||
f, err := os.Open("../../testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar")
|
||||
require.NoError(t, err)
|
||||
|
||||
narContents, err := io.ReadAll(f)
|
||||
require.NoError(t, err)
|
||||
|
||||
filesMap := make(map[string][]byte, 0)
|
||||
directoriesMap := make(map[string]*storev1pb.Directory)
|
||||
|
||||
r := reader.New(bytes.NewBuffer(narContents))
|
||||
pathInfo, err := r.Import(
|
||||
context.Background(),
|
||||
func(fileReader io.Reader) error {
|
||||
fileContents, err := io.ReadAll(fileReader)
|
||||
require.NoError(t, err)
|
||||
|
||||
b3Writer := blake3.New(32, nil)
|
||||
_, err = io.Copy(b3Writer, bytes.NewReader(fileContents))
|
||||
require.NoError(t, err)
|
||||
|
||||
// put it in filesMap
|
||||
filesMap[hex.EncodeToString(b3Writer.Sum(nil))] = fileContents
|
||||
|
||||
return nil
|
||||
},
|
||||
func(directory *storev1pb.Directory) error {
|
||||
dgst := mustDigest(directory)
|
||||
|
||||
directoriesMap[hex.EncodeToString(dgst)] = directory
|
||||
return nil
|
||||
},
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
// done populating everything, now actually test the export :-)
|
||||
|
||||
var buf bytes.Buffer
|
||||
err = writer.Export(
|
||||
&buf,
|
||||
pathInfo,
|
||||
func(directoryRef []byte) (*storev1pb.Directory, error) {
|
||||
d, found := directoriesMap[hex.EncodeToString(directoryRef)]
|
||||
if !found {
|
||||
panic("directories not found")
|
||||
}
|
||||
return d, nil
|
||||
},
|
||||
func(fileRef []byte) (io.ReadCloser, error) {
|
||||
fileContents, found := filesMap[hex.EncodeToString(fileRef)]
|
||||
if !found {
|
||||
panic("file not found")
|
||||
}
|
||||
return io.NopCloser(bytes.NewReader(fileContents)), nil
|
||||
},
|
||||
)
|
||||
|
||||
require.NoError(t, err, "exporter shouldn't fail")
|
||||
require.Equal(t, narContents, buf.Bytes())
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue