* Make the import operation through the daemon much more efficient (way fewer roundtrips) by allowing the client to send data in bigger chunks.
* Some refactoring.
This commit is contained in:
parent
78598d06f0
commit
e0bd307802
6 changed files with 68 additions and 44 deletions
|
|
@@ -1199,10 +1199,11 @@ struct HashAndReadSource : Source
|
|||
{
|
||||
hashing = true;
|
||||
}
|
||||
virtual void operator () (unsigned char * data, size_t len)
|
||||
size_t read(unsigned char * data, size_t len)
|
||||
{
|
||||
readSource(data, len);
|
||||
if (hashing) hashSink(data, len);
|
||||
size_t n = readSource.read(data, len);
|
||||
if (hashing) hashSink(data, n);
|
||||
return n;
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@@ -501,11 +501,11 @@ void RemoteStore::processStderr(Sink * sink, Source * source)
|
|||
}
|
||||
else if (msg == STDERR_READ) {
|
||||
if (!source) throw Error("no source");
|
||||
unsigned int len = readInt(from);
|
||||
size_t len = readInt(from);
|
||||
unsigned char * buf = new unsigned char[len];
|
||||
AutoDeleteArray<unsigned char> d(buf);
|
||||
(*source)(buf, len);
|
||||
writeString(string((const char *) buf, len), to);
|
||||
size_t n = source->read(buf, len);
|
||||
writeString(string((const char *) buf, n), to); // !!! inefficient
|
||||
to.flush();
|
||||
}
|
||||
else {
|
||||
|
|
|
|||
|
|
@@ -8,7 +8,7 @@ namespace nix {
|
|||
#define WORKER_MAGIC_1 0x6e697863
|
||||
#define WORKER_MAGIC_2 0x6478696f
|
||||
|
||||
#define PROTOCOL_VERSION 0x108
|
||||
#define PROTOCOL_VERSION 0x109
|
||||
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
|
||||
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue