9P read(dir): handle the case when the offset exceeds the available data
Previously: (1) The number of bytes to read (a.k.a. count) was readjusted to a negative value, so the driver reported a bogus count. (2) Moreover, when sending the data through virtio, the payload buffer was incorrectly subarrayed with the negative end offset, so wrong data was still sent through. Now: (1) 9P read(dir) clamps the count value to non-negative numbers. (2) Asserts have been added for a negative replybuffersize and payloadsize, as those should not happen in the first place.
This commit is contained in:
parent
5995414f87
commit
5778076c2b
|
@ -199,6 +199,7 @@ Virtio9p.prototype.Reset = function() {
|
|||
|
||||
|
||||
Virtio9p.prototype.BuildReply = function(id, tag, payloadsize) {
|
||||
dbg_assert(payloadsize >= 0, "9P: Negative payload size");
|
||||
marshall.Marshall(["w", "b", "h"], [payloadsize+7, id+1, tag], this.replybuffer, 0);
|
||||
if ((payloadsize+7) >= this.replybuffer.length) {
|
||||
message.Debug("Error in 9p: payloadsize exceeds maximum length");
|
||||
|
@ -216,6 +217,7 @@ Virtio9p.prototype.SendError = function (tag, errormsg, errorcode) {
|
|||
}
|
||||
|
||||
Virtio9p.prototype.SendReply = function (bufchain) {
|
||||
dbg_assert(this.replybuffersize >= 0, "9P: Negative replybuffersize");
|
||||
bufchain.set_next_blob(this.replybuffer.subarray(0, this.replybuffersize));
|
||||
this.virtqueue.push_reply(bufchain);
|
||||
this.virtqueue.flush_replies();
|
||||
|
@ -538,6 +540,12 @@ Virtio9p.prototype.ReceiveRequest = function (bufchain) {
|
|||
this.bus.send("9p-read-end", [file.name, count]);
|
||||
|
||||
if (inode.size < offset+count) count = inode.size - offset;
|
||||
if(offset > inode.size)
|
||||
{
|
||||
// offset can be greater than available - should return count of zero.
|
||||
// See http://ericvh.github.io/9p-rfc/rfc9p2000.html#anchor30
|
||||
count = 0;
|
||||
}
|
||||
var data = this.fs.inodedata[this.fids[fid].inodeid];
|
||||
if(data) {
|
||||
this.replybuffer.set(data.subarray(offset, offset + count), 7 + 4);
|
||||
|
|
Loading…
Reference in a new issue