Remove dead code & improve
This commit is contained in:
parent
f156824ebc
commit
ea50e29e5f
3
Makefile
3
Makefile
|
@ -268,8 +268,7 @@ expect-tests: all-debug build/libwabt.js
|
|||
./tests/expect/run.js
|
||||
|
||||
devices-test: all-debug
|
||||
./tests/devices/filestorage.js
|
||||
#./tests/devices/virtio_9p.js # XXX: Hangs
|
||||
./tests/devices/virtio_9p.js
|
||||
|
||||
rust-test: $(RUST_FILES)
|
||||
env RUST_BACKTRACE=full RUST_TEST_THREADS=1 RUSTFLAGS="-D warnings" cargo +nightly test -- --nocapture
|
||||
|
|
|
@ -1,14 +1,5 @@
|
|||
"use strict";

// Schema version of the IndexedDB file-storage database.
const INDEXEDDB_STORAGE_VERSION = 2;
// Database and object-store identifiers.
const INDEXEDDB_STORAGE_NAME = "v86-filesystem-storage";
const INDEXEDDB_STORAGE_STORE = "store";
// Entry key field (the file's sha256) and payload field.
const INDEXEDDB_STORAGE_KEY_PATH = "sha256sum";
const INDEXEDDB_STORAGE_DATA_PATH = "data";
// Files are chunked into 4 KiB blocks; block 0 is keyed by the bare hash and
// block N (N > 0) by "<hash>-<N-1>".
const INDEXEDDB_STORAGE_GET_BLOCK_KEY = (sha256sum, block_number) =>
{
    if(block_number === 0)
    {
        return sha256sum;
    }
    return `${sha256sum}-${block_number - 1}`;
};
const INDEXEDDB_STORAGE_BLOCKSIZE = 4096;

/** @interface */
function FileStorageInterface() {}
|
||||
|
||||
|
@ -27,7 +18,7 @@ FileStorageInterface.prototype.read = function(sha256sum, offset, count) {};
|
|||
* @param {!Uint8Array} data
|
||||
* @return {!Promise}
|
||||
*/
|
||||
FileStorageInterface.prototype.set = function(sha256sum, data) {};
|
||||
FileStorageInterface.prototype.cache = function(sha256sum, data) {};
|
||||
|
||||
/**
|
||||
* Call this when the file won't be used soon, e.g. when a file closes or when this immutable
|
||||
|
@ -72,11 +63,9 @@ MemoryFileStorage.prototype.read = async function(sha256sum, offset, count)
|
|||
* @param {string} sha256sum
|
||||
* @param {!Uint8Array} data
|
||||
*/
|
||||
MemoryFileStorage.prototype.set = async function(sha256sum, data)
|
||||
MemoryFileStorage.prototype.cache = async function(sha256sum, data)
|
||||
{
|
||||
dbg_assert(sha256sum, "MemoryFileStorage set: sha256sum should be a non-empty string");
|
||||
dbg_assert(!this.filedata.has(sha256sum), "MemoryFileStorage set: Storage should be read-only");
|
||||
|
||||
dbg_assert(sha256sum, "MemoryFileStorage cache: sha256sum should be a non-empty string");
|
||||
this.filedata.set(sha256sum, data);
|
||||
};
|
||||
|
||||
|
@ -88,286 +77,6 @@ MemoryFileStorage.prototype.uncache = function(sha256sum)
|
|||
this.filedata.delete(sha256sum);
|
||||
};
|
||||
|
||||
/**
 * File storage backed by IndexedDB.
 * Use IndexedDBFileStorage.try_create() instead.
 * @private
 * @constructor
 * @param {!IDBDatabase} db The IndexedDB database opened via init_db().
 * @implements {FileStorageInterface}
 */
function IndexedDBFileStorage(db)
{
    this.db = db;
}

/**
 * Open the backing database and wrap it in a new IndexedDBFileStorage.
 * @return {!Promise<!IndexedDBFileStorage>}
 * @throws {Error} When IndexedDB is unavailable, e.g. outside a browser
 *     main thread or in some private-browsing modes.
 */
IndexedDBFileStorage.try_create = async function()
{
    const indexeddb_available = typeof window !== "undefined" && !!window.indexedDB;
    if(!indexeddb_available)
    {
        throw new Error("IndexedDB is not available");
    }
    const db = await IndexedDBFileStorage.init_db();
    return new IndexedDBFileStorage(db);
};
|
||||
|
||||
/**
 * Open (and, when missing or outdated, create/upgrade) the IndexedDB database
 * used for file storage.
 * @return {!Promise<!IDBDatabase>} Resolves with the open database; rejects
 *     with the underlying error when opening fails.
 */
IndexedDBFileStorage.init_db = function()
{
    return new Promise((resolve, reject) =>
    {
        const request = indexedDB.open(INDEXEDDB_STORAGE_NAME, INDEXEDDB_STORAGE_VERSION);

        request.onblocked = () =>
        {
            dbg_log("IndexedDB blocked by an older database version being opened.", LOG_9P);
        };

        request.onerror = () =>
        {
            dbg_log("Error opening IndexedDB! Are you in private browsing mode? Error:", LOG_9P);
            dbg_log(request.error.toString(), LOG_9P);
            reject(request.error);
        };

        /** @suppress{uselessCode} */
        request.onupgradeneeded = event =>
        {
            const db = request.result;
            if(event.oldVersion < 1)
            {
                // Initial version.
                db.createObjectStore(INDEXEDDB_STORAGE_STORE, { keyPath: INDEXEDDB_STORAGE_KEY_PATH });
            }
            if(event.oldVersion < 2)
            {
                // Version 2 removes total_size and extra_block_count from the base entries.
                // No changes needed, but new files written are not backwards compatible.
            }
        };

        request.onsuccess = () =>
        {
            const db = request.result;

            // None of the following events are expected during normal
            // operation; they indicate a programming error or outside
            // interference with the database.
            db.onabort = () =>
            {
                dbg_assert(false, "IndexedDBFileStorage: transaction aborted unexpectedly");
            };
            db.onclose = () =>
            {
                dbg_assert(false, "IndexedDBFileStorage: connection closed unexpectedly");
            };
            db.onerror = event =>
            {
                const error = event.target.error;
                dbg_log("IndexedDBFileStorage: unexpected error: " + error, LOG_9P);
                throw error;
            };
            db.onversionchange = () =>
            {
                dbg_log("Caution: Another v86 instance might be trying to upgrade the IndexedDB " +
                        "database to a newer version, or a request has been issued to delete the " +
                        "database, but is blocked by this current v86 instance ", LOG_9P);
            };

            resolve(db);
        };
    });
};
|
||||
|
||||
/**
 * Check whether any entry (i.e. the file's block 0) exists for the given key.
 * @private
 * @param {IDBObjectStore} store
 * @param {string} sha256sum
 * @return {!Promise<boolean>}
 */
IndexedDBFileStorage.prototype.db_has_file = function(store, sha256sum)
{
    return new Promise(resolve =>
    {
        const count_request = store.count(sha256sum);
        count_request.onsuccess = () =>
        {
            const matches = /** @type {number} **/ (count_request.result);
            resolve(matches > 0);
        };
    });
};
|
||||
|
||||
/**
 * Read `count` bytes starting at `offset` from the file identified by
 * `sha256sum`. The file is stored as 4 KiB blocks (see
 * INDEXEDDB_STORAGE_GET_BLOCK_KEY); depending on the requested range this
 * issues one or several IDB get requests inside a single readonly
 * transaction.
 * @param {string} sha256sum
 * @param {number} offset
 * @param {number} count
 * @return {!Promise<Uint8Array>} null if file does not exist.
 */
IndexedDBFileStorage.prototype.read = function(sha256sum, offset, count)
{
    dbg_assert(sha256sum, "IndexedDBFileStorage read: sha256sum should be a non-empty string");

    const transaction = this.db.transaction(INDEXEDDB_STORAGE_STORE, "readonly");
    transaction.onerror = event =>
    {
        const error = event.target.error;
        dbg_log(`IndexedDBFileStorage read: Error with transaction: ${error}`, LOG_9P);
        throw error;
    };
    const store = transaction.objectStore(INDEXEDDB_STORAGE_STORE);

    // First and last block touched by the requested byte range. For an empty
    // read (count === 0) both point at the block containing `offset`.
    const block_number_start = Math.floor(offset / INDEXEDDB_STORAGE_BLOCKSIZE);
    const block_number_end = count > 0 ?
        Math.floor((offset + count - 1) / INDEXEDDB_STORAGE_BLOCKSIZE) :
        block_number_start;

    return new Promise((resolve, reject) =>
    {
        if(block_number_end === 0)
        {
            // Only first block to be read.
            // Block 0 is keyed by the bare hash, so its absence means the
            // file itself does not exist — no extra existence check needed.

            const block_key = INDEXEDDB_STORAGE_GET_BLOCK_KEY(sha256sum, 0);
            const block_request = store.get(block_key);
            block_request.onsuccess = async event =>
            {
                const block_entry = block_request.result;
                if(!block_entry)
                {
                    resolve(null);
                    return;
                }
                const block_data = block_entry[INDEXEDDB_STORAGE_DATA_PATH];
                dbg_assert(block_data instanceof Uint8Array, "IndexedDBFileStorage read: " +
                    `Entry for block-0 without Uint8Array data field.`);
                // subarray clamps to the block's length, so a short block
                // yields a correspondingly short result (no copy is made).
                const chunk = block_data.subarray(offset, offset + count);
                resolve(chunk);
            };
        }
        else if(block_number_start === block_number_end)
        {
            // Only one block to be read.

            const block_offset = block_number_start * INDEXEDDB_STORAGE_BLOCKSIZE;
            const block_key = INDEXEDDB_STORAGE_GET_BLOCK_KEY(sha256sum, block_number_start);
            const block_request = store.get(block_key);
            block_request.onsuccess = async event =>
            {
                const block_entry = block_request.result;
                if(!block_entry)
                {
                    // A non-zero block may legitimately be missing past EOF;
                    // distinguish "file absent" from "read past end".
                    if(!await this.db_has_file(store, sha256sum))
                    {
                        resolve(null);
                    }
                    else
                    {
                        resolve(new Uint8Array(0));
                    }
                    return;
                }
                const block_data = block_entry[INDEXEDDB_STORAGE_DATA_PATH];
                dbg_assert(block_data instanceof Uint8Array, "IndexedDBFileStorage read: " +
                    `Entry for block-${block_number_start} without Uint8Array data field.`);
                const chunk_start = Math.max(0, offset - block_offset);
                const chunk_end = offset + count - block_offset;
                const chunk = block_data.subarray(chunk_start, chunk_end);
                resolve(chunk);
            };
        }
        else
        {
            // Multiple blocks to be read.
            // Each block's onsuccess copies its slice into read_data;
            // read_count tracks how many bytes actually arrived so that the
            // final result is truncated to the bytes present (a file shorter
            // than the requested range produces a short read).

            const read_data = new Uint8Array(count);
            let read_count = 0;
            for(let block_number = block_number_start; block_number <= block_number_end; block_number++)
            {
                const block_offset = block_number * INDEXEDDB_STORAGE_BLOCKSIZE;
                const block_key = INDEXEDDB_STORAGE_GET_BLOCK_KEY(sha256sum, block_number);
                const block_request = store.get(block_key);
                block_request.onsuccess = async event =>
                {
                    const block_entry = block_request.result;

                    if(!block_entry)
                    {
                        // If the first requested block doesn't exist, then the remaining blocks
                        // cannot exist.
                        if(block_number === block_number_start)
                        {
                            if(!await this.db_has_file(store, sha256sum))
                            {
                                // Not aborting transaction here because:
                                // - Abort is treated like an error,
                                // - AbortError sometimes indicate a different error we want to notice,
                                // - Most read calls only read a single block anyway.
                                resolve(null);
                            }
                        }

                        return;
                    }

                    const block_data = block_entry[INDEXEDDB_STORAGE_DATA_PATH];
                    dbg_assert(block_data instanceof Uint8Array, "IndexedDBFileStorage read: " +
                        `Entry for block-${block_number} without Uint8Array data field.`);

                    // Translate the global byte range into this block's local
                    // coordinates; subarray clamps chunk_end to block length.
                    const chunk_start = Math.max(0, offset - block_offset);
                    const chunk_end = offset + count - block_offset;
                    const chunk = block_data.subarray(chunk_start, chunk_end);

                    read_data.set(chunk, block_offset + chunk_start - offset);
                    read_count += chunk.length;
                };
            }

            // oncomplete fires after every per-block onsuccess handler above,
            // so read_count is final here.
            // NOTE(review): if a middle block were missing while the file
            // exists, the result is silently shortened — presumably fine for
            // the chunked-write invariant of set(), but worth confirming.
            transaction.oncomplete = event =>
            {
                resolve(read_data.subarray(0, read_count));
            };
        }
    });
};
|
||||
|
||||
/**
 * Store a file's contents under its hash, split into 4 KiB blocks. Resolves
 * once the readwrite transaction has committed.
 * @param {string} sha256sum
 * @param {!Uint8Array} data
 * @return {!Promise}
 */
IndexedDBFileStorage.prototype.set = function(sha256sum, data)
{
    dbg_assert(sha256sum, "IndexedDBFileStorage set: sha256sum should be a non-empty string");

    const transaction = this.db.transaction(INDEXEDDB_STORAGE_STORE, "readwrite");
    transaction.onerror = event =>
    {
        const error = event.target.error;
        dbg_log(`IndexedDBFileStorage set: Error with transaction: ${error}`, LOG_9P);
        throw error;
    };
    const store = transaction.objectStore(INDEXEDDB_STORAGE_STORE);

    // Ensure at least a single entry is added for empty files.
    const offset_upper_bound = data.length || 1;

    let block_number = 0;
    for(let offset = 0; offset < offset_upper_bound; offset += INDEXEDDB_STORAGE_BLOCKSIZE)
    {
        const block_key = INDEXEDDB_STORAGE_GET_BLOCK_KEY(sha256sum, block_number);
        // Note: Without cloning, the entire backing ArrayBuffer is serialized into the database.
        const block_data = data.slice(offset, offset + INDEXEDDB_STORAGE_BLOCKSIZE);
        store.put({
            [INDEXEDDB_STORAGE_KEY_PATH]: block_key,
            [INDEXEDDB_STORAGE_DATA_PATH]: block_data,
        });
        block_number++;
    }

    return new Promise(resolve =>
    {
        transaction.oncomplete = () => resolve();
    });
};
|
||||
|
||||
/**
 * Drop any cached copy of the given file. IndexedDB holds no in-memory
 * cache, so there is nothing to release here.
 * @param {string} sha256sum
 */
IndexedDBFileStorage.prototype.uncache = function(sha256sum)
{
    // No-op.
};
|
||||
|
||||
/**
|
||||
* @constructor
|
||||
* @implements {FileStorageInterface}
|
||||
|
@ -393,7 +102,7 @@ ServerFileStorageWrapper.prototype.load_from_server = function(sha256sum)
|
|||
v86util.load_file(this.baseurl + sha256sum, { done: buffer =>
|
||||
{
|
||||
const data = new Uint8Array(buffer);
|
||||
this.set(sha256sum, data).then(() => resolve(data));
|
||||
this.cache(sha256sum, data).then(() => resolve(data));
|
||||
}});
|
||||
});
|
||||
};
|
||||
|
@ -419,9 +128,9 @@ ServerFileStorageWrapper.prototype.read = async function(sha256sum, offset, coun
|
|||
* @param {string} sha256sum
|
||||
* @param {!Uint8Array} data
|
||||
*/
|
||||
ServerFileStorageWrapper.prototype.set = async function(sha256sum, data)
|
||||
ServerFileStorageWrapper.prototype.cache = async function(sha256sum, data)
|
||||
{
|
||||
return await this.storage.set(sha256sum, data);
|
||||
return await this.storage.cache(sha256sum, data);
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -436,19 +145,16 @@ ServerFileStorageWrapper.prototype.uncache = function(sha256sum)
|
|||
// Closure Compiler-compatible exports: bracket notation keeps the property
// names from being renamed by the minifier. Exactly one branch runs,
// depending on the hosting environment.
if(typeof window !== "undefined")
{
    // Browser main thread.
    window["MemoryFileStorage"] = MemoryFileStorage;
    window["IndexedDBFileStorage"] = IndexedDBFileStorage;
    window["ServerFileStorageWrapper"] = ServerFileStorageWrapper;
}
else if(typeof module !== "undefined" && typeof module.exports !== "undefined")
{
    // Node.js / CommonJS.
    module.exports["MemoryFileStorage"] = MemoryFileStorage;
    module.exports["IndexedDBFileStorage"] = IndexedDBFileStorage;
    module.exports["ServerFileStorageWrapper"] = ServerFileStorageWrapper;
}
else if(typeof importScripts === "function")
{
    // web worker
    self["MemoryFileStorage"] = MemoryFileStorage;
    self["IndexedDBFileStorage"] = IndexedDBFileStorage;
    self["ServerFileStorageWrapper"] = ServerFileStorageWrapper;
}
|
||||
|
|
|
@ -447,9 +447,8 @@ V86Starter.prototype.continue_init = async function(emulator, options)
|
|||
var fs_url = options["filesystem"]["basefs"];
|
||||
var base_url = options["filesystem"]["baseurl"];
|
||||
|
||||
let file_storage = typeof window === "undefined" || !window.indexedDB ?
|
||||
new MemoryFileStorage() :
|
||||
await IndexedDBFileStorage.try_create();
|
||||
let file_storage = new MemoryFileStorage();
|
||||
|
||||
if(base_url)
|
||||
{
|
||||
file_storage = new ServerFileStorageWrapper(file_storage, base_url);
|
||||
|
@ -1050,9 +1049,8 @@ V86Starter.prototype.serial0_send = function(data)
|
|||
*/
|
||||
V86Starter.prototype.mount_fs = async function(path, baseurl, basefs, callback)
|
||||
{
|
||||
let file_storage = typeof window === "undefined" || !window.indexedDB ?
|
||||
new MemoryFileStorage() :
|
||||
await IndexedDBFileStorage.try_create();
|
||||
let file_storage = new MemoryFileStorage();
|
||||
|
||||
if(baseurl)
|
||||
{
|
||||
file_storage = new ServerFileStorageWrapper(file_storage, baseurl);
|
||||
|
|
|
@ -1,242 +0,0 @@
|
|||
#!/usr/bin/env node
|
||||
"use strict";
|
||||
|
||||
process.on("unhandledRejection", exn => { throw exn; });
|
||||
const util = require("util");
|
||||
const { MemoryFileStorage, IndexedDBFileStorage } = require("../../build/libv86-debug.js");
|
||||
|
||||
const MAX_TESTFILE_SIZE = 16384;
|
||||
const NUMBER_OF_TESTFILES = 16;
|
||||
const NUMBER_OF_TESTREADS = 64;
|
||||
|
||||
/**
 * Print a green "[+]" success line to stdout.
 * @param {string} msg printf-style format string
 * @param {...*} args values for the format placeholders
 */
function log_pass(msg, ...args)
{
    const green = "\x1b[92m";
    const reset = "\x1b[0m";
    console.log(`${green}[+] ${msg}${reset}`, ...args);
}
|
||||
|
||||
/**
 * Print a red "[-]" failure line to stderr.
 * @param {string} msg printf-style format string
 * @param {...*} args values for the format placeholders
 */
function log_fail(msg, ...args)
{
    const red = "\x1b[91m";
    const reset = "\x1b[0m";
    console.error(`${red}[-] ${msg}${reset}`, ...args);
}
|
||||
|
||||
/**
 * Compare two Uint8Arrays (either may be null). On the first mismatch a
 * failure message is logged via log_fail and false is returned; equal inputs
 * (including both-null) return true.
 * @param {Uint8Array?} actual
 * @param {Uint8Array?} expected
 * @return {boolean}
 */
function assert_uint8array_equal(actual, expected)
{
    const actual_is_null = actual === null;
    const expected_is_null = expected === null;

    if(actual_is_null || expected_is_null)
    {
        if(actual_is_null !== expected_is_null)
        {
            const the_null = actual ? "expected" : "actual";
            const not_null = actual ? "actual" : "expected";
            log_fail("Failed assert equal. %s is null but %s is not", the_null, not_null);
            return false;
        }
        // Both null: treated as equal.
        return true;
    }

    if(actual.length !== expected.length)
    {
        log_fail("Failed assert equal - lengths differ. Actual length: %d, Expected length: %d",
            actual.length, expected.length);
        return false;
    }

    for(let i = 0; i < actual.length; i++)
    {
        if(actual[i] !== expected[i])
        {
            log_fail("Failed assert equal at position %d. Actual: %d, Expected %d",
                i, actual[i], expected[i]);
            return false;
        }
    }

    return true;
}
|
||||
|
||||
/**
 * Build a minimal in-memory mock of the IndexedDB surface used by
 * IndexedDBFileStorage: transaction() -> objectStore() -> get/count/put.
 * Requests complete asynchronously via setImmediate, and the mock models the
 * real IndexedDB transaction lifetime: a transaction is active while request
 * callbacks (and their microtasks) run, and fires oncomplete once no requests
 * or callbacks remain pending.
 *
 * Fix: count()'s inactive-transaction failure message previously reported
 * itself as `get` (copy-paste from get()); it now reports `count`.
 * @return {!Object} Object mimicking an IDBDatabase.
 */
function mock_indexeddb()
{
    const db = new Map();
    return {
        transaction(store_name, mode)
        {
            const transaction = {
                objectStore(store_name)
                {
                    return {
                        get(key)
                        {
                            assert_transaction_active(`get ${key}`);
                            const result = db.get(key);
                            const request = { result };
                            mock_request_completion(request);
                            return request;
                        },
                        count(key)
                        {
                            assert_transaction_active(`count ${key}`);
                            const result = db.get(key) ? 1 : 0;
                            const request = { result };
                            mock_request_completion(request);
                            return request;
                        },
                        put(value)
                        {
                            assert_transaction_active(`put ${value}`);
                            const key = value["sha256sum"];
                            db.set(key, value);
                            const request = {};
                            mock_request_completion(request);
                            return request;
                        },
                    };
                },
                abort()
                {
                    // No-op.
                },
            };

            let is_active = true;
            let pending_requests = 0;
            let pending_callbacks = 1;

            // Abort the test run when a request is issued outside an active
            // transaction — real IndexedDB would throw TransactionInactiveError.
            function assert_transaction_active(verb)
            {
                if(!is_active)
                {
                    log_fail(`Attempted to ${verb} when transaction is inactive`);
                    process.exit(1);
                }
            }
            // Deliver a request's result on a later macrotask, like real IDB.
            function mock_request_completion(request)
            {
                pending_requests++;
                setImmediate(() =>
                {
                    pending_requests--;
                    pending_callbacks++;

                    // Transaction is active during onsuccess callback and during its microtasks.
                    is_active = true;

                    // Queue before the onsuccess callback queues any other macrotask.
                    queue_transaction_deactivate();

                    if(request.onsuccess)
                    {
                        request.onsuccess();
                    }
                });
            }
            function queue_transaction_deactivate()
            {
                // Deactivate transaction only after all microtasks (e.g. promise callbacks) have
                // been completed.
                setImmediate(() =>
                {
                    is_active = false;
                    pending_callbacks--;

                    // Complete transaction when it can no longer become active.
                    if(!pending_requests && !pending_callbacks)
                    {
                        if(transaction.oncomplete)
                        {
                            transaction.oncomplete();
                        }
                    }
                });
            }

            // The transaction starts active; schedule its initial deactivation.
            queue_transaction_deactivate();

            return transaction;
        },
    };
}
|
||||
|
||||
/**
 * Read the same byte range from the oracle and from the implementation under
 * test, then compare the two results.
 * @return {!Promise<boolean>} true when both reads agree.
 */
async function test_read(oracle, iut, key, offset, count)
{
    const expected_data = await oracle.read(key, offset, count);
    const actual_data = await iut.read(key, offset, count);
    return assert_uint8array_equal(actual_data, expected_data);
}
|
||||
|
||||
/**
 * Exercise one file: optionally store file_data under key in both storages,
 * then read it back over boundary ranges around the 4096-byte block size and
 * over random ranges, comparing oracle and implementation each time.
 * @return {!Promise<boolean>} true when every read matched.
 */
async function test_with_file(oracle, iut, key, file_data)
{
    if(file_data)
    {
        console.log("Testing file with size: %d", file_data.length);
        await oracle.set(key, file_data);
        await iut.set(key, file_data);
    }
    else
    {
        console.log("Testing nonexistent file");
    }

    // Some boundary values.
    const boundary_cases = [
        [0, 0],
        [0, 1],
        [0, 4096],
        [0, 4097],
        [4095, 2],
        [4096, 1],
        [4096, 4096],
        [4097, 1],
        [4097, 4095],
    ];
    for(const [offset, count] of boundary_cases)
    {
        if(!await test_read(oracle, iut, key, offset, count))
        {
            return false;
        }
    }

    // Random ranges.
    for(let i = 0; i < NUMBER_OF_TESTREADS; i++)
    {
        const offset = Math.floor(Math.random() * MAX_TESTFILE_SIZE);
        const count = Math.floor(Math.random() * MAX_TESTFILE_SIZE);
        if(!await test_read(oracle, iut, key, offset, count))
        {
            log_fail("Test case offset=%d, count=%d", offset, count);
            return false;
        }
    }

    return true;
}
|
||||
|
||||
/**
 * process "exit" listener: a zero exit code while this listener is still
 * installed means the event loop drained before the tests finished, which is
 * itself a failure.
 * @param {number} exit_code
 */
function on_unexpected_exit(exit_code)
{
    if(exit_code !== 0)
    {
        return;
    }
    log_fail("Event loop unexpectedly empty.");
    process.exit(1);
}
|
||||
|
||||
/**
 * Run the full suite: an unchunked MemoryFileStorage serves as the oracle,
 * and an IndexedDBFileStorage on top of the IndexedDB mock is the
 * implementation under test. Guards against the event loop draining early.
 * @return {!Promise<boolean>} true when all tests passed.
 */
async function test_start()
{
    process.on("exit", on_unexpected_exit);

    // Test oracle without chunking.
    const oracle = new MemoryFileStorage();

    // Implementation under test with chunking.
    const iut = new IndexedDBFileStorage(mock_indexeddb());

    const random_bytes = length => new Uint8Array(length).map(v => Math.random() * 0xFF);

    // Fixed cases: missing file, empty file, one byte, exactly one block.
    const fixed_cases = [
        ["nonexistent", undefined],
        ["empty", new Uint8Array(0)],
        ["single", random_bytes(1)],
        ["1block", random_bytes(4096)],
    ];
    for(const [key, file_data] of fixed_cases)
    {
        if(!await test_with_file(oracle, iut, key, file_data))
        {
            return false;
        }
    }

    // Random file sizes.
    for(let i = 0; i < NUMBER_OF_TESTFILES; i++)
    {
        const size = Math.floor(Math.random() * MAX_TESTFILE_SIZE);
        if(!await test_with_file(oracle, iut, i.toString(), random_bytes(size)))
        {
            return false;
        }
    }

    log_pass("All tests passed!");
    process.removeListener("exit", on_unexpected_exit);
    return true;
}
|
||||
|
||||
// Entry point: run the suite and exit non-zero on failure.
test_start().then(passed =>
{
    if(!passed)
    {
        process.exit(1);
    }
});
|
Loading…
Reference in a new issue