Further updates to caching. Small change in the DB schema to make lookups a bit more performant. Note that if the DB migration from the last commit has already been run, this commit will require a manual update of the database.
parent ef0464bbc9
commit 3e3c3a24fe

4 changed files with 20 additions and 23 deletions
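If the previous commit's migration has already created the `file_cache` table, the revised migration below will not run again, so the schema has to be updated by hand. A minimal sketch of that update, written with knex.schema.raw as in the migration (the equivalent SQL can also be run directly in the MySQL console; none of this ships with the commit):

// Sketch only (not part of this commit): convert an already-created
// file_cache table to the new schema.
await knex.schema.raw('ALTER TABLE `file_cache` DROP KEY `url`');
await knex.schema.raw('ALTER TABLE `file_cache` CHANGE `url` `key` varchar(255) NOT NULL');
await knex.schema.raw('ALTER TABLE `file_cache` ADD KEY `key` (`key`(191))');

Emptying the cache first (the table rows and the files under files/cache) avoids any truncation when the `url` text column becomes a varchar(255) `key`.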
@@ -87,15 +87,6 @@ async function init() {
     await shares.regenerateRoleNamesTable();
     await shares.rebuildPermissions();
 
-    await executor.spawn();
-    await testServer.spawn();
-    await verpServer.spawn();
-    await builtinZoneMta.spawn();
-
-    await startHTTPServer(AppType.TRUSTED, 'trusted', trustedPort);
-    await startHTTPServer(AppType.SANDBOXED, 'sandbox', sandboxPort);
-    await startHTTPServer(AppType.PUBLIC, 'public', publicPort);
-
     await privilegeHelpers.ensureMailtrainDir(filesDir);
 
     // Update owner of all files under 'files' dir. This should not be necessary, but when files are copied over,
@@ -104,10 +95,17 @@ async function init() {
         await privilegeHelpers.ensureMailtrainOwner(dirEnt.path);
     }
 
     await privilegeHelpers.ensureMailtrainDir(uploadedFilesDir);
     await privilegeHelpers.ensureMailtrainDir(reportFilesDir);
 
+    await executor.spawn();
+    await testServer.spawn();
+    await verpServer.spawn();
+    await builtinZoneMta.spawn();
+
+    await startHTTPServer(AppType.TRUSTED, 'trusted', trustedPort);
+    await startHTTPServer(AppType.SANDBOXED, 'sandbox', sandboxPort);
+    await startHTTPServer(AppType.PUBLIC, 'public', publicPort);
+
     privilegeHelpers.dropRootPrivileges();
 
@@ -16,7 +16,7 @@ const fileCacheFilesDir = path.join(filesDir, 'cache');
 
 const fileCaches = new Map();
 
-async function _fileCache(typeId, cacheConfig, fileNameGen) {
+async function _fileCache(typeId, cacheConfig, keyGen) {
     if (fileCaches.has(typeId)) {
         return fileCaches.get(typeId);
     }
@@ -70,8 +70,8 @@ async function _fileCache(typeId, cacheConfig, fileNameGen) {
     setInterval(pruneCache, cacheConfig.pruneInterval * 1000);
 
 
-    const handleCache = async (fileName, res, next) => {
-        const fileEntry = await knex('file_cache').where('type', typeId).where('url', fileName).first();
+    const handleCache = async (key, res, next) => {
+        const fileEntry = await knex('file_cache').where('type', typeId).where('key', key).first();
 
         if (fileEntry) {
             res.sendFile(
@@ -127,10 +127,10 @@ async function _fileCache(typeId, cacheConfig, fileNameGen) {
             fileStream.end(null, null, async () => {
                 try {
                     await knex.transaction(async tx => {
-                        const existingFileEntry = await knex('file_cache').where('type', typeId).where('url', fileName).first();
+                        const existingFileEntry = await knex('file_cache').where('type', typeId).where('key', key).first();
 
                         if (!existingFileEntry) {
-                            const ids = await tx('file_cache').insert({type: typeId, url: fileName, mimetype: res.getHeader('Content-Type'), size: fileSize});
+                            const ids = await tx('file_cache').insert({type: typeId, key, mimetype: res.getHeader('Content-Type'), size: fileSize});
                             await fs.moveAsync(tmpFilePath, getLocalFileName(ids[0]), {});
                             mayNeedPruning = true;
                         } else {
@@ -152,7 +152,7 @@ async function _fileCache(typeId, cacheConfig, fileNameGen) {
             if (fileStream) {
                 fileStream.destroy(err);
                 fs.unlink(tmpFilePath, () => {
-                    cachedFiles.delete(fileName);
+                    cachedFiles.delete(key);
                     callback();
                 });
             } else {
@@ -166,14 +166,14 @@ async function _fileCache(typeId, cacheConfig, fileNameGen) {
     };
 
     const thisFileCache = (req, res, next) => {
-        const fileName = fileNameGen ? fileNameGen(req) : req.url.substring(1);
+        const key = keyGen ? keyGen(req) : req.url.substring(1);
 
-        if (fileName === null) { // null fileName means we don't attempt to cache
+        if (key === null) { // null key means we don't attempt to cache
             res.fileCacheResponse = res;
             next();
 
         } else {
-            handleCache(fileName, res, next).catch(err => next(err));
+            handleCache(key, res, next).catch(err => next(err));
         }
     };
 
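With the rename from `fileNameGen` to `keyGen`, the middleware is keyed by an arbitrary cache key rather than a file name. A hypothetical caller (the typeId, cacheConfig values, and route are assumptions, not taken from this commit) might look like:

// Hypothetical usage sketch: key cached responses by a route parameter.
// Returning null from keyGen bypasses the cache for that request.
const cache = await _fileCache('example', {pruneInterval: 60}, req => req.params.file || null);
app.use('/example-files', cache);

This assumes `_fileCache` resolves to the Express-style middleware (`thisFileCache`) that it constructs and memoizes per typeId.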
@@ -59,8 +59,7 @@ function ensureMailtrainOwner(file, callback) {
 }
 
 async function ensureMailtrainDir(dir) {
-    const ids = getConfigUidGid();
-    await fs.ensureDir(dir);
+    await fs.ensureDirAsync(dir);
     await ensureMailtrainOwner(dir);
 }
 
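The `Async` suffix is the point of this change: assuming `fs` here is `fs-extra-promise` (an assumption suggested by the `fs.moveAsync` call above), only the `*Async` variants return promises, so awaiting the plain callback-style `ensureDir` continues immediately. A minimal sketch of the difference:

// Assumption: fs-extra-promise, which adds promise-returning *Async variants.
const fs = require('fs-extra-promise');

async function demo(dir) {
    await fs.ensureDirAsync(dir);  // returns a promise; await actually waits
    // await fs.ensureDir(dir);    // callback-style, returns undefined, so this
    //                             // await would continue before the mkdir finishes
}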
@@ -2,12 +2,12 @@ exports.up = (knex, Promise) => (async() => {
     await knex.schema.raw('CREATE TABLE `file_cache` (\n' +
         '  `id` int(10) unsigned NOT NULL AUTO_INCREMENT,\n' +
         '  `type` varchar(255) NOT NULL,\n' +
-        '  `url` text NOT NULL,\n' +
+        '  `key` varchar(255) NOT NULL,\n' +
         '  `mimetype` varchar(255) DEFAULT NULL,\n' +
         '  `size` int(11) DEFAULT NULL,\n' +
         '  `created` timestamp NOT NULL DEFAULT current_timestamp(),\n' +
         '  PRIMARY KEY (`id`),\n' +
-        '  KEY `url` (`url`(191))\n' +
+        '  KEY `key` (`key`(191))\n' +
         ') ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;');
 })();