From 3e3c3a24fecc165979bdc704ae0d3075643f697f Mon Sep 17 00:00:00 2001 From: Tomas Bures Date: Mon, 22 Apr 2019 15:41:39 +0200 Subject: [PATCH] Further updates on caching. Small change in DB schema to make lookups a bit more performant. Note that if DB migration in the last commit has been run, this commit will need manual update of the database. --- server/index.js | 18 ++++++++---------- server/lib/file-cache.js | 18 +++++++++--------- server/lib/privilege-helpers.js | 3 +-- .../migrations/20190422084800_file_cache.js | 4 ++-- 4 files changed, 20 insertions(+), 23 deletions(-) diff --git a/server/index.js b/server/index.js index 720a1272..9fd789c9 100644 --- a/server/index.js +++ b/server/index.js @@ -87,15 +87,6 @@ async function init() { await shares.regenerateRoleNamesTable(); await shares.rebuildPermissions(); - await executor.spawn(); - await testServer.spawn(); - await verpServer.spawn(); - await builtinZoneMta.spawn(); - - await startHTTPServer(AppType.TRUSTED, 'trusted', trustedPort); - await startHTTPServer(AppType.SANDBOXED, 'sandbox', sandboxPort); - await startHTTPServer(AppType.PUBLIC, 'public', publicPort); - await privilegeHelpers.ensureMailtrainDir(filesDir); // Update owner of all files under 'files' dir. 
This should not be necessary, but when files are copied over, @@ -104,10 +95,17 @@ async function init() { await privilegeHelpers.ensureMailtrainOwner(dirEnt.path); } - await privilegeHelpers.ensureMailtrainDir(uploadedFilesDir); await privilegeHelpers.ensureMailtrainDir(reportFilesDir); + await executor.spawn(); + await testServer.spawn(); + await verpServer.spawn(); + await builtinZoneMta.spawn(); + + await startHTTPServer(AppType.TRUSTED, 'trusted', trustedPort); + await startHTTPServer(AppType.SANDBOXED, 'sandbox', sandboxPort); + await startHTTPServer(AppType.PUBLIC, 'public', publicPort); privilegeHelpers.dropRootPrivileges(); diff --git a/server/lib/file-cache.js b/server/lib/file-cache.js index 96970031..302a1a45 100644 --- a/server/lib/file-cache.js +++ b/server/lib/file-cache.js @@ -16,7 +16,7 @@ const fileCacheFilesDir = path.join(filesDir, 'cache'); const fileCaches = new Map(); -async function _fileCache(typeId, cacheConfig, fileNameGen) { +async function _fileCache(typeId, cacheConfig, keyGen) { if (fileCaches.has(typeId)) { return fileCaches.get(typeId); } @@ -70,8 +70,8 @@ async function _fileCache(typeId, cacheConfig, fileNameGen) { setInterval(pruneCache, cacheConfig.pruneInterval * 1000); - const handleCache = async (fileName, res, next) => { - const fileEntry = await knex('file_cache').where('type', typeId).where('url', fileName).first(); + const handleCache = async (key, res, next) => { + const fileEntry = await knex('file_cache').where('type', typeId).where('key', key).first(); if (fileEntry) { res.sendFile( @@ -127,10 +127,10 @@ async function _fileCache(typeId, cacheConfig, fileNameGen) { fileStream.end(null, null, async () => { try { await knex.transaction(async tx => { - const existingFileEntry = await knex('file_cache').where('type', typeId).where('url', fileName).first(); + const existingFileEntry = await knex('file_cache').where('type', typeId).where('key', key).first(); if (!existingFileEntry) { - const ids = await 
tx('file_cache').insert({type: typeId, url: fileName, mimetype: res.getHeader('Content-Type'), size: fileSize}); + const ids = await tx('file_cache').insert({type: typeId, key, mimetype: res.getHeader('Content-Type'), size: fileSize}); await fs.moveAsync(tmpFilePath, getLocalFileName(ids[0]), {}); mayNeedPruning = true; } else { @@ -152,7 +152,7 @@ async function _fileCache(typeId, cacheConfig, fileNameGen) { if (fileStream) { fileStream.destroy(err); fs.unlink(tmpFilePath, () => { - cachedFiles.delete(fileName); + cachedFiles.delete(key); callback(); }); } else { @@ -166,14 +166,14 @@ async function _fileCache(typeId, cacheConfig, fileNameGen) { }; const thisFileCache = (req, res, next) => { - const fileName = fileNameGen ? fileNameGen(req) : req.url.substring(1); + const key = keyGen ? keyGen(req) : req.url.substring(1); - if (fileName === null) { // null fileName means we don't attempt to cache + if (key === null) { // null key means we don't attempt to cache res.fileCacheResponse = res; next(); } else { - handleCache(fileName, res, next).catch(err => next(err)); + handleCache(key, res, next).catch(err => next(err)); } }; diff --git a/server/lib/privilege-helpers.js b/server/lib/privilege-helpers.js index a98d96a5..67d52be9 100644 --- a/server/lib/privilege-helpers.js +++ b/server/lib/privilege-helpers.js @@ -59,8 +59,7 @@ function ensureMailtrainOwner(file, callback) { } async function ensureMailtrainDir(dir) { - const ids = getConfigUidGid(); - await fs.ensureDir(dir); + await fs.ensureDirAsync(dir); await ensureMailtrainOwner(dir); } diff --git a/server/setup/knex/migrations/20190422084800_file_cache.js b/server/setup/knex/migrations/20190422084800_file_cache.js index dd188843..50a03a37 100644 --- a/server/setup/knex/migrations/20190422084800_file_cache.js +++ b/server/setup/knex/migrations/20190422084800_file_cache.js @@ -2,12 +2,12 @@ exports.up = (knex, Promise) => (async() => { await knex.schema.raw('CREATE TABLE `file_cache` (\n' + ' `id` int(10) 
unsigned NOT NULL AUTO_INCREMENT,\n' + ' `type` varchar(255) NOT NULL,\n' + - ' `url` text NOT NULL,\n' + + ' `key` varchar(255) NOT NULL,\n' + ' `mimetype` varchar(255) DEFAULT NULL,\n' + ' `size` int(11) DEFAULT NULL,\n' + ' `created` timestamp NOT NULL DEFAULT current_timestamp(),\n' + ' PRIMARY KEY (`id`),\n' + - ' KEY `url` (`url`(191))\n' + + ' KEY `key` (`key`(191))\n' + ') ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;'); })();