I'm using archiver to zip a few files in AWS Lambda. I see that when there is no more disk space to write, archiver still holds (or leaks) some data. I've been trying to call 'archive.close()' or 'archive.destroy()', but I keep getting an "archive.close (or archive.destroy) is not a function" error. Wondering if anyone has run into this or resolved it. Here is what my code looks like:
exports.makeZip = function(source, destination) {
return new Promise((resolve, reject) => {
console.info("started zipping from source: %s to destination: %s", source, destination);
let output, archive;
try {
output = fs.createWriteStream(destination);
archive = archiver('zip');
output.on('close', function () {
output.destroy();
//archive.close();
console.log("Completed Zipping!");
resolve();
});
output.on('error', function(err) {
output.destroy();
//archive.close();
console.error("something went wrong while zipping! ", err);
reject(new Error(err));
});
archive.on('error', function(err) {
//archive.close();
output.destroy();
console.error("something went wrong while zipping! ", err);
reject(new Error(err));
});
archive.pipe(output);
archive.directory(source, false);
archive.finalize();
} catch (error) {
if (typeof output !== 'undefined') {
output.destroy();
}
if (typeof archive !== 'undefined') {
archive.close();
}
console.error("Zipping failed. Reason: %s", error.message)
reject(new Error(error));
}
});
};
The way I know this is causing the leak is that I printed the available disk space before and after removing my files, at the beginning and at the end of my Lambda invocation. I can see that some space is not cleaned up.
Before cleanup
Filesystem 1K-blocks Used Available Use% Mounted on
/dev/root 8191416 6107036 2067996 75% /
/dev/vdb 1490800 34896 1439520 3% /dev
/dev/vdd 538424 526588 0 100% /tmp
After cleanup
Filesystem 1K-blocks Used Available Use% Mounted on
/dev/root 8191416 6107036 2067996 75% /
/dev/vdb 1490800 34896 1439520 3% /dev
/dev/vdd 538424 11068 515520 3% /tmp