I'm using pdfkit in a Lambda function which creates a PDF and is then supposed to upload that PDF to an S3 bucket. But when I test the function I get: Error: Cannot determine length of [object PDFDocument]

Here is my function:

// Load the PDF generator and the AWS SDK.
var PDFDocument = require('pdfkit');
var AWS = require('aws-sdk');

// Append the Lambda task root to the executable search path so that
// binaries bundled with the deployment package can be found.
process.env['PATH'] += ':' + process.env['LAMBDA_TASK_ROOT'];
exports.handler = function(event, context) {

// create a document and pipe to a blob
var doc = new PDFDocument();

// draw some text
doc.fontSize(25)
.text('Hello World', 100, 80);

var params = {
  Bucket : "test-bucket",
  Key : event.pdf_name + ".pdf",
  Body : doc
}

var s3 = new AWS.S3();
s3.putObject(params, function(err, data) {
  if (err) {
    console.log(err)
  } else {
    context.done(null, { status: 'pdf created' });
    doc.end();
  }
 });

};

What am I doing wrong? How do I provide the file size if that is needed? Is this a good way to do this or is there a better way to upload a stream of a pdf file to an s3 bucket?

2

There are 2 best solutions below

1
On

Here is my solution:

// Dependencies: pdfkit renders the PDF, fs manages the temp file, and a
// single S3 client (created once, outside the handler, so it is reused
// across warm invocations) performs the upload.
const PDFDocument = require('pdfkit');
const fs = require("fs");
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

exports.handler = function (event, callback) {
    let doc = new PDFDocument;
    let fileName = "yourfile.pdf";

    //We use Lambdas temp folder to store file temporarily.
    //When Lambda execution ends temp is flushed
    let file = fs.createWriteStream("/tmp/" + fileName);

    doc.pipe(file);
    doc.text("hello");
    // # Finalize PDF file 
    doc.end();

    // Send pdf file to s3
    file.on("finish", function () {

        //get the file size
        const stats = fs.statSync("/tmp/" + fileName);
        console.log("filesize: " + stats.size);

        console.log("starting s3 putObject");
        s3.putObject({
            Bucket: "[your-bucket]",
            Key: fileName,
            Body: fs.createReadStream("/tmp/" + fileName),
            ContentType: "application/pdf",
            ContentLength: stats.size,
        }, function (err) {
            if (err) {
                console.log(err, err.stack);
                callback(err);
            } else {
                console.log("Done");
                callback(null, "done");
            }
        });
    });
}

Key elements of this solution were the use of file streams and Lambda's temp folder. file.on("finish") is used to verify that writing the file has actually completed before the upload starts.

0
On

If you want the PDF to be accessible to users, remember to add the attribute ACL: 'public-read'. Also, when using an S3 client against DigitalOcean Spaces, the following worked for me:

    // Upload the rendered PDF from the temp folder. ACL 'public-read'
    // makes the object publicly readable; ContentLength comes from the
    // stat of the temp file because Body is a read stream.
    const uploadParams = {
        Bucket: bucketName,
        Key: fileName,
        Body: fs.createReadStream("/tmp/" + fileName),
        ContentType: "application/pdf",
        ContentLength: stats.size,
        ACL: 'public-read',
    };
    s3Client.putObject(uploadParams, function (err) {
        if (err) {
            console.log(err, err.stack);
            callback(err);
            return;
        }
        console.log("Done");
        callback(null, "done");
    });