I'm trying to build a simple web app that converts video/audio into transcripts using the OpenAI Whisper API. Since the Whisper API only supports files smaller than 25 MB, I split larger files into chunks, but I'm getting an error. Here is the error output:

Error in getTranscript: AxiosError: Request failed with status code 400
Error: Cannot set headers after they are sent to the client

And here is a screenshot: [error image]
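For reference, the single-file request the code is building boils down to roughly this (trimmed out of the getTranscript helper in uploadC.ts below; the transcribeOne name is just for this sketch):

import fs from "fs";
import axios from "axios";
import FormData from "form-data";

// Stream one audio file to the Whisper endpoint and return the response body (VTT text).
async function transcribeOne(audioFilePath: string) {
  const formData = new FormData();
  formData.append("file", fs.createReadStream(audioFilePath));
  formData.append("model", "whisper-1");
  formData.append("response_format", "vtt");

  const response = await axios.post(
    "https://api.openai.com/v1/audio/translations",
    formData,
    {
      headers: {
        Authorization: `Bearer ${process.env.OPEN_AI_KEY}`,
        ...formData.getHeaders(),
      },
    }
  );
  return response.data;
}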

app.ts:

import "dotenv/config";
import express, { NextFunction, Request, Response } from "express";
import scriptRoutes from "./routes/scripts";
import userRoutes from "./routes/users";
import uploadRoutes from "./routes/upload";
import morgan from "morgan";
import createHttpError, { isHttpError } from "http-errors";
import session from "express-session";
import env from "./util/validateEnv";
import MongoStore from "connect-mongo";
import bodyParser from "body-parser";

const app = express();

app.use(bodyParser.urlencoded({ extended: false }));
app.use(express.json());

app.use(morgan("dev"));

app.use(session({
    secret: env.SESSION_SECRET,
    resave: false,
    saveUninitialized: false,
    cookie: {
        maxAge: 60 * 60 * 1000,
    },
    rolling: true,
    store: MongoStore.create({
        mongoUrl: env.MONGO_CONNECTION_STRING
    }),
}));

app.use("/api/users", userRoutes);
app.use("/api/scripts", scriptRoutes);
app.use("/api/upload", uploadRoutes);

// eslint-disable-next-line @typescript-eslint/no-unused-vars
app.use((error: unknown, req: Request, res: Response, next: NextFunction) => {
    console.error(error);
    let errorMessage = "An unknown error occurred";
    let statusCode = 500;
    if (isHttpError(error)){
        statusCode = error.status;
        errorMessage = error.message;
    } 
    res.status(statusCode).json({ error: errorMessage });
});

//use for reference and testing purposes
app.use((req, res, next) => {
  next(createHttpError(404, "Endpoint not found"));
});
export default app;

upload.ts:

import "dotenv/config";
import express from "express";
import cors from 'cors';
import * as UploadController from "../controllers/uploadC";
import multer from 'multer';

const router = express.Router();
router.use(cors());
const storage = multer.memoryStorage();
const upload = multer({ storage: storage });

router.post('/', upload.array('file'), UploadController.uploadFile);

router.post('/chat', UploadController.chatWithUser);

export default router;

uploadC.ts:

import fs from "fs";
import path from "path";
import axios from "axios";
import FormData from "form-data";
import { NextFunction, Request, Response } from "express";

const MAX_CHUNK_SIZE = 25 * 1024 * 1024;

export const uploadFile = async (
  req: Request,
  res: Response,
  next: NextFunction
) => {
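  // Handles one or more files uploaded via multer's in-memory storage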
  
  const files = req.files as Express.Multer.File[];
  if (!files || files.length === 0) {
    return res.status(400).json({ error: "No files uploaded." });
  }

  try {
    let transcript = "";
    for (let i = 0; i < files.length; i++) {
      const file = files[i];
      const fileSize = file.size;

      console.log("size is: " + fileSize);

      let datasize: number = fileSize;

      if (fileSize > MAX_CHUNK_SIZE) {
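        // Split the oversized file into roughly equal byte ranges, each under the 25 MB limit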
        const chunks = Math.ceil(fileSize / MAX_CHUNK_SIZE);
        const chunkSize = Math.ceil(fileSize / chunks);
        const tempFilePaths: string[] = [];

        for (let j = 0; j < chunks; j++) {
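          // Work out this chunk's byte range and write it to a temporary file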
          const start = j * chunkSize;
          const end = Math.min(start + chunkSize, fileSize);
          console.log("start: " + start + " end: " + end);

          const chunkData = file.buffer.subarray(start, end);
          console.log("chunk size: " + chunkData.length);
          datasize = datasize - chunkData.length;
          console.log("datasize left: " + datasize);

          const tempFilePath = path.join(__dirname, `../result/temp_audio_${i + 1}_${j + 1}.mp4`);

          fs.writeFileSync(tempFilePath, chunkData);
          tempFilePaths.push(tempFilePath);
        }

        for (const tempFilePath of tempFilePaths) {
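          // Transcribe each chunk and append it to the running transcript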
          console.log("getTranscript called");
          console.log("File path:", tempFilePath);
          const fileTranscript = await getTranscript(tempFilePath);
          transcript += fileTranscript;
          const transcriptFilePath = path.join(__dirname, "../result/transcript.txt");
          fs.writeFileSync(transcriptFilePath, transcript);
        }

        for (const tempFilePath of tempFilePaths) {
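          // Remove the temporary chunk files once they have been transcribed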
          fs.unlinkSync(tempFilePath);
        }
      } else {
        const filePath = path.join(__dirname, `../result/temp_audio_${i + 1}.mp4`);
        fs.writeFileSync(filePath, file.buffer);
        const fileTranscript = await getTranscript(filePath);
        transcript += fileTranscript;
      }
    }

    const transcriptFilePath = path.join(__dirname, "../result/transcript.txt");
    fs.writeFileSync(transcriptFilePath, transcript);

    const analysis = await getChatGPTAnalysis(transcript);

    res.status(200).json({ transcript, transcriptFilePath, analysis });
  } catch (error) {
    next(error);
    res.status(500).json({ error: "Error processing files" });
  }
};

export const chatWithUser = async (
  req: Request,
  res: Response,
  next: NextFunction) => {
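  // Answers a follow-up question using the transcript saved by uploadFile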
  console.log("trying to chat");
  try {
    const transcriptFilePath = path.join(__dirname, "../result/transcript.txt");
    const transcript = fs.readFileSync(transcriptFilePath, "utf8");

    const data = {
      model: "gpt-3.5-turbo",
      messages: [
        { role: "system", content: "You are a helpful assistant." },
        { role: "user", content: "Please answer the question based on the transcripts, which are from video or audio:" },
        { role: "user", content: transcript },
        { role: "user", content: req.body.question },
      ],
    };

    const response = await axios.post(
      "https://api.openai.com/v1/chat/completions",
      data,
      {
        headers: {
          Authorization: `Bearer ${process.env.OPEN_AI_KEY}`,
          'Content-Type': 'application/json',
        },
      }
    );

    const answer = response.data.choices[0].message.content;
    res.status(200).json({ answer });
  }
  catch(error){
    next(error);
    res.status(500).json({ error: "Error passing chat" });
  }


};

async function getTranscript(audioFilePath: string) {
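  // Sends one audio file to the OpenAI audio endpoint and returns the response body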
  try {
    const formData = new FormData();
    formData.append("file", fs.createReadStream(audioFilePath));
    formData.append("model", "whisper-1");
    formData.append("response_format", "vtt");

    const response = await axios.post(
      "https://api.openai.com/v1/audio/translations",
      formData,
      {
        headers: {
          Authorization: `Bearer ${process.env.OPEN_AI_KEY}`,
          ...formData.getHeaders(),
        },
      }
    );

    return response.data;
  } catch (error) {
    console.error("Error in getTranscript:", error);
    throw new Error("Error in getTranscript");
  }
}

async function getChatGPTAnalysis(transcript: string) {
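  // Asks gpt-3.5-turbo to summarize the full transcript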
  try {
    const data = {
      model: "gpt-3.5-turbo",
      messages: [
        { role: "system", content: "You are a helpful assistant." },
        { role: "user", content: "Please summarize the following transcript:" },
        { role: "user", content: transcript },
      ],
    };

    const response = await axios.post(
      "https://api.openai.com/v1/chat/completions",
      data,
      {
        headers: {
          Authorization: `Bearer ${process.env.OPEN_AI_KEY}`,
          'Content-Type': 'application/json',
        },
      }
    );

    const summary = response.data.choices[0].message.content;
    return summary;
  } catch (error) {
    console.error("Error in getChatGPTAnalysis:", error);
    throw new Error("Error in getChatGPTAnalysis");
  }
}


I've tried many times and have no idea what's wrong with it, so could someone please help me?
