Azure function file not found: App Service plan vs Consumption plan


I'm running a function app that works fine when deployed on a Consumption plan, but when I run the exact same function on a Basic App Service plan I get the following error:

[Conversion Error]: could not find file '/tmp/dbfs/ncreprov.dbf'
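For context, a quick diagnostic like the one below can show whether the two plans expose the same writable temp location. This is only a throwaway sketch: log_temp_environment is a helper name I made up for this, and /tmp/dbfs is simply the path copied from the error message, so treat it as an assumption about where the conversion wants to write.

import logging
import os
import tempfile

def log_temp_environment() -> None:
    # Path copied from the error message; assumed to be where the conversion writes
    expected_dir = "/tmp/dbfs"
    # Where Python thinks temporary files should go on this worker
    logging.info("tempfile.gettempdir() = %s", tempfile.gettempdir())
    # Temp-related environment variables can differ between plan types
    for var in ("TMPDIR", "TEMP", "TMP"):
        logging.info("%s = %s", var, os.environ.get(var))
    # Check whether the directory the conversion expects actually exists
    logging.info("%s exists: %s", expected_dir, os.path.isdir(expected_dir))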

Here's a code snippet of what I'm trying to do:

from io import BytesIO, StringIO
import logging

import pandas as pd

# DATA_LAKE, CLIENT_KEY_VAULT, SQLServerConnector and the LAKE_*/CSV_* constants
# are defined elsewhere in the app.


def run() -> bool:
    try:
        # We create the destination for the raw CSV
        raw_csv = BytesIO()
        # Download the raw CSV in memory
        DATA_LAKE.download_file(
            LAKE_CONTAINER, LAKE_SOURCE_DIRECTORY, CSV_FILENAME, hotload=True
        ).readinto(raw_csv)
        # We convert the byte stream into a string stream
        csv_file = StringIO(raw_csv.getvalue().decode(LAKE_CSV_ENCODING))
        # We convert the string stream into a Pandas DataFrame
        csv_df = pd.read_csv(csv_file)

        # We remove the excluded columns from the dataframe
        csv_df.drop(columns=EXCLUDED_COLUMNS, inplace=True)

        # We drop rows where REDACTED is null
        csv_df.dropna(subset=["REDACTED"], inplace=True)

        session, engine, metadata = SQLServerConnector(
            user=CLIENT_KEY_VAULT.mssql_user,
            password=CLIENT_KEY_VAULT.mssql_password,
            host=CLIENT_KEY_VAULT.mssql_host,
            database=CLIENT_KEY_VAULT.mssql_db
        ).connect()

        # Get the current columns in the destination table
        columns_response = session.execute(
            "SELECT column_name "
            "FROM REDACTED_TABLE "
            "WHERE table_name = 'REDACTED_TABLE_2'"
        ).fetchall()
        columns = [result[0] for result in columns_response]

        # Drop columns not present in the destination table
        csv_df = csv_df[columns]

        with session.begin():
            session.execute("TRUNCATE TABLE REDACTED_TABLE_3")
            csv_df.to_sql(
                "REDACTED_TABLE_3",
                con=session,
                if_exists="append",
                index=False
            )

        session.close()

        # We convert the dataframe back into a CSV
        transformed_csv = csv_df.to_csv(index=False)
        # We encode the CSV to obtain the bytes
        byte_csv = transformed_csv.encode(LAKE_CSV_ENCODING)
        # We place the bytes into a stream
        final_csv = BytesIO(byte_csv)
        # We upload the resulting frame to the data lake
        DATA_LAKE.upload_file_to(
            LAKE_CONTAINER, LAKE_DESTINATION_DIRECTORY,
            stream=final_csv, file_name=CSV_FILENAME
        )
        return True
    except Exception as exception:
        logging.error(f"[Error]: {exception}")
    return False