I am getting "INIT_REPORT Init Duration: 10005.81 ms Phase: init Status: timeout"
I am deploying a REST API written in Python on AWS Lambda. However, when I try to access the API, I receive the following error:
Init Duration: 9994.93 ms Phase: init Status: timeout (Request Time Out)
Code:
from fastapi import FastAPI, HTTPException, BackgroundTasks
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import logging
import time
import os
import tempfile
import pydicom
import numpy as np
from PIL import Image
import firebase_admin
from firebase_admin import credentials, storage, db
from firebase_admin.exceptions import FirebaseError
from concurrent.futures import ThreadPoolExecutor
from ultralytics import YOLO
from threading import Lock
import asyncio
from aiofiles import open as aio_open
from aiofiles.os import remove as aio_remove
from mangum import Mangum # Import Mangum for AWS Lambda
app = FastAPI()
# Enable CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Configure logging
logging.basicConfig(level=logging.INFO)
# Initialize ThreadPoolExecutor
executor = ThreadPoolExecutor(max_workers=20) # Increase the number of workers
# Dictionary to store processing status
processing_status = {}
processing_progress = {}
# Lock for thread-safe Firebase initialization
firebase_lock = Lock()
class PathData(BaseModel):
    path: str
    adminName1: str
    patientId: str
def initialize_firebase():
    with firebase_lock:
        if not firebase_admin._apps:
            cred = credentials.Certificate(r'Run AI backend\firebase-key.json')
            firebase_admin.initialize_app(cred, {
                'storageBucket': 'storagepath',
                'databaseURL': 'databaseurl'
            })
@app.get("/health")
async def health_check():
    return {"status": "ok"}
@app.post("/get_path")
async def get_path(data: PathData, background_tasks: BackgroundTasks):
logging.info(f"Received data: {data}")
if not data.path or not data.adminName1 or not data.patientId:
raise HTTPException(status_code=400, detail="Path, adminName1, or patientId not provided")
folder_path = data.path
admin_name = data.adminName1
patient_id = data.patientId
logging.info(f"Folder path received: {folder_path}")
# Initialize Firebase Admin SDK
try:
initialize_firebase()
except FirebaseError as e:
logging.error(f"Firebase initialization error: {e}")
raise HTTPException(status_code=500, detail="Firebase initialization failed")
bucket = storage.bucket()
blobs = bucket.list_blobs(prefix=folder_path)
dcm_files = [blob.name for blob in blobs if blob.name.endswith('.dcm')]
if not dcm_files:
logging.error(f"No DICOM files found in the specified path: {folder_path}")
raise HTTPException(status_code=404, detail="No DICOM files found in the specified path")
dcm_file_path = dcm_files[0]
logging.info(f"DICOM file to process: {dcm_file_path}")
# Initialize status and progress
task_id = f"{admin_name}_{patient_id}"
processing_status[task_id] = {'status': 'Processing started'}
processing_progress[task_id] = 0 # Start with 0%
background_tasks.add_task(process_dicom_file, dcm_file_path, folder_path, os.path.basename(dcm_file_path), admin_name, patient_id, task_id, bucket)
return {"status": "Processing started", "task_id": task_id}
@app.get("/status/{task_id}")
async def get_status(task_id: str):
status = processing_status.get(task_id, {'status': 'Unknown task ID'})
return status
@app.get("/progress/{task_id}")
async def get_progress(task_id: str):
progress = processing_progress.get(task_id, 0)
return {"progress": progress}
async def process_dicom_file(dcm_file_path, original_folder_path, original_filename, admin_name, patient_id, task_id, bucket):
    # Initialize YOLO model
    model = YOLO(r'Run AI backend\yolov8_segmentation_fractureAtlas.pt')

    # Download the DICOM file from Firebase Storage to a temporary file
    blob = bucket.blob(dcm_file_path)
    with tempfile.NamedTemporaryFile(delete=False) as temp_file:
        blob.download_to_filename(temp_file.name)
        temp_dcm_path = temp_file.name
    processing_progress[task_id] = 10

    # Convert the DICOM pixel data to a grayscale PNG for the model
    ds = pydicom.dcmread(temp_dcm_path)
    pixel_array = ds.pixel_array
    image = Image.fromarray(pixel_array).convert("L")
    with tempfile.NamedTemporaryFile(delete=False, suffix='.png') as temp_image_file:
        image.save(temp_image_file.name)
        image_path = temp_image_file.name
    processing_progress[task_id] = 30

    # Run inference
    processing_status[task_id]['status'] = 'Running AI on the XRAY'
    await asyncio.sleep(2)
    results = model(image_path, conf=0.25)
    prediction = results[0]
    fracture_detected = len(prediction.boxes) > 0
    message = "Positive" if fracture_detected else "Negative"

    # Write the annotated result back into the DICOM pixel data
    plot_array = prediction.plot()
    plot_img = Image.fromarray(np.uint8(plot_array))
    processed_pixel_array = np.array(plot_img)
    if len(processed_pixel_array.shape) == 3 and processed_pixel_array.shape[2] == 3:
        processed_pixel_array = np.mean(processed_pixel_array, axis=2).astype(ds.pixel_array.dtype)
    ds.PixelData = processed_pixel_array.tobytes()
    with tempfile.NamedTemporaryFile(delete=False, suffix='.dcm') as temp_processed_dcm_file:
        ds.save_as(temp_processed_dcm_file.name)
        processed_dcm_path = temp_processed_dcm_file.name
    processing_progress[task_id] = 70

    # Upload the processed DICOM to an AI_Results subfolder
    processed_folder_path = os.path.join(original_folder_path, 'AI_Results').replace("\\", "/")
    processed_blob_path = os.path.join(processed_folder_path, original_filename).replace("\\", "/")
    logging.info(f"Processed DICOM file will be uploaded to: {processed_blob_path}")
    processing_status[task_id]['status'] = 'Uploading Generated Result'
    await asyncio.sleep(2)
    processed_blob = bucket.blob(processed_blob_path)
    processed_blob.upload_from_filename(processed_dcm_path)

    # Clean up temporary files
    await aio_remove(image_path)
    await aio_remove(temp_dcm_path)
    await aio_remove(processed_dcm_path)

    # Record the final status and flag the result in the Realtime Database
    processing_progress[task_id] = 100  # Update progress to 100%
    processing_status[task_id]['status'] = 'Done'
    processing_status[task_id]['message'] = message
    processing_status[task_id]['fracture_detected'] = fracture_detected
    processing_status[task_id]['processed_image_path'] = processed_blob_path
    patient_ref = db.reference(f'superadmin/admins/{admin_name}/patients/{patient_id}')
    patient_ref.update({'AI_result': 'true'})
    return {"message": message, "fracture_detected": fracture_detected, "processed_image_path": processed_blob_path}
# Add Mangum handler for AWS Lambda
handler = Mangum(app)
AWS CloudWatch logs:
INFO: Started server process [8]
2024-08-11T20:13:37.314+05:30  INFO: Waiting for application startup.
2024-08-11T20:13:37.314+05:30  INFO: Application startup complete.
2024-08-11T20:13:37.315+05:30  INFO: Uvicorn running on http://0.0.0.0:8080 (Press CTRL+C to quit)
2024-08-11T20:13:41.644+05:30  INIT_REPORT Init Duration: 9994.93 ms Phase: init Status: timeout
2024-08-11T20:13:45.492+05:30  INFO: Started server process [8]
2024-08-11T20:13:45.492+05:30  INFO: Waiting for application startup.
2024-08-11T20:13:45.492+05:30  INFO: Application startup complete.
2024-08-11T20:13:45.493+05:30  INFO: Uvicorn running on http://0.0.0.0:8080 (Press CTRL+C to quit)
When I test the endpoint, I get a Request Timeout message.
2 Answers
The answer from @jarmod is probably the best suggestion, but I also have an additional suggestion:
I'm not familiar with Mangum, but while looking for documentation I noticed that the project is archived (see the repository: https://github.com/jordaneremieff/mangum). However, in the README I saw a lifespan option that can be set to off. Have you tried that value? I hope my answer helps!
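As a minimal sketch of what that change might look like, assuming app here stands for the existing FastAPI application from the question and only the handler line changes:

from fastapi import FastAPI
from mangum import Mangum

app = FastAPI()  # your existing FastAPI app with all the routes defined above

# lifespan="off" tells Mangum to skip ASGI lifespan (startup/shutdown) handling,
# so the adapter does not wait on startup events during the Lambda init phase.
handler = Mangum(app, lifespan="off")

Note that this only addresses the lifespan wait; if the heavy imports (ultralytics/YOLO, pydicom, firebase_admin) are what push the init phase past 10 seconds, the init timeout may persist regardless of this setting.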