"""FastAPI service that segments and classifies Lontara (Bugis) script characters.

Pipeline: grayscale image -> Canny edges -> morphological close ->
connected-component analysis -> bounding-box merge -> per-character
CNN classification with a fine-tuned Keras model.
"""
from fastapi import FastAPI, UploadFile, File
import tensorflow as tf
import numpy as np
from PIL import Image
import cv2
import io
import uvicorn

app = FastAPI()

# Load the fine-tuned Keras classification model once at startup.
model = tf.keras.models.load_model("lontara_model_finetuning.keras")

# Class labels in the exact order the model's output units were trained.
labels = [
    "Tu", "He", "We", "No", "Mu", "Bu", "Ji", "Jo", "I", "Nro", "Cu", "Na",
    "Bo", "Yi", "Se", "Nyi", "So", "Wa", "Ko", "Ge", "E", "Yo", "Ngu", "Ra",
    "Wo", "Ta", "Pe", "Nra", "Da", "Ci", "Lo", "Nci", "U", "Ro", "Mo", "Nre",
    "Du", "Be", "Mpu", "Hu", "Ne", "Nyo", "Ncu", "Su", "Ju", "Gu", "Nu", "Di",
    "Nri", "Gi", "Co", "Nca", "Ri", "Si", "Ja", "Bi", "Ke", "Wu", "Nki", "Te",
    "Go", "Ya", "Nku", "Pu", "Nka", "Ba", "Mpe", "A", "Nya", "Me", "Nge",
    "Mpa", "Ma", "Mpi", "O", "Mi", "Re", "Po", "Ti", "Je", "Nco", "Pa", "Ho",
    "Nko", "Ce", "Li", "Nke", "Ru", "Ca", "Ke_", "Do", "Ga", "Mpo", "Nye",
    "Nru", "Nga", "Lu", "Pi", "Ku", "Ni", "Nce", "Le", "Ngo", "De", "Ki",
    "Wi", "Hi", "Ye", "Ngi", "Ka", "Nyu", "La", "Ha", "Sa"
]


@app.get("/")
def home():
    """Health-check endpoint."""
    return {"message": "Aksara Lontara API is running"}


def preprocess_image(image: np.ndarray):
    """Segment individual characters from a grayscale image before prediction.

    Args:
        image: 2-D uint8 grayscale image (H, W).

    Returns:
        List of cropped character sub-images, ordered left to right.
    """
    # 1) Edge detection (Canny) to capture character outlines.
    edges = cv2.Canny(image, 50, 150)

    # 2) Morphological closing to bridge gaps and suppress noise.
    kernel = np.ones((3, 3), np.uint8)
    edges_cleaned = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel, iterations=2)

    # 3) Connected-component analysis (CCA).
    # NOTE: the per-pixel label map is named cc_labels so it does not shadow
    # the module-level `labels` class list used by /predict.
    num_labels, cc_labels, stats, _ = cv2.connectedComponentsWithStats(
        edges_cleaned, connectivity=8
    )

    # 4) Keep only components large enough to be characters.
    min_area = 500  # tune to the expected character size
    bounding_boxes = []
    for i in range(1, num_labels):  # index 0 is the background component
        x, y, w, h, area = stats[i]
        if area > min_area:  # discard small noise blobs
            bounding_boxes.append((x, y, w, h))

    # 5) Sort characters by their horizontal (X) position.
    bounding_boxes.sort(key=lambda b: b[0])

    # 6) Merge bounding boxes that are horizontally close, since one
    # character can split into several components.
    merged_boxes = []
    merge_threshold = 20  # merge boxes whose horizontal gap is < 20 px
    for x, y, w, h in bounding_boxes:
        if merged_boxes and (x - (merged_boxes[-1][0] + merged_boxes[-1][2])) < merge_threshold:
            # Fuse the previous box with the current one into their union.
            x_prev, y_prev, w_prev, h_prev = merged_boxes.pop()
            x_new = min(x_prev, x)
            y_new = min(y_prev, y)
            w_new = max(x_prev + w_prev, x + w) - x_new
            h_new = max(y_prev + h_prev, y + h) - y_new
            merged_boxes.append((x_new, y_new, w_new, h_new))
        else:
            # Start a new box.
            merged_boxes.append((x, y, w, h))

    # 7) Crop each merged region out of the original image.
    segmented_chars = []
    for (x, y, w, h) in merged_boxes:
        char_segment = image[y:y + h, x:x + w]  # crop the character region
        segmented_chars.append(char_segment)

    return segmented_chars


@app.post("/predict")
async def predict(file: UploadFile = File(...)):
    """Classify every segmented character in the uploaded image.

    Returns:
        {"predictions": [label, ...]} ordered left to right; empty list
        when no character-sized component is found.
    """
    # Read the upload and convert to grayscale.
    image = Image.open(io.BytesIO(await file.read())).convert("L")
    image = np.array(image)

    # Preprocessing: character segmentation.
    segmented_chars = preprocess_image(image)

    predictions = []
    for char_img in segmented_chars:
        # Resize to the model's expected input and normalize to [0, 1].
        char_img_resized = cv2.resize(char_img, (128, 128))
        char_img_resized = char_img_resized / 255.0
        # Shape to (batch=1, 128, 128, channels=1) for the CNN.
        char_img_resized = char_img_resized.reshape(1, 128, 128, 1)

        # Predict and map the argmax unit to its class label.
        prediction = model.predict(char_img_resized)
        label = labels[np.argmax(prediction)]
        predictions.append(label)

    return {"predictions": predictions}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)