from fastapi import FastAPI, UploadFile, File
import numpy as np
import cv2
import tensorflow as tf
from PIL import Image
import io

app = FastAPI()

# Load the Keras model
model = tf.keras.models.load_model("lontara_model_finetuning.keras")

@app.get("/")
def home():
    return {"message": "Aksara Lontara API is running"}
def preprocess_image(image: np.ndarray):
    """Segment individual characters from a grayscale image using OpenCV."""
    # 1) Edge detection (Canny)
    edges = cv2.Canny(image, 50, 150)

    # 2) Morphological cleaning
    kernel = np.ones((3, 3), np.uint8)
    edges_cleaned = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel, iterations=2)

    # 3) Connected component analysis (CCA)
    num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(edges_cleaned, connectivity=8)

    # 4) Filter components by area to keep only character-sized regions
    min_area = 500
    bounding_boxes = []
    for i in range(1, num_labels):  # skip the background component
        x, y, w, h, area = stats[i]
        if area > min_area:
            bounding_boxes.append((x, y, w, h))

    # 5) Sort characters left to right by X position
    bounding_boxes.sort(key=lambda b: b[0])

    # 6) Merge bounding boxes that are horizontally close to each other
    merged_boxes = []
    merge_threshold = 20
    for x, y, w, h in bounding_boxes:
        if merged_boxes and (x - (merged_boxes[-1][0] + merged_boxes[-1][2])) < merge_threshold:
            x_prev, y_prev, w_prev, h_prev = merged_boxes.pop()
            x_new = min(x_prev, x)
            y_new = min(y_prev, y)
            w_new = max(x_prev + w_prev, x + w) - x_new
            h_new = max(y_prev + h_prev, y + h) - y_new
            merged_boxes.append((x_new, y_new, w_new, h_new))
        else:
            merged_boxes.append((x, y, w, h))

    # 7) Crop each character and resize it to the model's input size
    segmented_chars = []
    for (x, y, w, h) in merged_boxes:
        char_segment = image[y:y+h, x:x+w]
        char_segment = cv2.resize(char_segment, (128, 128), interpolation=cv2.INTER_AREA)
        segmented_chars.append(char_segment)

    return segmented_chars
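
# Quick standalone check of the segmentation step (illustrative sketch, not part
# of the original file; the image path "sample_lontara.png" is a placeholder):
#
#   img = cv2.imread("sample_lontara.png", cv2.IMREAD_GRAYSCALE)
#   chars = preprocess_image(img)
#   print(f"{len(chars)} character(s) segmented")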
@app.post("/predict")
async def predict(file: UploadFile = File(...)):
    # Read the uploaded file and convert it to a grayscale image
    image = Image.open(io.BytesIO(await file.read())).convert("L")
    image = np.array(image)

    # Segment the image into individual characters
    segmented_chars = preprocess_image(image)

    # If no characters were detected
    if not segmented_chars:
        return {"prediction": "No characters detected"}

    # Predict each segmented character
    predictions = []
    for char in segmented_chars:
        char_norm = char / 255.0                        # normalize pixel values to [0, 1]
        char_norm = char_norm.reshape(1, 128, 128, 1)   # add batch and channel dimensions
        prediction = model.predict(char_norm)
        # 'labels' must be a module-level list of class names in the model's
        # output order (defined elsewhere in the full file, not shown in this excerpt)
        predicted_label = labels[np.argmax(prediction)]
        predictions.append(predicted_label)

    return {"predictions": predictions}