AdrielAmoguis commited on
Commit
ad3e283
·
1 Parent(s): 7487879

something something works now

Browse files
Files changed (7) hide show
  1. M-Pre.pt +3 -0
  2. N-Pre.pt +3 -0
  3. S-Pre.pt +3 -0
  4. app.py +31 -17
  5. app_test.py +34 -0
  6. flagged/image/tmpyu6iq1kf.png +0 -0
  7. flagged/log.csv +2 -0
M-Pre.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2baad89d1c7a9a02ee3a639f9e5e932f874471ac8ca210cf96fef07593833521
3
+ size 52099424
N-Pre.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:116d8ddbdd37eeba30a231947f3faaab597fdef0a8e4ade40c9bd3a5dd203ecd
3
+ size 22557880
S-Pre.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5df9846e215318de5e880efcaf7549bea622f583fc5693e340e2566204842edc
3
+ size 22557944
app.py CHANGED
@@ -2,7 +2,6 @@ import numpy as np
2
  from PIL import Image
3
  import gradio as gr
4
  from ultralytics import YOLO
5
- from ultralytics.yolo.utils.ops import scale_image
6
  import cv2
7
 
8
  # Load the YOLO model
@@ -10,7 +9,21 @@ m_raw_model = YOLO("M-Raw.pt")
10
  n_raw_model = YOLO("N-Raw.pt")
11
  s_raw_model = YOLO("S-Raw.pt")
12
 
13
- def snap(image, model, conf, iou):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
  # Run the selected model
16
  results = None
@@ -25,27 +38,24 @@ def snap(image, model, conf, iou):
25
  result = results[0]
26
 
27
  if result.boxes.cls.cpu().numpy().size == 0:
28
- return [image]
29
- classes = result.boxes.cls.cpu().numpy()[0]
30
- probs = result.boxes.conf.cpu().numpy()[0]
 
 
 
31
  boxes = result.boxes.xyxy.cpu().numpy()
32
 
33
- print("-------------------")
34
  print(classes)
35
- print("-------------------")
36
  print(probs)
37
- print("-------------------")
38
  print(boxes)
39
 
40
- print(image)
41
  for i in range(len(boxes)):
42
  x1, y1, x2, y2 = boxes[i]
43
  x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
44
  cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
45
- cv2.putText(image, f"{classes} {probs:.2f}", (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
46
-
47
- # Convert the resulting image to a PIL image
48
- resulting_image = image
49
 
50
  # Get the labels
51
  # labels = results.pandas().xyxy[0]["name"].values
@@ -53,15 +63,19 @@ def snap(image, model, conf, iou):
53
  # Sort the labels by their x-value first and then by their y-value
54
  # print(labels)
55
 
56
- return [resulting_image]
57
 
58
 
59
  demo = gr.Interface(
60
  snap,
61
- [gr.Image(source="webcam", tool=None, streaming=True), gr.Radio(["M-Raw", "S-Raw", "N-Raw"]), gr.Slider(0, 1, value=0.6, label="Classifier Confidence Threshold"), gr.Slider(0, 1, value=0.7, label="IoU Threshold")],
62
- ["image"],
 
 
 
 
63
  title="Baybayin Instance Detection"
64
- )
65
 
66
  if __name__ == "__main__":
67
  demo.launch()
 
2
  from PIL import Image
3
  import gradio as gr
4
  from ultralytics import YOLO
 
5
  import cv2
6
 
7
  # Load the YOLO model
 
9
  n_raw_model = YOLO("N-Raw.pt")
10
  s_raw_model = YOLO("S-Raw.pt")
11
 
12
+ # Class to syllable map
13
+ class_mapping = {0: 'Baybayin Character', 1: 'a', 2: 'b', 3: 'ba', 4: 'be', 5: 'bi', 6: 'bo', 7: 'bu', 8: 'd', 9: 'da', 10: 'di', 11: 'do', 12: 'du', 13: 'e', 14: 'g', 15: 'ga', 16: 'gi', 17: 'go', 18: 'gu', 19: 'ha', 20: 'he', 21: 'hi', 22: 'ho', 23: 'hu', 24: 'i', 25: 'k', 26: 'ka', 27: 'ki', 28: 'ko', 29: 'ku', 30: 'l', 31: 'la', 32: 'le', 33: 'li', 34: 'lo', 35: 'lu', 36: 'm', 37: 'ma', 38: 'me', 39: 'mi', 40: 'mo', 41: 'mu', 42: 'n', 43: 'na', 44: 'ng', 45: 'nga', 46: 'ngi', 47: 'ngo', 48: 'ngu', 49: 'ni', 50: 'no', 51: 'nu', 52: 'o', 53: 'p', 54: 'pa', 55: 'pe', 56: 'pi', 57: 'po', 58: 'pu', 59: 'r', 60: 'ra', 61: 're', 62: 'ri', 63: 'ro', 64: 'ru', 65: 's', 66: 'sa', 67: 'se', 68: 'si', 69: 'so', 70: 'su', 71: 't', 72: 'ta', 73: 'te', 74: 'ti', 75: 'to', 76: 'tu', 77: 'u', 78: 'w', 79: 'wa', 80: 'we', 81: 'wi', 82: 'y', 83: 'ya', 84: 'yi', 85: 'yo', 86: 'yu'}
14
+
15
+ def snap(webcam, upload, model, conf, iou):
16
+
17
+ if webcam is not None:
18
+ image = webcam
19
+ elif upload is not None:
20
+ image = upload
21
+ else:
22
+ image = webcam
23
+
24
+ # If no model selected, use M-Raw
25
+ if model == None:
26
+ model = "M-Raw"
27
 
28
  # Run the selected model
29
  results = None
 
38
  result = results[0]
39
 
40
  if result.boxes.cls.cpu().numpy().size == 0:
41
+ print("No detections.")
42
+ return image
43
+
44
+
45
+ classes = result.boxes.cls.cpu().numpy()
46
+ probs = result.boxes.conf.cpu().numpy()
47
  boxes = result.boxes.xyxy.cpu().numpy()
48
 
 
49
  print(classes)
 
50
  print(probs)
 
51
  print(boxes)
52
 
53
+ # print(f"Detected {classes} with {probs:.2f} confidence.")
54
  for i in range(len(boxes)):
55
  x1, y1, x2, y2 = boxes[i]
56
  x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
57
  cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
58
+ cv2.putText(image, f"{class_mapping[int(classes[i])]} {probs[i]:.2f}", (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
 
 
 
59
 
60
  # Get the labels
61
  # labels = results.pandas().xyxy[0]["name"].values
 
63
  # Sort the labels by their x-value first and then by their y-value
64
  # print(labels)
65
 
66
+ return image
67
 
68
 
69
  demo = gr.Interface(
70
  snap,
71
+ [gr.Webcam(type="numpy", label="Webcam"),
72
+ gr.Image(source="upload", type="numpy", label="Baybayin Image"),
73
+ gr.Radio(["M-Raw", "S-Raw", "N-Raw"]),
74
+ gr.Slider(0, 1, value=0.6, label="Classifier Confidence Threshold"),
75
+ gr.Slider(0, 1, value=0.7, label="IoU Threshold")],
76
+ [gr.Image(type="numpy", label="Detected Baybayin")],
77
  title="Baybayin Instance Detection"
78
+ ).queue()
79
 
80
  if __name__ == "__main__":
81
  demo.launch()
app_test.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ from PIL import Image
4
+ import json
5
+
6
+ m_raw_model = torch.hub.load('ultralytics/yolov8', 'custom', path='M-Raw.pt', source="local")
7
+ s_raw_model = torch.hub.load('ultralytics/yolov8', 'custom', path='S-Raw.pt', source="local")
8
+ n_raw_model = torch.hub.load('ultralytics/yolov8', 'custom', path='N-Raw.pt', source="local")
9
+ m_pre_model = torch.hub.load('ultralytics/yolov8', 'custom', path='M-Pre.pt', source="local")
10
+ s_pre_model = torch.hub.load('ultralytics/yolov8', 'custom', path='S-Pre.pt', source="local")
11
+ n_pre_model = torch.hub.load('ultralytics/yolov8', 'custom', path='N-Pre.pt', source="local")
12
+
13
+ def snap(image, model, conf, iou):
14
+
15
+ # If no model selected, use M-Raw
16
+ if model == None:
17
+ model = "M-Raw"
18
+
19
+ # Run the selected model
20
+ results = None
21
+ if model == "M-Raw":
22
+ results = m_raw_model(image, conf=conf, iou=iou)
23
+ elif model == "N-Raw":
24
+ results = n_raw_model(image, conf=conf, iou=iou)
25
+ elif model == "S-Raw":
26
+ results = s_raw_model(image, conf=conf, iou=iou)
27
+ elif model == "M-Pre":
28
+ results = m_pre_model(image, conf=conf, iou=iou)
29
+ elif model == "N-Pre":
30
+ results = n_pre_model(image, conf=conf, iou=iou)
31
+ elif model == "S-Pre":
32
+ results = s_pre_model(image, conf=conf, iou=iou)
33
+
34
+
flagged/image/tmpyu6iq1kf.png ADDED
flagged/log.csv ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ image,model,Classifier Confidence Threshold,IoU Threshold,output,flag,username,timestamp
2
+ /Users/adrielamoguis/Documents/Academics/De La Salle University/Publications/Baybayin OCR - PCSC/Baybayin-Instance-Detection/flagged/image/tmpyu6iq1kf.png,N-Raw,0.6,0.7,,,,2023-03-23 19:23:28.934630