Sadok2109 committed
Commit fcbb3f1 · verified · 1 Parent(s): 2a8c45d

Upload 2 files

Files changed (2)
  1. app.py +45 -0
  2. requirements.txt +5 -0
app.py ADDED
@@ -0,0 +1,45 @@
+from transformers import AutoModelForCausalLM, AutoProcessor
+import torch
+import gradio as gr
+from PIL import Image
+
+model_id = "ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1"
+
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
+    trust_remote_code=True,
+    torch_dtype=torch.float16,
+    device_map="auto"
+)
+
+processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)  # custom processors typically need remote code too
+
+def generate_answer(image, question):
+    if not question or question.strip() == "":
+        return "❌ Please enter a medical question."
+
+    prompt = f"### User: {question}\n### Assistant:"
+
+    try:
+        if image is None:  # text-only question
+            inputs = processor(prompt, return_tensors="pt").to(model.device)
+        else:  # image + question
+            inputs = processor(prompt, images=image, return_tensors="pt").to(model.device)
+
+        outputs = model.generate(**inputs, max_new_tokens=256)
+        return processor.tokenizer.decode(outputs[0], skip_special_tokens=True)[len(prompt):].strip()  # drop the echoed prompt
+    except Exception as e:
+        return f"⚠️ Internal Error: {str(e)}"
+
+demo = gr.Interface(
+    fn=generate_answer,
+    inputs=[
+        gr.Image(type="pil", label="Upload a medical image (optional)"),
+        gr.Textbox(label="Medical Question")
+    ],
+    outputs="text",
+    title="🧠 ContactDoctor - Biomedical LLM",
+    description="Multimodal Medical Assistant: upload an image and ask a medical question."
+)
+
+demo.launch()
requirements.txt ADDED
@@ -0,0 +1,5 @@
+transformers
+torch
+gradio
+accelerate
+pillow
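
Once this Space builds, the same endpoint can also be queried programmatically. Below is a minimal client-side sketch using gradio_client; the Space id and image path are placeholders, and handle_file assumes a recent gradio_client release (older versions accept a plain file path instead).

# Hypothetical client for the deployed Space (Space id and file path are placeholders).
from gradio_client import Client, handle_file

client = Client("Sadok2109/<space-name>")            # placeholder Space id
answer = client.predict(
    handle_file("sample_scan.png"),                  # image input; pass None for a text-only question
    "What abnormality is visible in this scan?",     # medical question
    api_name="/predict",                             # default endpoint name for a gr.Interface
)
print(answer)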