mknolan committed
Commit 0e07c83 · verified · 1 Parent(s): 8d463f3

Upload app.py with huggingface_hub

Files changed (1): app.py (+329 −0)
app.py ADDED
@@ -0,0 +1,329 @@
import os
import sys
import math
import numpy as np
import torch
import torchvision.transforms as T
from torchvision.transforms.functional import InterpolationMode
from PIL import Image
import gradio as gr
from transformers import AutoModel, AutoTokenizer

# Constants
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)

# Configuration
MODEL_NAME = "OpenGVLab/InternVL2_5-8B"  # Smaller model for faster loading
IMAGE_SIZE = 448

# Set up environment variables
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"

# Utility functions for image processing
def build_transform(input_size):
    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=MEAN, std=STD)
    ])
    return transform

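# Illustrative usage (a sketch, not executed here; "photo.jpg" is a placeholder file name):
# build_transform(IMAGE_SIZE) maps a PIL image to a normalized float tensor of shape
# (3, IMAGE_SIZE, IMAGE_SIZE), e.g.
#
#   transform = build_transform(IMAGE_SIZE)
#   tensor = transform(Image.open("photo.jpg"))   # -> torch.Size([3, 448, 448])
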
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    best_ratio_diff = float('inf')
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio

def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # enumerate candidate tiling grids (columns x rows) within [min_num, max_num] tiles
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
        i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # find the closest aspect ratio to the target
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size)

    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # resize the image
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        # split the image
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images

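# Worked example (a sketch, not executed here): for a 1344x896 input with the defaults
# above, aspect_ratio = 1.5, the closest grid is (3, 2), and the image is cut into
# 6 tiles of 448x448, plus one 448x448 thumbnail of the whole image when requested:
#
#   tiles = dynamic_preprocess(Image.new("RGB", (1344, 896)), image_size=448, use_thumbnail=True)
#   len(tiles)  # -> 7  (3x2 grid of tiles + 1 thumbnail)
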
# Function to split model across GPUs
def split_model(model_name):
    device_map = {}
    world_size = torch.cuda.device_count()
    if world_size <= 1:
        return "auto"

    num_layers = {
        'InternVL2_5-1B': 24,
        'InternVL2_5-2B': 24,
        'InternVL2_5-4B': 36,
        'InternVL2_5-8B': 32,
        'InternVL2_5-26B': 48,
        'InternVL2_5-38B': 64,
        'InternVL2_5-78B': 80
    }[model_name]

    # Since the first GPU will be used for ViT, treat it as half a GPU.
    num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))
    num_layers_per_gpu = [num_layers_per_gpu] * world_size
    num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
    layer_cnt = 0
    for i, num_layer in enumerate(num_layers_per_gpu):
        for j in range(num_layer):
            device_map[f'language_model.model.layers.{layer_cnt}'] = i
            layer_cnt += 1
    device_map['vision_model'] = 0
    device_map['mlp1'] = 0
    device_map['language_model.model.tok_embeddings'] = 0
    device_map['language_model.model.embed_tokens'] = 0
    device_map['language_model.model.rotary_emb'] = 0
    device_map['language_model.output'] = 0
    device_map['language_model.model.norm'] = 0
    device_map['language_model.lm_head'] = 0
    device_map[f'language_model.model.layers.{num_layers - 1}'] = 0

    return device_map

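# Illustrative example (a sketch, not executed here): with 2 GPUs and 'InternVL2_5-8B'
# (32 language-model layers), num_layers_per_gpu works out to [11, 22]. GPU 0 then holds
# the first 11 transformer layers plus the vision tower, embeddings, output head, and the
# final layer (pinned back to GPU 0 by the last assignment); the remaining layers go to GPU 1.
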
# Model loading function
def load_model():
    print(f"\n=== Loading {MODEL_NAME} ===")
    print(f"CUDA available: {torch.cuda.is_available()}")

    if torch.cuda.is_available():
        print(f"GPU count: {torch.cuda.device_count()}")
        for i in range(torch.cuda.device_count()):
            print(f"GPU {i}: {torch.cuda.get_device_name(i)}")

        # Memory info
        print(f"Total GPU memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.2f} GB")
        print(f"Allocated GPU memory: {torch.cuda.memory_allocated() / 1e9:.2f} GB")
        print(f"Reserved GPU memory: {torch.cuda.memory_reserved() / 1e9:.2f} GB")

    # Determine device map
    device_map = "auto"
    if torch.cuda.is_available() and torch.cuda.device_count() > 1:
        model_short_name = MODEL_NAME.split('/')[-1]
        device_map = split_model(model_short_name)

    # Load model and tokenizer
    try:
        model = AutoModel.from_pretrained(
            MODEL_NAME,
            torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
            low_cpu_mem_usage=True,
            trust_remote_code=True,
            device_map=device_map
        )

        tokenizer = AutoTokenizer.from_pretrained(
            MODEL_NAME,
            use_fast=False,
            trust_remote_code=True
        )

        # Set the image context token ID so the model can locate image placeholders.
        # InternVL reserves the special token '<IMG_CONTEXT>' for this (encoding the
        # literal string "<image>" would return an ordinary text token instead).
        print("Setting image context token ID...")
        img_context_token_id = tokenizer.convert_tokens_to_ids('<IMG_CONTEXT>')
        model.img_context_token_id = img_context_token_id
        print(f"Set img_context_token_id to {img_context_token_id}")

        print("✓ Model and tokenizer loaded successfully!")
        return model, tokenizer
    except Exception as e:
        print(f"❌ Error loading model: {e}")
        import traceback
        traceback.print_exc()
        return None, None

# Image analysis function - single image
def analyze_image(model, tokenizer, image, prompt):
    try:
        # Check if image is valid
        if image is None:
            return "Please upload an image first."

        # Tile the image and convert the tiles into a normalized pixel tensor
        transform = build_transform(IMAGE_SIZE)
        tiles = dynamic_preprocess(image, image_size=IMAGE_SIZE, use_thumbnail=True)
        pixel_values = torch.stack([transform(tile) for tile in tiles])

        # Match the model's dtype and device
        if torch.cuda.is_available():
            pixel_values = pixel_values.to(torch.bfloat16).cuda()

        # InternVL's chat() expects an <image> placeholder in the question and
        # applies the conversation template itself
        question = f"<image>\n{prompt}"
        generation_config = dict(max_new_tokens=512, do_sample=False)

        # Generate a response
        with torch.no_grad():
            response = model.chat(tokenizer, pixel_values, question, generation_config)

        return response
    except Exception as e:
        import traceback
        error_msg = f"Error analyzing image: {str(e)}\n{traceback.format_exc()}"
        return error_msg

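# Illustrative usage (a sketch, not executed here; "cat.jpg" is a placeholder file name):
#
#   model, tokenizer = load_model()
#   answer = analyze_image(model, tokenizer, Image.open("cat.jpg"), "Describe this image.")
#   print(answer)
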
# New function for analyzing two images
def analyze_two_images(model, tokenizer, image1, image2, prompt):
    try:
        # Check if images are valid
        if image1 is None and image2 is None:
            return "Please upload at least one image."

        # Tile each provided image and remember how many tiles belong to each one
        transform = build_transform(IMAGE_SIZE)
        tile_tensors = []
        num_patches_list = []
        for img in (image1, image2):
            if img is None:
                continue
            tiles = dynamic_preprocess(img, image_size=IMAGE_SIZE, use_thumbnail=True)
            tile_tensors.extend(transform(tile) for tile in tiles)
            num_patches_list.append(len(tiles))

        pixel_values = torch.stack(tile_tensors)
        if torch.cuda.is_available():
            pixel_values = pixel_values.to(torch.bfloat16).cuda()

        # One <image> placeholder per provided image; num_patches_list tells chat()
        # which tiles belong to which placeholder
        placeholders = "\n".join(f"Image-{i + 1}: <image>" for i in range(len(num_patches_list)))
        question = f"{placeholders}\n{prompt}"
        generation_config = dict(max_new_tokens=512, do_sample=False)

        # Generate a response
        with torch.no_grad():
            response = model.chat(tokenizer, pixel_values, question, generation_config,
                                  num_patches_list=num_patches_list)

        return response
    except Exception as e:
        import traceback
        error_msg = f"Error analyzing images: {str(e)}\n{traceback.format_exc()}"
        return error_msg

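# Illustrative usage (a sketch, not executed here; the file names are placeholders):
#
#   answer = analyze_two_images(model, tokenizer,
#                               Image.open("before.jpg"), Image.open("after.jpg"),
#                               "Compare and contrast these two images.")
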
# Main function
def main():
    # Load the model
    model, tokenizer = load_model()

    if model is None:
        # Create an error interface if model loading failed
        demo = gr.Interface(
            fn=lambda x: "Model loading failed. Please check the logs for details.",
            inputs=gr.Textbox(),
            outputs=gr.Textbox(),
            title="InternVL2.5 Dual Image Analyzer - Error",
            description="The model failed to load. Please check the logs for more information."
        )
        return demo

    # Predefined prompts for analysis
    prompts = [
        "Describe these images in detail.",
        "What can you tell me about these images?",
        "Is there any text in these images? If so, can you read it?",
        "Compare and contrast these two images.",
        "What are the main subjects in these images?",
        "What emotions or feelings do these images convey?",
        "Describe the composition and visual elements of these images.",
        "Summarize what you see in these images in one paragraph."
    ]

    # Create the interface
    demo = gr.Interface(
        fn=lambda img1, img2, prompt: analyze_two_images(model, tokenizer, img1, img2, prompt),
        inputs=[
            gr.Image(type="pil", label="Upload First Image"),
            gr.Image(type="pil", label="Upload Second Image"),
            gr.Dropdown(choices=prompts, value=prompts[0], label="Select a prompt or write your own below",
                        allow_custom_value=True)
        ],
        outputs=gr.Textbox(label="Analysis Results", lines=15),
        title="InternVL2.5 Dual Image Analyzer",
        description="Upload two images and ask the InternVL2.5 model to analyze them together.",
        examples=[
            ["example_images/example1.jpg", "example_images/example2.jpg", "Compare and contrast these two images."],
            ["example_images/example1.jpg", "example_images/example2.jpg", "What can you tell me about these images?"]
        ],
        theme=gr.themes.Soft(),
        allow_flagging="never"
    )

    return demo

# Run the application
if __name__ == "__main__":
    try:
        # Check for GPU
        if not torch.cuda.is_available():
            print("WARNING: CUDA is not available. The model requires a GPU to function properly.")

        # Create and launch the interface
        demo = main()
        demo.launch(server_name="0.0.0.0")
    except Exception as e:
        print(f"Error starting the application: {e}")
        import traceback
        traceback.print_exc()