ColorfulAI committed
Commit f8fe891 · verified · 1 Parent(s): a690da7

Update README.md

Files changed (1): README.md (+85, -85)
README.md CHANGED
 
---
license: mit
---

# LongVA-7B-Qwen2-Audio

LongVA-7B-Qwen2-Audio is an extension of [LongVA-7B](https://github.com/EvolvingLMMs-Lab/LongVA), further trained using the [LLaVA-NeXT-Audio](https://huggingface.co/datasets/ColorfulAI/LLaVA-NeXT-Audio) dataset for 0.4 epochs.

## Usage

*Please refer to [M4](https://github.com/patrick-tssn/M4) to install the relevant packages.*
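
A quick way to check that the environment is set up is to load the checkpoint on its own. This is a minimal sketch; it reuses the same `load_pretrained_model` call and arguments as the full example below.

```python
from intersuit.model.builder import load_pretrained_model

# Load LongVA-7B-Qwen2-Audio with the M4 (intersuit) loader; the arguments
# mirror the full example below ("llava_qwen" is the model type string).
tokenizer, model, image_processor, _ = load_pretrained_model(
    "ColorfulAI/LongVA-7B-Qwen2-Audio", None, "llava_qwen", device_map="cuda:0"
)
```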

```python
import os
from PIL import Image
import numpy as np
import torchaudio
import torch
from decord import VideoReader, cpu
import whisper

# fix the random seed for reproducibility
torch.manual_seed(0)

from intersuit.model.builder import load_pretrained_model
from intersuit.mm_utils import tokenizer_image_speech_tokens, process_images
from intersuit.constants import IMAGE_TOKEN_INDEX, SPEECH_TOKEN_INDEX

import warnings
warnings.filterwarnings("ignore")

model_path = "ColorfulAI/LongVA-7B-Qwen2-Audio"
video_path = "local_demo/assets/water.mp4"
audio_path = "local_demo/wav/infer.wav"
max_frames_num = 16  # you can increase this to several thousand frames, as long as your GPU memory can handle it :)
gen_kwargs = {"do_sample": True, "temperature": 0.5, "top_p": None, "num_beams": 1, "use_cache": True, "max_new_tokens": 1024}
tokenizer, model, image_processor, _ = load_pretrained_model(model_path, None, "llava_qwen", device_map="cuda:0")

query = "Give a detailed caption of the video as if I am blind."
query = None  # comment out this line to let ChatTTS convert the text query above to speech

# video input: build the chat prompt and uniformly sample max_frames_num frames from the clip
prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<image><|im_end|>\n<|im_start|>user\n<speech>\n<|im_end|>\n<|im_start|>assistant\n"
input_ids = tokenizer_image_speech_tokens(prompt, tokenizer, IMAGE_TOKEN_INDEX, SPEECH_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(model.device)
vr = VideoReader(video_path, ctx=cpu(0))
total_frame_num = len(vr)
uniform_sampled_frames = np.linspace(0, total_frame_num - 1, max_frames_num, dtype=int)
frame_idx = uniform_sampled_frames.tolist()
frames = vr.get_batch(frame_idx).asnumpy()
video_tensor = image_processor.preprocess(frames, return_tensors="pt")["pixel_values"].to(model.device, dtype=torch.float16)

# audio input: synthesize the spoken question with ChatTTS when a text query is given,
# otherwise fall back to the pre-recorded file at audio_path
if query is not None:
    import ChatTTS
    chat = ChatTTS.Chat()
    chat.load(source='local', compile=True)
    audio_path = "./local_demo/wav/infer.wav"
    if os.path.exists(audio_path):
        os.remove(audio_path)  # refresh any previously generated question audio
    if not os.path.exists(audio_path):
        wav = chat.infer(query)
        try:
            torchaudio.save(audio_path, torch.from_numpy(wav).unsqueeze(0), 24000)
        except Exception:
            torchaudio.save(audio_path, torch.from_numpy(wav), 24000)
    print(f"Human: {query}")
else:
    print("Human: <audio>")

# convert the question audio into 128-bin log-mel features for the speech encoder
speech = whisper.load_audio(audio_path)
speech = whisper.pad_or_trim(speech)
speech = whisper.log_mel_spectrogram(speech, n_mels=128).permute(1, 0).to(device=model.device, dtype=torch.float16)
speech_length = torch.LongTensor([speech.shape[0]]).to(model.device)

with torch.inference_mode():
    output_ids = model.generate(input_ids, images=[video_tensor], modalities=["video"], speeches=speech.unsqueeze(0), speech_lengths=speech_length, **gen_kwargs)
outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
print(f"Agent: {outputs}")
```
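
To ask a different spoken question, the example above offers two routes, sketched below; `my_question.wav` is a hypothetical placeholder for your own recording.

```python
# Option 1: keep `query` as a text string so the ChatTTS branch in the example
# synthesizes the question audio to local_demo/wav/infer.wav.
query = "What happens at the end of the video?"

# Option 2: set `query = None` and point `audio_path` at your own recording;
# the ChatTTS branch is skipped and the whisper feature extraction loads this file directly.
query = None
audio_path = "local_demo/wav/my_question.wav"  # hypothetical placeholder path
```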