import sys

# Make a local transformers checkout importable; the path is relative to this script.
sys.path.append("../3rd_party/transformers/src/")

from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info

# Path to a local copy of the checkpoint (used by the commented-out loader below).
model_path = './models/Qwen2-VL-2B/'

# Default: load the model on CPU with automatic dtype selection.
model = Qwen2VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2-VL-2B-Instruct", torch_dtype="auto", device_map="cpu"
)
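
# A sketch for loading the local copy in model_path instead of downloading from the
# Hub, assuming that directory holds a compatible Qwen2-VL checkpoint:
# model = Qwen2VLForConditionalGeneration.from_pretrained(
#     model_path, torch_dtype="auto", device_map="cpu"
# )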

# flash_attention_2 improves speed and memory use, especially for multi-image and
# video inputs. It requires a CUDA GPU, the flash-attn package, and `import torch`:
# model = Qwen2VLForConditionalGeneration.from_pretrained(
#     "Qwen/Qwen2-VL-2B-Instruct",
#     torch_dtype=torch.bfloat16,
#     attn_implementation="flash_attention_2",
#     device_map="auto",
# )

# Default processor
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
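
# Optionally bound the visual token budget per image/frame, following the Qwen2-VL
# usage examples; the pixel counts here are illustrative, not tuned:
# min_pixels = 256 * 28 * 28
# max_pixels = 1280 * 28 * 28
# processor = AutoProcessor.from_pretrained(
#     "Qwen/Qwen2-VL-2B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels
# )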

# Messages containing a video (path taken from the command line) and a text query.
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "video",
                "video": sys.argv[1],
                "max_pixels": 320 * 240,  # cap on pixels per sampled frame
                "fps": 1.0,  # sample one frame per second
            },
            {"type": "text", "text": "Describe this video."},
        ],
    }
]
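
# qwen_vl_utils also accepts a video given as a list of pre-extracted frame images;
# a sketch of such a content entry, with illustrative paths:
# {
#     "type": "video",
#     "video": ["file:///path/to/frame1.jpg", "file:///path/to/frame2.jpg"],
# }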

# Render the chat template to a prompt string and extract the image/video inputs.
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
image_inputs, video_inputs = process_vision_info(messages)

# Pack the prompt and sampled video frames into model-ready tensors.
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)
inputs = inputs.to("cpu")

# Inference: generate, then drop the prompt tokens so only the new tokens are decoded.
generated_ids = model.generate(**inputs, max_new_tokens=128)
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
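
# Usage (the script name is illustrative):
#   python describe_video.py /path/to/video.mp4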