LongVPO: From Anchored Cues to Self-Reasoning for Long-Form Video Preference Optimization
Paper: 2602.02341
We evaluate LongVPO-Stage2-InternVL3-8B on a range of video understanding benchmarks, comparing it against the InternVL3-8B baseline.
LongVPO achieves significant improvements on long-video benchmarks (MLVU, LongVideoBench, LVBench, Video-MME) while maintaining competitive performance on short-video tasks (MVBench).
The results demonstrate the progressive improvements from the baseline to Stage 1 (Anchored Cues) and finally to Stage 2 (Self-Reasoning).
| Benchmark | Type | InternVL3-8B (Base) | LongVPO-InternVL3-8B (Stage 1) | LongVPO-InternVL3-8B (Stage 2) |
|---|---|---|---|---|
| MLVU | Long Video | 71.4 | 75.1 | 76.4 |
| LongVideoBench | Long Video | 62.3 | 66.8 | 66.0 |
| LVBench | Long Video | 48.8 | 52.4 | 53.6 |
| Video-MME (w/o sub) | Long Video | 66.5 | 68.1 | 68.9 |
| Video-MME (w/ sub) | Long Video | 72.5 | 74.0 | 74.0 |
| MVBench | Short Video | 75.4 | 75.1 | 75.0 |
Please use `transformers>=4.37.2` to ensure the model works normally.

pip install "transformers>=4.37.2"
# optional
pip install flash-attn --no-build-isolation
Here is a self-contained example of multi-round video conversation with LongVPO-Stage2-InternVL3-8B.
import numpy as np
import torch
import torchvision.transforms as T
from decord import VideoReader, cpu
from PIL import Image
from torchvision.transforms.functional import InterpolationMode
from transformers import AutoModel, AutoTokenizer
# Constants for image normalization
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)
def build_transform(input_size):
MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
transform = T.Compose([
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
T.ToTensor(),
T.Normalize(mean=MEAN, std=STD)
])
return transform
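# Pick the tiling grid (columns, rows) from target_ratios whose aspect ratio best matches
# the image; ties prefer more tiles only when the image area is large enough to fill them.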
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
best_ratio_diff = float('inf')
best_ratio = (1, 1)
area = width * height
for ratio in target_ratios:
target_aspect_ratio = ratio[0] / ratio[1]
ratio_diff = abs(aspect_ratio - target_aspect_ratio)
if ratio_diff < best_ratio_diff:
best_ratio_diff = ratio_diff
best_ratio = ratio
elif ratio_diff == best_ratio_diff:
if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
best_ratio = ratio
return best_ratio
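# Resize the image to a grid of square tiles (between min_num and max_num of size image_size)
# that best preserves its aspect ratio, split it into those tiles, and optionally append a
# global thumbnail of the whole image.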
def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
orig_width, orig_height = image.size
aspect_ratio = orig_width / orig_height
# calculate the existing image aspect ratio
target_ratios = set(
(i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
i * j <= max_num and i * j >= min_num)
target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
# find the closest aspect ratio to the target
target_aspect_ratio = find_closest_aspect_ratio(
aspect_ratio, target_ratios, orig_width, orig_height, image_size)
# calculate the target width and height
target_width = image_size * target_aspect_ratio[0]
target_height = image_size * target_aspect_ratio[1]
blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
# resize the image
resized_img = image.resize((target_width, target_height))
processed_images = []
for i in range(blocks):
box = (
(i % (target_width // image_size)) * image_size,
(i // (target_width // image_size)) * image_size,
((i % (target_width // image_size)) + 1) * image_size,
((i // (target_width // image_size)) + 1) * image_size
)
# split the image
split_img = resized_img.crop(box)
processed_images.append(split_img)
assert len(processed_images) == blocks
if use_thumbnail and len(processed_images) != 1:
thumbnail_img = image.resize((image_size, image_size))
processed_images.append(thumbnail_img)
return processed_images
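# Uniformly sample num_segments frame indices (one per segment midpoint) within the optional
# (start, end) time bound in seconds, clamped to the valid frame range of the video.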
def get_index(bound, fps, max_frame, first_idx=0, num_segments=32):
if bound:
start, end = bound[0], bound[1]
else:
start, end = -100000, 100000
start_idx = max(first_idx, round(start * fps))
end_idx = min(round(end * fps), max_frame)
seg_size = float(end_idx - start_idx) / num_segments
frame_indices = np.array([
int(start_idx + (seg_size / 2) + np.round(seg_size * idx))
for idx in range(num_segments)
])
return frame_indices
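# Decode the sampled frames with decord, tile and normalize each one, and return the
# concatenated pixel tensor along with the number of tiles per frame.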
def load_video(video_path, bound=None, input_size=448, max_num=1, num_segments=32):
vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
max_frame = len(vr) - 1
fps = float(vr.get_avg_fps())
pixel_values_list, num_patches_list = [], []
transform = build_transform(input_size=input_size)
frame_indices = get_index(bound, fps, max_frame, first_idx=0, num_segments=num_segments)
for frame_index in frame_indices:
img = Image.fromarray(vr[frame_index].asnumpy()).convert('RGB')
img = dynamic_preprocess(img, image_size=input_size, use_thumbnail=True, max_num=max_num)
pixel_values = [transform(tile) for tile in img]
pixel_values = torch.stack(pixel_values)
num_patches_list.append(pixel_values.shape[0])
pixel_values_list.append(pixel_values)
pixel_values = torch.cat(pixel_values_list)
return pixel_values, num_patches_list
# 1. Load Model
model_path = "MCG-NJU/LongVPO-Stage2-InternVL3-8B"
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval()
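# If flash-attn is installed, the standard InternVL loading recipe also accepts
# use_flash_attn=True and low_cpu_mem_usage=True in from_pretrained.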
generation_config = dict(max_new_tokens=1024, do_sample=True)
# 2. Prepare Video
video_path = './examples/red-panda.mp4'
pixel_values, num_patches_list = load_video(video_path, num_segments=8, max_num=1)
pixel_values = pixel_values.to(torch.bfloat16).cuda()
# 3. Multi-round Conversation
# Round 1
video_prefix = ''.join([f'Frame{i+1}: <image>\n' for i in range(len(num_patches_list))])
question1 = video_prefix + 'What is the red panda doing?'
# Input format: Frame1: <image>\n...Frame8: <image>\n{question}
response, history = model.chat(tokenizer, pixel_values, question1, generation_config,
num_patches_list=num_patches_list, history=None, return_history=True)
print(f'User: {question1}\nAssistant: {response}')
# Round 2
question2 = 'Describe this video in detail.'
response, history = model.chat(tokenizer, pixel_values, question2, generation_config,
num_patches_list=num_patches_list, history=history, return_history=True)
print(f'User: {question2}\nAssistant: {response}')
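LongVPO is trained for long-form videos, so for longer inputs you will typically want a larger frame budget than the 8 segments used above. The sketch below simply reuses the `load_video` helper and `model.chat` from the example; the video path and the `num_segments=64` budget are illustrative placeholders, so adjust them to your clip length and GPU memory.

# Illustrative single-turn query over a longer video
# (hypothetical path; tune num_segments/max_num to your GPU memory).
pixel_values, num_patches_list = load_video('./examples/long_video.mp4', num_segments=64, max_num=1)
pixel_values = pixel_values.to(torch.bfloat16).cuda()
video_prefix = ''.join([f'Frame{i+1}: <image>\n' for i in range(len(num_patches_list))])
question = video_prefix + 'Summarize the main events in this video.'
response = model.chat(tokenizer, pixel_values, question, generation_config,
                      num_patches_list=num_patches_list)
print(f'Assistant: {response}')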
If you find this work helpful, please consider citing our paper:
@inproceedings{huang2025longvpo,
title={Long{VPO}: From Anchored Cues to Self-Reasoning for Long-Form Video Preference Optimization},
author={Zhenpeng Huang and Jiaqi Li and Zihan Jia and Xinhao Li and Desen Meng and Lingxue Song and Xi Chen and Liang Li and Limin Wang},
booktitle={The Thirty-ninth Annual Conference on Neural Information Processing Systems},
year={2025},
url={https://openreview.net/forum?id=LKAp7Dknxf}
}
Base model: OpenGVLab/InternVL3-8B-Pretrained