```python
import av
import numpy as np
from huggingface_hub import hf_hub_download


def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
    '''
    Sample a given number of frame indices from the video.

    Args:
        clip_len (int): Total number of frames to sample.
        frame_sample_rate (int): Sample every n-th frame.
        seg_len (int): Maximum allowed index of sample's last frame.

    Returns:
        indices (List[int]): List of sampled frame indices.
    '''
    # Pick a random window of clip_len * frame_sample_rate frames, then spread
    # clip_len evenly spaced indices across it.
    converted_len = int(clip_len * frame_sample_rate)
    end_idx = np.random.randint(converted_len, seg_len)
    start_idx = end_idx - converted_len
    indices = np.linspace(start_idx, end_idx, num=clip_len)
    indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
    return indices
```
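To get a feel for what the sampler returns, here is a quick check with made-up arguments; none of these values come from the example itself:

```python
# Illustrative only: 6 indices, one roughly every 4 frames, from a 100-frame video.
# The sampler picks a random 6 * 4 = 24-frame window and spreads 6 indices across it.
example = sample_frame_indices(clip_len=6, frame_sample_rate=4, seg_len=100)
print(example)  # 6 increasing indices inside the sampled window
assert len(example) == 6 and all(example < 100)
```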
```python
# load video
file_path = hf_hub_download(
    repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
)
container = av.open(file_path)
```
```python
# sample frames; `model`, `processor`, and `read_video_pyav` are defined
# earlier in this example
num_frames = model.config.num_image_with_embedding
indices = sample_frame_indices(
    clip_len=num_frames, frame_sample_rate=4, seg_len=container.streams.video[0].frames
)
frames = read_video_pyav(container, indices)
```
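A quick shape check helps here. This assumes `read_video_pyav`, defined earlier in this example, stacks the decoded frames into one array with an RGB frame per sampled index:

```python
# Expect one decoded RGB frame per sampled index: (num_frames, height, width, 3).
print(frames.shape)
assert frames.shape[0] == num_frames
```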
```python
pixel_values = processor(images=list(frames), return_tensors="pt").pixel_values
```
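The processor treats the clip as a batch of images, so `pixel_values` holds one preprocessed tensor per frame; printing the shape is a quick way to confirm (the exact spatial size depends on the checkpoint's processor config):

```python
# Roughly (num_frames, 3, H, W); H and W depend on the processor configuration.
print(pixel_values.shape)
```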
```python
generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
```
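Greedy decoding capped at 50 tokens is used above; `generate` also accepts the usual decoding options. A beam-search variant, with an illustrative `num_beams` and a hypothetical `beam_ids` name, would look like:

```python
# Variant: beam search instead of greedy decoding (settings are illustrative).
beam_ids = model.generate(pixel_values=pixel_values, max_length=50, num_beams=4)
```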
```python
print("Generated caption:", processor.batch_decode(generated_ids, skip_special_tokens=True))
```
```
Generated caption: ['a woman is sitting at a table and she is talking about the food she is holding.']
```
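Because `sample_frame_indices` draws a random window, the caption can vary between runs; seeding NumPy before sampling pins down which frames are captioned (the seed value here is arbitrary):

```python
# Fix the sampled window so repeated runs caption the same frames.
np.random.seed(45)
```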