""" |
|
|
Minimal test script for DeepSeek-OCR with vLLM. |
|
|
|
|
|
This is an MVP script to validate that DeepSeek-OCR works with vLLM. |
|
|
Installs vLLM from nightly wheels (PR #27247 now merged to main). |
|
|
|
|
|
Use this to test in Colab before building the full production script. |
|
|
|
|
|
Usage: |
|
|
# Test with a HF dataset image |
|
|
uv run deepseek-ocr-vllm-test.py --dataset pixparse/idl-wds --index 0 |
|
|
|
|
|
# Test with local image |
|
|
uv run deepseek-ocr-vllm-test.py --image path/to/image.png |
|
|
|
|
|
# Test with different resolution mode |
|
|
uv run deepseek-ocr-vllm-test.py --dataset pixparse/idl-wds --resolution-mode tiny |
|
|
""" |

import argparse
import base64
import io
import logging
import sys
from typing import Dict, List

import torch
from datasets import load_dataset
from PIL import Image
from vllm import LLM, SamplingParams

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
|
|
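# Resolution presets from the DeepSeek-OCR release. crop_mode enables dynamic
# tiling ("Gundam" mode): local image_size crops plus one base_size global view.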
RESOLUTION_MODES = {
    "tiny": {"base_size": 512, "image_size": 512, "crop_mode": False},
    "small": {"base_size": 640, "image_size": 640, "crop_mode": False},
    "base": {"base_size": 1024, "image_size": 1024, "crop_mode": False},
    "large": {"base_size": 1280, "image_size": 1280, "crop_mode": False},
    "gundam": {"base_size": 1024, "image_size": 640, "crop_mode": True},
}
|
|
def check_cuda():
    """Check CUDA availability."""
    if not torch.cuda.is_available():
        logger.error("❌ CUDA not available. DeepSeek-OCR requires a GPU.")
        logger.error("Please run in Colab with a GPU runtime or on a GPU machine.")
        sys.exit(1)
    logger.info(f"✅ CUDA available: {torch.cuda.get_device_name(0)}")
|
|
def make_message(image: Image.Image, prompt: str) -> List[Dict]:
    """Create a vLLM chat message with a base64-encoded image."""
    image = image.convert("RGB")

    # Encode as a PNG data URI; vLLM's chat API accepts OpenAI-style
    # image_url entries, including data URIs.
    buf = io.BytesIO()
    image.save(buf, format="PNG")
    data_uri = f"data:image/png;base64,{base64.b64encode(buf.getvalue()).decode()}"

    return [
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": data_uri}},
                {"type": "text", "text": prompt},
            ],
        }
    ]
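
# A possible alternative to base64 data URIs: vLLM's generate() API can take
# PIL images directly via multi_modal_data, e.g.
#     llm.generate(
#         {"prompt": prompt, "multi_modal_data": {"image": image}},
#         sampling_params,
#     )
# This is a sketch only; the raw-prompt formatting DeepSeek-OCR expects on the
# generate() path may differ from the chat template used here.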
|
|
def main():
    parser = argparse.ArgumentParser(
        description="Test DeepSeek-OCR with vLLM",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
    # Test with an HF dataset
    uv run deepseek-ocr-vllm-test.py --dataset pixparse/idl-wds --index 0

    # Test with a local image
    uv run deepseek-ocr-vllm-test.py --image document.png

    # Test a different resolution
    uv run deepseek-ocr-vllm-test.py --dataset pixparse/idl-wds --resolution-mode large
""",
    )

    source_group = parser.add_mutually_exclusive_group(required=True)
    source_group.add_argument("--image", help="Path to local image file")
    source_group.add_argument(
        "--dataset",
        help="HF dataset ID (an image is taken at --index from --split)",
    )
    parser.add_argument(
        "--index",
        type=int,
        default=0,
        help="Dataset index to use (default: 0)",
    )
    parser.add_argument(
        "--image-column",
        default="image",
        help="Column name for images in dataset (default: image)",
    )
    parser.add_argument(
        "--split",
        default="train",
        help="Dataset split to use (default: train)",
    )
    parser.add_argument(
        "--resolution-mode",
        choices=list(RESOLUTION_MODES.keys()),
        default="gundam",
        help="Resolution mode preset (default: gundam)",
    )
    # The <|grounding|> tag asks DeepSeek-OCR to include layout grounding.
    parser.add_argument(
        "--prompt",
        default="<image>\n<|grounding|>Convert the document to markdown.",
        help="OCR prompt",
    )
    parser.add_argument(
        "--model",
        default="deepseek-ai/DeepSeek-OCR",
        help="Model ID (default: deepseek-ai/DeepSeek-OCR)",
    )

    args = parser.parse_args()
|
    check_cuda()
|
logger.info("π· Loading image...") |
|
|
if args.image: |
|
|
image = Image.open(args.image) |
|
|
logger.info(f"Loaded from file: {args.image}") |
|
|
else: |
|
|
dataset = load_dataset(args.dataset, split=args.split, streaming=False) |
|
|
if args.image_column not in dataset.column_names: |
|
|
logger.error(f"Column '{args.image_column}' not found in dataset") |
|
|
logger.error(f"Available columns: {dataset.column_names}") |
|
|
sys.exit(1) |
|
|
image = dataset[args.index][args.image_column] |
|
|
if isinstance(image, dict) and "bytes" in image: |
|
|
image = Image.open(io.BytesIO(image["bytes"])) |
|
|
logger.info(f"Loaded from dataset: {args.dataset}[{args.index}]") |
|
|
|
|
|
logger.info(f"Image size: {image.size}") |
|
    resolution = RESOLUTION_MODES[args.resolution_mode]
    logger.info(f"Resolution mode: {args.resolution_mode}")
    logger.info(
        f"  base_size={resolution['base_size']}, "
        f"image_size={resolution['image_size']}, crop_mode={resolution['crop_mode']}"
    )
|
logger.info(f"π Loading model: {args.model}") |
|
|
logger.info("This may take a few minutes on first run...") |
|
|
|
|
|
try: |
|
|
llm = LLM( |
|
|
model=args.model, |
|
|
trust_remote_code=True, |
|
|
max_model_len=8192, |
|
|
gpu_memory_utilization=0.8, |
|
|
limit_mm_per_prompt={"image": 1}, |
|
|
enforce_eager=False, |
|
|
) |
|
|
logger.info("β
Model loaded successfully!") |
|
|
except Exception as e: |
|
|
logger.error(f"β Failed to load model: {e}") |
|
|
logger.error("\nThis might mean:") |
|
|
logger.error(" 1. The model architecture is not recognized") |
|
|
logger.error(" 2. Missing dependencies") |
|
|
logger.error(" 3. Insufficient GPU memory") |
|
|
sys.exit(1) |
|
logger.info("π Running OCR...") |
|
|
sampling_params = SamplingParams( |
|
|
temperature=0.0, |
|
|
max_tokens=8192, |
|
|
) |
|
|
|
|
|
message = make_message(image, args.prompt) |
|
|
|
|
|
try: |
|
|
outputs = llm.chat([message], sampling_params) |
|
|
result = outputs[0].outputs[0].text.strip() |
|
|
|
|
|
logger.info("β
OCR Complete!") |
|
|
print("\n" + "=" * 80) |
|
|
print("MARKDOWN OUTPUT:") |
|
|
print("=" * 80) |
|
|
print(result) |
|
|
print("=" * 80) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"β Inference failed: {e}") |
|
|
sys.exit(1) |
|
|
if __name__ == "__main__":
    main()