File size: 2,752 Bytes
ccaeb92
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
import gradio as gr
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor
import spaces

# Model configuration
MODEL_PATH = "PaddlePaddle/PaddleOCR-VL"  # Hugging Face Hub repo id
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"  # prefer GPU when available

# Task prompts: maps each UI task label to the literal prompt string
# the model expects. Keys double as the choices shown in the Gradio UI.
PROMPTS = {
    "OCR": "OCR:",
    "Table Recognition": "Table Recognition:",
    "Formula Recognition": "Formula Recognition:",
    "Chart Recognition": "Chart Recognition:",
}

# Load model and processor once at import time so every request reuses them.
# trust_remote_code is required because this repo ships custom modeling code.
# NOTE(review): bfloat16 is loaded unconditionally — confirm it is supported
# when DEVICE falls back to "cpu".
print(f"Loading model on {DEVICE}...")
model = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16
).to(DEVICE).eval()

processor = AutoProcessor.from_pretrained(MODEL_PATH, trust_remote_code=True)
print("Model loaded successfully!")

@spaces.GPU
def process_image(image, task):
    """
    Run PaddleOCR-VL on an image for the selected recognition task.

    Args:
        image: PIL Image, or a path / file-like object accepted by
            PIL.Image.open. None returns a user-facing hint instead of
            raising (Gradio passes None when no image is uploaded).
        task: One of the PROMPTS keys ("OCR", "Table Recognition", ...).
            Unknown values fall back to plain "OCR".

    Returns:
        str: The decoded recognition result — generated text only, without
        the echoed chat prompt.
    """
    if image is None:
        return "Please upload an image first."

    # Accept paths/file objects from Gradio as well as PIL images.
    if not isinstance(image, Image.Image):
        image = Image.open(image)

    # Normalize mode so the processor always sees 3-channel RGB input.
    image = image.convert("RGB")

    # Unknown task labels fall back to plain OCR.
    prompt = PROMPTS.get(task, PROMPTS["OCR"])

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": prompt},
            ]
        }
    ]

    # Tokenize the chat-formatted prompt together with the image.
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    ).to(DEVICE)

    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=1024)

    # BUG FIX: generate() returns prompt tokens followed by new tokens;
    # decoding the full sequence leaked the chat template/prompt into the
    # result. Strip the prompt portion before decoding.
    generated = outputs[:, inputs["input_ids"].shape[1]:]
    result = processor.batch_decode(generated, skip_special_tokens=True)[0]

    return result.strip()

# Create Gradio interface.
# FIX: the original passed `examples=[["example.png", "OCR"]] if False else None`,
# an always-None dead ternary; the argument is dropped (None is the default).
demo = gr.Interface(
    fn=process_image,
    inputs=[
        gr.Image(type="pil", label="Upload Image"),
        gr.Radio(
            choices=list(PROMPTS.keys()),
            value="OCR",
            label="Task Type"
        )
    ],
    outputs=gr.Textbox(label="Result", lines=10),
    title="PaddleOCR-VL: Multilingual Document Parsing",
    description="Upload an image and select a task. This model supports OCR in 109 languages, table recognition, formula recognition, and chart recognition.",
    # To enable examples, bundle sample images and pass e.g.:
    #   examples=[["example.png", "OCR"]]
)

# Launch the Gradio server only when run as a script, not on import.
if __name__ == "__main__":
    demo.launch()