Mauricio-100 committed on
Commit
5bb9abc
·
verified ·
1 Parent(s): a8b7895

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -1
app.py CHANGED
@@ -1,7 +1,7 @@
1
  from fastapi import FastAPI
2
  from pydantic import BaseModel
3
  from transformers import AutoTokenizer, AutoModelForCausalLM
4
- import torch
5
 
6
  MODEL_ID = "Gopu-poss/gopu-agent-2k-fdf"
7
 
@@ -23,3 +23,25 @@ def infer(data: Input):
23
  outputs = model.generate(**inputs, max_new_tokens=200, temperature=0.7, top_p=0.9)
24
  text = tokenizer.decode(outputs[0], skip_special_tokens=True)
25
  return {"generated_text": text}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  from fastapi import FastAPI
2
  from pydantic import BaseModel
3
  from transformers import AutoTokenizer, AutoModelForCausalLM
4
+ import torch, gradio as gr
5
 
6
  MODEL_ID = "Gopu-poss/gopu-agent-2k-fdf"
7
 
 
23
  outputs = model.generate(**inputs, max_new_tokens=200, temperature=0.7, top_p=0.9)
24
  text = tokenizer.decode(outputs[0], skip_special_tokens=True)
25
  return {"generated_text": text}
26
+
27
+ # Gradio multimodal
28
def gopu_chat(prompt, image=None, audio=None, video=None):
    """Generate a text reply for *prompt*.

    The image/audio/video parameters are accepted by the signature but not
    yet processed — only the text prompt is used for now (multimodal
    handling is reserved for a later revision).
    """
    # `tokenizer` and `model` are module-level globals loaded elsewhere in
    # this file from MODEL_ID — TODO confirm they are initialized at import.
    encoded = tokenizer(prompt, return_tensors="pt").to(model.device)
    generated = model.generate(
        **encoded,
        max_new_tokens=200,
        temperature=0.7,
        top_p=0.9,
    )
    return tokenizer.decode(generated[0], skip_special_tokens=True)
33
+
34
# Gradio UI wrapping gopu_chat, mounted alongside the FastAPI endpoint.
# NOTE(review): the `optional=True` keyword was removed from Gradio
# components (Gradio 3+); inputs are simply left empty/None by the user,
# and passing `optional=` raises TypeError. It is dropped here.
demo = gr.Interface(
    fn=gopu_chat,
    inputs=[
        gr.Textbox(label="Texte"),
        gr.Image(label="Image", type="filepath"),
        gr.Audio(label="Voix", type="filepath"),
        gr.Video(label="Vidéo", type="filepath"),
    ],
    outputs="text",
    title="GopuOS Agentic Endpoint",
    description="Texte, image, voix, vidéo, recherche web (à venir).",
)

# Expose the Gradio interface on the existing FastAPI `app` under /gradio;
# mount_gradio_app returns the (same) FastAPI application.
app = gr.mount_gradio_app(app, demo, path="/gradio")