Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,6 +1,6 @@
|
|
| 1 |
from fastapi import FastAPI
|
| 2 |
from pydantic import BaseModel
|
| 3 |
-
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 4 |
import torch, gradio as gr
|
| 5 |
|
| 6 |
MODEL_ID = "Gopu-poss/gopu-agent-2k-fdf"
|
|
@@ -16,33 +16,48 @@ app = FastAPI()
|
|
| 16 |
|
| 17 |
class Input(BaseModel):
|
| 18 |
input: str
|
|
|
|
| 19 |
|
| 20 |
@app.post("/infer")
|
| 21 |
def infer(data: Input):
|
| 22 |
-
|
|
|
|
|
|
|
| 23 |
outputs = model.generate(**inputs, max_new_tokens=200, temperature=0.7, top_p=0.9)
|
| 24 |
text = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 25 |
return {"generated_text": text}
|
| 26 |
|
| 27 |
-
# Gradio multimodal
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
outputs = model.generate(**inputs, max_new_tokens=200, temperature=0.7, top_p=0.9)
|
| 32 |
return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 33 |
|
| 34 |
demo = gr.Interface(
|
| 35 |
fn=gopu_chat,
|
| 36 |
inputs=[
|
| 37 |
-
gr.Textbox(label="
|
| 38 |
-
gr.
|
| 39 |
-
gr.Audio(label="Voix", type="filepath"),
|
| 40 |
-
gr.
|
| 41 |
-
|
| 42 |
],
|
| 43 |
outputs="text",
|
| 44 |
title="GopuOS Agentic Endpoint",
|
| 45 |
-
description="
|
| 46 |
)
|
| 47 |
|
| 48 |
app = gr.mount_gradio_app(app, demo, path="/gradio")
|
|
|
|
| 1 |
from fastapi import FastAPI
|
| 2 |
from pydantic import BaseModel
|
| 3 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
| 4 |
import torch, gradio as gr
|
| 5 |
|
| 6 |
MODEL_ID = "Gopu-poss/gopu-agent-2k-fdf"
|
|
|
|
| 16 |
|
| 17 |
class Input(BaseModel):
    """Request body accepted by the /infer endpoint."""

    # Raw user prompt text.
    input: str
    # Optional system prompt; when present it is prepended to `input`.
    system_prompt: str | None = None
|
| 20 |
|
| 21 |
@app.post("/infer")
def infer(data: Input):
    """Generate text from the fine-tuned causal LM.

    Concatenates the optional system prompt with the user input, runs
    sampled generation, and returns the decoded text.

    Returns:
        dict with a single key ``generated_text``.
    """
    # Prepend the system prompt (if any) to the user input.
    full_prompt = f"{data.system_prompt}\n{data.input}" if data.system_prompt else data.input
    inputs = tokenizer(full_prompt, return_tensors="pt").to(model.device)
    # do_sample=True is required for temperature/top_p to take effect;
    # without it generate() performs greedy decoding and ignores both.
    # no_grad() skips autograd bookkeeping during pure inference.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
        )
    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return {"generated_text": text}
|
| 29 |
|
| 30 |
+
# --- Gradio multimodal UI ---
# Adds a "system prompt" field plus a microphone input for spoken prompts.

# Speech-recognition pipeline used to transcribe recorded audio
# (swap in any suitable HF ASR model if needed).
asr = pipeline("automatic-speech-recognition", model="openai/whisper-small")
|
| 35 |
+
|
| 36 |
+
def gopu_chat(system_prompt, prompt, audio=None, image=None, video=None):
    """Gradio handler: turn (system prompt, typed or spoken input) into text.

    If an audio file path is provided it is transcribed with the ASR
    pipeline and the transcription replaces the typed prompt. The image
    and video inputs are accepted by the UI but are not used by the model.
    """
    # Voice input wins over typed text: transcribe and substitute.
    if audio is not None:
        prompt = asr(audio)["text"]

    # NOTE(review): `image` and `video` are currently ignored — the
    # underlying causal LM is text-only; wire them up once a multimodal
    # model is in place.

    # Build the final prompt, prepending the system prompt when present.
    full_prompt = f"{system_prompt}\n{prompt}" if system_prompt else prompt

    inputs = tokenizer(full_prompt, return_tensors="pt").to(model.device)
    # do_sample=True is required for temperature/top_p to take effect;
    # without it generate() performs greedy decoding and ignores both.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 48 |
|
| 49 |
# Assemble the multimodal input widgets separately for readability.
_chat_inputs = [
    gr.Textbox(label="Prompt système (contexte)", placeholder="Ex: Tu es GopuOS, un agent exclusif..."),
    gr.Textbox(label="Texte utilisateur"),
    gr.Audio(label="Voix (optionnel)", type="filepath"),
    gr.Image(label="Image (optionnel)", type="filepath"),
    gr.Video(label="Vidéo (optionnel)", type="filepath"),
]

# Gradio UI wrapping gopu_chat; mounted under the FastAPI app below.
demo = gr.Interface(
    fn=gopu_chat,
    inputs=_chat_inputs,
    outputs="text",
    title="GopuOS Agentic Endpoint",
    description="Ajoute un prompt système, parle avec ta voix, ou envoie texte/image/vidéo.",
)

# Serve the Gradio UI at /gradio on the same FastAPI application.
app = gr.mount_gradio_app(app, demo, path="/gradio")
|