duqueg committed on
Commit
bee8c25
·
verified ·
1 Parent(s): 7c814b0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -58
app.py CHANGED
@@ -1,78 +1,51 @@
1
- # app.py — AURA no HF Spaces (CPU), flan-t5-small, resposta limpa
2
 
3
  import gradio as gr
4
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
5
- import torch
6
 
7
- # Modelo leve (cabe no limite gratuito)
8
- MODEL_ID = "google/mt5-small"
9
 
10
- # Carrega modelo/tokenizer (CPU)
11
- tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
12
- model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)
13
 
14
- # Prompt de sistema curto (o completo está em aura_prompt.json, mas Flan-T5
15
- # funciona melhor com instruções compactas)
16
  SYSTEM_PROMPT = (
17
  "You are AURA, an Integrative Therapeutic Intelligence. "
18
- "Your role is to offer emotional support, reflective questions, and short micro-practices "
19
- "(e.g., breathing, grounding, brief journaling, or a simple 3-step CBT reframing). "
20
- "Never diagnose, label, prescribe, or claim to replace a licensed clinician. "
21
- "Adjust your tone to the user's emotional state; be warm, calm, non-judgmental, and concise. "
22
- "Prefer 3–6 sentences unless the user asks for more. Offer at most one micro-practice per turn. "
23
- "Always reply in the user's language; if the user writes in Portuguese, answer in Portuguese. "
24
- "Crisis protocol: if the user mentions suicide, self-harm, violence, or immediate danger, "
25
- "stop normal guidance and respond with a safety message encouraging immediate human help. "
26
- "Use this safety text in Portuguese if relevant: "
27
- "'Sinto muito que você esteja passando por isso. Eu me preocupo com a sua segurança agora. "
28
- "Eu não substituo ajuda humana. Se houver risco imediato, ligue para 190/192 (ou o número de emergência da sua região) "
29
- "ou procure alguém de confiança imediatamente. Posso te ajudar a encontrar um serviço de apoio perto de você?'."
30
  )
31
 
32
-
33
- def generate_reply(message: str) -> str:
34
- # Prompt format that MT5 understands
35
  prompt = (
36
- f"Instruction: Reply as AURA, an integrative therapeutic AI who speaks with empathy, calm and wisdom. "
37
- f"You respond kindly to the user's message, offering short, compassionate reflections and gentle guidance. "
38
- f"Always reply in the same language used by the user.\n\n"
39
- f"User message: {message}\n\n"
40
  f"AURA:"
41
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
 
43
- inputs = tokenizer(prompt, return_tensors="pt", truncation=True)
44
- with torch.no_grad():
45
- outputs = model.generate(
46
- **inputs,
47
- max_new_tokens=120,
48
- temperature=0.7,
49
- do_sample=True,
50
- top_p=0.9
51
- )
52
-
53
- text = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
54
- # Remove prompt echoes
55
- if "AURA:" in text:
56
- text = text.split("AURA:")[-1].strip()
57
- if "User message:" in text:
58
- text = text.split("User message:")[0].strip()
59
-
60
- if not text:
61
- text = "I'm here with you. Would you like to tell me more about how you're feeling right now?"
62
- return text
63
-
64
-
65
- def chat_fn(message, history):
66
- reply = generate_reply(message)
67
- return reply
68
-
69
- # Interface simples e estável do Gradio
70
  demo = gr.ChatInterface(
71
  fn=chat_fn,
72
  title="AURA — Inteligência Terapêutica Integrativa",
73
- description="Prototipo experimental (não substitui terapia)."
74
  )
75
 
76
  if __name__ == "__main__":
77
  demo.launch()
78
-
 
1
+ # app.py — AURA no HF Spaces (CPU), mt0-small (multilíngue), saída limpa
2
 
3
  import gradio as gr
4
+ from transformers import pipeline
 
5
 
6
# Small, instruction-tuned, multilingual model — light enough to fit the
# free Hugging Face Spaces CPU tier.
MODEL_ID = "bigscience/mt0-small"

# Load via the transformers pipeline (more stable for short chat turns
# than driving the tokenizer/model pair by hand).
generator = pipeline("text2text-generation", model=MODEL_ID)
 
11
 
 
 
12
# System persona prepended to every prompt sent to the model.
# Kept deliberately compact: small instruction-tuned models tend to follow
# short instructions better than long ones (the full prompt lives elsewhere).
SYSTEM_PROMPT = (
    "You are AURA, an Integrative Therapeutic Intelligence. "
    "Speak with empathy, calm and clarity. Ask short reflective questions. "
    "Offer at most one micro-practice (e.g., breathing for 1 minute) when appropriate. "
    "Never diagnose or replace a clinician. "
    "Always reply in the user's language."
)
19
 
20
def _build_prompt(message, history):
    """Assemble the mt0 prompt: system instructions, prior turns, new message.

    The original implementation ignored ``history`` entirely, so the model
    never saw earlier turns and multi-turn conversations lost all context.
    """
    lines = [SYSTEM_PROMPT, ""]
    for turn in history or []:
        # gr.ChatInterface default history is a list of (user, assistant)
        # pairs — TODO confirm against the Gradio version pinned in Spaces.
        if isinstance(turn, (list, tuple)) and len(turn) == 2:
            user_msg, bot_msg = turn
            if user_msg:
                lines.append(f"User: {user_msg}")
            if bot_msg:
                lines.append(f"AURA: {bot_msg}")
    lines.append(f"User: {message}")
    lines.append("AURA:")
    return "\n".join(lines)


def chat_fn(message, history):
    """Gradio ChatInterface callback: generate one AURA reply.

    Args:
        message: the user's latest message.
        history: prior (user, assistant) turns supplied by Gradio.

    Returns:
        The model's reply as a plain string (never empty).
    """
    prompt = _build_prompt(message, history)
    out = generator(
        prompt,
        max_new_tokens=120,
        do_sample=True,
        top_p=0.9,
        temperature=0.7,
    )[0]["generated_text"].strip()

    # Light cleanup: keep only the text after a possibly echoed "AURA:" marker.
    if "AURA:" in out:
        out = out.split("AURA:")[-1].strip()
    # Drop any hallucinated continuation of the dialogue (a trailing "User:"
    # turn) — the previous revision of this file handled the same failure mode.
    if "User:" in out:
        out = out.split("User:")[0].strip()
    # Safe fallback so the UI never shows an empty bubble.
    if not out:
        out = "I’m here with you. Would you like to tell me a little more about how you’re feeling right now?"

    return out
43
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
# Minimal, stable Gradio chat UI wired to chat_fn.
demo = gr.ChatInterface(
    fn=chat_fn,
    title="AURA — Inteligência Terapêutica Integrativa",
    description="Protótipo experimental (não substitui terapia)."
)
49
 
50
# Launch the Gradio app when executed as a script.
if __name__ == "__main__":
    demo.launch()