duqueg committed on
Commit
7c814b0
·
verified ·
1 Parent(s): c88e7b9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -18
app.py CHANGED
@@ -31,38 +31,37 @@ SYSTEM_PROMPT = (
31
 
32
 
33
def generate_reply(message: str) -> str:
    """Generate a reply from the Flan-T5 model for a single user message.

    Builds a minimal dialogue prompt (system prompt + user turn), runs
    greedy decoding, strips any echoed prompt text, and falls back to a
    friendly default message when the model produces nothing usable.
    """
    # Minimal dialogue-style instruction for Flan-T5.
    prompt = (
        f"{SYSTEM_PROMPT}\n\n"
        f"Usuário: {message}\n"
        f"AURA:"
    )
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=90,
            do_sample=False,  # deterministic / predictable output
            num_beams=1,
            # NOTE: early_stopping removed — it only applies to beam search
            # (num_beams > 1); with num_beams=1 it has no effect and makes
            # transformers emit a UserWarning.
        )
    text = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()

    # Remove a prompt echo, if the model repeats it verbatim.
    if text.startswith(prompt):
        text = text[len(prompt):].strip()
    # The model may repeat the "Usuário:" / "AURA:" markers; keep only the
    # span after the last "AURA:" and before any new "Usuário:" turn.
    if "AURA:" in text:
        text = text.split("AURA:")[-1].strip()
    if "Usuário:" in text:
        text = text.split("Usuário:")[0].strip()

    # Friendly fallback for an empty generation.
    if not text:
        text = "Estou aqui com você. Quer me contar um pouco mais do que está sentindo agora?"
    return text
65
 
 
66
def chat_fn(message, history):
    """Chat callback: answer only the latest message; *history* is unused."""
    return generate_reply(message)
 
31
 
32
 
33
def generate_reply(message: str) -> str:
    """Produce one empathetic "AURA" reply to a single user message.

    Encodes an instruction-style prompt, samples a completion from the
    seq2seq model, strips any echoed prompt markers, and falls back to a
    fixed friendly message when the generation comes back empty.
    """
    # Instruction-style prompt the model answers as "AURA".
    prompt = (
        "Instruction: Reply as AURA, an integrative therapeutic AI who speaks with empathy, calm and wisdom. "
        "You respond kindly to the user's message, offering short, compassionate reflections and gentle guidance. "
        "Always reply in the same language used by the user.\n\n"
        + f"User message: {message}\n\n"
        + "AURA:"
    )

    encoded = tokenizer(prompt, return_tensors="pt", truncation=True)
    with torch.no_grad():
        generated = model.generate(
            **encoded,
            max_new_tokens=120,
            temperature=0.7,
            do_sample=True,
            top_p=0.9,
        )

    text = tokenizer.decode(generated[0], skip_special_tokens=True).strip()

    # Drop any prompt fragments the model may have echoed back: keep only
    # what follows the last "AURA:" and precedes any repeated user turn.
    if "AURA:" in text:
        text = text.rpartition("AURA:")[2].strip()
    if "User message:" in text:
        text = text.partition("User message:")[0].strip()

    # Friendly fallback when the model generated nothing usable.
    return text or "I'm here with you. Would you like to tell me more about how you're feeling right now?"
63
 
64
+
65
def chat_fn(message, history):
    """Return the model's reply to *message*.

    ``history`` is accepted (presumably to satisfy a Gradio-style chat
    callback signature — confirm against the interface setup elsewhere in
    the file) but is intentionally unused: each reply is generated from
    the latest message alone.
    """
    reply = generate_reply(message)
    return reply