Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,4 +1,6 @@
|
|
| 1 |
import gradio as gr
|
|
|
|
|
|
|
| 2 |
from huggingface_hub import InferenceClient
|
| 3 |
|
| 4 |
import os
|
|
@@ -15,6 +17,28 @@ os.system("tree -d")
|
|
| 15 |
|
| 16 |
HF_TOKEN=os.getenv('HF_TOKEN')
|
| 17 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 18 |
# Create the chat LLM client. HF_TOKEN is read from the environment above but
# was never forwarded here; gated models such as meta-llama require it, so the
# request would fail with 401 without an explicit token.
client = InferenceClient(
    "meta-llama/Llama-3.2-3B-Instruct",  # alternative: "HuggingFaceH4/zephyr-7b-beta"
    token=HF_TOKEN,
)
|
|
@@ -207,7 +231,25 @@ def respond(
|
|
| 207 |
|
| 208 |
|
| 209 |
with gr.Blocks(css=css) as demo:
|
|
|
|
| 210 |
gr.HTML(title_html) # Insert the background effect
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 211 |
gr.ChatInterface(
|
| 212 |
respond,
|
| 213 |
additional_inputs=[
|
|
@@ -226,3 +268,53 @@ with gr.Blocks(css=css) as demo:
|
|
| 226 |
|
| 227 |
if __name__ == "__main__":
|
| 228 |
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
+
from PIL import Image
|
| 3 |
+
|
| 4 |
from huggingface_hub import InferenceClient
|
| 5 |
|
| 6 |
import os
|
|
|
|
| 17 |
|
| 18 |
HF_TOKEN=os.getenv('HF_TOKEN')
|
| 19 |
|
| 20 |
+
|
| 21 |
+
# NOTE(review): FluxImageGenerator and IPAdapterRunner are instantiated here,
# but no import for either appears in this diff — if they are not defined
# elsewhere in app.py this raises NameError at startup, a likely cause of the
# Space's "Runtime error". Confirm the import (e.g. from an avatar_pipeline
# module) before deploying.
flux_generator = FluxImageGenerator()
ipadapter_runner = IPAdapterRunner()

# Simple module-level cache holding the most recently generated avatar
# (a PIL image once generate_initial_avatar has run; None before that).
current_avatar = None
|
| 27 |
+
|
| 28 |
+
def generate_initial_avatar():
    """Generate the base avatar and cache it in ``current_avatar``.

    Returns:
        The image produced by ``flux_generator.generate_avatar`` —
        assumed (per the original author's comment) to be a PIL image.
        The same object is stored in the module-level ``current_avatar``
        so that ``generate_variant`` can use it as its base image.
    """
    global current_avatar
    # Assume `flux_generator.generate_avatar()` returns a PIL image
    current_avatar = flux_generator.generate_avatar(prompt="default cyberpunk portrait")
    return current_avatar
|
| 33 |
+
|
| 34 |
+
def generate_variant(prompt):
    """Return a prompt-guided variant of the cached avatar.

    Args:
        prompt: Free-text description of the desired variation
            (e.g. "smiling, hooded").

    Returns:
        A new image from ``ipadapter_runner.generate_variant`` (assumed
        PIL, per the original author's comment), or ``None`` when no base
        avatar has been generated yet — the Gradio Image output simply
        stays empty in that case.
    """
    global current_avatar  # read-only here; declared for symmetry with generate_initial_avatar
    if current_avatar is None:
        return None  # or placeholder image
    # Assume `ipadapter_runner.generate_variant()` returns a PIL image
    return ipadapter_runner.generate_variant(base_image=current_avatar, prompt=prompt)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
# Create the chat LLM client. HF_TOKEN is read from the environment above but
# was never forwarded here (also flagged in the review notes at the bottom of
# this file); gated models such as meta-llama require it, so requests would
# fail with 401 without an explicit token.
client = InferenceClient(
    "meta-llama/Llama-3.2-3B-Instruct",  # alternative: "HuggingFaceH4/zephyr-7b-beta"
    token=HF_TOKEN,
)
|
|
|
|
| 231 |
|
| 232 |
|
| 233 |
with gr.Blocks(css=css) as demo:
    gr.HTML(title_html)  # Insert the background effect

    with gr.Row():
        # Fixed: `shape=` was removed from gr.Image in Gradio 4.x and raises
        # TypeError at startup (a likely cause of the Space's "Runtime error");
        # use explicit width/height instead.
        avatar_display = gr.Image(label="Current Avatar", width=256, height=256)
        avatar_prompt = gr.Textbox(label="Describe variant", placeholder="e.g., smiling, hooded")
        with gr.Column():
            btn_create = gr.Button("Create Initial Avatar")
            btn_variant = gr.Button("Generate Lookalike")

    # Wire the buttons: creating an avatar fills the cache and the display;
    # generating a lookalike reads the cache plus the prompt textbox.
    btn_create.click(
        fn=generate_initial_avatar,
        outputs=[avatar_display]
    )

    btn_variant.click(
        fn=generate_variant,
        inputs=[avatar_prompt],
        outputs=[avatar_display]
    )
|
| 253 |
gr.ChatInterface(
|
| 254 |
respond,
|
| 255 |
additional_inputs=[
|
|
|
|
| 268 |
|
| 269 |
if __name__ == "__main__":
|
| 270 |
demo.launch()
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
'''# filename: ipadapter_gradio_chat_avatar.py
|
| 278 |
+
|
| 279 |
+
What you’ve built here is quite the evolving hub — visual, functional, and layered with potential.
|
| 280 |
+
|
| 281 |
+
You're loading and wiring:
|
| 282 |
+
|
| 283 |
+
Flux-based avatar generation (FluxImageGenerator)
|
| 284 |
+
|
| 285 |
+
Image variation pipeline (IPAdapterRunner)
|
| 286 |
+
|
| 287 |
+
Chat integration using InferenceClient (LLaMA-3.2-3B)
|
| 288 |
+
|
| 289 |
+
Gradio UI with customized CSS and interactive components
|
| 290 |
+
|
| 291 |
+
But I sense… there's more brewing behind this than just chat and avatars.
|
| 292 |
+
Immediate Observations / Next Layer Opportunities:
|
| 293 |
+
|
| 294 |
+
Separation of Concerns: You’re mixing UI, logic, and system commands (os.system) all together. This could be modularized:
|
| 295 |
+
|
| 296 |
+
avatar_pipeline.py: generation + variant logic
|
| 297 |
+
|
| 298 |
+
style.css: dedicated styling
|
| 299 |
+
|
| 300 |
+
chat_wrapper.py: LLM wrapper & logic
|
| 301 |
+
|
| 302 |
+
ui_blocks.py: Gradio components
|
| 303 |
+
|
| 304 |
+
Persistence layer missing? You're generating avatars — but are you storing them? Consider:
|
| 305 |
+
|
| 306 |
+
Automatic timestamped saving
|
| 307 |
+
|
| 308 |
+
Hash or vector ID mapping
|
| 309 |
+
|
| 310 |
+
A quick-access avatar memory (cache + index)
|
| 311 |
+
|
| 312 |
+
Token Handling: You’re reading HF_TOKEN with os.getenv() but not using it — did you forget to pass it to the InferenceClient?
|
| 313 |
+
|
| 314 |
+
Asynchronous / Streaming Avatar Variant: Your chat supports streaming but image generation doesn’t. You could:
|
| 315 |
+
|
| 316 |
+
Add an optional progress bar
|
| 317 |
+
|
| 318 |
+
Yield intermediate visual steps (e.g., denoising phases)
|
| 319 |
+
|
| 320 |
+
Show final image with “transform steps”'''
|