CODE:
```python
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("0xSero/GLM-4.7-REAP-50-W4A16")
model = AutoModelForCausalLM.from_pretrained("0xSero/GLM-4.7-REAP-50-W4A16")

messages = [
    {"role": "user", "content": "Who are you?"},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```

ERROR:
```
Traceback (most recent call last):
  File "/tmp/0xSero_GLM-4.7-REAP-50-W4A16_1JtDYlL.py", line 27, in <module>
    model = AutoModelForCausalLM.from_pretrained("0xSero/GLM-4.7-REAP-50-W4A16")
  File "/tmp/.cache/uv/environments-v2/c8049131ec121543/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
    return model_class.from_pretrained(
           ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/c8049131ec121543/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
    return func(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/c8049131ec121543/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
    hf_quantizer, config, dtype, device_map = get_hf_quantizer(
                                              ~~~~~~~~~~~~~~~~^
        config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/c8049131ec121543/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 311, in get_hf_quantizer
    hf_quantizer = AutoHfQuantizer.from_config(
        config.quantization_config,
        pre_quantized=pre_quantized,
    )
  File "/tmp/.cache/uv/environments-v2/c8049131ec121543/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 185, in from_config
    return target_cls(quantization_config, **kwargs)
  File "/tmp/.cache/uv/environments-v2/c8049131ec121543/lib/python3.13/site-packages/transformers/quantizers/quantizer_gptq.py", line 49, in __init__
    raise ImportError("Loading a GPTQ quantized model requires optimum (`pip install optimum`)")
ImportError: Loading a GPTQ quantized model requires optimum (`pip install optimum`)
```
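
The traceback is self-diagnosing: the checkpoint's `quantization_config` marks it as GPTQ-quantized, and transformers hands GPTQ loading off to the `optimum` package, which is missing from this environment. The error message itself names the first fix (`pip install optimum`); depending on your transformers version, a GPTQ kernel backend such as `gptqmodel` or `auto-gptq` may also be needed before `from_pretrained` succeeds. Below is a minimal pre-flight sketch, assuming only the standard library; the backend package names are the usual candidates, not something this traceback confirms:

```python
# Pre-flight dependency check before loading a GPTQ checkpoint.
# `optimum` is the package named in the ImportError above; the GPTQ
# backends listed here are an assumption -- check which one your
# transformers version expects.
import importlib.util


def missing(*modules: str) -> list[str]:
    """Return the subset of module names that cannot be imported."""
    return [m for m in modules if importlib.util.find_spec(m) is None]


if missing("optimum"):
    raise SystemExit("Run: pip install optimum")

# Note: the pip name `auto-gptq` installs the module `auto_gptq`.
if len(missing("gptqmodel", "auto_gptq")) == 2:  # neither backend present
    raise SystemExit("Run: pip install gptqmodel  (or: pip install auto-gptq)")
```

With those packages installed, the original snippet should get past `get_hf_quantizer`; whether the machine then has enough memory for a model of this size is a separate question.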