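# app.py — Hugging Face Space: probe Falcon-H1-7B-Instruct (with a Falcon-7B
# fallback) on Damascus real-estate questions in Syrian dialect and MSA.
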
import os

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from huggingface_hub import snapshot_download

# ================================
# CONFIGURATION
# ================================
MODEL_NAME_PRIMARY = "tiiuae/Falcon-H1-7B-Instruct"
MODEL_NAME_FALLBACK = "tiiuae/falcon-7b-instruct"
MODEL_LOCAL_DIR = "./falcon_model"
MAX_NEW_TOKENS = 120       # cap on generated tokens per reply
TEMPERATURE = 0.3          # low temperature keeps answers focused
REPETITION_PENALTY = 1.8
| print("🚀 Preparing environment...") | |
| # 1️⃣ Upgrade transformers & accelerate | |
| os.system("pip install --upgrade pip") | |
| os.system("pip install --upgrade transformers accelerate safetensors huggingface_hub") | |

# 2️⃣ Ensure a clean download of the model
# (force_download=True re-fetches the weights on every restart; drop it to
# reuse the local cache once the Space is stable)
try:
    print(f"⬇️ Downloading model: {MODEL_NAME_PRIMARY}")
    snapshot_download(MODEL_NAME_PRIMARY, local_dir=MODEL_LOCAL_DIR, force_download=True)
    model_name = MODEL_LOCAL_DIR
except Exception as e:
    print(f"⚠️ Primary model download failed: {e}")
    print("➡️ Falling back to Falcon 7B Instruct")
    snapshot_download(MODEL_NAME_FALLBACK, local_dir=MODEL_LOCAL_DIR, force_download=True)
    model_name = MODEL_LOCAL_DIR

# 3️⃣ Load tokenizer and model
try:
    print("🔄 Loading tokenizer and model...")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16,
        trust_remote_code=True,
        device_map="auto",
        low_cpu_mem_usage=True,
    )
    # The model is already dispatched by accelerate (device_map="auto"), so the
    # pipeline must not get a `device=` argument — transformers rejects moving
    # an accelerate-dispatched model to a specific device.
    generator = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
    )
    print("✅ Model loaded successfully")
    model_loaded = True
except Exception as e:
    print(f"❌ Model loading failed: {e}")
    generator = None
    model_loaded = False
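
# Note: ~7B parameters in float16 is roughly 14 GB of weights, so this will not
# run on a CPU-only (free) Space; with device_map="auto", accelerate offloads
# layers to CPU RAM if the GPU is too small.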

# ================================
# Test Questions (Pre-Filled)
# ================================
# Syrian-dialect / MSA prompts; English glosses in the comments.
test_questions = [
    "بدي شقة بالمالكي فيها شرفة وغسالة صحون.",  # "I want a flat in Malki with a balcony and a dishwasher."
    "هل في شقة دوبلكس بالمزة الفيلات فيها موقفين سيارة؟",  # "Is there a duplex in Mazzeh Villas with two parking spots?"
    "بدي بيت عربي قديم بباب توما مع حديقة داخلية.",  # "I want an old Arab house in Bab Touma with an inner courtyard garden."
    "أرخص شقة بالشعلان شو سعرها؟",  # "What's the price of the cheapest flat in Shaalan?"
    "هل يوجد شقق بإطلالة جبلية في أبو رمانة؟",  # "Are there apartments with a mountain view in Abu Rummaneh?"
    "بدي شقة مفروشة بالكامل بالمزة ٨٦، الطابق الأول.",  # "I want a fully furnished flat in Mazzeh 86, first floor."
    "عندك منزل مستقل بالمهاجرين مع موقد حطب؟",  # "Do you have a detached house in Muhajireen with a wood stove?"
]

# ================================
# Falcon Chat Function
# ================================
def chat_falcon(user_input):
    if not model_loaded:
        # "The model is not loaded. Check the settings."
        return "❌ النموذج غير محمل. تحقق من الإعدادات."
    # Prompt (Arabic): "You are a smart real-estate assistant. Answer in one or
    # two clear sentences.\nQuestion: {user_input}\nAnswer:"
    prompt = f"أنت مساعد عقارات ذكي. أجب بجملة أو جملتين واضحتين.\nالسؤال: {user_input}\nالجواب:"
    output = generator(
        prompt,
        max_new_tokens=MAX_NEW_TOKENS,
        do_sample=True,
        temperature=TEMPERATURE,
        repetition_penalty=REPETITION_PENALTY,
        top_p=0.9,
    )[0]["generated_text"]
    # The pipeline echoes the prompt, so return only the generated continuation.
    return output[len(prompt):].strip()
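
# Hypothetical quick sanity check without launching the UI:
#   print(chat_falcon(test_questions[0]))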

# ================================
# Build Gradio Interface
# ================================
with gr.Blocks() as demo:
    gr.Markdown("## 🏠 Falcon H1 7B Instruct - Damascus Real Estate Test")
    # "Test the model's ability to understand questions in Arabic (Syrian dialect or MSA)"
    gr.Markdown("اختبر قدرة النموذج على فهم الأسئلة بالعربية (لهجة سورية أو فصحى)")
    with gr.Row():
        with gr.Column(scale=2):
            # Label: "Write your question here"; placeholder: "e.g. I want a flat in Mazzeh with a balcony"
            user_input = gr.Textbox(label="اكتب سؤالك هنا", lines=3, placeholder="مثال: بدي شقة بالمزة فيها بلكون")
            submit_btn = gr.Button("🔎 أرسل")  # "Send"
        with gr.Column(scale=1):
            # "Ready-made questions"
            suggestions = gr.Dropdown(choices=test_questions, label="🧾 أسئلة جاهزة", value=test_questions[0])
    output_box = gr.Textbox(label="إجابة النموذج", lines=8)  # "Model answer"

    submit_btn.click(fn=chat_falcon, inputs=user_input, outputs=output_box)
    # Selecting a pre-filled question runs it immediately.
    suggestions.change(fn=chat_falcon, inputs=suggestions, outputs=output_box)

demo.launch()
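
# On Spaces the app is served automatically; locally, `python app.py` prints a
# local URL (Gradio defaults to http://127.0.0.1:7860).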