Spaces:
Sleeping
| from transformers import AutoTokenizer, AutoModelForCausalLM | |
| import os | |
| MODEL_NAME = "Yuk050/gemma-3-1b-text-to-sql-model" | |
| LOCAL_DIR = "./model_cache" | |
| _tokenizer = None | |
| _model = None | |
def load_model():
    """Return the (tokenizer, model) pair, loading them on first call.

    The pair is cached in the module-level ``_tokenizer`` / ``_model``
    singletons, so repeated calls are cheap.  On a cold start the weights
    are read from ``LOCAL_DIR`` when a cached copy exists; otherwise they
    are downloaded from the Hub under ``MODEL_NAME`` and a copy is saved
    into ``LOCAL_DIR`` so the next start skips the download.
    """
    global _tokenizer, _model

    # Fast path: both singletons already initialized.
    if _model is not None and _tokenizer is not None:
        return _tokenizer, _model

    print("π Loading model...")
    have_local_copy = os.path.exists(LOCAL_DIR)
    source = LOCAL_DIR if have_local_copy else MODEL_NAME
    _tokenizer = AutoTokenizer.from_pretrained(source)
    # NOTE(review): trust_remote_code executes model-repo code — assumed
    # intentional for this checkpoint; confirm the repo is trusted.
    _model = AutoModelForCausalLM.from_pretrained(source, trust_remote_code=True)
    if not have_local_copy:
        # Persist the freshly downloaded weights for subsequent starts.
        os.makedirs(LOCAL_DIR, exist_ok=True)
        _tokenizer.save_pretrained(LOCAL_DIR)
        _model.save_pretrained(LOCAL_DIR)
    print("β Model loaded successfully!")
    return _tokenizer, _model