from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import pipeline, TextStreamer

# Request schema for the /generate endpoint.
class ModelInput(BaseModel):
    prompt: str
    max_new_tokens: int = 4096

app = FastAPI()

# Load the small instruct model once at startup so every request reuses it.
generator = pipeline(
    "text-generation",
    model="HuggingFaceTB/SmolLM2-360M-Instruct",
    device="cpu",
)

# TextStreamer prints tokens to the server's stdout as they are generated;
# it does not stream them to the HTTP client.
streamer = TextStreamer(generator.tokenizer, skip_prompt=True)

def generate_response(prompt: str, max_new_tokens: int = 4096):
    try:
        # Greedy decoding; the streamer echoes tokens to stdout as a side effect.
        output = generator(
            prompt,
            max_new_tokens=max_new_tokens,
            do_sample=False,
            streamer=streamer,
        )

        # The pipeline returns the prompt followed by the completion as one string.
        full_text = output[0]["generated_text"]

        # Strip the echoed prompt so only the completion is returned.
        if full_text.startswith(prompt):
            return full_text[len(prompt):].strip()
        return full_text
    except Exception as e:
        raise ValueError(f"Error generating response: {e}")

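# The /generate endpoint below passes the user's prompt straight to the model.
# If a system prompt such as "You are a helpful assistant." is wanted for this
# instruct model, a helper along these lines (a hypothetical sketch, not part
# of the original code) could fold it in via the tokenizer's chat template:
def build_chat_prompt(user_prompt: str,
                      system_prompt: str = "You are a helpful assistant.") -> str:
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    # apply_chat_template renders the messages into the single string format
    # the instruct model was trained on; add_generation_prompt appends the
    # assistant turn header so the model continues from there.
    return generator.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
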
@app.post("/generate")
async def generate_text(input: ModelInput):
    try:
        response = generate_response(
            prompt=input.prompt,
            max_new_tokens=input.max_new_tokens,
        )
        return {"generated_text": response}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

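# TextStreamer above only echoes tokens to the server's stdout; the HTTP client
# still receives the whole response at once. True HTTP streaming is possible by
# pairing TextIteratorStreamer with FastAPI's StreamingResponse and running the
# blocking pipeline call in a background thread. A sketch (the /generate_stream
# route name is an assumption, not from the original code):
from threading import Thread

from fastapi.responses import StreamingResponse
from transformers import TextIteratorStreamer


@app.post("/generate_stream")
async def generate_text_stream(input: ModelInput):
    # Each request gets its own streamer so concurrent calls don't interleave.
    token_streamer = TextIteratorStreamer(
        generator.tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    # The pipeline call blocks, so run it in a thread; the streamer then yields
    # decoded text chunks as they are produced.
    Thread(
        target=generator,
        args=(input.prompt,),
        kwargs={
            "max_new_tokens": input.max_new_tokens,
            "do_sample": False,
            "streamer": token_streamer,
        },
        daemon=True,
    ).start()
    return StreamingResponse(token_streamer, media_type="text/plain")
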
@app.get("/")
async def root():
    return {"message": "Welcome to the Streaming Model API!"}

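# A quick way to try the API (a sketch; assumes uvicorn is installed). Once the
# server is up:
#
#   curl -X POST http://127.0.0.1:8000/generate \
#        -H 'Content-Type: application/json' \
#        -d '{"prompt": "Explain streaming generation in one sentence."}'
if __name__ == "__main__":
    import uvicorn

    # Serve the app directly when this module is executed as a script.
    uvicorn.run(app, host="127.0.0.1", port=8000)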