import tempfile

import streamlit as st
import torch
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
from diffusers import StableDiffusionPipeline
from torchvision import models
from torchvision.io import read_video
# Load all models once at start-up (in a real app these would be wrapped in
# st.cache_resource so Streamlit does not reload them on every rerun).
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-multi")
model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen-350M-multi")
code_generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
weights = models.ResNet50_Weights.DEFAULT
video_model = models.resnet50(weights=weights).eval()
preprocess = weights.transforms()
| st.title("FallnAI Inference App") | |
| st.subheader("Coding Model") | |
| user_input = st.text_input("Enter your code:") | |
| if st.button("Generate"): | |
| result = pipeline("text-generation", model=model, tokenizer=tokenizer)(user_input) | |
| st.write(result[0]['generated_text']) | |
| st.subheader("Stable Diffusion Model") | |
| prompt = st.text_input("Enter your prompt:") | |
| if st.button("Generate"): | |
| image = pipe(prompt).images[0] | |
| st.image(image) | |
| st.subheader("Video Model") | |
| video_file = st.file_uploader("Upload a video file:", type=["mp4", "avi"]) | |
| if video_file is not None: | |
| video_bytes = video_file.getvalue() | |
| st.video(video_bytes) | |
| video_transformed = video_model(video_bytes) | |
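# To try the app, save this script locally (e.g. as app.py, an assumed
# filename) and launch it with the Streamlit CLI:
#     streamlit run app.py
# The first run downloads the CodeGen and Stable Diffusion checkpoints from the
# Hugging Face Hub and the ResNet-50 weights from torchvision.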