import streamlit as st
from transformers import AutoTokenizer, AutoModel, AutoModelForSequenceClassification, pipeline
import torch
import plotly.express as px
import numpy as np
from utils import visualize_attention, list_supported_models  # local helper module (sketched below)
st.set_page_config(page_title="Transformer Visualizer", layout="wide")
st.title("🧠 Transformer Visualizer")
st.markdown("Explore how Transformer models process and understand language.")
task = st.sidebar.selectbox("Select Task", ["Text Classification", "Text Generation", "Question Answering"])
model_name = st.sidebar.selectbox("Select Model", list_supported_models(task))
text_input = st.text_area("Enter input text", "The quick brown fox jumps over the lazy dog.")
if st.button("Run"):
    st.info(f"Loading model: `{model_name}`...")
    tokenizer = AutoTokenizer.from_pretrained(model_name)

    # Classification gets a task-specific head; other tasks use the bare encoder.
    # output_attentions=True makes the model return per-layer attention weights.
    if task == "Text Classification":
        model = AutoModelForSequenceClassification.from_pretrained(model_name, output_attentions=True)
    else:
        model = AutoModel.from_pretrained(model_name, output_attentions=True)

    inputs = tokenizer(text_input, return_tensors="pt")
    with torch.no_grad():  # inference only, no gradients needed
        outputs = model(**inputs)
    attentions = outputs.attentions
    st.success("Model inference complete!")

    if attentions:
        st.subheader("Attention Visualization")
        fig = visualize_attention(attentions, tokenizer, inputs)
        st.plotly_chart(fig, use_container_width=True)
    else:
        st.warning("This model does not return attention weights.")

    if task == "Text Classification":
        st.subheader("Prediction")
        pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
        prediction = pipe(text_input)
        st.write(prediction)

st.sidebar.markdown("---")
st.sidebar.write("App by Rahiya Esar")
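
The app imports `visualize_attention` and `list_supported_models` from a companion `utils` module that is not shown on this page. The sketch below is only an assumption about what such a module could contain, written to make the listing above self-contained for readers: the per-task model lists are illustrative examples, and `visualize_attention` plots one layer/head of the attention tensors that Transformers returns when a model is loaded with `output_attentions=True`. It is not the Space's actual utils.py.

# utils.py -- hypothetical sketch, separate file; not the Space's real helper module.
import plotly.express as px


def list_supported_models(task):
    # Illustrative Hugging Face Hub checkpoints per task; the real Space may expose different ones.
    models = {
        "Text Classification": ["distilbert-base-uncased-finetuned-sst-2-english", "bert-base-uncased"],
        "Text Generation": ["gpt2", "distilgpt2"],
        "Question Answering": ["distilbert-base-cased-distilled-squad", "bert-base-uncased"],
    }
    return models.get(task, ["bert-base-uncased"])


def visualize_attention(attentions, tokenizer, inputs, layer=-1, head=0):
    # attentions: tuple of tensors, one per layer, each shaped (batch, num_heads, seq_len, seq_len).
    tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
    attn = attentions[layer][0, head].detach().numpy()  # (seq_len, seq_len) for one layer/head
    fig = px.imshow(
        attn,
        x=tokens,
        y=tokens,
        color_continuous_scale="Blues",
        labels={"x": "Key token", "y": "Query token", "color": "Attention"},
    )
    fig.update_layout(title=f"Attention weights (layer {layer}, head {head})")
    return fig

If the Space is cloned locally, placing a utils.py along these lines next to app.py and running `streamlit run app.py` should reproduce the interface; the heatmap shows, for the chosen layer and head, how strongly each query token attends to each key token.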