import streamlit as st
import pandas as pd
# Page config
st.set_page_config(page_title="Equalify CSV Dashboard", layout="wide")
# Title
st.title("Equalify CSV Dashboard")
# Load CSV
df = pd.read_csv("input.csv")
# Group by URL and count unique Node IDs
url_summary = df.groupby("URL")["Node ID"].nunique().reset_index()
url_summary.columns = ["URL", "Number of Unique Nodes"]
url_summary = url_summary.sort_values(by="Number of Unique Nodes", ascending=False)
# Group by Messages and count unique Node IDs
summary = df.groupby("Messages")["Node ID"].nunique().reset_index()
summary.columns = ["Message", "Number of Unique Nodes"]
summary = summary.sort_values(by="Number of Unique Nodes", ascending=False)
# Display URL summary table
st.subheader("Node Counts by URL")
st.dataframe(url_summary)
# Separate messages into violations and warnings
violations = summary[summary["Message"].str.lower().str.startswith("violation:")]
warnings = summary[summary["Message"].str.lower().str.startswith("warning:")]
# Display summary tables
st.subheader("Violations")
st.dataframe(violations)
st.subheader("Warnings")
st.dataframe(warnings)