Update app.py
app.py (changed)
@@ -27,10 +27,10 @@ st.markdown("Predict 30-day readmission risk with LLM explanations powered by SH
 with st.sidebar:
     st.caption(f"🧠 Model Version: `{MODEL_VERSION}`")
     model_choice = st.selectbox("Choose LLM for explanation", [
+        "meta-llama/Llama-3.2-3B-Instruct",
         "deepcogito/cogito-v1-preview-llama-3B",
         "microsoft/Phi-4-mini-instruct",
         "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
-        "meta-llama/Llama-3.2-3B-Instruct",
         "TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
     ])
 
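This hunk moves `meta-llama/Llama-3.2-3B-Instruct` to the top of the list, which changes the default selection: `st.selectbox` preselects the first option (`index=0`) unless told otherwise. A minimal sketch of the alternative, keeping the list order and setting `index` explicitly (`MODELS` is an illustrative name, not from the repo):

```python
import streamlit as st

MODELS = [
    "deepcogito/cogito-v1-preview-llama-3B",
    "microsoft/Phi-4-mini-instruct",
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
    "meta-llama/Llama-3.2-3B-Instruct",
    "TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
]

# st.selectbox defaults to index=0; pointing index at the Llama entry
# makes it the default without reordering the list.
model_choice = st.selectbox(
    "Choose LLM for explanation",
    MODELS,
    index=MODELS.index("meta-llama/Llama-3.2-3B-Instruct"),
)
```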
@@ -75,31 +75,30 @@ st.markdown(f"### Prediction Result: {pred_label}")
 st.markdown(f"**Predicted Probability:** `{pred_proba:.2%}`")
 st.markdown(f"**LLM Model Used:** `{model_choice}`")
 
-# LLM Explanation
-
-
-
-explanation = explain_prediction(
-    patient_id=patient["Patient_ID"],
-    patient_data=patient.to_dict(),
-    model_name=model_choice,
-    _model=model,
-    _client=llm
-)
-st.success("### LLM Explanation\n" + explanation)
-except Exception as e:
-    fallback = f"Unable to generate LLM explanation due to error: {e}"
-    st.warning(f"⚠️ {fallback}")
-    explanation = fallback
-
-st.session_state["log_df"] = log_explanation(
-    st.session_state["log_df"],
-    patient_id=patient["Patient_ID"],
-    model_name=model_choice,
-
-
-    explanation=explanation
-)
+# LLM Explanation auto-update
+with st.spinner(f"Generating explanation with {model_choice}..."):
+    try:
+        explanation = explain_prediction(
+            patient_id=patient["Patient_ID"],
+            patient_data=patient.to_dict(),
+            model_name=model_choice,
+            _model=model,
+            _client=llm
+        )
+        st.success("### LLM Explanation\n" + explanation)
+    except Exception as e:
+        fallback = f"Unable to generate LLM explanation due to error: {e}"
+        st.warning(f"⚠️ {fallback}")
+        explanation = fallback
+
+    st.session_state["log_df"] = log_explanation(
+        st.session_state["log_df"],
+        patient_id=patient["Patient_ID"],
+        model_name=model_choice,
+        prediction=pred_proba,
+        shap_summary="SHAP summary internal only",
+        explanation=explanation
+    )
 
 # Log download
 if not st.session_state["log_df"].empty:
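The removed block pairs an `except` with no visible `try:` in the hunk, which the rewrite fixes by wrapping the call in `st.spinner` and a proper `try/except`, while also logging `prediction` and `shap_summary`. The leading underscores on `_model` and `_client` suggest `explain_prediction` is wrapped in `@st.cache_data`, which skips hashing parameters whose names start with `_`. A hedged sketch of what the helper might look like under that assumption; the prompt and the OpenAI-style `_client.chat.completions.create` call are illustrative, not the repository's actual implementation:

```python
import streamlit as st

@st.cache_data(show_spinner=False)
def explain_prediction(patient_id, patient_data, model_name, _model, _client):
    """Return an LLM-written explanation for one patient's prediction.

    `_model` and `_client` start with "_" so st.cache_data does not try
    to hash them; the cache key is (patient_id, patient_data, model_name).
    """
    # Illustrative only: build a prompt from the patient record and ask
    # the selected LLM for a plain-language explanation.
    prompt = (
        f"Explain the 30-day readmission risk for patient {patient_id} "
        f"given these features: {patient_data}"
    )
    response = _client.chat.completions.create(
        model=model_name,
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content
```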
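`log_explanation` evidently appends one row per run to the DataFrame held in `st.session_state["log_df"]`, which the `# Log download` block then offers for export. A minimal sketch of that pattern, assuming pandas and a CSV export via `st.download_button`; the column names mirror the diff's keyword arguments, and the filename is illustrative:

```python
import pandas as pd
import streamlit as st

def log_explanation(log_df, patient_id, model_name, prediction,
                    shap_summary, explanation):
    """Append one audit row and return the updated log DataFrame."""
    row = pd.DataFrame([{
        "patient_id": patient_id,
        "model_name": model_name,
        "prediction": prediction,
        "shap_summary": shap_summary,
        "explanation": explanation,
    }])
    return pd.concat([log_df, row], ignore_index=True)

# Log download (mirrors the context lines at the end of the hunk)
if not st.session_state["log_df"].empty:
    st.download_button(
        "Download explanation log",
        data=st.session_state["log_df"].to_csv(index=False),
        file_name="explanation_log.csv",  # illustrative filename
        mime="text/csv",
    )
```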