# setwd('~/Dropbox/ImageSeq/') # Set your working directory if needed
options(error = NULL)
library(shiny)
library(dplyr)
library(fields) # For image.plot in heatMap
library(akima) # For interpolation
MAX_PLOT_DIM <- 600
safe_dim <- function(client_name, cap = MAX_PLOT_DIM,
                     session = shiny::getDefaultReactiveDomain()) {
  if (!is.null(session)) { # Shiny context?
    cd <- session$clientData[[client_name]]
    if (!is.null(cd)) return(min(cap, cd)) # clamp to cap
  }
  cap # fallback
}
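# Usage sketch (hypothetical reported width): inside renderPlot(), the client-reported
# dimension is clamped to MAX_PLOT_DIM, e.g.
#   safe_dim("output_heatmapPlot_width")  # -> min(600, reported width), or 600 if unknown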
# Load the data from sm.csv
# Ensure 'sm.csv' is in the same directory as the app.R file or provide the full path.
# Add error handling for file loading
sm <- tryCatch({
read.csv("sm.csv")
}, error = function(e) {
stop("Error loading sm.csv: ", e$message, "\nPlease ensure 'sm.csv' is in the application directory.")
})
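# Columns referenced by this app (the full sm.csv may contain more):
#   application, optimizeImageRep, MaxImageDims (e.g., "64_128"),
#   plus the metric columns listed in 'metric_choices' further down in this file
#   (AUTOC_rate_std_ratio_mean, AUTOC_rate_mean, ..., RMSE).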
# Define function to convert to numeric
f2n <- function(x) as.numeric(as.character(x))
# Compute MaxImageDimsLeft and MaxImageDimsRight from MaxImageDims
# Handle potential errors if split doesn't work as expected
sm$MaxImageDimsLeft <- tryCatch({
unlist(lapply(strsplit(as.character(sm$MaxImageDims), split = "_"), function(x) sort(f2n(x))[1]))
}, error = function(e) {
warning("Could not parse MaxImageDimsLeft from MaxImageDims. Check format (e.g., '64_128').")
NA # Assign NA or a default value
})
sm$MaxImageDimsRight <- tryCatch({
unlist(lapply(strsplit(as.character(sm$MaxImageDims), split = "_"), function(x) sort(f2n(x))[2]))
}, error = function(e) {
warning("Could not parse MaxImageDimsRight from MaxImageDims. Check format (e.g., '64_128').")
NA # Assign NA or a default value
})
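# Worked example of the split above (assuming the "64_128"-style format):
#   strsplit("64_128", split = "_")[[1]]  # -> c("64", "128")
#   sort(f2n(c("64", "128")))[1]          # -> 64  (MaxImageDimsLeft)
#   sort(f2n(c("64", "128")))[2]          # -> 128 (MaxImageDimsRight)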
# Handle cases where parsing might have failed or where Right dim might be missing for single scale
sm <- sm %>%
mutate(
MaxImageDimsLeft = f2n(MaxImageDimsLeft), # Ensure numeric
MaxImageDimsRight = f2n(MaxImageDimsRight), # Ensure numeric
# If Right is NA after parsing (or originally missing), assume it's the same as Left (single scale)
MaxImageDimsRight = ifelse(is.na(MaxImageDimsRight), MaxImageDimsLeft, MaxImageDimsRight)
)
# Remove rows where essential dimensions couldn't be determined
sm <- sm %>% filter(!is.na(MaxImageDimsLeft) & !is.na(MaxImageDimsRight))
# Heatmap helper: interpolates (x, y, z) onto an N x N grid via akima::interp and draws it
# with fields::image.plot (aesthetics are controlled in server())
heatMap <- function(x, y, z,
main = "",
N, yaxt = NULL,
xlab = "",
ylab = "",
horizontal = FALSE,
useLog = "",
legend.width = 1,
ylim = NULL,
xlim = NULL,
zlim = NULL,
add.legend = TRUE,
legend.only = FALSE,
vline = NULL,
col_vline = "black",
hline = NULL,
col_hline = "black",
cex.lab = 1.3, # Default adjusted slightly
cex.main = 1.5, # Default adjusted slightly
myCol = NULL,
includeMarginals = FALSE,
marginalJitterSD_x = 0.01,
marginalJitterSD_y = 0.01,
openBrowser = FALSE,
optimal_point = NULL) {
if (openBrowser) { browser() }
# Ensure finite values for interpolation range finding
finite_x <- x[is.finite(x)]
finite_y <- y[is.finite(y)]
if(length(finite_x) == 0 || length(finite_y) == 0) {
warning("Insufficient finite x or y data for interpolation range.")
return(NULL) # Cannot proceed
}
min_x <- min(finite_x, na.rm = TRUE)
max_x <- max(finite_x, na.rm = TRUE)
min_y <- min(finite_y, na.rm = TRUE)
max_y <- max(finite_y, na.rm = TRUE)
# Ensure xo and yo sequences are valid
if (min_x == max_x) { max_x <- min_x + 1e-6 } # Avoid zero range
if (min_y == max_y) { max_y <- min_y + 1e-6 } # Avoid zero range
xo_seq <- seq(min_x, max_x, length = N)
yo_seq <- seq(min_y, max_y, length = N)
# Perform interpolation
s_ <- tryCatch({
akima::interp(x = x, y = y, z = z,
xo = xo_seq,
yo = yo_seq,
duplicate = "mean",
linear = TRUE) # Use linear interpolation by default
}, error = function(e) {
warning("Akima interpolation failed: ", e$message)
return(NULL) # Return NULL if interp fails
})
if (is.null(s_)) return(NULL) # Exit if interpolation failed
if (is.null(xlim)) { xlim = range(s_$x, finite = TRUE) }
if (is.null(ylim)) { ylim = range(s_$y, finite = TRUE) }
# Default color palette if none provided
if (is.null(myCol)) { myCol = hcl.colors(50, palette = "YlOrRd", rev = TRUE) }
imageFxn <- if (add.legend) fields::image.plot else graphics::image
if (!grepl(useLog, pattern = "z")) {
imageFxn(s_, xlab = xlab, ylab = ylab, log = useLog, cex.lab = cex.lab, main = main,
cex.main = cex.main, col = myCol, xlim = xlim, ylim = ylim,
legend.width = legend.width, horizontal = horizontal, yaxt = yaxt,
zlim = zlim, legend.only = legend.only)
} else {
useLog <- gsub(useLog, pattern = "z", replace = "")
z_finite <- s_$z[is.finite(s_$z)]
if (length(z_finite) == 0 || all(z_finite <= 0)) {
warning("Cannot compute log scale for z: All finite values are non-positive.")
# Fallback to non-log scale or plot without z-log
imageFxn(s_, xlab = xlab, ylab = ylab, log = useLog, cex.lab = cex.lab, main = paste(main, "(z-log failed)"),
cex.main = cex.main, col = myCol, xlim = xlim, ylim = ylim,
legend.width = legend.width, horizontal = horizontal, yaxt = yaxt,
zlim = zlim, legend.only = legend.only)
} else {
zTicks <- pretty(range(log(z_finite[z_finite > 0]), na.rm = TRUE), n = 5) # Use pretty for nice log ticks
zTickLabels <- signif(exp(zTicks), 2) # Nicer labels
# ep_ <- min(z_finite[z_finite > 0], na.rm=TRUE) * 0.1 # Small positive value based on data
ep_ <- 1e-9 # Or a small fixed epsilon
s_$z[s_$z <= ep_] <- ep_ # Replace non-positive with epsilon for log
imageFxn(s_$x, s_$y, log(s_$z), yaxt = yaxt,
axis.args = list(at = zTicks, labels = zTickLabels),
main = main, cex.main = cex.main, xlab = xlab, ylab = ylab,
log = useLog, cex.lab = cex.lab, xlim = xlim, ylim = ylim,
horizontal = horizontal, col = myCol, legend.width = legend.width,
zlim = if(!is.null(zlim)) log(zlim) else NULL, # Apply log to zlim if provided
legend.only = legend.only)
}
}
if (!is.null(vline)) { abline(v = vline, lwd = 3, col = col_vline, lty = 2) } # Thinner, dashed line
if (!is.null(hline)) { abline(h = hline, lwd = 3, col = col_hline, lty = 2) } # Thinner, dashed line
if (includeMarginals) {
  points(x + rnorm(length(x), sd = marginalJitterSD_x * sd(x, na.rm = TRUE)),
         rep(ylim[1] + 0.02 * diff(ylim), length(x)), # Position just above the bottom axis
         pch = "|", col = "darkgray")
  points(rep(xlim[1] + 0.02 * diff(xlim), length(y)), # Position just right of the left axis
         y + rnorm(length(y), sd = sd(y, na.rm = TRUE) * marginalJitterSD_y),
         pch = "-", col = "darkgray")
}
# Add green star at optimal point if provided and valid
if (!is.null(optimal_point) && is.finite(optimal_point$x) && is.finite(optimal_point$y)) {
points(optimal_point$x, optimal_point$y, pch = 8, col = "green", cex = 2.5, lwd = 3) # Slightly smaller star
}
}
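# Minimal standalone sketch of heatMap() on synthetic data (not run when the app is
# sourced; the toy dimensions and values below are illustrative only, while N = 50 and
# useLog = "xy" mirror how the server calls it):
if (FALSE) {
  set.seed(1)
  demo_x <- rep(c(64, 128, 256), each = 3)   # toy "Dimension 1" values
  demo_y <- rep(c(64, 128, 256), times = 3)  # toy "Dimension 2" values
  demo_z <- rnorm(9, mean = demo_x / 100 + demo_y / 200)  # toy metric values
  heatMap(x = demo_x, y = demo_y, z = demo_z, N = 50,
          main = "Synthetic example",
          xlab = "Dim 1 (log scale)", ylab = "Dim 2 (log scale)",
          useLog = "xy",
          optimal_point = list(x = 128, y = 128))
}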
##############################################################################
# IMPORTANT: Store the meaningful labels for metric in a named vector.
# The "name" is what is displayed to the user in the dropdown,
# while the "value" is the underlying column in the dataset.
##############################################################################
metric_choices <- c(
"Mean AUTOC RATE Ratio" = "AUTOC_rate_std_ratio_mean",
"Mean AUTOC RATE" = "AUTOC_rate_mean",
"Mean SD of AUTOC RATE" = "AUTOC_rate_std_mean",
"Mean AUTOC RATE Ratio with PC" = "AUTOC_rate_std_ratio_mean_pc",
"Mean AUTOC RATE with PC" = "AUTOC_rate_mean_pc",
"Mean SD of AUTOC RATE with PC" = "AUTOC_rate_std_mean_pc",
"Mean Variable Importance (Img 1)" = "MeanVImportHalf1", # Shorter label
"Mean Variable Importance (Img 2)" = "MeanVImportHalf2", # Shorter label
"Mean Frac Top k Feats (Img 1)" = "FracTopkHalf1", # Shorter label
"Mean RMSE" = "RMSE"
)
##############################################################################
# Helper function to retrieve the *label* from its code
##############################################################################
getMetricLabel <- function(metric_value) {
# This returns, e.g., "Mean AUTOC RATE" if metric_value == "AUTOC_rate_mean".
# If it doesn't find a match, return the code itself.
lbl <- names(metric_choices)[which(metric_choices == metric_value)]
if (length(lbl) == 0 || is.na(lbl)) return(metric_value) # Handle NA/no match
lbl
}
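# e.g., getMetricLabel("AUTOC_rate_mean") returns "Mean AUTOC RATE";
#       getMetricLabel("unknown_code")   returns "unknown_code" (no match).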
# UI Definition
ui <- fluidPage(
titlePanel("Multiscale Representations Forge"),
tags$head(
# Add some basic CSS for better spacing/responsiveness if needed
tags$style(HTML("
.shiny-plot-output { /* Ensure plot output behaves well */
margin: auto; /* Center if container allows */
}
.control-label { /* Ensure labels are readable */
font-weight: bold;
}
#contextNote { /* Style for the context note */
margin-top: 15px;
font-size: 0.9em; /* Slightly smaller font */
line-height: 1.6; /* Better readability */
}
#share-button { margin-bottom: 15px; } /* Add space below share button */
"))
),
tags$p(
style = "text-align: left; margin-top: -10px; margin-bottom: 10px;", # Added margin-bottom
tags$a(
href = "https://planetarycausalinference.org/",
target = "_blank",
title = "PlanetaryCausalInference.org",
style = "color: #337ab7; text-decoration: none; font-weight: bold;", # Make link bold
"PlanetaryCausalInference.org ",
icon("external-link", style = "font-size: 12px;")
)
),
# ---- Share button HTML + JS ----
tags$div(
style = "text-align: left;", # Removed fixed margin
HTML('
<button id="share-button"
style="
display: inline-flex;
align-items: center;
justify-content: center;
gap: 8px;
padding: 5px 10px;
font-size: 14px; /* Slightly smaller font */
font-weight: normal;
color: #333; /* Darker text */
background-color: #f8f9fa; /* Lighter background */
border: 1px solid #ccc; /* Lighter border */
border-radius: 4px; /* Smaller radius */
cursor: pointer;
box-shadow: 0 1px 1px rgba(0,0,0,0.05); /* Softer shadow */
">
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor"
stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
<circle cx="18" cy="5" r="3"></circle>
<circle cx="6" cy="12" r="3"></circle>
<circle cx="18" cy="19" r="3"></circle>
<line x1="8.59" y1="13.51" x2="15.42" y2="17.49"></line>
<line x1="15.41" y1="6.51" x2="8.59" y2="10.49"></line>
</svg>
<strong>Share</strong>
</button>
'),
tags$script(
HTML("
(function() {
const shareBtn = document.getElementById('share-button');
if (!shareBtn) return; // Exit if button not found
function showCopyNotification() {
const notification = document.createElement('div');
notification.innerText = 'Link copied!'; /* Shorter message */
notification.style.position = 'fixed';
notification.style.bottom = '15px'; /* Adjust position */
notification.style.left = '50%'; /* Center horizontally */
notification.style.transform = 'translateX(-50%)'; /* Correct centering */
notification.style.backgroundColor = 'rgba(0, 0, 0, 0.75)';
notification.style.color = '#fff';
notification.style.padding = '8px 15px'; /* Adjust padding */
notification.style.borderRadius = '4px';
notification.style.fontSize = '14px'; /* Match button font */
notification.style.zIndex = '10000'; /* Ensure visibility */
notification.style.boxShadow = '0 2px 5px rgba(0,0,0,0.2)'; /* Add shadow */
document.body.appendChild(notification);
setTimeout(() => { notification.remove(); }, 1500); /* Shorter duration */
}
shareBtn.addEventListener('click', function() {
const currentURL = window.location.href;
const pageTitle = document.title || 'Multiscale Explorer';
if (navigator.share) {
navigator.share({
title: pageTitle,
text: 'Check out this multiscale analysis:', /* Add context */
url: currentURL
})
.catch((error) => {
// If user cancels share, don't log error unless it's a real failure
if (error.name !== 'AbortError') {
console.log('Sharing failed', error);
}
});
} else if (navigator.clipboard && navigator.clipboard.writeText) {
navigator.clipboard.writeText(currentURL).then(() => {
showCopyNotification();
}, (err) => {
console.error('Could not copy text: ', err);
// Fallback alert if clipboard fails unexpectedly
alert('Failed to copy link. Please copy manually:\\n' + currentURL);
});
} else {
// Basic fallback for very old browsers
try {
const textArea = document.createElement('textarea');
textArea.value = currentURL;
textArea.style.position = 'fixed'; // Prevent scrolling
textArea.style.opacity = '0'; // Hide element
document.body.appendChild(textArea);
textArea.select();
document.execCommand('copy');
showCopyNotification();
document.body.removeChild(textArea);
} catch (err) {
alert('Sharing not supported. Please copy this link manually:\\n' + currentURL);
}
}
});
})();
")
)
),
# ---- End: Share button snippet ----
sidebarLayout(
sidebarPanel(
width = 3, # Explicitly set sidebar width (adjust as needed 1-12)
selectInput("application", "Application:", # Colon for clarity
choices = unique(sm$application),
selected = unique(sm$application)[1]),
selectInput("model", "Model:",
choices = unique(sm$optimizeImageRep),
selected = "clip-rsicd"),
########################################################################
# Use our named vector 'metric_choices' directly in selectInput
########################################################################
selectInput("metric", "Metric:",
choices = metric_choices,
selected = "AUTOC_rate_std_ratio_mean"),
checkboxInput("compareToBest", "Compare to best single scale?", value = FALSE), # Question format
# Add some explanation directly in the sidebar
tags$hr(), # Horizontal line separator
tags$p(tags$small("Adjust parameters to explore how multiscale image representations impact model performance or heterogeneity discovery across different applications."))
),
mainPanel(
width = 9, # Explicitly set main panel width (should sum to 12 with sidebar)
# Wrap plot in a div for potential future styling/sizing control
div(
# Plot output: fixed 500px height, full panel width
plotOutput("heatmapPlot", height = "500px", width = "100%")
),
# Vertical space between the plot and the context note
br(),
tags$div(style = "margin-bottom: 80px;"),
# Use uiOutput for potentially HTML content in the note
uiOutput("contextNote")
)
)
)
# Server Definition
server <- function(input, output, session) { # Add session argument
# Function to determine whether to maximize or minimize the metric
get_better_direction <- function(metric_value) {
# Assuming lower SD and lower RMSE are better
if (grepl("std_mean|RMSE", metric_value, ignore.case = TRUE)) {
"min"
} else {
"max" # Assume higher is better for others (RATE, Ratio, VImport, FracTopk)
}
}
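# e.g., get_better_direction("RMSE")                      -> "min"
#       get_better_direction("AUTOC_rate_std_mean")       -> "min"
#       get_better_direction("AUTOC_rate_std_ratio_mean") -> "max"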
# Reactive data processing
filteredData <- reactive({
req(input$application, input$model) # Ensure inputs are available
df <- sm %>%
filter(application == input$application,
optimizeImageRep == input$model) %>%
# Ensure dimensions are numeric before filtering/grouping
mutate(
MaxImageDimsLeft = as.numeric(MaxImageDimsLeft),
MaxImageDimsRight = as.numeric(MaxImageDimsRight),
metric_value = as.numeric(get(input$metric)) # Get chosen metric value
) %>%
filter(is.finite(MaxImageDimsLeft) & is.finite(MaxImageDimsRight) & is.finite(metric_value)) # Keep only valid rows
# Check if data exists after filtering
if (nrow(df) == 0) {
warning("No valid data found for the selected Application/Model/Metric combination.")
return(NULL)
}
df
})
# Reactive expression to compute grouped/summarized data and best single scale
summaryData <- reactive({
data <- filteredData()
req(data) # Require filtered data
# Group data
grouped_data <- data %>%
group_by(MaxImageDimsLeft, MaxImageDimsRight) %>%
summarise(
mean_metric = mean(metric_value, na.rm = TRUE),
se_metric = sd(metric_value, na.rm = TRUE) / sqrt(n()),
n = n(),
.groups = "drop"
) %>%
filter(is.finite(mean_metric)) # Ensure mean is valid after aggregation
if (nrow(grouped_data) < 3) {
warning("Fewer than 3 unique dimension pairs after grouping; cannot interpolate.")
return(NULL) # Not enough data points for reliable interpolation
}
# Check variability in dimensions needed for interpolation
if (length(unique(grouped_data$MaxImageDimsLeft)) < 2 || length(unique(grouped_data$MaxImageDimsRight)) < 2) {
warning("Insufficient variability in one or both image dimensions for interpolation.")
return(NULL)
}
better_dir <- get_better_direction(input$metric)
# Calculate best single scale metric *from the summarized data*
single_scale_data <- grouped_data %>% filter(MaxImageDimsLeft == MaxImageDimsRight)
best_single_scale_metric <- if (nrow(single_scale_data) > 0) {
if (better_dir == "max") {
max(single_scale_data$mean_metric, na.rm = TRUE)
} else {
min(single_scale_data$mean_metric, na.rm = TRUE)
}
} else {
NA # No single scale data available for comparison
}
# Calculate improvement only if best_single_scale_metric is valid
if (is.finite(best_single_scale_metric)) {
grouped_data <- grouped_data %>%
mutate(improvement = if (better_dir == "max") {
mean_metric - best_single_scale_metric
} else {
best_single_scale_metric - mean_metric
})
} else {
# If no valid single-scale baseline, improvement cannot be calculated
grouped_data <- grouped_data %>% mutate(improvement = NA_real_)
# Optionally disable the checkbox if comparison isn't possible
# updateCheckboxInput(session, "compareToBest", value = FALSE, label = "Compare to best single scale (N/A)")
# shinyjs::disable("compareToBest") # Requires shinyjs package
}
list(
grouped_data = grouped_data,
best_single_scale_metric = best_single_scale_metric,
better_dir = better_dir
)
})
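# Worked example of the 'improvement' column (illustrative numbers): with better_dir = "max",
# a best single-scale mean of 0.80 and a multiscale cell mean of 0.92 give
# improvement = 0.92 - 0.80 = 0.12; with better_dir = "min" the subtraction is reversed.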
# Reactive expression for interpolation (depends on summaryData)
interpolatedData <- reactive({
sumData <- summaryData()
req(sumData) # Requires valid summary data
grouped_data <- sumData$grouped_data
better_dir <- sumData$better_dir
# Determine which z-value to interpolate based on user choice and availability
use_improvement <- input$compareToBest && "improvement" %in% names(grouped_data) && any(is.finite(grouped_data$improvement))
z_to_interpolate <- if (use_improvement) {
grouped_data$improvement
} else {
grouped_data$mean_metric
}
# Filter out rows where the chosen z value is not finite
valid_rows <- is.finite(grouped_data$MaxImageDimsLeft) &
is.finite(grouped_data$MaxImageDimsRight) &
is.finite(z_to_interpolate)
if (sum(valid_rows) < 3) {
warning("Fewer than 3 valid points remain for interpolation after filtering non-finite z-values.")
return(NULL)
}
x <- grouped_data$MaxImageDimsLeft[valid_rows]
y <- grouped_data$MaxImageDimsRight[valid_rows]
z <- z_to_interpolate[valid_rows]
# Double-check dimension variability again with filtered data
if (length(unique(x)) < 2 || length(unique(y)) < 2) {
warning("Insufficient dimension variability after filtering for interpolation.")
return(NULL)
}
# Perform interpolation
s_ <- tryCatch({
akima::interp(
x = x,
y = y,
z = z,
xo = seq(min(x, na.rm=TRUE), max(x, na.rm=TRUE), length = 50),
yo = seq(min(y, na.rm=TRUE), max(y, na.rm=TRUE), length = 50),
duplicate = "mean",
linear = TRUE # Ensure linear is explicitly set if default changes
)
}, error = function(e){
warning("Interpolation failed: ", e$message)
return(NULL)
})
if (is.null(s_) || !is.matrix(s_$z) || all(!is.finite(s_$z))) {
warning("Interpolation result is invalid or contains no finite values.")
return(NULL) # Interpolation failed or yielded no usable results
}
# Find optimal point from the *interpolated* grid (s_$z)
optimal_z_value <- NA
optimal_x <- NA
optimal_y <- NA
if(any(is.finite(s_$z))) { # Proceed only if there are finite values in the grid
# Determine optimization direction for the *interpolated* z-value
# If we interpolated 'improvement', we always maximize it.
# Otherwise, use the original metric's direction.
interp_better_dir <- if(use_improvement) "max" else better_dir
if (interp_better_dir == "max") {
max_idx <- which.max(s_$z)
optimal_z_value <- max(s_$z, na.rm = TRUE)
} else {
max_idx <- which.min(s_$z) # Index of the minimum
optimal_z_value <- min(s_$z, na.rm = TRUE)
}
# Convert linear index to row/column
row_col <- arrayInd(max_idx, .dim = dim(s_$z))
optimal_x <- s_$x[row_col[1, 1]]
optimal_y <- s_$y[row_col[1, 2]]
} else {
warning("No finite values in the interpolated grid to find optimum.")
}
list(
s_ = s_,
optimal_point = list(x = optimal_x, y = optimal_y, z = optimal_z_value),
interpolated_metric_name = if(use_improvement) "Improvement" else getMetricLabel(input$metric)
)
})
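# Sketch of the index bookkeeping in interpolatedData(): which.max()/which.min() on the
# interpolated matrix s_$z return a column-major linear index, and arrayInd() maps it back
# to (x index, y index). Toy example (values are illustrative only):
#   m <- matrix(c(1, 5, 3, 2), nrow = 2)  # column 1 = (1, 5), column 2 = (3, 2)
#   arrayInd(which.max(m), dim(m))        # -> row 2, col 1 (the value 5)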
# Heatmap Output
output$heatmapPlot <- renderPlot({
sumData <- summaryData()
interpData <- interpolatedData()
# Use req() for cleaner checking of reactive results
req(sumData, interpData, cancelOutput = TRUE) # Ensure both summary and interpolation are valid
grouped_data <- sumData$grouped_data
optimal_point <- interpData$optimal_point
# Determine z values and title based on checkbox and data availability
use_improvement <- input$compareToBest && "improvement" %in% names(grouped_data) && any(is.finite(grouped_data$improvement))
if (use_improvement) {
z <- grouped_data$improvement
# Check if improvement calculation was possible
if (all(is.na(z))) {
plot.new()
title(main = "Cannot Compute Improvement", sub = "No valid single-scale baseline found.", col.main = "red")
return()
}
main_title <- paste(input$application, "-", getMetricLabel(input$metric), "\nImprovement Over Best Single Scale")
plot_zlim <- range(interpData$s_$z, na.rm = TRUE) # Use range of interpolated improvement
} else {
z <- grouped_data$mean_metric
main_title <- paste(input$application, "-", getMetricLabel(input$metric))
plot_zlim <- range(interpData$s_$z, na.rm = TRUE) # Use range of interpolated metric
if (input$compareToBest) { # Add note if checkbox is ticked but comparison N/A
main_title <- paste0(main_title, "\n(Comparison to single scale not available)")
}
}
x <- grouped_data$MaxImageDimsLeft
y <- grouped_data$MaxImageDimsRight
# Filter data for plotting to match data used for interpolation
valid_rows <- is.finite(x) & is.finite(y) & is.finite(z)
if(sum(valid_rows) == 0) {
plot.new()
text(0.5, 0.5, "No valid data to plot.", cex = 1.5)
return()
}
x_plot <- x[valid_rows]
y_plot <- y[valid_rows]
z_plot <- z[valid_rows]
# Plot margins (bottom, left, top, right)
par(mar = c(5.1, 4.1, 3.1, 4.1))
# HCL-based color palette for the heatmap fill (alternatives: "Viridis", "Plasma", ...)
customPalette <- hcl.colors(50, palette = "YlOrRd", rev = TRUE)
# Call heatMap() with the raw (but filtered) data points; heatMap() re-interpolates them
# internally on an N x N grid (the same akima::interp settings as interpolatedData()),
# so we pass the original x, y, z rather than interpData$s_.
heatMap(
x = x_plot,
y = y_plot,
z = z_plot, # Pass the original data used for interpolation
N = 50, # Interpolation grid size used within heatMap
main = main_title,
xlab = "Image Dimension 1 (log scale)", # Clarify log scale
ylab = "Image Dimension 2 (log scale)", # Clarify log scale
useLog = "xy", # Keep log scale for axes
myCol = customPalette,
cex.lab = 1.3, # Slightly reduced label size
cex.main = 1.5, # Slightly reduced main title size
zlim = plot_zlim, # Use zlim from the *interpolated* data for consistent coloring
optimal_point = optimal_point, # Pass the calculated optimal point
add.legend = TRUE,
legend.width = 1.5 # Slightly wider legend
)
},
width = function() safe_dim("output_heatmapPlot_width"),
height = function() safe_dim("output_heatmapPlot_height"),
res = 96, # Adjust resolution if needed
execOnResize = TRUE)
# Contextual Note Output (renderUI so the embedded HTML renders, matching uiOutput("contextNote") in the UI)
output$contextNote <- renderUI({
SharedContextText <- c(
"The Peru RCT involves a multifaceted graduation program treatment to reduce poverty outcomes.",
"The Uganda RCT involves a cash grant program to stimulate human capital and living conditions among the poor.",
"For more information, see the associated paper, <a href='https://arxiv.org/abs/2411.02134' target='_blank'>arXiv.org/abs/2411.02134</a>
(<a href='https://connorjerzak.com/wp-content/uploads/2024/11/MultilevelBib.txt' target='_blank'>BibTex</a>),
and <a href='https://www.youtube.com/watch?v=RvAoJGMlKAI' target='_blank'>YouTube tutorial</a>.
",
"<div style='font-size: 10px; line-height: 1.5;'>",
"<b>Glossary:</b><br>",
"• <b>Model:</b> The neural-network backbone (e.g., clip-rsicd) transforming satellite images into numerical representations.<br>",
"• <b>Metric:</b> The criterion (e.g., RATE Ratio, RMSE) measuring performance or heterogeneity detection.<br>",
"• <b>Compare to best single-scale:</b> Toggle showing metric improvement relative to the best single-scale baseline.<br>",
"• <b>ImageDim1, ImageDim2:</b> Image sizes (e.g., 64×64, 128×128) for multi-scale analysis.<br>",
"• <b>RATE Ratio:</b> A t-statistic-like quantity indicating how much a data-model combination captures treatment-effect variation. Ratio of the RATE and its standard error. It can employ two weighting scemes (AUTOC and Qini).<br>",
"• <b>PC:</b> Principal Components; a compression step of neural representations.<br>",
"• <b>MeanDiff, MeanDiff_pc:</b> Gain in RATE Ratio from multi-scale vs. single-scale, with '_pc' for compressed data.<br>",
"• <b>RMSE:</b> Root Mean Squared Error, measuring prediction accuracy in simulations.<br>",
"</div>"
)
chosen_metric_label <- getMetricLabel(input$metric)
intro_text <- if (input$compareToBest) {
  paste(
    "This heatmap shows the improvement in",
    paste0("'", chosen_metric_label, "'"),
    "over the best single scale for",
    input$application,
    "using the", input$model, "model. The green star marks the optimal point."
  )
} else {
  paste(
    "This heatmap displays",
    paste0("'", chosen_metric_label, "'"),
    "for", input$application,
    "using the", input$model,
    "model across different image dimension combinations. The green star marks the optimal point."
  )
}
HTML(paste(c(intro_text, SharedContextText), collapse = " "))
})
}
# Run the Shiny App
shinyApp(ui = ui, server = server)