idiot errors

This commit is contained in:
End Nightshade 2026-02-10 15:03:57 -07:00
parent bb1679827f
commit b16bd5722f
No known key found for this signature in database
6 changed files with 95 additions and 57 deletions

View file

@ -16,6 +16,7 @@ module API
content_type = Marcel::MimeType.for(file.tempfile, name: file.original_filename) || file.content_type || "application/octet-stream"
# Pre-gen upload ID for predictable storage path
upload_id = SecureRandom.uuid_v7
sanitized_filename = ActiveStorage::Filename.new(file.original_filename).sanitized
storage_key = "#{upload_id}/#{sanitized_filename}"
@ -75,6 +76,7 @@ module API
download_auth = request.headers["X-Download-Authorization"]
upload = Upload.create_from_url(url, user: current_user, provenance: :api, original_url: url, authorization: download_auth)
# Check quota after download (URL upload size unknown beforehand)
quota_service = QuotaService.new(current_user)
unless quota_service.can_upload?(0)
if current_user.total_storage_bytes > quota_service.current_policy.max_total_storage
@ -133,23 +135,28 @@ module API
private
# Before-action quota guard for direct file uploads.
#
# Rejects the request with 402 Payment Required when the uploaded file
# exceeds the per-file size cap or would push the user past their total
# storage quota. Rendering inside a before_action halts the filter chain,
# so no explicit +throw+ is needed.
def check_quota
  # Only direct uploads know the size up front. URL uploads are checked
  # after download in create_from_url; batch uploads are handled by
  # BatchUploadService.
  return if params[:file].blank?

  file_size = params[:file].size
  quota_service = QuotaService.new(current_user)
  policy = quota_service.current_policy

  # Per-file size cap.
  if file_size > policy.max_file_size
    usage = quota_service.current_usage
    render json: quota_error_json(usage, "File size exceeds your limit of #{ActiveSupport::NumberHelper.number_to_human_size(policy.max_file_size)} per file"), status: :payment_required
    return
  end

  # Total-storage cap: would this upload exceed the remaining quota?
  return if quota_service.can_upload?(file_size)

  usage = quota_service.current_usage
  render json: quota_error_json(usage), status: :payment_required
end
def quota_error_json(usage, custom_message = nil)

View file

@ -53,7 +53,10 @@ class UploadsController < ApplicationController
@upload.rename!(new_filename)
redirect_to uploads_path, notice: "Renamed to #{@upload.filename}"
rescue Pundit::NotAuthorizedError
redirect_to uploads_path, alert: "You are not authorized to rename this upload."
redirect_back fallback_location: uploads_path, alert: "You are not authorized to rename this upload."
rescue StandardError => e
event = Sentry.capture_exception(e)
redirect_back fallback_location: uploads_path, alert: "Rename failed. (Error ID: #{event&.event_id})"
end
def destroy
@ -73,13 +76,12 @@ class UploadsController < ApplicationController
return
end
uploads = current_user.uploads.where(id: ids)
uploads = current_user.uploads.where(id: ids).includes(:blob)
count = uploads.size
filenames = uploads.includes(:blob).map { |u| u.filename.to_s }
uploads.destroy_all
redirect_to uploads_path, notice: "Deleted #{count} #{'file'.pluralize(count)}: #{filenames.join(', ')}"
redirect_to uploads_path, notice: "Deleted #{count} #{'file'.pluralize(count)}."
end
private
@ -96,13 +98,22 @@ class UploadsController < ApplicationController
if result.uploads.any?
count = result.uploads.size
filenames = result.uploads.map { |u| u.filename.to_s }.join(", ")
parts << "Uploaded #{count} #{'file'.pluralize(count)}: #{filenames}"
names = result.uploads.map { |u| u.filename.to_s }
if names.size <= 5
parts << "Uploaded #{count} #{'file'.pluralize(count)}: #{names.join(', ')}"
else
parts << "Uploaded #{count} #{'file'.pluralize(count)}: #{names.first(5).join(', ')} and #{count - 5} more"
end
end
if result.failed.any?
failures = result.failed.map { |f| "#{f.filename} (#{f.reason})" }.join("; ")
parts << "Failed: #{failures}"
failed_count = result.failed.size
failed_names = result.failed.map(&:filename)
if failed_names.size <= 5
parts << "Failed to upload #{failed_count} #{'file'.pluralize(failed_count)}: #{failed_names.join(', ')}"
else
parts << "Failed to upload #{failed_count} #{'file'.pluralize(failed_count)}: #{failed_names.first(5).join(', ')} and #{failed_count - 5} more"
end
end
parts.join(". ")

View file

@ -1,4 +1,6 @@
(function () {
let listenersAttached = false;
function init() {
const selectAll = document.querySelector("[data-batch-select-all]");
const bar = document.querySelector("[data-batch-bar]");
@ -33,29 +35,34 @@
}
}
// Individual checkbox changes
document.addEventListener("change", (e) => {
if (e.target.matches("[data-batch-select-item]")) {
updateBar();
}
});
// Only attach global document listeners once
if (!listenersAttached) {
listenersAttached = true;
// Click filename to toggle its checkbox
document.addEventListener("click", (e) => {
const toggle = e.target.closest("[data-batch-select-toggle]");
if (!toggle) return;
// Individual checkbox changes
document.addEventListener("change", (e) => {
if (e.target.matches("[data-batch-select-item]")) {
updateBar();
}
});
const uploadId = toggle.dataset.batchSelectToggle;
const checkbox = document.querySelector(
'[data-batch-select-item][data-upload-id="' + uploadId + '"]',
);
if (checkbox) {
checkbox.checked = !checkbox.checked;
updateBar();
}
});
// Click filename to toggle its checkbox
document.addEventListener("click", (e) => {
const toggle = e.target.closest("[data-batch-select-toggle]");
if (!toggle) return;
// Select all toggle
const uploadId = toggle.dataset.batchSelectToggle;
const checkbox = document.querySelector(
'[data-batch-select-item][data-upload-id="' + uploadId + '"]',
);
if (checkbox) {
checkbox.checked = !checkbox.checked;
updateBar();
}
});
}
// These reference page-specific elements, rebind on each navigation
if (selectAll) {
selectAll.addEventListener("change", () => {
const checkboxes = getCheckboxes();
@ -66,7 +73,6 @@
});
}
// Deselect all button
if (deselectBtn) {
deselectBtn.addEventListener("click", () => {
getCheckboxes().forEach((cb) => {
@ -77,7 +83,6 @@
});
}
// Confirmation before batch delete
if (deleteForm) {
deleteForm.addEventListener("submit", (e) => {
const checked = getChecked();
@ -86,7 +91,6 @@
return;
}
// Build a list of filenames from the row next to each checkbox
const names = checked.map((cb) => {
const row = cb.closest("[class*='BorderBox']") || cb.parentElement;
const nameEl = row.querySelector("[data-batch-select-toggle]");
@ -98,7 +102,7 @@
checked.length +
(checked.length === 1 ? " file" : " files") +
"?\n\n" +
names.map((n) => " " + n).join("\n") +
names.map((n) => " \u2022 " + n).join("\n") +
"\n\nThis cannot be undone.";
if (!confirm(message)) {

View file

@ -5,13 +5,17 @@ require "open-uri"
class Upload < ApplicationRecord
include PgSearch::Model
# UUID v7 primary key (automatic via migration)
belongs_to :user
belongs_to :blob, class_name: "ActiveStorage::Blob"
after_destroy :purge_blob
# Delegate file metadata to blob (no duplication!)
delegate :filename, :byte_size, :content_type, :checksum, to: :blob
# Search configuration
pg_search_scope :search_by_filename,
associated_against: {
blob: :filename
@ -28,9 +32,11 @@ class Upload < ApplicationRecord
},
using: { tsearch: { prefix: true } }
# Aliases for consistency
alias_method :file_size, :byte_size
alias_method :mime_type, :content_type
# Provenance enum
enum :provenance, {
slack: "slack",
web: "web",
@ -50,11 +56,13 @@ class Upload < ApplicationRecord
ActiveSupport::NumberHelper.number_to_human_size(byte_size)
end
# Direct URL to public R2 bucket
def assets_url
host = ENV.fetch("CDN_ASSETS_HOST", "cdn.hackclub-assets.com")
"https://#{host}/#{blob.key}"
end
# Get CDN URL (uses external uploads controller)
def cdn_url
Rails.application.routes.url_helpers.external_upload_url(
id:,
@ -75,14 +83,18 @@ class Upload < ApplicationRecord
blob.update!(filename: sanitized)
end
# Create upload from URL (for API/rescue operations)
# Checks quota via HEAD request before downloading when possible
def self.create_from_url(url, user:, provenance:, original_url: nil, authorization: nil, filename: nil)
conn = build_http_client
headers = {}
headers["Authorization"] = authorization if authorization.present?
# Pre-check file size via HEAD if possible
pre_check_quota_via_head(conn, url, headers, user)
# Download the file
response = conn.get(url, nil, headers)
if response.status.between?(300, 399)
location = response.headers["location"]
@ -96,6 +108,7 @@ class Upload < ApplicationRecord
response.headers["content-type"] ||
"application/octet-stream"
# Pre-generate upload ID for predictable storage path
upload_id = SecureRandom.uuid_v7
sanitized_filename = ActiveStorage::Filename.new(filename).sanitized
storage_key = "#{upload_id}/#{sanitized_filename}"
@ -149,6 +162,7 @@ class Upload < ApplicationRecord
raise "File would exceed storage quota"
rescue Faraday::Error
# HEAD request failed — proceed with GET and check after
nil
end

View file

@ -17,6 +17,14 @@ class BatchUploadService
uploads = []
failed = []
# Enforce batch size limit inside the service
if files.size > MAX_FILES_PER_BATCH
files[MAX_FILES_PER_BATCH..].each do |file|
failed << FailedUpload[file.original_filename, "Too many files in batch (maximum is #{MAX_FILES_PER_BATCH})"]
end
files = files.first(MAX_FILES_PER_BATCH)
end
# Fresh read to minimize stale data window
current_storage = @user.reload.total_storage_bytes
max_storage = @policy.max_total_storage
@ -58,11 +66,11 @@ class BatchUploadService
uploads << upload
batch_bytes_used += file_size
rescue StandardError => e
failed << FailedUpload[filename, "Upload error: #{e.message}"]
Rails.logger.error("BatchUploadService upload failed for #{filename}: #{e.class}: #{e.message}")
failed << FailedUpload[filename, "Upload failed due to an internal error"]
end
end
# Post-upload enforcement: if concurrent requests caused overage, clean up
enforce_quota_after_upload!(uploads, failed) if uploads.any?
Result[uploads, failed]
@ -76,40 +84,21 @@ class BatchUploadService
return if actual_total <= max_storage
# Over quota due to concurrent uploads — remove newest files first until under
overage = actual_total - max_storage
reclaimed = 0
destroyed_ids = []
uploads.reverse.each do |upload|
break if reclaimed >= overage
reclaimed += upload.byte_size
destroyed_ids << upload.id
failed << FailedUpload[upload.filename.to_s, "Removed: concurrent uploads exceeded quota"]
upload.destroy!
end
# Post-batch quota enforcement. Concurrent requests can race past the
# pre-upload check, so after the batch we re-read actual usage and, if the
# user is over quota, destroy the newest uploads until the overage is
# reclaimed. Each removal is reported to the caller via +failed+ and the
# destroyed records are dropped from +uploads+ in place.
def enforce_quota_after_upload!(uploads, failed)
  current_total = @user.reload.total_storage_bytes
  limit = @policy.max_total_storage
  return if current_total <= limit

  # Amount still to free; counts down as uploads are destroyed.
  remaining_overage = current_total - limit
  removed_ids = []
  uploads.reverse_each do |upload|
    break if remaining_overage <= 0
    remaining_overage -= upload.byte_size
    removed_ids << upload.id
    failed << FailedUpload[upload.filename.to_s, "Removed: concurrent uploads exceeded quota"]
    upload.destroy!
  end
  uploads.reject! { |u| removed_ids.include?(u.id) }
end
uploads.reject! { |u| destroyed_ids.include?(u.id) }
end
def create_upload(file)
content_type = Marcel::MimeType.for(file.tempfile, name: file.original_filename) ||

View file

@ -35,14 +35,27 @@ Rails.application.routes.draw do
get "/docs", to: redirect("/docs/getting-started")
get "/docs/:id", to: "docs#show", as: :doc
# Define your application routes per the DSL in https://guides.rubyonrails.org/routing.html
# Reveal health status on /up that returns 200 if the app boots with no exceptions, otherwise 500.
# Can be used by load balancers and uptime monitors to verify that the app is live.
get "up" => "rails/health#show", as: :rails_health_check
# Render dynamic PWA files from app/views/pwa/* (remember to link manifest in application.html.erb)
# get "manifest" => "rails/pwa#manifest", as: :pwa_manifest
# get "service-worker" => "rails/pwa#service_worker", as: :pwa_service_worker
# Defines the root path route ("/")
# root "posts#index"
# Rescue endpoint to find uploads by original URL
get "/rescue", to: "external_uploads#rescue", as: :rescue_upload
# Slack events webhook
namespace :slack do
post "events", to: "events#create"
end
# External upload redirects (must be last to avoid conflicts)
get "/:id/*filename", to: "external_uploads#show", constraints: { id: /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/ }, as: :external_upload
end