fix: various fixes to sentry-reported errors and more

This commit is contained in:
Hampus Kraft 2026-02-18 15:38:51 +00:00
parent 302c0d2a0c
commit 0517a966a3
No known key found for this signature in database
GPG Key ID: 6090864C465A454D
357 changed files with 25420 additions and 16281 deletions

View File

@@ -1,92 +0,0 @@
# Builds the fluxer_queue Docker image and deploys it to the canary server.
name: deploy queue

on:
  push:
    branches:
      - canary
    paths:
      - fluxer_queue/**
      - .github/workflows/deploy-queue.yaml
  workflow_dispatch:
    inputs:
      ref:
        type: string
        required: false
        default: ''
        description: Optional git ref (defaults to the triggering branch)

# Only one queue deploy runs at a time; a newer run cancels an in-flight one.
concurrency:
  group: deploy-fluxer-queue
  cancel-in-progress: true

permissions:
  contents: read

jobs:
  deploy:
    name: Deploy queue
    runs-on: blacksmith-8vcpu-ubuntu-2404
    timeout-minutes: 25
    env:
      IS_CANARY: true
      STACK: fluxer-queue
      CACHE_SCOPE: deploy-fluxer-queue
      RELEASE_CHANNEL: canary
    steps:
      # fetch-depth: 0 gives the deploy script full history to record the commit.
      - uses: actions/checkout@v6
        with:
          ref: ${{ inputs.ref || '' }}
          fetch-depth: 0
      - name: Record deploy commit
        run: python3 scripts/ci/workflows/deploy_queue.py --step record_deploy_commit
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}
      - name: Set build timestamp
        run: python3 scripts/ci/workflows/deploy_queue.py --step set_build_timestamp
      # Builds locally (load: true) rather than pushing; the image is shipped
      # to the server over SSH in the push_and_deploy step below.
      - name: Build image
        uses: docker/build-push-action@v6
        with:
          context: .
          file: fluxer_queue/Dockerfile
          tags: |
            ${{ env.STACK }}:${{ env.DEPLOY_SHA }}
          load: true
          platforms: linux/amd64
          cache-from: type=gha,scope=${{ env.CACHE_SCOPE }}
          cache-to: type=gha,mode=max,scope=${{ env.CACHE_SCOPE }}
          build-args: |
            BUILD_SHA=${{ env.DEPLOY_SHA }}
            BUILD_NUMBER=${{ github.run_number }}
            BUILD_TIMESTAMP=${{ env.BUILD_TIMESTAMP }}
            RELEASE_CHANNEL=${{ env.RELEASE_CHANNEL }}
        env:
          DOCKER_BUILD_SUMMARY: false
          DOCKER_BUILD_RECORD_UPLOAD: false
      - name: Install docker-pussh
        run: python3 scripts/ci/workflows/deploy_queue.py --step install_docker_pussh
      - name: Set up SSH agent
        uses: webfactory/ssh-agent@v0.9.1
        with:
          ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY_SERVER }}
      - name: Add server to known hosts
        run: python3 scripts/ci/workflows/deploy_queue.py --step add_known_hosts --server-ip ${{ secrets.SERVER_IP }}
      - name: Push image and deploy
        env:
          SERVER: ${{ secrets.SERVER_USER }}@${{ secrets.SERVER_IP }}
          IMAGE_TAG: ${{ env.STACK }}:${{ env.DEPLOY_SHA }}
        run: python3 scripts/ci/workflows/deploy_queue.py --step push_and_deploy

2
.gitignore vendored
View File

@ -23,7 +23,7 @@ devenv.local.nix
/dev/livekit.yaml
/dev/bluesky_oauth_key.pem
/dev/meilisearch_master_key
/dev/data/meilisearch/
/dev/data/
**/.dev.vars
**/.DS_Store
**/.env

View File

@ -37,20 +37,15 @@
},
"gateway": {
"port": 49107,
"rpc_tcp_port": 49108,
"api_host": "http://localhost:49319/api",
"admin_reload_secret": "",
"media_proxy_endpoint": "http://localhost:49319/media",
"logger_level": "debug"
},
"queue": {
"secret": ""
"nats": {
"core_url": "nats://127.0.0.1:4222",
"jetstream_url": "nats://127.0.0.1:4223"
}
},
"gateway": {
"rpc_endpoint": "http://localhost:49107",
"rpc_secret": ""
},
"auth": {
"sudo_mode_secret": "",
"connection_initiation_secret": "",

View File

@ -16,7 +16,7 @@
"s3": {
"access_key_id": "YOUR_S3_ACCESS_KEY",
"secret_access_key": "YOUR_S3_SECRET_KEY",
"endpoint": "http://127.0.0.1:3900"
"endpoint": "http://127.0.0.1:8080/s3"
},
"services": {
"server": {
@ -36,19 +36,15 @@
},
"gateway": {
"port": 8082,
"rpc_tcp_port": 8083,
"api_host": "http://127.0.0.1:8080/api",
"admin_reload_secret": "GENERATE_A_64_CHAR_HEX_SECRET",
"media_proxy_endpoint": "http://127.0.0.1:8080/media"
},
"queue": {
"secret": "GENERATE_A_64_CHAR_HEX_SECRET"
"nats": {
"core_url": "nats://nats:4222",
"jetstream_url": "nats://nats:4222",
"auth_token": "GENERATE_A_NATS_AUTH_TOKEN"
}
},
"gateway": {
"rpc_endpoint": "http://127.0.0.1:8082",
"rpc_secret": "GENERATE_A_64_CHAR_HEX_SECRET"
},
"auth": {
"sudo_mode_secret": "GENERATE_A_64_CHAR_HEX_SECRET",
"connection_initiation_secret": "GENERATE_A_64_CHAR_HEX_SECRET",

View File

@ -23,18 +23,10 @@
"oauth_client_secret": "test-oauth-client-secret"
},
"gateway": {
"rpc_tcp_port": 8089,
"api_host": "http://localhost:8088/api",
"admin_reload_secret": "test-gateway-admin-reload-secret-32-chars",
"media_proxy_endpoint": "http://localhost:8088/media"
},
"queue": {
"secret": "test-queue-secret-key-minimum-32-chars"
}
},
"gateway": {
"rpc_secret": "test-gateway-rpc-secret-minimum-32-chars"
},
"auth": {
"sudo_mode_secret": "test-sudo-mode-secret-minimum-32-chars",
"connection_initiation_secret": "test-connection-initiation-secret-32ch",

View File

@ -96,6 +96,20 @@
restart = "always";
};
};
nats_core = {
command = lib.mkForce "exec ${config.git.root}/scripts/dev_process_entry.sh nats_core nats-server -p 4222 -a 127.0.0.1";
log_location = "${config.git.root}/dev/logs/nats_core.log";
availability = {
restart = "always";
};
};
nats_jetstream = {
command = lib.mkForce "exec ${config.git.root}/scripts/dev_process_entry.sh nats_jetstream nats-server -p 4223 -js -sd ${config.git.root}/dev/data/nats_jetstream -a 127.0.0.1";
log_location = "${config.git.root}/dev/logs/nats_jetstream.log";
availability = {
restart = "always";
};
};
};
};
};
@ -107,6 +121,7 @@
rebar3
valkey
meilisearch
nats-server
ffmpeg
exiftool
caddy
@ -143,6 +158,8 @@
"devenv:processes:mailpit"
"devenv:processes:valkey"
"devenv:processes:caddy"
"devenv:processes:nats_core"
"devenv:processes:nats_jetstream"
];
};
@ -229,5 +246,9 @@
caddy.exec = ''
exec caddy run --config ${config.git.root}/dev/Caddyfile.dev --adapter caddyfile
'';
nats_core.exec = "exec nats-server -p 4222 -a 127.0.0.1";
nats_jetstream.exec = ''
exec nats-server -p 4223 -js -sd ${config.git.root}/dev/data/nats_jetstream -a 127.0.0.1
'';
};
}

View File

@ -96,6 +96,11 @@ export const Endpoints = {
CHANNEL_TYPING: (channelId: string) => `/channels/${channelId}/typing`,
CHANNEL_WEBHOOKS: (channelId: string) => `/channels/${channelId}/webhooks`,
CHANNEL_RTC_REGIONS: (channelId: string) => `/channels/${channelId}/rtc-regions`,
CHANNEL_CHUNKED_UPLOADS: (channelId: string) => `/channels/${channelId}/chunked-uploads`,
CHANNEL_CHUNKED_UPLOAD_CHUNK: (channelId: string, uploadId: string, chunkIndex: number) =>
`/channels/${channelId}/chunked-uploads/${uploadId}/chunks/${chunkIndex}`,
CHANNEL_CHUNKED_UPLOAD_COMPLETE: (channelId: string, uploadId: string) =>
`/channels/${channelId}/chunked-uploads/${uploadId}/complete`,
CHANNEL_CALL: (channelId: string) => `/channels/${channelId}/call`,
CHANNEL_CALL_RING: (channelId: string) => `/channels/${channelId}/call/ring`,
CHANNEL_CALL_STOP_RINGING: (channelId: string) => `/channels/${channelId}/call/stop-ringing`,

View File

@ -191,7 +191,7 @@ export const Messages = observer(function Messages({channel, onBottomBarVisibili
canAutoAck,
});
useEffect(() => {
useLayoutEffect(() => {
const node = messagesWrapperRef.current;
if (node) {
node.style.setProperty('--message-group-spacing', `${state.messageGroupSpacing}px`);

View File

@ -36,7 +36,7 @@
--search-scope-badge-hover-color: var(--text-primary);
--search-scope-badge-border-color: var(--background-modifier-accent);
--search-input-text-color: var(--text-primary);
--search-input-placeholder-color: var(--text-primary-muted);
--search-input-placeholder-color: var(--text-tertiary);
--search-clear-button-color: var(--text-tertiary);
--search-clear-button-hover-color: var(--text-primary);
--search-clear-button-hover-background: var(--background-modifier-hover);

View File

@ -150,9 +150,10 @@ export const EmojiListItem: React.FC<{
guildId: string;
emoji: GuildEmojiWithUser;
layout: 'list' | 'grid';
canModify: boolean;
onRename: (emojiId: string, newName: string) => void;
onRemove: (emojiId: string) => void;
}> = observer(({guildId, emoji, layout, onRename, onRemove}) => {
}> = observer(({guildId, emoji, layout, canModify, onRename, onRemove}) => {
const {t} = useLingui();
const avatarUrl = emoji.user ? AvatarUtils.getUserAvatarURL(emoji.user, false) : null;
const gridNameButtonRef = useRef<HTMLButtonElement | null>(null);
@ -222,38 +223,44 @@ export const EmojiListItem: React.FC<{
</div>
<div className={styles.gridName}>
<Popout
position="bottom"
offsetMainAxis={8}
offsetCrossAxis={0}
returnFocusRef={gridNameButtonRef}
render={({onClose}) => (
<EmojiRenamePopoutContent initialName={emoji.name} onSave={handleSave} onClose={onClose} />
)}
>
<button
type="button"
ref={gridNameButtonRef}
className={styles.gridNameButton}
aria-label={t`Rename :${emoji.name}:`}
{canModify ? (
<Popout
position="bottom"
offsetMainAxis={8}
offsetCrossAxis={0}
returnFocusRef={gridNameButtonRef}
render={({onClose}) => (
<EmojiRenamePopoutContent initialName={emoji.name} onSave={handleSave} onClose={onClose} />
)}
>
<span className={styles.gridNameText}>:{emoji.name}:</span>
</button>
</Popout>
<button
type="button"
ref={gridNameButtonRef}
className={styles.gridNameButton}
aria-label={t`Rename :${emoji.name}:`}
>
<span className={styles.gridNameText}>:{emoji.name}:</span>
</button>
</Popout>
) : (
<span className={styles.gridNameText}>:{emoji.name}:</span>
)}
</div>
</div>
<Tooltip text={t`Delete`}>
<FocusRing offset={-2}>
<button
type="button"
onClick={handleDelete}
className={clsx(styles.deleteButton, styles.deleteButtonFloating)}
>
<XIcon className={styles.deleteIcon} weight="bold" />
</button>
</FocusRing>
</Tooltip>
{canModify && (
<Tooltip text={t`Delete`}>
<FocusRing offset={-2}>
<button
type="button"
onClick={handleDelete}
className={clsx(styles.deleteButton, styles.deleteButtonFloating)}
>
<XIcon className={styles.deleteIcon} weight="bold" />
</button>
</FocusRing>
</Tooltip>
)}
</div>
);
}
@ -266,17 +273,21 @@ export const EmojiListItem: React.FC<{
</div>
<div className={styles.listName}>
<InlineEdit
value={emoji.name}
onSave={handleSave}
prefix=":"
suffix=":"
maxLength={32}
width="100%"
className={styles.nameInlineEdit}
inputClassName={styles.nameInlineEditInput}
buttonClassName={styles.nameInlineEditButton}
/>
{canModify ? (
<InlineEdit
value={emoji.name}
onSave={handleSave}
prefix=":"
suffix=":"
maxLength={32}
width="100%"
className={styles.nameInlineEdit}
inputClassName={styles.nameInlineEditInput}
buttonClassName={styles.nameInlineEditButton}
/>
) : (
<span className={styles.nameInlineEdit}>:{emoji.name}:</span>
)}
</div>
<div className={styles.listUploader}>
@ -293,13 +304,15 @@ export const EmojiListItem: React.FC<{
</div>
</div>
<Tooltip text={t`Delete`}>
<FocusRing offset={-2}>
<button type="button" onClick={handleDelete} className={styles.deleteButton}>
<XIcon className={styles.deleteIcon} weight="bold" />
</button>
</FocusRing>
</Tooltip>
{canModify && (
<Tooltip text={t`Delete`}>
<FocusRing offset={-2}>
<button type="button" onClick={handleDelete} className={styles.deleteButton}>
<XIcon className={styles.deleteIcon} weight="bold" />
</button>
</FocusRing>
</Tooltip>
)}
</div>
);
});

View File

@ -37,7 +37,7 @@
}
.input::placeholder {
color: var(--text-primary-muted);
color: var(--text-tertiary);
}
.input.minHeight {
@ -209,7 +209,7 @@
}
.textarea::placeholder {
color: var(--text-primary-muted);
color: var(--text-tertiary);
}
.textareaActions {

View File

@ -121,14 +121,28 @@ const UserAreaInner = observer(
return;
}
const height = voiceConnectionRef.current?.getBoundingClientRect().height ?? 0;
if (height > 0) {
root.style.setProperty(VOICE_CONNECTION_HEIGHT_VARIABLE, `${Math.round(height)}px`);
} else {
const element = voiceConnectionRef.current;
if (!element) {
root.style.removeProperty(VOICE_CONNECTION_HEIGHT_VARIABLE);
return;
}
const updateHeight = () => {
const height = element.getBoundingClientRect().height;
if (height > 0) {
root.style.setProperty(VOICE_CONNECTION_HEIGHT_VARIABLE, `${Math.round(height)}px`);
} else {
root.style.removeProperty(VOICE_CONNECTION_HEIGHT_VARIABLE);
}
};
updateHeight();
const observer = new ResizeObserver(updateHeight);
observer.observe(element);
return () => {
observer.disconnect();
root.style.removeProperty(VOICE_CONNECTION_HEIGHT_VARIABLE);
};
}, [hasVoiceConnection]);
@ -164,13 +178,13 @@ const UserAreaInner = observer(
return (
<div className={wrapperClassName}>
{hasVoiceConnection && (
<>
<div ref={voiceConnectionRef}>
<div className={styles.separator} aria-hidden />
<div ref={voiceConnectionRef} className={styles.voiceConnectionWrapper}>
<div className={styles.voiceConnectionWrapper}>
<VoiceConnectionStatus />
</div>
<div className={styles.separator} aria-hidden />
</>
</div>
)}
{!hasVoiceConnection && <div className={styles.separator} aria-hidden />}
<div className={styles.userAreaContainer}>

View File

@ -93,7 +93,10 @@ export const AddConnectionModal = observer(({defaultType}: AddConnectionModalPro
const onSubmitInitiate = useCallback(
async (data: InitiateFormInputs) => {
const identifier = data.identifier.trim();
let identifier = data.identifier.trim();
if (type === ConnectionTypes.BLUESKY) {
identifier = identifier.replace(/^https?:\/\/bsky\.app\/profile\//i, '').replace(/^@/, '');
}
if (UserConnectionStore.hasConnectionByTypeAndName(type, identifier)) {
initiateForm.setError('identifier', {type: 'validate', message: t`You already have this connection.`});
return;

View File

@ -56,8 +56,11 @@ export const GuildSettingsModal: React.FC<GuildSettingsModalProps> = observer(
if (!guild) return guildSettingsTabs;
return guildSettingsTabs.filter((tab) => {
if (tab.permission && !PermissionStore.can(tab.permission, {guildId})) {
return false;
if (tab.permission) {
const perms = Array.isArray(tab.permission) ? tab.permission : [tab.permission];
if (!perms.some((p) => PermissionStore.can(p, {guildId}))) {
return false;
}
}
if (tab.requireFeature && !guild.features.has(tab.requireFeature)) {
return false;

View File

@ -192,9 +192,6 @@ export const InviteModal = observer(({channelId}: {channelId: string}) => {
};
const getExpirationText = () => {
if (maxAge === '0') {
return <Trans>never expires</Trans>;
}
const option = maxAgeOptions.find((opt) => opt.value === maxAge);
if (option) {
switch (option.value) {
@ -310,9 +307,16 @@ export const InviteModal = observer(({channelId}: {channelId: string}) => {
onInputClick={(e) => e.currentTarget.select()}
inputProps={{placeholder: t`Invite link`}}
>
{isUsingVanityUrl ? (
{isUsingVanityUrl || maxAge === '0' ? (
<p className={styles.expirationText}>
<Trans>This invite link never expires.</Trans>
<Trans>This invite link never expires.</Trans>{' '}
{!isUsingVanityUrl && (
<FocusRing offset={-2}>
<button type="button" onClick={() => setShowAdvanced(true)} className={styles.editLink}>
<Trans>Edit invite link</Trans>
</button>
</FocusRing>
)}
</p>
) : (
<p className={styles.expirationText}>

View File

@ -87,7 +87,7 @@
margin: 0;
overflow: hidden;
display: -webkit-box;
-webkit-line-clamp: 2;
-webkit-line-clamp: 4;
-webkit-box-orient: vertical;
flex: 1;
}

View File

@ -19,11 +19,14 @@
import type {DiscoveryGuild} from '@app/actions/DiscoveryActionCreators';
import * as DiscoveryActionCreators from '@app/actions/DiscoveryActionCreators';
import * as ModalActionCreators from '@app/actions/ModalActionCreators';
import * as NavigationActionCreators from '@app/actions/NavigationActionCreators';
import * as ToastActionCreators from '@app/actions/ToastActionCreators';
import {GuildBadge} from '@app/components/guild/GuildBadge';
import styles from '@app/components/modals/discovery/DiscoveryGuildCard.module.css';
import {GuildIcon} from '@app/components/popouts/GuildIcon';
import {Button} from '@app/components/uikit/button/Button';
import DiscoveryStore from '@app/stores/DiscoveryStore';
import GuildStore from '@app/stores/GuildStore';
import {getApiErrorMessage} from '@app/utils/ApiErrorUtils';
import {getCurrentLocale} from '@app/utils/LocaleUtils';
@ -51,6 +54,9 @@ export const DiscoveryGuildCard = observer(function DiscoveryGuildCard({guild}:
setJoining(true);
try {
await DiscoveryActionCreators.joinGuild(guild.id);
DiscoveryStore.reset();
ModalActionCreators.pop();
NavigationActionCreators.selectGuild(guild.id);
} catch (error) {
setJoining(false);
const message = getApiErrorMessage(error) ?? t`Failed to join this community. Please try again.`;

View File

@ -121,22 +121,22 @@ const GuildDiscoveryTab: React.FC<{guildId: string}> = ({guildId}) => {
application.status === DiscoveryApplicationStatus.REJECTED ||
application.status === DiscoveryApplicationStatus.REMOVED);
const formValues = useMemo(
() =>
hasActiveApplication && application
? {description: application.description, category_type: application.category_type}
: undefined,
[hasActiveApplication, application],
);
const form = useForm<FormInputs>({
defaultValues: {
description: hasActiveApplication ? application.description : '',
category_type: hasActiveApplication ? application.category_type : 0,
description: '',
category_type: 0,
},
values: formValues,
});
useEffect(() => {
if (hasActiveApplication && application) {
form.reset({
description: application.description,
category_type: application.category_type,
});
}
}, [application, hasActiveApplication, form]);
const setApplicationFromResponse = useCallback((response: DiscoveryApplicationResponse) => {
setStatus((prev) => (prev ? {...prev, application: response} : prev));
}, []);
@ -291,8 +291,10 @@ const GuildDiscoveryTab: React.FC<{guildId: string}> = ({guildId}) => {
<div className={styles.fieldLabel}>
<Trans>Description</Trans>
</div>
<Textarea
{...form.register('description', {
<Controller
name="description"
control={form.control}
rules={{
required: t`A description is required.`,
minLength: {
value: DISCOVERY_DESCRIPTION_MIN_LENGTH,
@ -302,15 +304,24 @@ const GuildDiscoveryTab: React.FC<{guildId: string}> = ({guildId}) => {
value: DISCOVERY_DESCRIPTION_MAX_LENGTH,
message: t`Description must be no more than ${DISCOVERY_DESCRIPTION_MAX_LENGTH} characters.`,
},
})}
error={form.formState.errors.description?.message}
label=""
placeholder={t`Describe what your community is about...`}
minRows={3}
maxRows={6}
maxLength={DISCOVERY_DESCRIPTION_MAX_LENGTH}
showCharacterCount
disabled={!eligible && canApply}
}}
render={({field, fieldState}) => (
<Textarea
name={field.name}
value={field.value}
onChange={field.onChange}
onBlur={field.onBlur}
ref={field.ref}
error={fieldState.error?.message}
label=""
placeholder={t`Describe what your community is about...`}
minRows={3}
maxRows={6}
maxLength={DISCOVERY_DESCRIPTION_MAX_LENGTH}
showCharacterCount
disabled={!eligible && canApply}
/>
)}
/>
</div>

View File

@ -33,11 +33,14 @@ import {Logger} from '@app/lib/Logger';
import EmojiStickerLayoutStore from '@app/stores/EmojiStickerLayoutStore';
import {seedGuildEmojiCache, subscribeToGuildEmojiUpdates} from '@app/stores/GuildExpressionTabCache';
import GuildStore from '@app/stores/GuildStore';
import PermissionStore from '@app/stores/PermissionStore';
import UserStore from '@app/stores/UserStore';
import {getApiErrorCode, getApiErrorErrors} from '@app/utils/ApiErrorUtils';
import {openFilePicker} from '@app/utils/FilePickerUtils';
import * as ImageCropUtils from '@app/utils/ImageCropUtils';
import {GlobalLimits} from '@app/utils/limits/GlobalLimits';
import {APIErrorCodes} from '@fluxer/constants/src/ApiErrorCodes';
import {Permissions} from '@fluxer/constants/src/ChannelConstants';
import type {GuildEmojiWithUser} from '@fluxer/schema/src/domains/guild/GuildEmojiSchemas';
import {sortBySnowflakeDesc} from '@fluxer/snowflake/src/SnowflakeUtils';
import {Trans, useLingui} from '@lingui/react/macro';
@ -62,6 +65,10 @@ const GuildEmojiTab: React.FC<{guildId: string}> = observer(function GuildEmojiT
const layout = layoutStore.getEmojiLayout();
const guild = GuildStore.getGuild(guildId);
const canCreateExpressions = PermissionStore.can(Permissions.CREATE_EXPRESSIONS, {guildId});
const canManageExpressions = PermissionStore.can(Permissions.MANAGE_EXPRESSIONS, {guildId});
const currentUserId = UserStore.currentUserId;
const setEmojisWithCache = useCallback(
(updater: React.SetStateAction<ReadonlyArray<GuildEmojiWithUser>>) => {
setEmojis((prev) => {
@ -132,6 +139,15 @@ const GuildEmojiTab: React.FC<{guildId: string}> = observer(function GuildEmojiT
return 50;
}, [guild]);
const canModifyEmoji = useCallback(
(emoji: GuildEmojiWithUser): boolean => {
if (canManageExpressions) return true;
if (canCreateExpressions && emoji.user?.id === currentUserId) return true;
return false;
},
[canManageExpressions, canCreateExpressions, currentUserId],
);
const handleEmojiDelete = useCallback(
async (emojiId: string) => {
try {
@ -322,45 +338,50 @@ const GuildEmojiTab: React.FC<{guildId: string}> = observer(function GuildEmojiT
</div>
)}
<UploadSlotInfo
title={<Trans>Emoji Slots</Trans>}
currentCount={staticEmojis.length}
maxCount={maxStaticEmojis}
uploadButtonText={<Trans>Upload Emoji</Trans>}
onUploadClick={async () => {
const files = await openFilePicker({
multiple: true,
accept: '.jpg,.jpeg,.png,.apng,.gif,.webp,.avif,image/*',
});
if (files.length > 0) {
void handleFileSelect(files);
}
}}
description={
<Trans>
Emoji names must be at least 2 characters long and can only contain alphanumeric characters and underscores.
Allowed file types: JPEG, PNG, WebP, GIF. We compress images to 128x128 pixels. Maximum size:{' '}
{Math.round(GlobalLimits.getEmojiMaxSize() / 1024)} KB per emoji.
</Trans>
}
additionalSlots={
<>
<span>
{canCreateExpressions && (
<>
<UploadSlotInfo
title={<Trans>Emoji Slots</Trans>}
currentCount={staticEmojis.length}
maxCount={maxStaticEmojis}
uploadButtonText={<Trans>Upload Emoji</Trans>}
onUploadClick={async () => {
const files = await openFilePicker({
multiple: true,
accept: '.jpg,.jpeg,.png,.apng,.gif,.webp,.avif,image/*',
});
if (files.length > 0) {
void handleFileSelect(files);
}
}}
description={
<Trans>
Static: {staticEmojis.length} / {maxStaticEmojis === Number.POSITIVE_INFINITY ? '∞' : maxStaticEmojis}
Emoji names must be at least 2 characters long and can only contain alphanumeric characters and
underscores. Allowed file types: JPEG, PNG, WebP, GIF. We compress images to 128x128 pixels. Maximum
size: {Math.round(GlobalLimits.getEmojiMaxSize() / 1024)} KB per emoji.
</Trans>
</span>
<span>
<Trans>
Animated: {animatedEmojis.length} /{' '}
{maxAnimatedEmojis === Number.POSITIVE_INFINITY ? '∞' : maxAnimatedEmojis}
</Trans>
</span>
</>
}
/>
}
additionalSlots={
<>
<span>
<Trans>
Static: {staticEmojis.length} /{' '}
{maxStaticEmojis === Number.POSITIVE_INFINITY ? '∞' : maxStaticEmojis}
</Trans>
</span>
<span>
<Trans>
Animated: {animatedEmojis.length} /{' '}
{maxAnimatedEmojis === Number.POSITIVE_INFINITY ? '∞' : maxAnimatedEmojis}
</Trans>
</span>
</>
}
/>
<UploadDropZone onDrop={handleDrop} description={<Trans>Drag and drop emoji files here</Trans>} />
<UploadDropZone onDrop={handleDrop} description={<Trans>Drag and drop emoji files here</Trans>} />
</>
)}
{searchQuery && filteredEmojis.length === 0 && (
<div className={styles.notice}>
@ -391,6 +412,7 @@ const GuildEmojiTab: React.FC<{guildId: string}> = observer(function GuildEmojiT
guildId={guildId}
emoji={emoji}
layout={layout}
canModify={canModifyEmoji(emoji)}
onRename={handleEmojiRename}
onRemove={handleEmojiDelete}
/>
@ -411,6 +433,7 @@ const GuildEmojiTab: React.FC<{guildId: string}> = observer(function GuildEmojiT
guildId={guildId}
emoji={emoji}
layout={layout}
canModify={canModifyEmoji(emoji)}
onRename={handleEmojiRename}
onRemove={handleEmojiDelete}
/>

View File

@ -32,8 +32,11 @@ import {Logger} from '@app/lib/Logger';
import EmojiStickerLayoutStore from '@app/stores/EmojiStickerLayoutStore';
import {seedGuildStickerCache, subscribeToGuildStickerUpdates} from '@app/stores/GuildExpressionTabCache';
import GuildStore from '@app/stores/GuildStore';
import PermissionStore from '@app/stores/PermissionStore';
import UserStore from '@app/stores/UserStore';
import {openFilePicker} from '@app/utils/FilePickerUtils';
import {GlobalLimits} from '@app/utils/limits/GlobalLimits';
import {Permissions} from '@fluxer/constants/src/ChannelConstants';
import type {GuildStickerWithUser} from '@fluxer/schema/src/domains/guild/GuildEmojiSchemas';
import {sortBySnowflakeDesc} from '@fluxer/snowflake/src/SnowflakeUtils';
import {Trans, useLingui} from '@lingui/react/macro';
@ -54,6 +57,11 @@ const GuildStickersTab: React.FC<{guildId: string}> = observer(function GuildSti
const layoutStore = EmojiStickerLayoutStore;
const viewMode = layoutStore.getStickerViewMode();
const guild = GuildStore.getGuild(guildId);
const canCreateExpressions = PermissionStore.can(Permissions.CREATE_EXPRESSIONS, {guildId});
const canManageExpressions = PermissionStore.can(Permissions.MANAGE_EXPRESSIONS, {guildId});
const currentUserId = UserStore.currentUserId;
const setStickersWithCache = useCallback(
(updater: React.SetStateAction<ReadonlyArray<GuildStickerWithUser>>) => {
setStickers((prev) => {
@ -120,6 +128,15 @@ const GuildStickersTab: React.FC<{guildId: string}> = observer(function GuildSti
});
}, [stickers, searchQuery]);
const canModifySticker = useCallback(
(sticker: GuildStickerWithUser): boolean => {
if (canManageExpressions) return true;
if (canCreateExpressions && sticker.user?.id === currentUserId) return true;
return false;
},
[canManageExpressions, canCreateExpressions, currentUserId],
);
const maxStickers = guild?.maxStickers ?? 50;
return (
@ -154,25 +171,29 @@ const GuildStickersTab: React.FC<{guildId: string}> = observer(function GuildSti
</div>
</div>
<UploadSlotInfo
title={<Trans>Sticker Slots</Trans>}
currentCount={stickers.length}
maxCount={maxStickers}
uploadButtonText={<Trans>Upload Sticker</Trans>}
onUploadClick={handleAddSticker}
description={
<Trans>
Stickers must be exactly 320x320 pixels and no larger than{' '}
{Math.round(GlobalLimits.getStickerMaxSize() / 1024)} KB, but we automatically resize and compress images
for you. Allowed file types: JPEG, PNG, WebP, GIF.
</Trans>
}
/>
<UploadDropZone
onDrop={handleDrop}
description={<Trans>Drag and drop a sticker file here (one at a time)</Trans>}
acceptMultiple={false}
/>
{canCreateExpressions && (
<>
<UploadSlotInfo
title={<Trans>Sticker Slots</Trans>}
currentCount={stickers.length}
maxCount={maxStickers}
uploadButtonText={<Trans>Upload Sticker</Trans>}
onUploadClick={handleAddSticker}
description={
<Trans>
Stickers must be exactly 320x320 pixels and no larger than{' '}
{Math.round(GlobalLimits.getStickerMaxSize() / 1024)} KB, but we automatically resize and compress
images for you. Allowed file types: JPEG, PNG, WebP, GIF.
</Trans>
}
/>
<UploadDropZone
onDrop={handleDrop}
description={<Trans>Drag and drop a sticker file here (one at a time)</Trans>}
acceptMultiple={false}
/>
</>
)}
{fetchStatus === 'pending' && (
<div className={styles.spinnerContainer}>
@ -192,7 +213,13 @@ const GuildStickersTab: React.FC<{guildId: string}> = observer(function GuildSti
{fetchStatus === 'success' && filteredStickers.length > 0 && (
<div className={clsx(styles.stickerGrid, viewMode === 'compact' ? styles.compactGrid : styles.cozyGrid)}>
{filteredStickers.map((sticker) => (
<StickerGridItem key={sticker.id} guildId={guildId} sticker={sticker} onUpdate={fetchStickers} />
<StickerGridItem
key={sticker.id}
guildId={guildId}
sticker={sticker}
canModify={canModifySticker(sticker)}
onUpdate={fetchStickers}
/>
))}
</div>
)}

View File

@ -72,7 +72,7 @@ export interface GuildSettingsTab {
icon: Icon;
iconWeight?: IconWeight;
component: React.ComponentType<{guildId: string}>;
permission?: bigint;
permission?: bigint | ReadonlyArray<bigint>;
requireFeature?: string;
}
@ -83,7 +83,7 @@ interface GuildSettingsTabDescriptor {
icon: Icon;
iconWeight?: IconWeight;
component: React.ComponentType<{guildId: string}>;
permission?: bigint;
permission?: bigint | ReadonlyArray<bigint>;
requireFeature?: string;
}
@ -110,7 +110,7 @@ const GUILD_SETTINGS_TABS_DESCRIPTORS: Array<GuildSettingsTabDescriptor> = [
label: msg`Custom Emoji`,
icon: SmileyIcon,
component: GuildEmojiTab,
permission: Permissions.MANAGE_EXPRESSIONS,
permission: [Permissions.CREATE_EXPRESSIONS, Permissions.MANAGE_EXPRESSIONS],
},
{
type: 'stickers',
@ -118,7 +118,7 @@ const GUILD_SETTINGS_TABS_DESCRIPTORS: Array<GuildSettingsTabDescriptor> = [
label: msg`Custom Stickers`,
icon: StickerIcon,
component: GuildStickersTab,
permission: Permissions.MANAGE_EXPRESSIONS,
permission: [Permissions.CREATE_EXPRESSIONS, Permissions.MANAGE_EXPRESSIONS],
},
{
type: 'moderation',

View File

@ -38,10 +38,16 @@ import {observer} from 'mobx-react-lite';
interface StickerGridItemProps {
guildId: string;
sticker: GuildStickerWithUser;
canModify: boolean;
onUpdate: () => void;
}
export const StickerGridItem = observer(function StickerGridItem({guildId, sticker, onUpdate}: StickerGridItemProps) {
export const StickerGridItem = observer(function StickerGridItem({
guildId,
sticker,
canModify,
onUpdate,
}: StickerGridItemProps) {
const {t} = useLingui();
const {shouldAnimate} = useStickerAnimation();
@ -107,23 +113,25 @@ export const StickerGridItem = observer(function StickerGridItem({guildId, stick
)}
</div>
<div className={styles.actions}>
<Tooltip text={t`Edit`}>
<FocusRing offset={-2}>
<button type="button" onClick={handleEdit} className={styles.actionButton}>
<PencilIcon className={styles.icon} weight="bold" />
</button>
</FocusRing>
</Tooltip>
{canModify && (
<div className={styles.actions}>
<Tooltip text={t`Edit`}>
<FocusRing offset={-2}>
<button type="button" onClick={handleEdit} className={styles.actionButton}>
<PencilIcon className={styles.icon} weight="bold" />
</button>
</FocusRing>
</Tooltip>
<Tooltip text={t`Delete`}>
<FocusRing offset={-2}>
<button type="button" onClick={handleDelete} className={clsx(styles.actionButton, styles.deleteButton)}>
<XIcon className={styles.icon} weight="bold" />
</button>
</FocusRing>
</Tooltip>
</div>
<Tooltip text={t`Delete`}>
<FocusRing offset={-2}>
<button type="button" onClick={handleDelete} className={clsx(styles.actionButton, styles.deleteButton)}>
<XIcon className={styles.icon} weight="bold" />
</button>
</FocusRing>
</Tooltip>
</div>
)}
</div>
);
});

View File

@ -228,8 +228,11 @@ export function useGuildMenuData(guild: GuildRecord, options: UseGuildMenuDataOp
const availableSettingsTabs = useMemo(() => {
const allTabs = getGuildSettingsTabs(i18n);
return allTabs.filter((tab) => {
if (tab.permission && !PermissionStore.can(tab.permission, {guildId: guild.id})) {
return false;
if (tab.permission) {
const perms = Array.isArray(tab.permission) ? tab.permission : [tab.permission];
if (!perms.some((p) => PermissionStore.can(p, {guildId: guild.id}))) {
return false;
}
}
if (tab.requireFeature && !guild.features.has(tab.requireFeature)) {
return false;

View File

@ -311,8 +311,11 @@ export const CommunitySettingsMenuItem: React.FC<GuildMenuItemProps> = observer(
const accessibleTabs = useMemo(() => {
const guildTabs = getGuildSettingsTabs(i18n);
return guildTabs.filter((tab) => {
if (tab.permission && !PermissionStore.can(tab.permission, {guildId: guild.id})) {
return false;
if (tab.permission) {
const perms = Array.isArray(tab.permission) ? tab.permission : [tab.permission];
if (!perms.some((p) => PermissionStore.can(p, {guildId: guild.id}))) {
return false;
}
}
if (tab.requireFeature && !guild.features.has(tab.requireFeature)) {
return false;

View File

@ -0,0 +1,195 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {Endpoints} from '@app/Endpoints';
import http from '@app/lib/HttpClient';
import {Logger} from '@app/lib/Logger';
import {CHUNKED_UPLOAD_CHUNK_SIZE} from '@fluxer/constants/src/LimitConstants';
const logger = new Logger('ChunkedUploadService');
const MAX_CONCURRENT_CHUNKS = 4;
const MAX_CHUNK_RETRIES = 3;
const RETRY_BASE_DELAY_MS = 1000;
/** Final result handed back to callers once all chunks are assembled server-side. */
interface ChunkedUploadResult {
  // Server-assigned storage filename to reference in the message create request.
  upload_filename: string;
  // Total assembled size in bytes, as reported by the server.
  file_size: number;
  // Content type detected/recorded by the server for the assembled file.
  content_type: string;
}
/** Response body from POST CHANNEL_CHUNKED_UPLOADS initiating an upload session. */
interface InitiateUploadResponse {
  // Session identifier used in subsequent chunk PUTs and the complete call.
  upload_id: string;
  upload_filename: string;
  // Server-chosen chunk size in bytes; the client must slice the file accordingly.
  chunk_size: number;
  // Number of chunks the server expects for the declared file_size.
  chunk_count: number;
}
/** Response body from PUT of a single chunk. */
interface UploadChunkResponse {
  // Opaque tag echoed back in the completion request to identify the stored chunk.
  etag: string;
}
/** Response body from the completion endpoint after all chunks are uploaded. */
interface CompleteUploadResponse {
  upload_filename: string;
  file_size: number;
  content_type: string;
}
/**
 * Uploads a large file to a channel in fixed-size chunks with bounded concurrency.
 *
 * Flow: initiate a session (server picks chunk size/count), PUT each chunk with
 * up to MAX_CONCURRENT_CHUNKS in flight and per-chunk retries, then POST the
 * collected etags to the completion endpoint.
 *
 * @param channelId  Target channel whose chunked-upload endpoints are used.
 * @param onProgress Optional callback invoked with (bytesUploaded, totalBytes);
 *                   only counts fully-uploaded chunks, so progress is coarse.
 * @param signal     Optional AbortSignal; aborting rejects with an AbortError.
 * @returns Server-assigned filename, size, and content type of the assembled file.
 * @throws The last HTTP error if a chunk fails non-retryably or exhausts retries,
 *         or a DOMException('AbortError') if cancelled.
 */
export async function uploadFileChunked(
  channelId: string,
  file: File,
  onProgress?: (loaded: number, total: number) => void,
  signal?: AbortSignal,
): Promise<ChunkedUploadResult> {
  // Initiate the session; the server dictates chunk geometry.
  const initiateResponse = await http.post<InitiateUploadResponse>({
    url: Endpoints.CHANNEL_CHUNKED_UPLOADS(channelId),
    body: {
      filename: file.name,
      file_size: file.size,
    },
    signal,
    rejectWithError: true,
  });
  const {upload_id, chunk_size, chunk_count} = initiateResponse.body;
  logger.debug(`Initiated chunked upload: ${upload_id}, ${chunk_count} chunks of ${chunk_size} bytes`);
  // Per-chunk byte counters (a chunk contributes only once fully uploaded).
  const chunkProgress = new Array<number>(chunk_count).fill(0);
  const etags = new Array<{chunk_index: number; etag: string}>(chunk_count);
  function reportProgress() {
    if (!onProgress) return;
    const loaded = chunkProgress.reduce((sum, bytes) => sum + bytes, 0);
    onProgress(loaded, file.size);
  }
  const chunkIndices = Array.from({length: chunk_count}, (_, i) => i);
  let cursor = 0;
  const activeTasks: Array<Promise<void>> = [];
  async function uploadOneChunk(chunkIndex: number): Promise<void> {
    const start = chunkIndex * chunk_size;
    const end = Math.min(start + chunk_size, file.size);
    const chunkBlob = file.slice(start, end);
    const chunkData = new Uint8Array(await chunkBlob.arrayBuffer());
    const chunkLength = chunkData.byteLength;
    let lastError: unknown;
    for (let attempt = 0; attempt <= MAX_CHUNK_RETRIES; attempt++) {
      if (signal?.aborted) {
        throw new DOMException('Upload cancelled', 'AbortError');
      }
      try {
        const response = await http.put<UploadChunkResponse>({
          url: Endpoints.CHANNEL_CHUNKED_UPLOAD_CHUNK(channelId, upload_id, chunkIndex),
          body: chunkData,
          headers: {'Content-Type': 'application/octet-stream'},
          signal,
          rejectWithError: true,
        });
        etags[chunkIndex] = {chunk_index: chunkIndex, etag: response.body.etag};
        chunkProgress[chunkIndex] = chunkLength;
        reportProgress();
        return;
      } catch (error) {
        lastError = error;
        if (signal?.aborted) {
          throw error;
        }
        // Only retry server-side failures and rate limits; other errors
        // (4xx, and errors with no status — NOTE(review): plain network
        // failures lack `status` and are therefore NOT retried — confirm
        // this is intended) propagate immediately.
        const isRetryable =
          error instanceof Error &&
          'status' in error &&
          ((error as {status: number}).status >= 500 || (error as {status: number}).status === 429);
        if (!isRetryable || attempt === MAX_CHUNK_RETRIES) {
          throw error;
        }
        // Exponential backoff: 1s, 2s, 4s, ...
        const delay = RETRY_BASE_DELAY_MS * 2 ** attempt;
        logger.debug(
          `Chunk ${chunkIndex} failed (attempt ${attempt + 1}/${MAX_CHUNK_RETRIES + 1}), retrying in ${delay}ms`,
        );
        await new Promise((resolve) => setTimeout(resolve, delay));
      }
    }
    throw lastError;
  }
  // Manual concurrency pool: keep up to MAX_CONCURRENT_CHUNKS uploads in
  // flight; the first failure settles the pool promise with that error.
  await new Promise<void>((resolve, reject) => {
    let settled = false;
    function settle(error?: unknown) {
      if (settled) return;
      settled = true;
      if (error) {
        reject(error);
      } else {
        resolve();
      }
    }
    function scheduleNext() {
      // Fix: stop launching new chunk uploads once the pool has settled
      // (previously, in-flight successes after a failure kept scheduling
      // fresh uploads for an already-failed session).
      while (!settled && activeTasks.length < MAX_CONCURRENT_CHUNKS && cursor < chunkIndices.length) {
        const chunkIndex = chunkIndices[cursor++];
        const task = uploadOneChunk(chunkIndex).then(
          () => {
            const idx = activeTasks.indexOf(task);
            if (idx !== -1) activeTasks.splice(idx, 1);
            if (cursor >= chunkIndices.length && activeTasks.length === 0) {
              settle();
            } else {
              scheduleNext();
            }
          },
          (error) => {
            settle(error);
          },
        );
        activeTasks.push(task);
      }
    }
    scheduleNext();
  });
  logger.debug(`All ${chunk_count} chunks uploaded, completing upload`);
  // Hand the collected etags back so the server can assemble the file.
  const completeResponse = await http.post<CompleteUploadResponse>({
    url: Endpoints.CHANNEL_CHUNKED_UPLOAD_COMPLETE(channelId, upload_id),
    body: {etags},
    signal,
    rejectWithError: true,
  });
  return {
    upload_filename: completeResponse.body.upload_filename,
    file_size: completeResponse.body.file_size,
    content_type: completeResponse.body.content_type,
  };
}
/**
 * Decides whether a file must take the chunked upload path rather than the
 * inline multipart path: anything larger than one chunk is chunked.
 */
export function shouldUseChunkedUpload(file: File): boolean {
  const singleChunkLimit = CHUNKED_UPLOAD_CHUNK_SIZE;
  return file.size > singleChunkLimit;
}

View File

@ -48,6 +48,7 @@ export interface CloudAttachment {
duration?: number | null;
waveform?: string | null;
isVoiceMessage?: boolean;
uploadedFilename?: string;
}
export interface MessageUpload {

View File

@ -32,6 +32,7 @@ import {NSFWContentRejectedModal} from '@app/components/alerts/NSFWContentReject
import {SlowmodeRateLimitedModal} from '@app/components/alerts/SlowmodeRateLimitedModal';
import {Endpoints} from '@app/Endpoints';
import i18n from '@app/I18n';
import {shouldUseChunkedUpload, uploadFileChunked} from '@app/lib/ChunkedUploadService';
import {CloudUpload} from '@app/lib/CloudUpload';
import http, {type HttpResponse} from '@app/lib/HttpClient';
import type {HttpError} from '@app/lib/HttpError';
@ -224,6 +225,32 @@ class MessageQueue extends Queue<MessageQueuePayload, HttpResponse<Message> | un
files = result.files;
}
if (hasAttachments && files?.length && attachments?.length) {
const abortController = new AbortController();
this.abortControllers.set(nonce, abortController);
try {
const chunkedResult = await this.performChunkedUploads(
channelId,
nonce,
files,
attachments,
abortController.signal,
);
files = chunkedResult.files;
attachments = chunkedResult.attachments;
} catch (error) {
this.abortControllers.delete(nonce);
const httpError = error as HttpError;
logger.error(`Chunked upload failed for channel ${channelId}:`, error);
this.handleSendError(channelId, nonce, httpError, i18n, payload.hasAttachments);
completed(null, undefined, error);
return;
}
this.abortControllers.delete(nonce);
}
const requestBody = buildMessageCreateRequest({
content: payload.content,
nonce,
@ -294,6 +321,77 @@ class MessageQueue extends Queue<MessageQueuePayload, HttpResponse<Message> | un
}
}
/**
 * Uploads every file that exceeds the chunked-upload threshold ahead of the
 * message create request, then returns the remaining inline files plus an
 * attachment list where chunked entries carry `uploaded_filename` and inline
 * entries are renumbered to match their position in the returned `files`.
 *
 * Progress: chunked bytes are mapped onto at most 90% of the sending-progress
 * bar, weighted by the chunked share of the total payload size.
 */
private async performChunkedUploads(
  channelId: string,
  nonce: string,
  files: Array<File>,
  attachments: Array<ApiAttachmentMetadata>,
  signal: AbortSignal,
): Promise<{files: Array<File>; attachments: Array<ApiAttachmentMetadata>}> {
  // Indices (into `files`) of files that must be uploaded via chunking.
  const largeFileIndices = new Set<number>();
  for (let i = 0; i < files.length; i++) {
    if (shouldUseChunkedUpload(files[i])) {
      largeFileIndices.add(i);
    }
  }
  if (largeFileIndices.size === 0) {
    return {files, attachments};
  }
  // Weight chunked progress by its share of total bytes, capped at 90% of
  // the bar (the remainder is left for the message create request itself).
  const totalChunkedSize = Array.from(largeFileIndices).reduce((sum, i) => sum + files[i].size, 0);
  const totalOverallSize = files.reduce((sum, f) => sum + f.size, 0);
  const chunkedRatio = totalOverallSize > 0 ? totalChunkedSize / totalOverallSize : 0;
  const chunkedProgressWeight = chunkedRatio * 90;
  // Bytes uploaded so far per chunked file, for aggregate progress reporting.
  const perFileProgress = new Map<number, number>();
  for (const i of largeFileIndices) {
    perFileProgress.set(i, 0);
  }
  const updatedAttachments = [...attachments];
  // Upload all large files concurrently; any failure rejects the whole batch.
  await Promise.all(
    Array.from(largeFileIndices).map(async (fileIndex) => {
      const file = files[fileIndex];
      const result = await uploadFileChunked(
        channelId,
        file,
        (loaded, _total) => {
          perFileProgress.set(fileIndex, loaded);
          const totalLoaded = Array.from(perFileProgress.values()).reduce((s, v) => s + v, 0);
          const ratio = totalChunkedSize > 0 ? totalLoaded / totalChunkedSize : 0;
          const overallProgress = ratio * chunkedProgressWeight;
          CloudUpload.updateSendingProgress(nonce, overallProgress);
        },
        signal,
      );
      if (updatedAttachments[fileIndex]) {
        updatedAttachments[fileIndex] = {
          ...updatedAttachments[fileIndex],
          uploaded_filename: result.upload_filename,
        };
      }
    }),
  );
  // Drop chunked files from the multipart payload and renumber the ids of the
  // remaining inline attachments to their new positions in `inlineFiles`.
  // NOTE(review): chunked attachments keep their ORIGINAL id while inline ids
  // are renumbered from 0 — if the server requires ids to be unique across the
  // whole attachment list, a chunked id could collide with a renumbered inline
  // id; confirm the server keys chunked entries by uploaded_filename instead.
  const inlineFiles: Array<File> = [];
  let inlineIndex = 0;
  const remappedAttachments = updatedAttachments.map((att, originalIndex) => {
    if (largeFileIndices.has(originalIndex)) {
      return att;
    }
    const newId = String(inlineIndex);
    inlineFiles.push(files[originalIndex]);
    inlineIndex++;
    return {...att, id: newId};
  });
  return {files: inlineFiles, attachments: remappedAttachments};
}
private async sendMultipartMessage(
channelId: string,
requestBody: MessageCreateRequest,

View File

@ -1192,7 +1192,7 @@ export class ScrollManager {
this.props = {...nextProps};
const {offsetHeight, scrollHeight} = this.getScrollerState();
const {scrollTop, offsetHeight, scrollHeight} = this.getScrollerState();
const heightChanged = this.isHeightChange(offsetHeight, scrollHeight);
const shouldForceBottom = preUpdatePinState?.isPinned ?? false;
@ -1276,6 +1276,9 @@ export class ScrollManager {
}
if (heightChanged) {
if (!shouldForceBottom && this.automaticAnchor) {
this.updateAutomaticAnchor(scrollTop);
}
this.fixScrollPosition(offsetHeight, scrollHeight, shouldForceBottom);
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -17,7 +17,6 @@
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import Config from '@app/Config';
import {getPreferredCompression} from '@app/lib/GatewayCompression';
import {type GatewayErrorData, GatewaySocket, type GatewaySocketProperties, GatewayState} from '@app/lib/GatewaySocket';
import {Logger} from '@app/lib/Logger';
@ -200,7 +199,7 @@ class GatewayConnectionStore {
logger.info(`Using gateway compression: ${compression}`);
let identifyFlags = Config.PUBLIC_RELEASE_CHANNEL === 'canary' ? GatewayIdentifyFlags.USE_CANARY_API : 0;
let identifyFlags = 0;
identifyFlags |= GatewayIdentifyFlags.DEBOUNCE_MESSAGE_REACTIONS;
const initialGuildId = SelectedGuildStore.selectedGuildId ?? null;
this.initialGuildIdAtIdentify = initialGuildId;

View File

@ -63,7 +63,7 @@
}
.searchInput::placeholder {
color: var(--text-primary-muted);
color: var(--text-tertiary);
}
.clearButton {

View File

@ -39,7 +39,8 @@ export async function prepareAttachmentsForNonce(
throw new Error('No message upload found');
}
const files = messageUpload.attachments.map((att) => att.file);
const inlineAttachments = messageUpload.attachments.filter((att) => !att.uploadedFilename);
const files = inlineAttachments.map((att) => att.file);
const attachments = favoriteMemeId ? undefined : mapMessageUploadAttachments(messageUpload.attachments);
return {attachments, files};
@ -54,5 +55,6 @@ export function mapMessageUploadAttachments(attachments: Array<CloudAttachment>)
flags: att.flags,
duration: att.duration != null ? Math.ceil(att.duration) : undefined,
waveform: att.waveform ?? undefined,
uploaded_filename: att.uploadedFilename,
}));
}

View File

@ -34,6 +34,7 @@ export interface ApiAttachmentMetadata {
flags?: number;
duration?: number;
waveform?: string;
uploaded_filename?: string;
}
export interface MessageCreateRequest {

View File

@ -280,8 +280,14 @@ function generateGuildGeneralPermissionSpec(i18n: I18n): PermissionSpec {
description: i18n._(msg`Change other members' nicknames.`),
flag: Permissions.MANAGE_NICKNAMES,
},
{
title: i18n._(msg`Create Emoji & Stickers`),
description: i18n._(msg`Upload new emoji and stickers, and manage your own creations.`),
flag: Permissions.CREATE_EXPRESSIONS,
},
{
title: i18n._(msg`Manage Emoji & Stickers`),
description: i18n._(msg`Edit or delete emoji and stickers created by other members.`),
flag: Permissions.MANAGE_EXPRESSIONS,
},
{

View File

@ -42,7 +42,7 @@ export function getSelectStyles<
const controlFocusBorderColor = isCompactOverlay ? 'transparent' : controlFocusBorder;
const controlBackgroundColor = isCompactOverlay ? 'transparent' : 'var(--form-surface-background)';
const valueColor = isCompactOverlay ? 'var(--white)' : 'var(--text-primary)';
const placeholderColor = isCompactOverlay ? 'rgb(255 255 255 / 0.92)' : 'var(--text-primary-muted)';
const placeholderColor = isCompactOverlay ? 'rgb(255 255 255 / 0.92)' : 'var(--text-tertiary)';
const indicatorColor = isCompactOverlay ? 'rgb(255 255 255 / 0.85)' : 'var(--text-tertiary)';
const indicatorHoverColor = isCompactOverlay ? 'var(--white)' : 'var(--text-primary)';

View File

@ -0,0 +1,29 @@
services:
nats_core:
image: nats:2-alpine
hostname: nats_core
command: ['-c', '/etc/nats/nats.conf']
env_file:
- /etc/fluxer/nats_core.env
volumes:
- ./nats.conf:/etc/nats/nats.conf:ro
networks:
- fluxer-shared
ports:
- '4222:4222'
deploy:
replicas: 1
restart_policy:
condition: on-failure
delay: 10s
max_attempts: 3
healthcheck:
test: ['CMD-SHELL', 'wget -q --spider http://localhost:8222/healthz || exit 1']
interval: 10s
timeout: 5s
retries: 5
start_period: 10s
networks:
fluxer-shared:
external: true

View File

@ -0,0 +1,6 @@
port: 4222
http_port: 8222
authorization {
token: "$NATS_AUTH_TOKEN"
}

View File

@ -0,0 +1,34 @@
services:
nats_jetstream:
image: nats:2-alpine
hostname: nats_jetstream
command: ['-c', '/etc/nats/nats.conf']
env_file:
- /etc/fluxer/nats_jetstream.env
volumes:
- ./nats.conf:/etc/nats/nats.conf:ro
- nats_jetstream_data:/data
networks:
- fluxer-shared
ports:
- '4223:4222'
deploy:
replicas: 1
restart_policy:
condition: on-failure
delay: 10s
max_attempts: 3
healthcheck:
test: ['CMD-SHELL', 'wget -q --spider http://localhost:8222/healthz || exit 1']
interval: 10s
timeout: 5s
retries: 5
start_period: 10s
networks:
fluxer-shared:
external: true
volumes:
nats_jetstream_data:
driver: local

View File

@ -0,0 +1,12 @@
port: 4222
http_port: 8222
jetstream {
store_dir: /data
max_mem: 256M
max_file: 10G
}
authorization {
token: "$NATS_AUTH_TOKEN"
}

View File

@ -1,2 +0,0 @@
!.gitignore
*

View File

@ -1,407 +0,0 @@
{
"name": "Fluxer Critical Alerts",
"description": "Critical alerts for Fluxer services",
"version": 2,
"alerts": [
{
"id": "high-api-error-rate",
"name": "High API Error Rate",
"type": "metric",
"condition": {
"query": "sum(rate(http_server_request_count{service_name='fluxer-api',http_response_status_code=~'5..'}[5m])) > 10",
"evaluation_interval": "1m",
"for": "5m"
},
"severity": "critical",
"annotations": {
"summary": "API error rate is above 10 req/s",
"description": "The fluxer-api service is experiencing a high error rate (5xx responses). This may indicate a service degradation or outage."
},
"labels": {
"service": "fluxer-api",
"alert_type": "error_rate"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-critical"
}
]
},
{
"id": "high-api-latency",
"name": "High API Latency",
"type": "metric",
"condition": {
"query": "histogram_quantile(0.95, sum(rate(http_server_request_duration_bucket{service_name='fluxer-api'}[5m])) > 1000",
"evaluation_interval": "1m",
"for": "10m"
},
"severity": "warning",
"annotations": {
"summary": "API P95 latency is above 1000ms",
"description": "The fluxer-api service is experiencing high latency. 95% of requests are taking longer than 1 second."
},
"labels": {
"service": "fluxer-api",
"alert_type": "latency"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-warning"
}
]
},
{
"id": "queue-depth-high",
"name": "Queue Depth Too High",
"type": "metric",
"condition": {
"query": "fluxer_queue_depth > 10000",
"evaluation_interval": "1m",
"for": "15m"
},
"severity": "warning",
"annotations": {
"summary": "Queue depth is above 10,000 jobs",
"description": "The job queue has accumulated more than 10,000 jobs. This may indicate processing is slower than job arrival."
},
"labels": {
"service": "fluxer-queue",
"alert_type": "queue_depth"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-warning"
}
]
},
{
"id": "queue-dlq-rate",
"name": "High Dead Letter Queue Rate",
"type": "metric",
"condition": {
"query": "sum(rate(fluxer_queue_dead_letter[5m])) > 5",
"evaluation_interval": "1m",
"for": "10m"
},
"severity": "critical",
"annotations": {
"summary": "DLQ rate is above 5 jobs/sec",
"description": "Jobs are being moved to the dead letter queue at a high rate. This may indicate persistent job failures."
},
"labels": {
"service": "fluxer-queue",
"alert_type": "dlq_rate"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-critical"
}
]
},
{
"id": "gateway-connection-drop",
"name": "Gateway Connection Drop Rate",
"type": "metric",
"condition": {
"query": "rate(gateway_websocket_disconnections[1m]) / rate(gateway_websocket_connections[1m]) > 0.5",
"evaluation_interval": "1m",
"for": "5m"
},
"severity": "critical",
"annotations": {
"summary": "Gateway disconnect rate exceeds 50% of connect rate",
"description": "WebSocket connections are dropping at an unusually high rate. This may indicate network issues or service instability."
},
"labels": {
"service": "fluxer-gateway",
"alert_type": "connection_stability"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-critical"
}
]
},
{
"id": "gateway-rpc-latency-high",
"name": "Gateway RPC Latency High",
"type": "metric",
"condition": {
"query": "gateway_rpc_latency_p95 > 500",
"evaluation_interval": "1m",
"for": "10m"
},
"severity": "warning",
"annotations": {
"summary": "Gateway RPC P95 latency above 500ms",
"description": "RPC calls from gateway to backend are experiencing high latency."
},
"labels": {
"service": "fluxer-gateway",
"alert_type": "latency"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-warning"
}
]
},
{
"id": "media-proxy-error-rate",
"name": "Media Proxy High Error Rate",
"type": "metric",
"condition": {
"query": "sum(rate(media_proxy_failure{service_name='fluxer-media-proxy'}[5m])) / sum(rate(http_server_request_count{service_name='fluxer-media-proxy'}[5m])) > 0.1",
"evaluation_interval": "1m",
"for": "10m"
},
"severity": "warning",
"annotations": {
"summary": "Media proxy error rate above 10%",
"description": "The media proxy is failing more than 10% of requests. This may indicate origin issues or cache problems."
},
"labels": {
"service": "fluxer-media-proxy",
"alert_type": "error_rate"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-warning"
}
]
},
{
"id": "media-proxy-timeout-rate",
"name": "Media Proxy High Timeout Rate",
"type": "metric",
"condition": {
"query": "sum(rate(media_proxy_failure{error_type='timeout'}[5m])) > 5",
"evaluation_interval": "1m",
"for": "5m"
},
"severity": "warning",
"annotations": {
"summary": "Media proxy timeout rate above 5 req/s",
"description": "The media proxy is experiencing a high rate of timeouts. This may indicate network issues or slow origin servers."
},
"labels": {
"service": "fluxer-media-proxy",
"alert_type": "timeout"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-warning"
}
]
},
{
"id": "telemetry-ingestion-stopped",
"name": "Telemetry Ingestion Stopped",
"type": "metric",
"condition": {
"query": "increase(signoz_traces_signoz_index_v2[15m]) == 0",
"evaluation_interval": "1m",
"for": "5m"
},
"severity": "critical",
"annotations": {
"summary": "No traces being ingested",
"description": "The SigNoz collector has not received any traces in the last 15 minutes. This may indicate a collector issue or service instrumentation failure."
},
"labels": {
"service": "signoz",
"alert_type": "telemetry"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-critical"
}
]
},
{
"id": "cron-job-overdue",
"name": "Cron Job Overdue",
"type": "metric",
"condition": {
"query": "time() - max(fluxer_queue_cron_tick_timestamp by (cron)) > 3600",
"evaluation_interval": "5m",
"for": "5m"
},
"severity": "warning",
"annotations": {
"summary": "Cron job has not executed in over 1 hour",
"description": "A scheduled cron job has not run in over an hour. This may indicate a hung cron process or scheduling issue."
},
"labels": {
"service": "fluxer-queue",
"alert_type": "cron"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-warning"
}
]
},
{
"id": "csam-match-detected",
"name": "CSAM Match Detected",
"type": "metric",
"condition": {
"query": "sum(rate(fluxer_csam_matches_total{service_name='fluxer-api'}[1m])) > 0",
"evaluation_interval": "1m",
"for": "0m"
},
"severity": "critical",
"annotations": {
"summary": "CSAM content has been detected",
"description": "CSAM content has been detected. Immediate review required."
},
"labels": {
"service": "fluxer-api",
"alert_type": "csam_match"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-critical"
}
]
},
{
"id": "photodna-api-error-rate-high",
"name": "PhotoDNA API Error Rate High",
"type": "metric",
"condition": {
"query": "sum(rate(fluxer_csam_photodna_api_total{service_name='fluxer-api',status='error'}[5m])) / sum(rate(fluxer_csam_photodna_api_total{service_name='fluxer-api'}[5m])) > 0.1",
"evaluation_interval": "1m",
"for": "5m"
},
"severity": "warning",
"annotations": {
"summary": "PhotoDNA API error rate exceeds 10%",
"description": "PhotoDNA API error rate exceeds 10%"
},
"labels": {
"service": "fluxer-api",
"alert_type": "photodna_error_rate"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-warning"
}
]
},
{
"id": "ncmec-submission-failure",
"name": "NCMEC Submission Failure",
"type": "metric",
"condition": {
"query": "sum(rate(fluxer_csam_ncmec_submissions{service_name='fluxer-api',status='error'}[5m])) > 0",
"evaluation_interval": "1m",
"for": "5m"
},
"severity": "critical",
"annotations": {
"summary": "NCMEC report submission has failed",
"description": "NCMEC report submission has failed. Manual intervention required."
},
"labels": {
"service": "fluxer-api",
"alert_type": "ncmec_submission"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-critical"
}
]
},
{
"id": "csam-scan-failure-rate-high",
"name": "CSAM Scan Failure Rate High",
"type": "metric",
"condition": {
"query": "sum(rate(fluxer_csam_scans_total{service_name='fluxer-api',status='error'}[5m])) / sum(rate(fluxer_csam_scans_total{service_name='fluxer-api'}[5m])) > 0.05",
"evaluation_interval": "1m",
"for": "5m"
},
"severity": "warning",
"annotations": {
"summary": "CSAM scan failure rate exceeds 5%",
"description": "CSAM scan failure rate exceeds 5%"
},
"labels": {
"service": "fluxer-api",
"alert_type": "csam_scan_failure_rate"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-warning"
}
]
},
{
"id": "photodna-api-latency-high",
"name": "PhotoDNA API Latency High",
"type": "metric",
"condition": {
"query": "histogram_quantile(0.95, sum(rate(fluxer_csam_photodna_api_duration_ms_bucket{service_name='fluxer-api'}[5m])) by (le)) > 5000",
"evaluation_interval": "1m",
"for": "5m"
},
"severity": "warning",
"annotations": {
"summary": "PhotoDNA API p95 latency exceeds 5 seconds",
"description": "PhotoDNA API p95 latency exceeds 5 seconds"
},
"labels": {
"service": "fluxer-api",
"alert_type": "photodna_latency"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-warning"
}
]
}
],
"notification_channels": {
"slack": {
"type": "webhook",
"url": "${ALERT_WEBHOOK_URL}",
"channel_mapping": {
"critical": "#alerts-critical",
"warning": "#alerts-warning"
}
}
}
}

View File

@ -1,329 +0,0 @@
groups:
- name: fluxer_api_alerts
interval: 30s
rules:
- alert: FluxerHighErrorRate
expr: |
(
rate(http_server_request_count{http_response_status_code=~"5.."}[5m])
/
rate(http_server_request_count[5m])
) > 0.05
and rate(http_server_request_count[5m]) > 10
for: 5m
labels:
severity: critical
service: fluxer-api
alert_type: error_rate
annotations:
summary: 'High error rate on {{ $labels.service_name }}'
description: 'Error rate is above 5% (minimum 10 requests/5m) on {{ $labels.service_name }}. Current value: {{ $value | humanizePercentage }}'
runbook: 'https://docs.fluxer.dev/runbooks/high-error-rate'
- alert: FluxerElevatedErrorRate
expr: |
(
rate(http_server_request_count{http_response_status_code=~"5.."}[5m])
/
rate(http_server_request_count[5m])
) > 0.01
and rate(http_server_request_count[5m]) > 10
for: 10m
labels:
severity: warning
service: fluxer-api
alert_type: error_rate
annotations:
summary: 'Elevated error rate on {{ $labels.service_name }}'
description: 'Error rate is above 1% on {{ $labels.service_name }}. Current value: {{ $value | humanizePercentage }}'
runbook: 'https://docs.fluxer.dev/runbooks/high-error-rate'
- name: fluxer_queue_alerts
interval: 30s
rules:
- alert: FluxerQueueDepthCritical
expr: |
fluxer_queue_depth{service_name="fluxer-queue"} > 10000
for: 5m
labels:
severity: critical
service: fluxer-queue
alert_type: queue_depth
annotations:
summary: 'Queue depth critically high for {{ $labels.queue_name }}'
description: 'Queue {{ $labels.queue_name }} has {{ $value }} jobs pending (threshold: 10,000). Jobs may be delayed or processing is stalled.'
runbook: 'https://docs.fluxer.dev/runbooks/queue-depth-critical'
- alert: FluxerQueueDepthElevated
expr: |
fluxer_queue_depth{service_name="fluxer-queue"} > 5000
for: 10m
labels:
severity: warning
service: fluxer-queue
alert_type: queue_depth
annotations:
summary: 'Queue depth elevated for {{ $labels.queue_name }}'
description: 'Queue {{ $labels.queue_name }} has {{ $value }} jobs pending (threshold: 5,000). Monitor for escalation.'
- alert: FluxerDLQRateCritical
expr: |
sum(rate(fluxer_queue_dead_letter{service_name="fluxer-queue"}[5m])) > 5
for: 5m
labels:
severity: critical
service: fluxer-queue
alert_type: dlq_rate
annotations:
summary: 'High dead letter queue rate'
description: 'Jobs are failing and moving to DLQ at rate {{ $value | humanize }} jobs/sec. Check job failures and error logs.'
runbook: 'https://docs.fluxer.dev/runbooks/high-dlq-rate'
- name: fluxer_gateway_alerts
interval: 30s
rules:
- alert: FluxerGatewayConnectionDropCritical
expr: |
sum(rate(gateway_websocket_disconnections{reason="error"}[1m])) by (service_name) > 10
for: 3m
labels:
severity: critical
service: fluxer-gateway
alert_type: connection_drop
annotations:
summary: 'Critical WebSocket error disconnect rate'
description: 'Gateway experiencing {{ $value | humanize }} error disconnects/min. This may indicate service instability or network issues.'
runbook: 'https://docs.fluxer.dev/runbooks/gateway-connection-drop'
- alert: FluxerGatewayDisconnectElevated
expr: |
sum(rate(gateway_websocket_disconnections{reason="error"}[1m])) by (service_name) > 5
for: 5m
labels:
severity: warning
service: fluxer-gateway
alert_type: connection_drop
annotations:
summary: 'Elevated WebSocket error disconnect rate'
description: 'Gateway experiencing {{ $value | humanize }} error disconnects/min. Monitor for escalation.'
- alert: FluxerGatewayDisconnectRatioHigh
expr: |
(
sum(rate(gateway_websocket_disconnections{reason="error"}[5m])) by (service_name)
/
sum(rate(gateway_websocket_connections[5m])) by (service_name)
) > 0.1
for: 5m
labels:
severity: critical
service: fluxer-gateway
alert_type: disconnect_ratio
annotations:
summary: 'Gateway disconnect ratio above 10%'
description: 'Error disconnects represent {{ $value | humanizePercentage }} of new connections. Check gateway stability.'
runbook: 'https://docs.fluxer.dev/runbooks/gateway-connection-drop'
- alert: FluxerGatewayRPCLatencyHigh
expr: |
histogram_quantile(0.95,
sum(rate(gateway_rpc_latency_bucket{service_name="fluxer-gateway"}[5m])) by (le)
) > 500
for: 10m
labels:
severity: warning
service: fluxer-gateway
alert_type: rpc_latency
annotations:
summary: 'Gateway RPC P95 latency above 500ms'
description: 'Gateway RPC calls experiencing high latency. Current P95: {{ $value | humanize }}ms'
runbook: 'https://docs.fluxer.dev/runbooks/gateway-rpc-latency'
- name: fluxer_log_alerts
interval: 30s
rules:
- alert: FluxerLogErrorSpikeCritical
expr: |
sum(rate(logs_count{severity_text="ERROR"}[5m])) by (service_name) > 50
for: 2m
labels:
severity: critical
alert_type: log_error_spike
annotations:
summary: 'Critical error log volume spike on {{ $labels.service_name }}'
description: 'Service {{ $labels.service_name }} logging {{ $value | humanize }} errors/sec. Check logs and traces for root cause.'
runbook: 'https://docs.fluxer.dev/runbooks/log-error-spike'
- alert: FluxerLogErrorElevated
expr: |
sum(rate(logs_count{severity_text="ERROR"}[5m])) by (service_name) > 20
for: 10m
labels:
severity: warning
alert_type: log_error_elevated
annotations:
summary: 'Elevated error log volume on {{ $labels.service_name }}'
description: 'Service {{ $labels.service_name }} logging {{ $value | humanize }} errors/sec. Monitor for escalation.'
- alert: FluxerLogWarningElevated
expr: |
sum(rate(logs_count{severity_text="WARN"}[5m])) by (service_name) > 100
for: 10m
labels:
severity: warning
alert_type: log_warning_elevated
annotations:
summary: 'Elevated warning log volume on {{ $labels.service_name }}'
description: 'Service {{ $labels.service_name }} logging {{ $value | humanize }} warnings/sec. Review warning patterns.'
- name: fluxer_api_performance_alerts
interval: 30s
rules:
- alert: FluxerAPILatencyCritical
expr: |
histogram_quantile(0.95,
sum(rate(http_server_request_duration_bucket{service_name="fluxer-api"}[5m])) by (le, http_route)
) > 2000
for: 5m
labels:
severity: critical
service: fluxer-api
alert_type: latency
annotations:
summary: 'Critical API latency on route {{ $labels.http_route }}'
description: 'P95 latency for route {{ $labels.http_route }} is above 2 seconds. Current: {{ $value | humanize }}ms'
runbook: 'https://docs.fluxer.dev/runbooks/high-api-latency'
- alert: FluxerAPILatencyElevated
expr: |
histogram_quantile(0.95,
sum(rate(http_server_request_duration_bucket{service_name="fluxer-api"}[5m])) by (le, http_route)
) > 1000
for: 10m
labels:
severity: warning
service: fluxer-api
alert_type: latency
annotations:
summary: 'Elevated API latency on route {{ $labels.http_route }}'
description: 'P95 latency for route {{ $labels.http_route }} is above 1 second. Current: {{ $value | humanize }}ms'
- name: fluxer_database_alerts
interval: 30s
rules:
- alert: FluxerDBLatencyCritical
expr: |
histogram_quantile(0.95,
sum(rate(db_query_latency_bucket[5m])) by (le, query_type)
) > 1000
for: 5m
labels:
severity: critical
alert_type: database_latency
annotations:
summary: 'Critical database query latency for {{ $labels.query_type }}'
description: 'P95 {{ $labels.query_type }} query latency above 1 second. Current: {{ $value | humanize }}ms'
runbook: 'https://docs.fluxer.dev/runbooks/database-latency'
- alert: FluxerDBConnectionPoolHigh
expr: |
db_connection_pool_active / db_connection_pool_max > 0.8
for: 10m
labels:
severity: warning
alert_type: connection_pool
annotations:
summary: 'Database connection pool usage above 80%'
description: 'Connection pool at {{ $value | humanizePercentage }} capacity. May lead to connection waits.'
runbook: 'https://docs.fluxer.dev/runbooks/connection-pool'
- name: fluxer_cache_alerts
interval: 30s
rules:
- alert: FluxerCacheHitRateLow
expr: |
sum(rate(cache_operation{status="hit"}[5m])) by (cache_name)
/
sum(rate(cache_operation{status=~"hit|miss"}[5m])) by (cache_name) < 0.5
for: 15m
labels:
severity: warning
alert_type: cache_efficiency
annotations:
summary: 'Low cache hit rate for {{ $labels.cache_name }}'
description: 'Cache {{ $labels.cache_name }} hit rate below 50%. Current: {{ $value | humanizePercentage }}'
runbook: 'https://docs.fluxer.dev/runbooks/low-cache-hit-rate'
- name: fluxer_worker_alerts
interval: 30s
rules:
- alert: FluxerWorkerFailureRateCritical
expr: |
sum(rate(fluxer_worker_task_failure[5m])) by (task_name) > 1
for: 5m
labels:
severity: critical
alert_type: worker_failure
annotations:
summary: 'Critical worker task failure rate for {{ $labels.task_name }}'
description: 'Worker task {{ $labels.task_name }} failing at {{ $value | humanize }} tasks/sec. Check task logs.'
runbook: 'https://docs.fluxer.dev/runbooks/worker-failures'
- alert: FluxerCronJobOverdue
expr: |
time() - max(fluxer_queue_cron_tick_timestamp by (cron)) > 3600
for: 5m
labels:
severity: warning
service: fluxer-queue
alert_type: cron
annotations:
summary: 'Cron job {{ $labels.cron }} has not executed in over 1 hour'
description: "Scheduled cron job hasn't run since {{ $value | humanizeTimestamp }}. May indicate hung process."
runbook: 'https://docs.fluxer.dev/runbooks/cron-overdue'
- name: fluxer_telemetry_alerts
interval: 60s
rules:
- alert: FluxerTelemetryIngestionStopped
expr: |
increase(signoz_traces_signoz_index_v2[15m]) == 0
for: 5m
labels:
severity: critical
alert_type: telemetry
annotations:
summary: 'No traces being ingested'
description: "SigNoz collector hasn't received traces in 15 minutes. Check collector health and service instrumentation."
runbook: 'https://docs.fluxer.dev/runbooks/telemetry-down'
- name: fluxer_media_proxy_alerts
interval: 30s
rules:
- alert: FluxerMediaProxyErrorRate
expr: |
sum(rate(media_proxy_failure{service_name="fluxer-media-proxy"}[5m]))
/
sum(rate(http_server_request_count{service_name="fluxer-media-proxy"}[5m])) > 0.1
for: 10m
labels:
severity: warning
service: fluxer-media-proxy
alert_type: error_rate
annotations:
summary: 'Media proxy error rate above 10%'
description: 'Media proxy failing {{ $value | humanizePercentage }} of requests. Check origin servers and cache.'
runbook: 'https://docs.fluxer.dev/runbooks/media-proxy-errors'
- alert: FluxerMediaProxyTimeoutRate
expr: |
sum(rate(media_proxy_failure{error_type="timeout"}[5m])) > 5
for: 5m
labels:
severity: warning
service: fluxer-media-proxy
alert_type: timeout
annotations:
summary: 'Media proxy timeout rate above 5 req/s'
description: 'Media proxy experiencing high timeout rate. May indicate network issues or slow origins.'
runbook: 'https://docs.fluxer.dev/runbooks/media-proxy-timeouts'

File diff suppressed because it is too large Load Diff

View File

@ -578,6 +578,19 @@ description: 'Admin object schemas from the Fluxer API.'
| region_id | [SnowflakeType](#snowflaketype) | ID of the region the server belongs to |
| server_id | [SnowflakeType](#snowflaketype) | ID of the voice server to delete |
<a id="deletewebauthncredentialrequest"></a>
## DeleteWebAuthnCredentialRequest
**Related endpoints**
- [`POST /admin/users/delete-webauthn-credential`](/api-reference/admin/delete-user-webauthn-credential)
| Field | Type | Description |
|-------|------|-------------|
| credential_id | [SnowflakeType](#snowflaketype) | ID of the WebAuthn credential to delete |
| user_id | [SnowflakeType](#snowflaketype) | |
<a id="disableforsuspiciousactivityrequest"></a>
## DisableForSuspiciousActivityRequest
@ -851,11 +864,13 @@ description: 'Admin object schemas from the Fluxer API.'
- [`POST /admin/users/change-log`](/api-reference/admin/get-user-change-log)
- [`POST /admin/users/change-username`](/api-reference/admin/change-user-username)
- [`POST /admin/users/clear-fields`](/api-reference/admin/clear-user-fields)
- [`POST /admin/users/delete-webauthn-credential`](/api-reference/admin/delete-user-webauthn-credential)
- [`POST /admin/users/disable-mfa`](/api-reference/admin/disable-user-mfa)
- [`POST /admin/users/disable-suspicious`](/api-reference/admin/disable-user-for-suspicious-activity)
- [`POST /admin/users/list-dm-channels`](/api-reference/admin/list-user-dm-channels)
- [`POST /admin/users/list-guilds`](/api-reference/admin/list-user-guilds)
- [`POST /admin/users/list-sessions`](/api-reference/admin/list-user-sessions)
- [`POST /admin/users/list-webauthn-credentials`](/api-reference/admin/list-user-webauthn-credentials)
- [`POST /admin/users/lookup`](/api-reference/admin/lookup-user)
- [`POST /admin/users/schedule-deletion`](/api-reference/admin/schedule-account-deletion)
- [`POST /admin/users/search`](/api-reference/admin/search-users)
@ -911,6 +926,8 @@ description: 'Admin object schemas from the Fluxer API.'
- [`POST /channels/{channel_id}/call/end`](/api-reference/channels/end-call-session)
- [`POST /channels/{channel_id}/call/ring`](/api-reference/channels/ring-call-recipients)
- [`POST /channels/{channel_id}/call/stop-ringing`](/api-reference/channels/stop-ringing-call-recipients)
- [`POST /channels/{channel_id}/chunked-uploads`](/api-reference/channels/initiate-a-chunked-upload-session)
- [`POST /channels/{channel_id}/chunked-uploads/{upload_id}/complete`](/api-reference/channels/complete-a-chunked-upload)
- [`POST /channels/{channel_id}/invites`](/api-reference/invites/create-channel-invite)
- [`POST /channels/{channel_id}/messages`](/api-reference/channels/send-a-message)
- [`POST /channels/{channel_id}/messages/{message_id}/ack`](/api-reference/channels/acknowledge-a-message)
@ -1017,6 +1034,7 @@ description: 'Admin object schemas from the Fluxer API.'
- [`POST /webhooks/{webhook_id}/{token}/github`](/api-reference/webhooks/execute-github-webhook)
- [`POST /webhooks/{webhook_id}/{token}/sentry`](/api-reference/webhooks/execute-sentry-webhook)
- [`POST /webhooks/{webhook_id}/{token}/slack`](/api-reference/webhooks/execute-slack-webhook)
- [`PUT /channels/{channel_id}/chunked-uploads/{upload_id}/chunks/{chunk_index}`](/api-reference/channels/upload-a-file-chunk)
- [`PUT /channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me`](/api-reference/channels/add-reaction-to-message)
- [`PUT /channels/{channel_id}/permissions/{overwrite_id}`](/api-reference/channels/set-permission-overwrite-for-channel)
- [`PUT /channels/{channel_id}/pins/{message_id}`](/api-reference/channels/pin-a-message)
@ -1788,6 +1806,18 @@ Type: [MessageShredStatusNotFoundResponse](#messageshredstatusnotfoundresponse)
|-------|------|-------------|
| servers | [VoiceServerAdminResponse](#voiceserveradminresponse)[] | List of voice servers |
<a id="listwebauthncredentialsrequest"></a>
## ListWebAuthnCredentialsRequest
**Related endpoints**
- [`POST /admin/users/list-webauthn-credentials`](/api-reference/admin/list-user-webauthn-credentials)
| Field | Type | Description |
|-------|------|-------------|
| user_id | [SnowflakeType](#snowflaketype) | |
<a id="lookupguildrequest"></a>
## LookupGuildRequest
@ -2775,6 +2805,17 @@ Type: Bitflags32
|-------|------|-------------|
| success | enum&lt;`true`&gt; | |
<a id="webauthncredentiallistresponse"></a>
## WebAuthnCredentialListResponse
**Related endpoints**
- [`GET /users/@me/mfa/webauthn/credentials`](/api-reference/users/list-webauthn-credentials)
- [`POST /admin/users/list-webauthn-credentials`](/api-reference/admin/list-user-webauthn-credentials)
Type: [WebAuthnCredentialResponse](#webauthncredentialresponse)[]
<a id="adminarchiveresponseschemasubjecttype"></a>
## AdminArchiveResponseSchemaSubjectType
@ -2871,6 +2912,7 @@ Type of search index to refresh
| `channel_messages` | Channel message search index |
| `guild_members` | Guild member search index |
| `favorite_memes` | Favourite meme search index |
| `discovery` | Discovery guild search index |
<a id="systemdmjobresponsestatus"></a>

View File

@ -360,6 +360,60 @@ Type: [ChannelUpdateTextRequest](#channelupdatetextrequest) \| [ChannelUpdateVoi
| url? | ?string | External URL for link channels |
| user_limit? | ?integer (int32) | Maximum users allowed in voice channel (0-99, 0 means unlimited) |
<a id="completechunkeduploadrequest"></a>
## CompleteChunkedUploadRequest
**Related endpoints**
- [`POST /channels/{channel_id}/chunked-uploads/{upload_id}/complete`](/api-reference/channels/complete-a-chunked-upload)
| Field | Type | Description |
|-------|------|-------------|
| etags | [CompleteChunkedUploadRequestEtagsItem](#completechunkeduploadrequestetagsitem)[] | Array of chunk ETags in order |
<a id="completechunkeduploadresponse"></a>
## CompleteChunkedUploadResponse
**Related endpoints**
- [`POST /channels/{channel_id}/chunked-uploads/{upload_id}/complete`](/api-reference/channels/complete-a-chunked-upload)
| Field | Type | Description |
|-------|------|-------------|
| content_type | string | The MIME type of the uploaded file |
| file_size | integer (int53) | The total size of the uploaded file in bytes |
| upload_filename | string | The temporary filename used to reference this upload |
<a id="createchunkeduploadrequest"></a>
## CreateChunkedUploadRequest
**Related endpoints**
- [`POST /channels/{channel_id}/chunked-uploads`](/api-reference/channels/initiate-a-chunked-upload-session)
| Field | Type | Description |
|-------|------|-------------|
| file_size | integer (int64) | The total size of the file in bytes |
| filename | string | The name of the file being uploaded |
<a id="createchunkeduploadresponse"></a>
## CreateChunkedUploadResponse
**Related endpoints**
- [`POST /channels/{channel_id}/chunked-uploads`](/api-reference/channels/initiate-a-chunked-upload-session)
| Field | Type | Description |
|-------|------|-------------|
| chunk_count | integer (int53) | The total number of chunks to upload |
| chunk_size | integer (int53) | The size of each chunk in bytes |
| upload_filename | string | The temporary filename used to reference this upload |
| upload_id | [SnowflakeType](#snowflaketype) | The unique identifier for the upload session |
<a id="embedauthorresponse"></a>
## EmbedAuthorResponse
@ -945,6 +999,18 @@ Type: [UserPartialResponse](#userpartialresponse)[]
|-------|------|-------------|
| region? | string | The preferred voice region for the stream (1-64 characters) |
<a id="uploadchunkresponse"></a>
## UploadChunkResponse
**Related endpoints**
- [`PUT /channels/{channel_id}/chunked-uploads/{upload_id}/chunks/{chunk_index}`](/api-reference/channels/upload-a-file-chunk)
| Field | Type | Description |
|-------|------|-------------|
| etag | string | The ETag of the uploaded chunk |
<a id="voiceregionadminresponse"></a>
## VoiceRegionAdminResponse
@ -1082,6 +1148,17 @@ The type of message
| `7` | A system message indicating a user joined |
| `19` | A reply message |
<a id="completechunkeduploadrequestetagsitem"></a>
## CompleteChunkedUploadRequestEtagsItem
Array of chunk ETags in order
| Field | Type | Description |
|-------|------|-------------|
| chunk_index | integer (int64) | The zero-based index of the chunk |
| etag | string | The ETag returned when the chunk was uploaded |
<a id="messagereactionresponseemoji"></a>
## MessageReactionResponseEmoji

View File

@ -1029,6 +1029,8 @@ The NSFW level of the guild
- [`POST /channels/{channel_id}/call/end`](/api-reference/channels/end-call-session)
- [`POST /channels/{channel_id}/call/ring`](/api-reference/channels/ring-call-recipients)
- [`POST /channels/{channel_id}/call/stop-ringing`](/api-reference/channels/stop-ringing-call-recipients)
- [`POST /channels/{channel_id}/chunked-uploads`](/api-reference/channels/initiate-a-chunked-upload-session)
- [`POST /channels/{channel_id}/chunked-uploads/{upload_id}/complete`](/api-reference/channels/complete-a-chunked-upload)
- [`POST /channels/{channel_id}/invites`](/api-reference/invites/create-channel-invite)
- [`POST /channels/{channel_id}/messages`](/api-reference/channels/send-a-message)
- [`POST /channels/{channel_id}/messages/{message_id}/ack`](/api-reference/channels/acknowledge-a-message)
@ -1060,6 +1062,7 @@ The NSFW level of the guild
- [`POST /webhooks/{webhook_id}/{token}/github`](/api-reference/webhooks/execute-github-webhook)
- [`POST /webhooks/{webhook_id}/{token}/sentry`](/api-reference/webhooks/execute-sentry-webhook)
- [`POST /webhooks/{webhook_id}/{token}/slack`](/api-reference/webhooks/execute-slack-webhook)
- [`PUT /channels/{channel_id}/chunked-uploads/{upload_id}/chunks/{chunk_index}`](/api-reference/channels/upload-a-file-chunk)
- [`PUT /channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me`](/api-reference/channels/add-reaction-to-message)
- [`PUT /channels/{channel_id}/permissions/{overwrite_id}`](/api-reference/channels/set-permission-overwrite-for-channel)
- [`PUT /channels/{channel_id}/pins/{message_id}`](/api-reference/channels/pin-a-message)

View File

@ -18,10 +18,10 @@ Resource tables use a compact notation:
## Resources by domain
- [Admin](/resources/admin) (169 schemas)
- [Admin](/resources/admin) (172 schemas)
- [Auth](/resources/auth) (37 schemas)
- [Billing](/resources/billing) (3 schemas)
- [Channels](/resources/channels) (66 schemas)
- [Channels](/resources/channels) (71 schemas)
- [Common](/resources/common) (35 schemas)
- [Gateway](/resources/gateway) (2 schemas)
- [Gifts](/resources/gifts) (1 schemas)
@ -37,7 +37,7 @@ Resource tables use a compact notation:
- [Saved media](/resources/saved_media) (5 schemas)
- [Search](/resources/search) (2 schemas)
- [Themes](/resources/themes) (2 schemas)
- [Users](/resources/users) (81 schemas)
- [Users](/resources/users) (80 schemas)
- [Webhooks](/resources/webhooks) (9 schemas)
## All schemas
@ -122,12 +122,16 @@ Resource tables use a compact notation:
- [ClearGuildFieldsRequest](/resources/admin#clearguildfieldsrequest)
- [ClearUserFieldsRequest](/resources/admin#clearuserfieldsrequest)
- [CodesResponse](/resources/admin#codesresponse)
- [CompleteChunkedUploadRequest](/resources/channels#completechunkeduploadrequest)
- [CompleteChunkedUploadResponse](/resources/channels#completechunkeduploadresponse)
- [ConnectionListResponse](/resources/common#connectionlistresponse)
- [ConnectionResponse](/resources/common#connectionresponse)
- [ConnectionVerificationResponse](/resources/common#connectionverificationresponse)
- [CreateAdminApiKeyRequest](/resources/admin#createadminapikeyrequest)
- [CreateAdminApiKeyResponse](/resources/admin#createadminapikeyresponse)
- [CreateCheckoutSessionRequest](/resources/billing#createcheckoutsessionrequest)
- [CreateChunkedUploadRequest](/resources/channels#createchunkeduploadrequest)
- [CreateChunkedUploadResponse](/resources/channels#createchunkeduploadresponse)
- [CreateConnectionRequest](/resources/common#createconnectionrequest)
- [CreateFavoriteMemeBodySchema](/resources/saved_media#createfavoritememebodyschema)
- [CreateFavoriteMemeFromUrlBodySchema](/resources/saved_media#createfavoritememefromurlbodyschema)
@ -150,6 +154,7 @@ Resource tables use a compact notation:
- [DeleteVoiceRegionRequest](/resources/admin#deletevoiceregionrequest)
- [DeleteVoiceResponse](/resources/admin#deletevoiceresponse)
- [DeleteVoiceServerRequest](/resources/admin#deletevoiceserverrequest)
- [DeleteWebAuthnCredentialRequest](/resources/admin#deletewebauthncredentialrequest)
- [DisableForSuspiciousActivityRequest](/resources/admin#disableforsuspiciousactivityrequest)
- [DisableMfaRequest](/resources/admin#disablemfarequest)
- [DisableTotpRequest](/resources/users#disabletotprequest)
@ -331,6 +336,7 @@ Resource tables use a compact notation:
- [ListVoiceRegionsResponse](/resources/admin#listvoiceregionsresponse)
- [ListVoiceServersRequest](/resources/admin#listvoiceserversrequest)
- [ListVoiceServersResponse](/resources/admin#listvoiceserversresponse)
- [ListWebAuthnCredentialsRequest](/resources/admin#listwebauthncredentialsrequest)
- [Locale](/resources/klipy#locale)
- [LoginRequest](/resources/auth#loginrequest)
- [LogoutAuthSessionsRequest](/resources/auth#logoutauthsessionsrequest)
@ -524,6 +530,7 @@ Resource tables use a compact notation:
- [UpdateVoiceRegionResponse](/resources/admin#updatevoiceregionresponse)
- [UpdateVoiceServerRequest](/resources/admin#updatevoiceserverrequest)
- [UpdateVoiceServerResponse](/resources/admin#updatevoiceserverresponse)
- [UploadChunkResponse](/resources/channels#uploadchunkresponse)
- [UrlResponse](/resources/billing#urlresponse)
- [UserAdminResponseSchema](/resources/common#useradminresponseschema)
- [UserAuthenticatorTypes](/resources/common#userauthenticatortypes)
@ -564,7 +571,7 @@ Resource tables use a compact notation:
- [WebAuthnAuthenticateRequest](/resources/auth#webauthnauthenticaterequest)
- [WebAuthnAuthenticationOptionsResponse](/resources/auth#webauthnauthenticationoptionsresponse)
- [WebAuthnChallengeResponse](/resources/users#webauthnchallengeresponse)
- [WebAuthnCredentialListResponse](/resources/users#webauthncredentiallistresponse)
- [WebAuthnCredentialListResponse](/resources/admin#webauthncredentiallistresponse)
- [WebAuthnCredentialResponse](/resources/common#webauthncredentialresponse)
- [WebAuthnCredentialUpdateRequest](/resources/users#webauthncredentialupdaterequest)
- [WebAuthnMfaRequest](/resources/auth#webauthnmfarequest)

View File

@ -1278,16 +1278,6 @@ UI theme preference
|-------|------|-------------|
| challenge | string | The WebAuthn challenge |
<a id="webauthncredentiallistresponse"></a>
## WebAuthnCredentialListResponse
**Related endpoints**
- [`GET /users/@me/mfa/webauthn/credentials`](/api-reference/users/list-webauthn-credentials)
Type: [WebAuthnCredentialResponse](#webauthncredentialresponse)[]
<a id="webauthncredentialupdaterequest"></a>
## WebAuthnCredentialUpdateRequest

View File

@ -601,7 +601,7 @@ When `enabled` = `true` and `provider` = `smtp`, the following properties are re
| from_name? | string | Default sender name. Default: `Fluxer` |
| provider? | enum&lt;`smtp`, `none`&gt; | Email provider selection. Default: `none` |
| smtp? | [smtp_email](#smtp-email) | |
| webhook_secret? | string | SendGrid signed event webhook public key (PEM or base64-encoded DER). |
| webhook_secret? | string | Sweego webhook signing secret (base64-encoded). |
<Expandable title="Example JSON">
```json
@ -1446,7 +1446,6 @@ Configuration for the Gateway service (WebSocket).
| Property | Type | Description |
|----------|------|-------------|
| admin_reload_secret | string | Secret used to trigger code hot-swapping/reloads. |
| api_canary_host? | string | Host/Port of the Canary API service (optional). Default: `""` |
| api_host | string | Host/Port of the API service to communicate with. |
| gateway_metrics_enabled? | boolean | Enable collection of gateway metrics. Default: `false` |
| gateway_metrics_report_interval_ms? | number | Interval in milliseconds to report gateway metrics. Default: `30000` |
@ -1473,7 +1472,6 @@ Configuration for the Gateway service (WebSocket).
"admin_reload_secret": "your_admin_reload_secret",
"api_host": "your_api_host",
"media_proxy_endpoint": "your_media_proxy_endpoint",
"api_canary_host": "",
"gateway_metrics_enabled": false,
"gateway_metrics_report_interval_ms": 30000,
"guild_shards": 1,

View File

@ -3,7 +3,8 @@
{cowboy, "2.14.2"},
{base64url, "1.0.1"},
{jose, "1.11.10"},
{ezstd, "1.1.0"}
{ezstd, "1.1.0"},
{enats, "1.2.0"}
]}.
{overrides, [
@ -13,6 +14,7 @@
{relx, [
{release, {fluxer_gateway, "0.0.0"}, [
fluxer_gateway,
enats,
sasl
]},
{dev_mode, true},
@ -54,7 +56,7 @@
{dialyzer, [
{plt_extra_apps, [
jose, ranch
jose, ranch, enats
]},
{warnings_file, "dialyzer.ignore-warnings"}
]}.

View File

@ -2,22 +2,31 @@
[{<<"base64url">>,{pkg,<<"base64url">>,<<"1.0.1">>},0},
{<<"cowboy">>,{pkg,<<"cowboy">>,<<"2.14.2">>},0},
{<<"cowlib">>,{pkg,<<"cowlib">>,<<"2.16.0">>},1},
{<<"enats">>,{pkg,<<"enats">>,<<"1.2.0">>},0},
{<<"enats_msg">>,{pkg,<<"enats_msg">>,<<"1.0.3">>},1},
{<<"ezstd">>,{pkg,<<"ezstd">>,<<"1.1.0">>},0},
{<<"jose">>,{pkg,<<"jose">>,<<"1.11.10">>},0},
{<<"opentelemetry_api">>,{pkg,<<"opentelemetry_api">>,<<"1.4.0">>},1},
{<<"ranch">>,{pkg,<<"ranch">>,<<"2.2.0">>},1}]}.
[
{pkg_hash,[
{<<"base64url">>, <<"F8C7F2DA04CA9A5D0F5F50258F055E1D699F0E8BF4CFDB30B750865368403CF6">>},
{<<"cowboy">>, <<"4008BE1DF6ADE45E4F2A4E9E2D22B36D0B5ABA4E20B0A0D7049E28D124E34847">>},
{<<"cowlib">>, <<"54592074EBBBB92EE4746C8A8846E5605052F29309D3A873468D76CDF932076F">>},
{<<"enats">>, <<"D7459C804013CAFA4AF880B18D446C48890D28D372D62AD66C76187E5779248D">>},
{<<"enats_msg">>, <<"50631124F37D88BE76A91A5B96A6565C5981EBF917CD819F0175CA658A966F43">>},
{<<"ezstd">>, <<"D3B483D6ACFADFB65DBA4015371E6D54526DBF3D9EF0941B5ADD8BF5890731F4">>},
{<<"jose">>, <<"A903F5227417BD2A08C8A00A0CBCC458118BE84480955E8D251297A425723F83">>},
{<<"opentelemetry_api">>, <<"63CA1742F92F00059298F478048DFB826F4B20D49534493D6919A0DB39B6DB04">>},
{<<"ranch">>, <<"25528F82BC8D7C6152C57666CA99EC716510FE0925CB188172F41CE93117B1B0">>}]},
{pkg_hash_ext,[
{<<"base64url">>, <<"F9B3ADD4731A02A9B0410398B475B33E7566A695365237A6BDEE1BB447719F5C">>},
{<<"cowboy">>, <<"569081DA046E7B41B5DF36AA359BE71A0C8874E5B9CFF6F747073FC57BAF1AB9">>},
{<<"cowlib">>, <<"7F478D80D66B747344F0EA7708C187645CFCC08B11AA424632F78E25BF05DB51">>},
{<<"enats">>, <<"20DEB3CB1D3E960194DF8B136C40D2DB085B485BBA5E493B340AB5F9FD2BED22">>},
{<<"enats_msg">>, <<"C4F2139E5144FABC99AFE01B8B016AD9DA278CDDC60857AC5D5AFB0AD1283534">>},
{<<"ezstd">>, <<"28CFA0ED6CC3922095AD5BA0F23392A1664273358B17184BAA909868361184E7">>},
{<<"jose">>, <<"0D6CD36FF8BA174DB29148FC112B5842186B68A90CE9FC2B3EC3AFE76593E614">>},
{<<"opentelemetry_api">>, <<"3DFBBFAA2C2ED3121C5C483162836C4F9027DEF469C41578AF5EF32589FCFC58">>},
{<<"ranch">>, <<"FA0B99A1780C80218A4197A59EA8D3BDAE32FBFF7E88527D7D8A4787EFF4F8E7">>}]}
].

View File

@ -384,8 +384,6 @@ disconnect_user_after_pending_timeout(ConnectionId, UserId, SessionId, State) ->
VoiceStates = maps:get(voice_states, State),
case maps:is_key(UserId, VoiceStates) of
true ->
%% Keep the active participant state when LiveKit confirm arrives late.
%% Reconciliation and explicit leave/disconnect paths handle true ghosts.
{noreply, State#{pending_connections => NewPending}};
false ->
NewSessions = remove_session_entry(SessionId, maps:get(sessions, State)),

View File

@ -23,11 +23,12 @@
start(_StartType, _StartArgs) ->
fluxer_gateway_env:load(),
otel_metrics:init(),
passive_sync_registry:init(),
guild_counts_cache:init(),
Port = fluxer_gateway_env:get(port),
Dispatch = cowboy_router:compile([
{'_', [
{<<"/_health">>, health_handler, []},
{<<"/_rpc">>, gateway_rpc_http_handler, []},
{<<"/_admin/reload">>, hot_reload_handler, []},
{<<"/">>, gateway_handler, []}
]}

View File

@ -43,17 +43,15 @@ load_from(Path) when is_list(Path) ->
-spec build_config(map()) -> config().
build_config(Json) ->
Service = get_map(Json, [<<"services">>, <<"gateway">>]),
Gateway = get_map(Json, [<<"gateway">>]),
Nats = get_map(Json, [<<"services">>, <<"nats">>]),
Telemetry = get_map(Json, [<<"telemetry">>]),
Sentry = get_map(Json, [<<"sentry">>]),
Vapid = get_map(Json, [<<"auth">>, <<"vapid">>]),
#{
port => get_int(Service, <<"port">>, 8080),
rpc_tcp_port => get_int(Service, <<"rpc_tcp_port">>, 8772),
api_host => get_env_or_string("FLUXER_GATEWAY_API_HOST", Service, <<"api_host">>, "api"),
api_canary_host => get_optional_string(Service, <<"api_canary_host">>),
admin_reload_secret => get_optional_binary(Service, <<"admin_reload_secret">>),
rpc_secret_key => get_binary(Gateway, <<"rpc_secret">>, undefined),
nats_core_url => get_string(Nats, <<"core_url">>, "nats://127.0.0.1:4222"),
nats_auth_token => get_string(Nats, <<"auth_token">>, ""),
identify_rate_limit_enabled => get_bool(Service, <<"identify_rate_limit_enabled">>, false),
push_enabled => get_bool(Service, <<"push_enabled">>, true),
push_user_guild_settings_cache_mb => get_int(
@ -73,18 +71,12 @@ build_config(Json) ->
get_int(Service, <<"push_badge_counts_cache_ttl_seconds">>, 60),
push_dispatcher_max_inflight => get_int(Service, <<"push_dispatcher_max_inflight">>, 16),
push_dispatcher_max_queue => get_int(Service, <<"push_dispatcher_max_queue">>, 2048),
gateway_http_rpc_connect_timeout_ms =>
get_int(Service, <<"gateway_http_rpc_connect_timeout_ms">>, 5000),
gateway_http_rpc_recv_timeout_ms =>
get_int(Service, <<"gateway_http_rpc_recv_timeout_ms">>, 30000),
gateway_http_push_connect_timeout_ms =>
get_int(Service, <<"gateway_http_push_connect_timeout_ms">>, 3000),
gateway_http_push_recv_timeout_ms =>
get_int(Service, <<"gateway_http_push_recv_timeout_ms">>, 5000),
gateway_http_rpc_max_concurrency =>
get_int(Service, <<"gateway_http_rpc_max_concurrency">>, 512),
gateway_rpc_tcp_max_input_buffer_bytes =>
get_int(Service, <<"gateway_rpc_tcp_max_input_buffer_bytes">>, 2097152),
gateway_http_push_max_concurrency =>
get_int(Service, <<"gateway_http_push_max_concurrency">>, 256),
gateway_http_failure_threshold =>
@ -148,27 +140,6 @@ get_optional_bool(Map, Key) ->
get_string(Map, Key, Default) when is_list(Default) ->
to_string(get_value(Map, Key), Default).
-spec get_env_or_string(string(), map(), binary(), string()) -> string().
get_env_or_string(EnvVar, Map, Key, Default) when is_list(EnvVar), is_list(Default) ->
case os:getenv(EnvVar) of
false -> get_string(Map, Key, Default);
"" -> get_string(Map, Key, Default);
Value -> Value
end.
-spec get_optional_string(map(), binary()) -> string() | undefined.
get_optional_string(Map, Key) ->
case get_value(Map, Key) of
undefined ->
undefined;
Value ->
Clean = string:trim(to_string(Value, "")),
case Clean of
"" -> undefined;
_ -> Clean
end
end.
-spec get_binary(map(), binary(), binary() | undefined) -> binary() | undefined.
get_binary(Map, Key, Default) ->
to_binary(get_value(Map, Key), Default).

View File

@ -32,7 +32,7 @@ init([]) ->
},
Children = [
child_spec(gateway_http_client, gateway_http_client),
child_spec(gateway_rpc_tcp_server, gateway_rpc_tcp_server),
child_spec(gateway_nats_rpc, gateway_nats_rpc),
child_spec(session_manager, session_manager),
child_spec(presence_cache, presence_cache),
child_spec(presence_bus, presence_bus),

View File

@ -35,7 +35,6 @@
-spec parse_compression(binary() | undefined) -> compression().
parse_compression(<<"none">>) ->
none;
%% TODO: temporarily disabled re-enable zstd-stream once compression issues are resolved
parse_compression(<<"zstd-stream">>) ->
none;
parse_compression(_) ->
@ -123,7 +122,6 @@ parse_compression_test_() ->
?_assertEqual(none, parse_compression(<<>>)),
?_assertEqual(none, parse_compression(<<"none">>)),
?_assertEqual(none, parse_compression(<<"invalid">>)),
%% zstd-stream temporarily disabled always returns none
?_assertEqual(none, parse_compression(<<"zstd-stream">>))
].

View File

@ -517,11 +517,16 @@ handle_resume(Data, State) ->
Token = maps:get(<<"token">>, Data),
SessionId = maps:get(<<"session_id">>, Data),
Seq = maps:get(<<"seq">>, Data),
case session_manager:lookup(SessionId) of
{ok, Pid} when is_pid(Pid) ->
handle_resume_with_session(Pid, Token, SessionId, Seq, State);
{error, not_found} ->
handle_resume_session_not_found(State)
case is_binary(SessionId) of
false ->
handle_resume_session_not_found(State);
true ->
case session_manager:lookup(SessionId) of
{ok, Pid} when is_pid(Pid) ->
handle_resume_with_session(Pid, Token, SessionId, Seq, State);
{error, _} ->
handle_resume_session_not_found(State)
end
end.
-spec handle_voice_state_update(pid(), map(), state()) -> ws_result().

View File

@ -0,0 +1,260 @@
%% Copyright (C) 2026 Fluxer Contributors
%%
%% This file is part of Fluxer.
%%
%% Fluxer is free software: you can redistribute it and/or modify
%% it under the terms of the GNU Affero General Public License as published by
%% the Free Software Foundation, either version 3 of the License, or
%% (at your option) any later version.
%%
%% Fluxer is distributed in the hope that it will be useful,
%% but WITHOUT ANY WARRANTY; without even the implied warranty of
%% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
%% GNU Affero General Public License for more details.
%%
%% You should have received a copy of the GNU Affero General Public License
%% along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
-module(gateway_nats_rpc).
-behaviour(gen_server).
-export([start_link/0, get_connection/0]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
-define(DEFAULT_MAX_HANDLERS, 1024).
-define(RECONNECT_DELAY_MS, 2000).
-define(RPC_SUBJECT_PREFIX, <<"rpc.gateway.">>).
-define(RPC_SUBJECT_WILDCARD, <<"rpc.gateway.>">>).
-define(QUEUE_GROUP, <<"gateway">>).
-spec start_link() -> {ok, pid()} | {error, term()}.
%% Start the NATS RPC subscriber gen_server, registered locally as ?MODULE.
start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-spec get_connection() -> {ok, nats:conn() | undefined} | {error, term()}.
%% Fetch the current NATS connection handle. Returns {ok, undefined} while
%% the server is disconnected or still (re)connecting.
get_connection() ->
    gen_server:call(?MODULE, get_connection).
-spec init([]) -> {ok, map()}.
init([]) ->
    %% Trap exits so terminate/2 runs and can close the NATS connection.
    process_flag(trap_exit, true),
    %% Kick off the first connection attempt asynchronously so init returns fast.
    self() ! connect,
    {ok, #{
        %% Active NATS connection handle, or undefined while disconnected.
        conn => undefined,
        %% Subscription id returned by nats:sub/3 once subscribed.
        sub => undefined,
        %% Number of in-flight spawned RPC handler processes.
        handler_count => 0,
        %% Back-pressure cap on concurrent handlers (see max_handlers/0).
        max_handlers => max_handlers(),
        %% Monitor reference on the connection process, for 'DOWN' detection.
        monitor_ref => undefined
    }}.
-spec handle_call(term(), gen_server:from(), map()) -> {reply, term(), map()}.
%% get_connection returns the current connection ({ok, undefined} while
%% disconnected); any other request is acknowledged with ok.
handle_call(get_connection, _From, #{conn := Conn} = State) ->
    {reply, {ok, Conn}, State};
handle_call(_Request, _From, State) ->
    {reply, ok, State}.
-spec handle_cast(term(), map()) -> {noreply, map()}.
%% This server uses no casts; ignore anything that arrives.
handle_cast(_Msg, State) ->
    {noreply, State}.
-spec handle_info(term(), map()) -> {noreply, map()}.
%% Message protocol:
%%   connect                       - (re)establish the NATS connection
%%   {Conn, ready}                 - handshake done; subscribe to RPC subject
%%   {Conn, closed} / {Conn, {error, _}} - drop conn state, schedule reconnect
%%   {Conn, Sid, {msg, ...}}       - inbound RPC request from enats
%%   {handler_done, Pid}           - a spawned RPC handler finished
%%   {'DOWN', ...}                 - the connection process itself died
handle_info(connect, State) ->
    {noreply, do_connect(State)};
handle_info({Conn, ready}, #{conn := Conn} = State) ->
    %% Subscription is deferred until the connection reports ready.
    {noreply, do_subscribe(State)};
handle_info({Conn, closed}, #{conn := Conn} = State) ->
    logger:warning("Gateway NATS RPC connection closed, reconnecting"),
    {noreply, schedule_reconnect(State#{conn => undefined, sub => undefined, monitor_ref => undefined})};
handle_info({Conn, {error, Reason}}, #{conn := Conn} = State) ->
    logger:warning("Gateway NATS RPC connection error: ~p, reconnecting", [Reason]),
    {noreply, schedule_reconnect(State#{conn => undefined, sub => undefined, monitor_ref => undefined})};
handle_info({Conn, _Sid, {msg, Subject, Payload, MsgOpts}},
    #{conn := Conn, handler_count := HandlerCount, max_handlers := MaxHandlers} = State) ->
    %% Only request/reply messages are serviced; fire-and-forget (no reply_to)
    %% messages are dropped.
    case maps:get(reply_to, MsgOpts, undefined) of
        undefined ->
            {noreply, State};
        ReplyTo ->
            case HandlerCount >= MaxHandlers of
                true ->
                    %% At capacity: shed load with an explicit error reply
                    %% instead of queueing unbounded work.
                    ErrorResponse = iolist_to_binary(json:encode(#{
                        <<"ok">> => false,
                        <<"error">> => <<"overloaded">>
                    })),
                    nats:pub(Conn, ReplyTo, ErrorResponse),
                    {noreply, State};
                false ->
                    %% Each request runs in its own process; the after clause
                    %% guarantees the counter is released even on crash.
                    Parent = self(),
                    spawn(fun() ->
                        try
                            handle_rpc_request(Conn, Subject, Payload, ReplyTo)
                        after
                            Parent ! {handler_done, self()}
                        end
                    end),
                    {noreply, State#{handler_count => HandlerCount + 1}}
            end
    end;
handle_info({handler_done, _Pid}, #{handler_count := HandlerCount} = State) when HandlerCount > 0 ->
    {noreply, State#{handler_count => HandlerCount - 1}};
handle_info({handler_done, _Pid}, State) ->
    %% Guard against underflow if a stale handler_done arrives.
    {noreply, State};
handle_info({'DOWN', MRef, process, Conn, Reason}, #{conn := Conn, monitor_ref := MRef} = State) ->
    logger:warning("Gateway NATS RPC connection process died: ~p, reconnecting", [Reason]),
    {noreply, schedule_reconnect(State#{conn => undefined, sub => undefined, monitor_ref => undefined})};
handle_info(_Info, State) ->
    {noreply, State}.
-spec terminate(term(), map()) -> ok.
%% Best-effort close of the NATS connection when the server stops; a failing
%% disconnect must never prevent a clean shutdown.
terminate(_Reason, #{conn := Conn}) when Conn =/= undefined ->
    try
        nats:disconnect(Conn)
    catch
        _:_ -> ok
    end,
    logger:info("Gateway NATS RPC subscriber stopped"),
    ok;
terminate(_Reason, _State) ->
    ok.
-spec code_change(term(), map(), term()) -> {ok, map()}.
%% No state migration is needed across hot code upgrades.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
-spec do_connect(map()) -> map().
%% Establish the NATS connection from the configured URL and auth token.
%% On success the connection process is monitored (so 'DOWN' is noticed) and
%% stored in state; subscribing happens later when {Conn, ready} arrives.
%% On any failure a reconnect is scheduled instead of crashing the server.
do_connect(State) ->
    NatsUrl = fluxer_gateway_env:get(nats_core_url),
    AuthToken = fluxer_gateway_env:get(nats_auth_token),
    case parse_nats_url(NatsUrl) of
        {ok, Host, Port} ->
            Opts = build_connect_opts(AuthToken),
            case nats:connect(Host, Port, Opts) of
                {ok, Conn} ->
                    MRef = nats:monitor(Conn),
                    logger:info("Gateway NATS RPC connected to ~s:~p", [Host, Port]),
                    State#{conn => Conn, monitor_ref => MRef};
                {error, Reason} ->
                    logger:error("Gateway NATS RPC failed to connect: ~p", [Reason]),
                    schedule_reconnect(State)
            end;
        {error, Reason} ->
            logger:error("Gateway NATS RPC failed to parse URL: ~p", [Reason]),
            schedule_reconnect(State)
    end.
-spec do_subscribe(map()) -> map().
%% Subscribe to the gateway RPC wildcard subject once a connection exists.
%% The queue group makes NATS load-balance requests across gateway nodes.
%% Without a connection, or on a failed subscribe, the state is unchanged.
do_subscribe(#{conn := undefined} = State) ->
    State;
do_subscribe(#{conn := Conn} = State) ->
    SubOpts = #{queue_group => ?QUEUE_GROUP},
    case nats:sub(Conn, ?RPC_SUBJECT_WILDCARD, SubOpts) of
        {ok, Sid} ->
            logger:info("Gateway NATS RPC subscribed to ~s with queue group ~s",
                [?RPC_SUBJECT_WILDCARD, ?QUEUE_GROUP]),
            State#{sub => Sid};
        {error, Reason} ->
            logger:error("Gateway NATS RPC failed to subscribe: ~p", [Reason]),
            State
    end.
-spec handle_rpc_request(nats:conn(), binary(), binary(), binary()) -> ok.
%% Run one inbound RPC end-to-end: resolve the method name from the subject,
%% execute it, and publish the JSON-encoded reply to the reply subject.
handle_rpc_request(Conn, Subject, Payload, ReplyTo) ->
    Reply = execute_rpc_method(strip_rpc_prefix(Subject), Payload),
    nats:pub(Conn, ReplyTo, iolist_to_binary(json:encode(Reply))),
    ok.
-spec strip_rpc_prefix(binary()) -> binary().
%% Drop the "rpc.gateway." prefix to obtain the bare method name; subjects
%% without the prefix are passed through untouched.
strip_rpc_prefix(Subject) ->
    case Subject of
        <<"rpc.gateway.", Method/binary>> -> Method;
        _ -> Subject
    end.
-spec execute_rpc_method(binary(), binary()) -> map().
%% Decode the JSON payload, dispatch to gateway_rpc_router, and wrap the
%% result in an ok/error envelope. Every failure mode is mapped to an error
%% envelope so the caller always receives a JSON-encodable reply.
execute_rpc_method(Method, PayloadBin) ->
    try
        Params = json:decode(PayloadBin),
        Result = gateway_rpc_router:execute(Method, Params),
        #{<<"ok">> => true, <<"result">> => Result}
    catch
        %% The router signals expected failures via throw({error, Message}).
        throw:{error, Message} ->
            #{<<"ok">> => false, <<"error">> => error_binary(Message)};
        %% gen_server:call timeouts inside the router surface as exits,
        %% either bare or wrapped in a {timeout, _} tuple.
        exit:timeout ->
            #{<<"ok">> => false, <<"error">> => <<"timeout">>};
        exit:{timeout, _} ->
            #{<<"ok">> => false, <<"error">> => <<"timeout">>};
        Class:Reason ->
            %% Anything else (bad JSON, router crash) is logged and reported
            %% as an opaque internal error.
            logger:error(
                "Gateway NATS RPC method execution failed. method=~ts class=~p reason=~p",
                [Method, Class, Reason]
            ),
            #{<<"ok">> => false, <<"error">> => <<"internal_error">>}
    end.
-spec error_binary(term()) -> binary().
%% Normalize an arbitrary error term into a UTF-8 binary suitable for the
%% JSON error envelope. Binaries pass through; charlists and atoms are
%% converted; anything else is pretty-printed via ~p.
error_binary(Value) ->
    if
        is_binary(Value) ->
            Value;
        is_list(Value) ->
            unicode:characters_to_binary(Value);
        is_atom(Value) ->
            atom_to_binary(Value, utf8);
        true ->
            unicode:characters_to_binary(io_lib:format("~p", [Value]))
    end.
-spec parse_nats_url(term()) -> {ok, string(), inet:port_number()} | {error, term()}.
%% Parse a NATS URL into {ok, Host, Port}. Accepts charlists or binaries,
%% optional nats:// or tls:// schemes, bracketed IPv6 literals, and defaults
%% the port to 4222 when omitted. Any other term is rejected.
parse_nats_url(Url) when is_list(Url) ->
    parse_nats_url(list_to_binary(Url));
parse_nats_url(<<"nats://", Rest/binary>>) ->
    parse_host_port(Rest);
parse_nats_url(<<"tls://", Rest/binary>>) ->
    parse_host_port(Rest);
parse_nats_url(Url) when is_binary(Url) ->
    parse_host_port(Url);
parse_nats_url(_) ->
    {error, invalid_nats_url}.

-spec parse_host_port(binary()) -> {ok, string(), inet:port_number()} | {error, term()}.
%% Split "host[:port]" into its parts. A bracketed host ("[::1]:4222") is
%% treated as an IPv6 literal; without this, splitting on the first ":"
%% inside the address made every IPv6 URL fail with invalid_port.
parse_host_port(<<"[", Rest/binary>>) ->
    case binary:split(Rest, <<"]">>) of
        [Host, <<>>] ->
            {ok, binary_to_list(Host), 4222};
        [Host, <<":", PortBin/binary>>] ->
            make_host_port(Host, PortBin);
        _ ->
            %% Missing closing bracket, or trailing junk after it.
            {error, invalid_nats_url}
    end;
parse_host_port(HostPort) ->
    case binary:split(HostPort, <<":">>) of
        [Host, PortBin] ->
            make_host_port(Host, PortBin);
        [Host] ->
            {ok, binary_to_list(Host), 4222}
    end.

-spec make_host_port(binary(), binary()) -> {ok, string(), inet:port_number()} | {error, term()}.
%% Convert the port text to an integer; non-numeric ports yield invalid_port.
make_host_port(Host, PortBin) ->
    try
        {ok, binary_to_list(Host), binary_to_integer(PortBin)}
    catch
        _:_ -> {error, invalid_port}
    end.
-spec build_connect_opts(term()) -> map().
%% Build enats connect options. An auth token is included only when a
%% non-empty binary or charlist token is configured; anything else yields
%% the bare options. buffer_size => 0 disables client-side write buffering.
build_connect_opts(Token) when is_binary(Token), byte_size(Token) > 0 ->
    #{auth_token => Token, buffer_size => 0};
build_connect_opts("") ->
    #{buffer_size => 0};
build_connect_opts(Token) when is_list(Token) ->
    #{auth_token => list_to_binary(Token), buffer_size => 0};
build_connect_opts(_) ->
    #{buffer_size => 0}.
-spec schedule_reconnect(map()) -> map().
%% Arrange a connect retry after a fixed delay; the state is returned unchanged.
schedule_reconnect(State) ->
    erlang:send_after(?RECONNECT_DELAY_MS, self(), connect),
    State.
-spec max_handlers() -> pos_integer().
%% Upper bound on concurrently spawned RPC handler processes. Reuses the
%% HTTP RPC concurrency setting; falls back to ?DEFAULT_MAX_HANDLERS when
%% the configured value is missing or not a positive integer.
max_handlers() ->
    case fluxer_gateway_env:get(gateway_http_rpc_max_concurrency) of
        Value when is_integer(Value), Value > 0 ->
            Value;
        _ ->
            ?DEFAULT_MAX_HANDLERS
    end.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").

%% Unit coverage for URL parsing: scheme stripping, default port when
%% omitted, charlist input, and rejection of non-URL terms.
parse_nats_url_test() ->
    ?assertEqual({ok, "127.0.0.1", 4222}, parse_nats_url(<<"nats://127.0.0.1:4222">>)),
    ?assertEqual({ok, "localhost", 4222}, parse_nats_url(<<"nats://localhost:4222">>)),
    ?assertEqual({ok, "localhost", 4222}, parse_nats_url(<<"nats://localhost">>)),
    ?assertEqual({ok, "127.0.0.1", 4222}, parse_nats_url("nats://127.0.0.1:4222")),
    ?assertEqual({error, invalid_nats_url}, parse_nats_url(undefined)).
-endif.

View File

@ -35,15 +35,11 @@ execute_method(<<"guild.dispatch">>, #{
GuildId = validation:snowflake_or_throw(<<"guild_id">>, GuildIdBin),
with_guild(GuildId, fun(Pid) ->
EventAtom = constants:dispatch_event_atom(Event),
case
gen_server:call(
Pid, {dispatch, #{event => EventAtom, data => Data}}, ?GUILD_CALL_TIMEOUT
)
of
ok ->
true;
_ -> throw({error, <<"dispatch_error">>})
end
IsAlive = erlang:is_process_alive(Pid),
logger:info("rpc guild.dispatch: guild_id=~p event=~p pid=~p alive=~p",
[GuildId, EventAtom, Pid, IsAlive]),
gen_server:cast(Pid, {dispatch, #{event => EventAtom, data => Data}}),
true
end);
execute_method(<<"guild.get_counts">>, #{<<"guild_id">> := GuildIdBin}) ->
GuildId = validation:snowflake_or_throw(<<"guild_id">>, GuildIdBin),
@ -78,29 +74,23 @@ execute_method(<<"guild.get_data">>, #{<<"guild_id">> := GuildIdBin, <<"user_id"
execute_method(<<"guild.get_member">>, #{<<"guild_id">> := GuildIdBin, <<"user_id">> := UserIdBin}) ->
GuildId = validation:snowflake_or_throw(<<"guild_id">>, GuildIdBin),
UserId = validation:snowflake_or_throw(<<"user_id">>, UserIdBin),
with_guild(GuildId, fun(Pid) ->
Request = #{user_id => UserId},
case gen_server:call(Pid, {get_guild_member, Request}, ?GUILD_CALL_TIMEOUT) of
#{success := true, member_data := MemberData} ->
#{<<"success">> => true, <<"member_data">> => MemberData};
#{success := false} ->
#{<<"success">> => false};
_ ->
throw({error, <<"guild_member_error">>})
end
end);
case get_member_cached_or_rpc(GuildId, UserId) of
{ok, MemberData} when is_map(MemberData) ->
#{<<"success">> => true, <<"member_data">> => MemberData};
{ok, undefined} ->
#{<<"success">> => false};
error ->
throw({error, <<"guild_member_error">>})
end;
execute_method(<<"guild.has_member">>, #{<<"guild_id">> := GuildIdBin, <<"user_id">> := UserIdBin}) ->
GuildId = validation:snowflake_or_throw(<<"guild_id">>, GuildIdBin),
UserId = validation:snowflake_or_throw(<<"user_id">>, UserIdBin),
with_guild(GuildId, fun(Pid) ->
Request = #{user_id => UserId},
case gen_server:call(Pid, {has_member, Request}, ?GUILD_CALL_TIMEOUT) of
#{has_member := HasMember} when is_boolean(HasMember) ->
#{<<"has_member">> => HasMember};
_ ->
throw({error, <<"membership_check_error">>})
end
end);
case get_has_member_cached_or_rpc(GuildId, UserId) of
{ok, HasMember} ->
#{<<"has_member">> => HasMember};
error ->
throw({error, <<"membership_check_error">>})
end;
execute_method(<<"guild.list_members">>, #{
<<"guild_id">> := GuildIdBin, <<"limit">> := Limit, <<"offset">> := Offset
}) ->
@ -429,9 +419,9 @@ execute_method(<<"guild.update_member_voice">>, #{
}) ->
GuildId = validation:snowflake_or_throw(<<"guild_id">>, GuildIdBin),
UserId = validation:snowflake_or_throw(<<"user_id">>, UserIdBin),
with_guild(GuildId, fun(Pid) ->
with_voice_server(GuildId, fun(VoicePid, _GuildPid) ->
Request = #{user_id => UserId, mute => Mute, deaf => Deaf},
case gen_server:call(Pid, {update_member_voice, Request}, ?GUILD_CALL_TIMEOUT) of
case gen_server:call(VoicePid, {update_member_voice, Request}, ?GUILD_CALL_TIMEOUT) of
#{success := true} -> #{<<"success">> => true};
#{error := Error} -> throw({error, normalize_voice_rpc_error(Error)})
end
@ -443,9 +433,9 @@ execute_method(
GuildId = validation:snowflake_or_throw(<<"guild_id">>, GuildIdBin),
UserId = validation:snowflake_or_throw(<<"user_id">>, UserIdBin),
ConnectionId = maps:get(<<"connection_id">>, Params, null),
with_guild(GuildId, fun(Pid) ->
with_voice_server(GuildId, fun(VoicePid, _GuildPid) ->
Request = #{user_id => UserId, connection_id => ConnectionId},
case gen_server:call(Pid, {disconnect_voice_user, Request}, ?GUILD_CALL_TIMEOUT) of
case gen_server:call(VoicePid, {disconnect_voice_user, Request}, ?GUILD_CALL_TIMEOUT) of
#{success := true} -> #{<<"success">> => true};
#{error := Error} -> throw({error, normalize_voice_rpc_error(Error)})
end
@ -464,11 +454,11 @@ execute_method(
<<"expected_channel_id">>, ExpectedChannelIdBin
),
ConnectionId = maps:get(<<"connection_id">>, Params, undefined),
with_guild(GuildId, fun(Pid) ->
with_voice_server(GuildId, fun(VoicePid, _GuildPid) ->
Request = build_disconnect_request(UserId, ExpectedChannelId, ConnectionId),
case
gen_server:call(
Pid, {disconnect_voice_user_if_in_channel, Request}, ?GUILD_CALL_TIMEOUT
VoicePid, {disconnect_voice_user_if_in_channel, Request}, ?GUILD_CALL_TIMEOUT
)
of
#{success := true, ignored := true} -> #{<<"success">> => true, <<"ignored">> => true};
@ -481,11 +471,11 @@ execute_method(<<"guild.disconnect_all_voice_users_in_channel">>, #{
}) ->
GuildId = validation:snowflake_or_throw(<<"guild_id">>, GuildIdBin),
ChannelId = validation:snowflake_or_throw(<<"channel_id">>, ChannelIdBin),
with_guild(GuildId, fun(Pid) ->
with_voice_server(GuildId, fun(VoicePid, _GuildPid) ->
Request = #{channel_id => ChannelId},
case
gen_server:call(
Pid, {disconnect_all_voice_users_in_channel, Request}, ?GUILD_CALL_TIMEOUT
VoicePid, {disconnect_all_voice_users_in_channel, Request}, ?GUILD_CALL_TIMEOUT
)
of
#{success := true, disconnected_count := Count} ->
@ -499,11 +489,11 @@ execute_method(<<"guild.confirm_voice_connection_from_livekit">>, Params) ->
ConnectionId = maps:get(<<"connection_id">>, Params),
TokenNonce = maps:get(<<"token_nonce">>, Params, undefined),
GuildId = validation:snowflake_or_throw(<<"guild_id">>, GuildIdBin),
with_guild(GuildId, fun(Pid) ->
with_voice_server(GuildId, fun(VoicePid, _GuildPid) ->
Request = #{connection_id => ConnectionId, token_nonce => TokenNonce},
case
gen_server:call(
Pid, {confirm_voice_connection_from_livekit, Request}, ?GUILD_CALL_TIMEOUT
VoicePid, {confirm_voice_connection_from_livekit, Request}, ?GUILD_CALL_TIMEOUT
)
of
#{success := true} -> #{<<"success">> => true};
@ -518,8 +508,8 @@ execute_method(<<"guild.get_voice_states_for_channel">>, Params) ->
GuildIdBin = maps:get(<<"guild_id">>, Params),
ChannelIdBin = maps:get(<<"channel_id">>, Params),
GuildId = validation:snowflake_or_throw(<<"guild_id">>, GuildIdBin),
with_guild(GuildId, fun(Pid) ->
case gen_server:call(Pid, {get_voice_states_for_channel, ChannelIdBin}, 10000) of
with_voice_server(GuildId, fun(VoicePid, _GuildPid) ->
case gen_server:call(VoicePid, {get_voice_states_for_channel, ChannelIdBin}, 10000) of
#{voice_states := VoiceStates} ->
#{<<"voice_states">> => VoiceStates};
_ ->
@ -530,8 +520,8 @@ execute_method(<<"guild.get_pending_joins_for_channel">>, Params) ->
GuildIdBin = maps:get(<<"guild_id">>, Params),
ChannelIdBin = maps:get(<<"channel_id">>, Params),
GuildId = validation:snowflake_or_throw(<<"guild_id">>, GuildIdBin),
with_guild(GuildId, fun(Pid) ->
case gen_server:call(Pid, {get_pending_joins_for_channel, ChannelIdBin}, 10000) of
with_voice_server(GuildId, fun(VoicePid, _GuildPid) ->
case gen_server:call(VoicePid, {get_pending_joins_for_channel, ChannelIdBin}, 10000) of
#{pending_joins := PendingJoins} ->
#{<<"pending_joins">> => PendingJoins};
_ ->
@ -559,7 +549,7 @@ execute_method(<<"guild.move_member">>, #{
connection_id => ConnectionId
}
),
with_guild(GuildId, fun(Pid) ->
with_voice_server(GuildId, fun(VoicePid, GuildPid) ->
Request = #{
user_id => UserId,
moderator_id => ModeratorId,
@ -567,10 +557,10 @@ execute_method(<<"guild.move_member">>, #{
connection_id => ConnectionId
},
handle_move_member_result(
gen_server:call(Pid, {move_member, Request}, ?GUILD_CALL_TIMEOUT),
gen_server:call(VoicePid, {move_member, Request}, ?GUILD_CALL_TIMEOUT),
GuildId,
ChannelId,
Pid
GuildPid
)
end);
execute_method(<<"guild.get_voice_state">>, #{
@ -578,9 +568,9 @@ execute_method(<<"guild.get_voice_state">>, #{
}) ->
GuildId = validation:snowflake_or_throw(<<"guild_id">>, GuildIdBin),
UserId = validation:snowflake_or_throw(<<"user_id">>, UserIdBin),
with_guild(GuildId, fun(Pid) ->
with_voice_server(GuildId, fun(VoicePid, _GuildPid) ->
Request = #{user_id => UserId},
case gen_server:call(Pid, {get_voice_state, Request}, ?GUILD_CALL_TIMEOUT) of
case gen_server:call(VoicePid, {get_voice_state, Request}, ?GUILD_CALL_TIMEOUT) of
#{voice_state := null} -> #{<<"voice_state">> => null};
#{voice_state := VoiceState} -> #{<<"voice_state">> => VoiceState};
_ -> throw({error, <<"voice_state_error">>})
@ -591,11 +581,11 @@ execute_method(<<"guild.switch_voice_region">>, #{
}) ->
GuildId = validation:snowflake_or_throw(<<"guild_id">>, GuildIdBin),
ChannelId = validation:snowflake_or_throw(<<"channel_id">>, ChannelIdBin),
with_guild(GuildId, fun(Pid) ->
with_voice_server(GuildId, fun(VoicePid, GuildPid) ->
Request = #{channel_id => ChannelId},
case gen_server:call(Pid, {switch_voice_region, Request}, ?GUILD_CALL_TIMEOUT) of
case gen_server:call(VoicePid, {switch_voice_region, Request}, ?GUILD_CALL_TIMEOUT) of
#{success := true} ->
spawn(fun() -> guild_voice:switch_voice_region(GuildId, ChannelId, Pid) end),
spawn(fun() -> guild_voice:switch_voice_region(GuildId, ChannelId, GuildPid) end),
#{<<"success">> => true};
#{error := Error} ->
throw({error, normalize_voice_rpc_error(Error)})
@ -644,12 +634,26 @@ execute_method(<<"guild.batch_voice_state_update">>, #{<<"updates">> := UpdatesB
-spec fetch_online_count_entry(integer()) -> map() | undefined.
%% Build the wire entry for a guild's member/online counts. Served from
%% guild_counts_cache when possible; a cache miss falls back to asking the
%% guild process directly.
fetch_online_count_entry(GuildId) ->
    case guild_counts_cache:get(GuildId) of
        {ok, MemberCount, OnlineCount} ->
            #{
                <<"guild_id">> => integer_to_binary(GuildId),
                <<"member_count">> => MemberCount,
                <<"online_count">> => OnlineCount
            };
        miss ->
            fetch_online_count_entry_from_process(GuildId)
    end.
-spec fetch_online_count_entry_from_process(integer()) -> map() | undefined.
fetch_online_count_entry_from_process(GuildId) ->
case get_guild_pid(GuildId) of
{ok, Pid} ->
case gen_server:call(Pid, {get_counts}, ?GUILD_CALL_TIMEOUT) of
#{presence_count := PresenceCount} ->
#{member_count := MemberCount, presence_count := PresenceCount} ->
#{
<<"guild_id">> => integer_to_binary(GuildId),
<<"member_count">> => MemberCount,
<<"online_count">> => PresenceCount
};
_ ->
@ -670,6 +674,23 @@ with_guild(GuildId, Fun, NotFoundError) ->
_ -> throw({error, NotFoundError})
end.
-spec with_voice_server(integer(), fun((pid(), pid()) -> T)) -> T when T :: term().
%% Run Fun with the guild's voice-server pid and guild pid. When no
%% dedicated voice server is registered, the guild process serves voice
%% calls itself; an unresolvable guild throws guild_not_found.
with_voice_server(GuildId, Fun) ->
    case get_guild_pid(GuildId) of
        {ok, GuildPid} ->
            Fun(resolve_voice_pid(GuildId, GuildPid), GuildPid);
        _ ->
            throw({error, <<"guild_not_found">>})
    end.
-spec resolve_voice_pid(integer(), pid()) -> pid().
%% Prefer the dedicated voice-server process for the guild; fall back to
%% the guild process when none is registered.
resolve_voice_pid(GuildId, GuildPidFallback) ->
    case guild_voice_server:lookup(GuildId) of
        {error, not_found} -> GuildPidFallback;
        {ok, VoicePid} -> VoicePid
    end.
-spec get_guild_pid(integer()) -> {ok, pid()} | error.
get_guild_pid(GuildId) ->
case lookup_guild_pid_from_cache(GuildId) of
@ -792,6 +813,56 @@ get_viewable_channels_via_rpc(GuildId, UserId) ->
error
end.
-spec get_has_member_cached_or_rpc(integer(), integer()) -> {ok, boolean()} | error.
%% Answer membership from the permission cache when possible; only a cache
%% miss pays for the gen_server round-trip to the guild process.
get_has_member_cached_or_rpc(GuildId, UserId) ->
    case guild_permission_cache:has_member(GuildId, UserId) of
        {error, not_found} -> get_has_member_via_rpc(GuildId, UserId);
        {ok, _HasMember} = Hit -> Hit
    end.
-spec get_has_member_via_rpc(integer(), integer()) -> {ok, boolean()} | error.
%% Ask the guild process whether UserId is a member. Returns error when the
%% guild process cannot be resolved or replies with an unexpected shape.
get_has_member_via_rpc(GuildId, UserId) ->
    case get_guild_pid(GuildId) of
        {ok, Pid} ->
            Request = #{user_id => UserId},
            case gen_server:call(Pid, {has_member, Request}, ?GUILD_CALL_TIMEOUT) of
                %% Guard against a malformed reply leaking a non-boolean out.
                #{has_member := HasMember} when is_boolean(HasMember) ->
                    {ok, HasMember};
                _ ->
                    error
            end;
        error ->
            error
    end.
-spec get_member_cached_or_rpc(integer(), integer()) -> {ok, map() | undefined} | error.
%% Fetch member data from the permission cache when present ({ok, undefined}
%% means cached-as-absent); only a cache miss triggers the RPC fallback.
get_member_cached_or_rpc(GuildId, UserId) ->
    case guild_permission_cache:get_member(GuildId, UserId) of
        {error, not_found} -> get_member_via_rpc(GuildId, UserId);
        {ok, _MemberOrUndefined} = Hit -> Hit
    end.
-spec get_member_via_rpc(integer(), integer()) -> {ok, map() | undefined} | error.
%% Ask the guild process for the member record. {ok, undefined} means the
%% guild answered "not a member"; error means the guild process could not
%% be resolved or the reply had an unexpected shape.
get_member_via_rpc(GuildId, UserId) ->
    case get_guild_pid(GuildId) of
        {ok, Pid} ->
            Request = #{user_id => UserId},
            case gen_server:call(Pid, {get_guild_member, Request}, ?GUILD_CALL_TIMEOUT) of
                #{success := true, member_data := MemberData} ->
                    {ok, MemberData};
                #{success := false} ->
                    {ok, undefined};
                _ ->
                    error
            end;
        error ->
            error
    end.
-spec parse_channel_id(binary()) -> integer() | undefined.
%% "0" is the wire sentinel for "no channel"; anything else must be a valid
%% snowflake (validation throws on malformed input).
parse_channel_id(<<"0">>) -> undefined;
parse_channel_id(ChannelIdBin) -> validation:snowflake_or_throw(<<"channel_id">>, ChannelIdBin).
@ -892,11 +963,12 @@ parse_voice_update(
-spec process_voice_update({integer(), integer(), boolean(), boolean(), term()}) -> map().
process_voice_update({GuildId, UserId, Mute, Deaf, ConnectionId}) ->
case gen_server:call(guild_manager, {start_or_lookup, GuildId}, ?GUILD_LOOKUP_TIMEOUT) of
{ok, Pid} ->
{ok, GuildPid} ->
VoicePid = resolve_voice_pid(GuildId, GuildPid),
Request = #{
user_id => UserId, mute => Mute, deaf => Deaf, connection_id => ConnectionId
},
case gen_server:call(Pid, {update_member_voice, Request}, ?GUILD_CALL_TIMEOUT) of
case gen_server:call(VoicePid, {update_member_voice, Request}, ?GUILD_CALL_TIMEOUT) of
#{success := true} ->
#{
<<"guild_id">> => integer_to_binary(GuildId),
@ -1062,4 +1134,50 @@ get_viewable_channels_cached_or_rpc_prefers_cache_test() ->
ok = guild_permission_cache:delete(GuildId)
end.
%% With guild data seeded in the permission cache, membership checks must be
%% answered from the cache (no guild process exists for this id).
get_has_member_cached_or_rpc_prefers_cache_test() ->
    GuildId = 12348,
    UserId = 502,
    Data = #{
        <<"guild">> => #{<<"owner_id">> => <<"999">>},
        <<"roles">> => [],
        <<"members">> => #{
            UserId => #{
                <<"user">> => #{<<"id">> => integer_to_binary(UserId)},
                <<"roles">> => []
            }
        },
        <<"channels">> => []
    },
    ok = guild_permission_cache:put_data(GuildId, Data),
    try
        ?assertEqual({ok, true}, get_has_member_cached_or_rpc(GuildId, UserId)),
        ?assertEqual({ok, false}, get_has_member_cached_or_rpc(GuildId, 99999))
    after
        %% Always drop the seeded entry so other tests see a cold cache.
        ok = guild_permission_cache:delete(GuildId)
    end.
%% Member lookups must come from the seeded cache entry: a known user yields
%% its cached record, an unknown user yields {ok, undefined}.
get_member_cached_or_rpc_prefers_cache_test() ->
    GuildId = 12349,
    UserId = 503,
    Data = #{
        <<"guild">> => #{<<"owner_id">> => <<"999">>},
        <<"roles">> => [],
        <<"members">> => #{
            UserId => #{
                <<"user">> => #{<<"id">> => integer_to_binary(UserId)},
                <<"roles">> => [],
                <<"nick">> => <<"CacheNick">>
            }
        },
        <<"channels">> => []
    },
    ok = guild_permission_cache:put_data(GuildId, Data),
    try
        {ok, MemberData} = get_member_cached_or_rpc(GuildId, UserId),
        ?assertEqual(<<"CacheNick">>, maps:get(<<"nick">>, MemberData)),
        ?assertEqual({ok, undefined}, get_member_cached_or_rpc(GuildId, 99999))
    after
        %% Always drop the seeded entry so other tests see a cold cache.
        ok = guild_permission_cache:delete(GuildId)
    end.
-endif.

View File

@ -1,167 +0,0 @@
%% Copyright (C) 2026 Fluxer Contributors
%%
%% This file is part of Fluxer.
%%
%% Fluxer is free software: you can redistribute it and/or modify
%% it under the terms of the GNU Affero General Public License as published by
%% the Free Software Foundation, either version 3 of the License, or
%% (at your option) any later version.
%%
%% Fluxer is distributed in the hope that it will be useful,
%% but WITHOUT ANY WARRANTY; without even the implied warranty of
%% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
%% GNU Affero General Public License for more details.
%%
%% You should have received a copy of the GNU Affero General Public License
%% along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
-module(gateway_rpc_http_handler).
-export([init/2]).
-define(JSON_HEADERS, #{<<"content-type">> => <<"application/json">>}).
-spec init(cowboy_req:req(), term()) -> {ok, cowboy_req:req(), term()}.
%% Cowboy handler entry point: only POST is accepted; every other verb is
%% answered with 405 plus an Allow header.
init(Req0, State) ->
    handle_method(cowboy_req:method(Req0), Req0, State).

%% Route by HTTP method.
handle_method(<<"POST">>, Req0, State) ->
    handle_post(Req0, State);
handle_method(_Method, Req0, State) ->
    Req = cowboy_req:reply(405, #{<<"allow">> => <<"POST">>}, <<>>, Req0),
    {ok, Req, State}.
-spec handle_post(cowboy_req:req(), term()) -> {ok, cowboy_req:req(), term()}.
%% Authorize first, then read/decode the body, then dispatch. authorize/1
%% has already replied on failure, so its error tuple is returned as-is.
handle_post(Req0, State) ->
    case authorize(Req0) of
        ok ->
            case read_body(Req0) of
                {ok, Decoded, Req1} ->
                    handle_decoded_body(Decoded, Req1, State);
                {error, ErrorBody, Req1} ->
                    respond(400, ErrorBody, Req1, State)
            end;
        {error, Req1} ->
            %% 401/500 response already sent by authorize/1.
            {ok, Req1, State}
    end.
-spec handle_decoded_body(map(), cowboy_req:req(), term()) -> {ok, cowboy_req:req(), term()}.
%% Validate the decoded JSON envelope: "method" must be a binary and
%% "params" (optional, defaulting to #{}) must be a map before dispatch.
handle_decoded_body(Decoded, Req0, State) ->
    Method = maps:get(<<"method">>, Decoded, undefined),
    Params = maps:get(<<"params">>, Decoded, #{}),
    if
        Method =:= undefined ->
            respond(400, #{<<"error">> => <<"Missing method">>}, Req0, State);
        not is_binary(Method) ->
            respond(400, #{<<"error">> => <<"Invalid method">>}, Req0, State);
        not is_map(Params) ->
            respond(400, #{<<"error">> => <<"Invalid params">>}, Req0, State);
        true ->
            execute_method(Method, Params, Req0, State)
    end.
-spec authorize(cowboy_req:req()) -> ok | {error, cowboy_req:req()}.
%% Require an Authorization header. On failure a JSON 401 is sent here and
%% the replied request is returned so the caller can stop processing.
authorize(Req0) ->
    case cowboy_req:header(<<"authorization">>, Req0) of
        undefined ->
            Req = cowboy_req:reply(
                401,
                ?JSON_HEADERS,
                json:encode(#{<<"error">> => <<"Unauthorized">>}),
                Req0
            ),
            {error, Req};
        AuthHeader ->
            authorize_with_secret(AuthHeader, Req0)
    end.
-spec authorize_with_secret(binary(), cowboy_req:req()) -> ok | {error, cowboy_req:req()}.
%% Compare the presented header against "Bearer <rpc_secret_key>". A
%% missing/non-binary secret is a server misconfiguration and yields 500.
authorize_with_secret(AuthHeader, Req0) ->
    case fluxer_gateway_env:get(rpc_secret_key) of
        undefined ->
            Req = cowboy_req:reply(
                500,
                ?JSON_HEADERS,
                json:encode(#{<<"error">> => <<"RPC secret not configured">>}),
                Req0
            ),
            {error, Req};
        Secret when is_binary(Secret) ->
            Expected = <<"Bearer ", Secret/binary>>,
            check_auth_header(AuthHeader, Expected, Req0)
    end.
-spec check_auth_header(binary(), binary(), cowboy_req:req()) -> ok | {error, cowboy_req:req()}.
%% Constant-time comparison of the presented Authorization header against
%% the expected "Bearer <secret>" value; mismatches get a JSON 401.
check_auth_header(AuthHeader, Expected, Req0) ->
    case secure_compare(AuthHeader, Expected) of
        true -> ok;
        false -> {error, reply_unauthorized(Req0)}
    end.

%% Emit the shared JSON 401 reply.
reply_unauthorized(Req0) ->
    cowboy_req:reply(
        401,
        ?JSON_HEADERS,
        json:encode(#{<<"error">> => <<"Unauthorized">>}),
        Req0
    ).
-spec secure_compare(binary(), binary()) -> boolean().
%% Constant-time equality for binaries. Lengths are compared first because
%% crypto:hash_equals/2 requires equal-sized inputs.
secure_compare(Left, Right) when is_binary(Left), is_binary(Right) ->
    byte_size(Left) =:= byte_size(Right) andalso crypto:hash_equals(Left, Right).
-spec read_body(cowboy_req:req()) ->
    {ok, map(), cowboy_req:req()} | {error, map(), cowboy_req:req()}.
%% Read the full request body (accumulating chunks) and decode it as JSON.
read_body(Req0) ->
    read_body_chunks(Req0, <<>>).
-spec read_body_chunks(cowboy_req:req(), binary()) ->
    {ok, map(), cowboy_req:req()} | {error, map(), cowboy_req:req()}.
%% Accumulate body chunks until cowboy reports the final one, then decode.
read_body_chunks(Req0, Acc) ->
    case cowboy_req:read_body(Req0) of
        {ok, Body, Req1} ->
            %% Last chunk: decode the complete payload.
            FullBody = <<Acc/binary, Body/binary>>,
            decode_body(FullBody, Req1);
        {more, Body, Req1} ->
            read_body_chunks(Req1, <<Acc/binary, Body/binary>>)
    end.
-spec decode_body(binary(), cowboy_req:req()) ->
    {ok, map(), cowboy_req:req()} | {error, map(), cowboy_req:req()}.
%% Decode the request body as JSON. The top-level value must be an object;
%% arrays/scalars are rejected as an invalid request body. Uses try/of/catch
%% instead of the fragile `catch` form so an exception can never be
%% mistaken for a successfully decoded value.
decode_body(Body, Req0) ->
    try json:decode(Body) of
        Decoded when is_map(Decoded) ->
            {ok, Decoded, Req0};
        _NotAnObject ->
            {error, #{<<"error">> => <<"Invalid request body">>}, Req0}
    catch
        _:_ ->
            {error, #{<<"error">> => <<"Invalid JSON payload">>}, Req0}
    end.
-spec execute_method(binary(), map(), cowboy_req:req(), term()) -> {ok, cowboy_req:req(), term()}.
%% Dispatch the RPC to the router and map outcomes to HTTP statuses:
%% thrown {error, Msg} -> 400, call timeouts -> 504, anything else -> 500.
execute_method(Method, Params, Req0, State) ->
    try
        Result = gateway_rpc_router:execute(Method, Params),
        respond(200, #{<<"result">> => Result}, Req0, State)
    catch
        throw:{error, Message} ->
            respond(400, #{<<"error">> => Message}, Req0, State);
        exit:timeout ->
            respond(504, #{<<"error">> => <<"timeout">>}, Req0, State);
        %% gen_server:call timeouts exit with {timeout, {gen_server, call, _}}.
        exit:{timeout, _} ->
            respond(504, #{<<"error">> => <<"timeout">>}, Req0, State);
        _:_ ->
            respond(500, #{<<"error">> => <<"Internal error">>}, Req0, State)
    end.
-spec respond(pos_integer(), map(), cowboy_req:req(), term()) -> {ok, cowboy_req:req(), term()}.
%% Encode Body as JSON, send it with the given status, and finish the request.
respond(Status, Body, Req0, State) ->
    Encoded = json:encode(Body),
    {ok, cowboy_req:reply(Status, ?JSON_HEADERS, Encoded, Req0), State}.

View File

@ -1,446 +0,0 @@
%% Copyright (C) 2026 Fluxer Contributors
%%
%% This file is part of Fluxer.
%%
%% Fluxer is free software: you can redistribute it and/or modify
%% it under the terms of the GNU Affero General Public License as published by
%% the Free Software Foundation, either version 3 of the License, or
%% (at your option) any later version.
%%
%% Fluxer is distributed in the hope that it will be useful,
%% but WITHOUT ANY WARRANTY; without even the implied warranty of
%% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
%% GNU Affero General Public License for more details.
%%
%% You should have received a copy of the GNU Affero General Public License
%% along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
-module(gateway_rpc_tcp_connection).
-export([serve/1]).
-define(DEFAULT_MAX_INFLIGHT, 1024).
-define(DEFAULT_MAX_INPUT_BUFFER_BYTES, 2097152).
-define(DEFAULT_DISPATCH_RESERVE_DIVISOR, 8).
-define(MAX_FRAME_BYTES, 1048576).
-define(PROTOCOL_VERSION, <<"fluxer.rpc.tcp.v1">>).
-type state() :: #{
socket := inet:socket(),
buffer := binary(),
authenticated := boolean(),
inflight := non_neg_integer(),
max_inflight := pos_integer(),
max_input_buffer_bytes := pos_integer()
}.
-type rpc_result() :: {ok, term()} | {error, binary()}.
-spec serve(inet:socket()) -> ok.
%% Entry point for one accepted TCP RPC connection: configure the socket
%% for flow-controlled delivery ({active, once}) and enter the receive loop
%% with a fresh, unauthenticated state.
serve(Socket) ->
    ok = inet:setopts(Socket, [{active, once}, {nodelay, true}, {keepalive, true}]),
    State = #{
        socket => Socket,
        buffer => <<>>,
        authenticated => false,
        inflight => 0,
        max_inflight => max_inflight(),
        max_input_buffer_bytes => max_input_buffer_bytes()
    },
    loop(State).
-spec loop(state()) -> ok.
%% Connection receive loop. Socket data is processed one {active, once}
%% delivery at a time (re-armed only after a successful decode), worker
%% results arrive as {rpc_response, ...} messages, and unknown messages
%% are ignored.
loop(#{socket := Socket} = State) ->
    receive
        {tcp, Socket, Data} ->
            case handle_tcp_data(Data, State) of
                {ok, NewState} ->
                    %% Re-arm delivery of the next TCP packet.
                    ok = inet:setopts(Socket, [{active, once}]),
                    loop(NewState);
                {stop, Reason, _NewState} ->
                    logger:debug("Gateway TCP RPC connection closed: ~p", [Reason]),
                    close_socket(Socket),
                    ok
            end;
        {tcp_closed, Socket} ->
            ok;
        {tcp_error, Socket, Reason} ->
            logger:warning("Gateway TCP RPC socket error: ~p", [Reason]),
            close_socket(Socket),
            ok;
        {rpc_response, RequestId, Result} ->
            %% A spawned worker finished; relay its result to the peer.
            NewState = handle_rpc_response(RequestId, Result, State),
            loop(NewState);
        _Other ->
            loop(State)
    end.
-spec handle_tcp_data(binary(), state()) -> {ok, state()} | {stop, term(), state()}.
%% Append newly received bytes to the pending buffer, enforcing the input
%% buffer cap before attempting to decode frames.
handle_tcp_data(Data, #{buffer := Buffer, max_input_buffer_bytes := Max} = State) ->
    Total = byte_size(Buffer) + byte_size(Data),
    if
        Total > Max ->
            _ = send_error_frame(State, protocol_error_binary(input_buffer_limit_exceeded)),
            {stop, input_buffer_limit_exceeded, State};
        true ->
            decode_tcp_frames(<<Buffer/binary, Data/binary>>, State)
    end.
-spec decode_tcp_frames(binary(), state()) -> {ok, state()} | {stop, term(), state()}.
%% Decode as many complete frames as the buffer holds; a framing error is
%% reported to the peer and terminates the connection.
decode_tcp_frames(Combined, State) ->
    case decode_frames(Combined, []) of
        {error, Reason} ->
            _ = send_error_frame(State, protocol_error_binary(Reason)),
            {stop, Reason, State};
        {ok, Frames, Remainder} ->
            process_frames(Frames, State#{buffer => Remainder})
    end.
-spec process_frames([map()], state()) -> {ok, state()} | {stop, term(), state()}.
%% Apply each decoded frame in order; once a frame stops the connection,
%% remaining frames are not processed.
process_frames(Frames, State) ->
    lists:foldl(
        fun
            (_Frame, {stop, _Reason, _State} = Stop) -> Stop;
            (Frame, {ok, AccState}) -> process_frame(Frame, AccState)
        end,
        {ok, State},
        Frames
    ).
-spec process_frame(map(), state()) -> {ok, state()} | {stop, term(), state()}.
%% Frame dispatch. "hello" is only valid once, before authentication;
%% "request" is only valid after it. ping/pong keepalives work in any
%% state; anything unrecognized terminates the connection.
process_frame(#{<<"type">> := <<"hello">>} = Frame, #{authenticated := false} = State) ->
    handle_hello_frame(Frame, State);
process_frame(#{<<"type">> := <<"hello">>}, State) ->
    _ = send_error_frame(State, <<"duplicate_hello">>),
    {stop, duplicate_hello, State};
process_frame(#{<<"type">> := <<"request">>} = Frame, #{authenticated := true} = State) ->
    handle_request_frame(Frame, State);
process_frame(#{<<"type">> := <<"request">>}, State) ->
    _ = send_error_frame(State, <<"unauthorized">>),
    {stop, unauthorized, State};
process_frame(#{<<"type">> := <<"ping">>}, State) ->
    _ = send_frame(State, #{<<"type">> => <<"pong">>}),
    {ok, State};
process_frame(#{<<"type">> := <<"pong">>}, State) ->
    {ok, State};
process_frame(#{<<"type">> := <<"close">>}, State) ->
    {stop, client_close, State};
process_frame(_Frame, State) ->
    _ = send_error_frame(State, <<"unknown_frame_type">>),
    {stop, unknown_frame_type, State}.
-spec handle_hello_frame(map(), state()) -> {ok, state()} | {stop, term(), state()}.
%% A hello frame must carry the exact protocol version and a binary
%% "authorization" value; anything else terminates the connection.
handle_hello_frame(Frame, State) ->
    case {maps:get(<<"protocol">>, Frame, undefined), maps:get(<<"authorization">>, Frame, undefined)} of
        {?PROTOCOL_VERSION, AuthHeader} when is_binary(AuthHeader) ->
            authorize_hello(AuthHeader, State);
        _ ->
            _ = send_error_frame(State, <<"invalid_hello">>),
            {stop, invalid_hello, State}
    end.
-spec authorize_hello(binary(), state()) -> {ok, state()} | {stop, term(), state()}.
%% Check the hello's authorization value against "Bearer <rpc_secret_key>"
%% in constant time. Success sends hello_ack (advertising the in-flight
%% budget and ping interval) and marks the state authenticated; a missing
%% secret is a server misconfiguration and closes the connection.
authorize_hello(AuthHeader, State) ->
    case fluxer_gateway_env:get(rpc_secret_key) of
        Secret when is_binary(Secret) ->
            Expected = <<"Bearer ", Secret/binary>>,
            case secure_compare(AuthHeader, Expected) of
                true ->
                    HelloAck = #{
                        <<"type">> => <<"hello_ack">>,
                        <<"protocol">> => ?PROTOCOL_VERSION,
                        <<"max_in_flight">> => maps:get(max_inflight, State),
                        <<"ping_interval_ms">> => 15000
                    },
                    _ = send_frame(State, HelloAck),
                    {ok, State#{authenticated => true}};
                false ->
                    _ = send_error_frame(State, <<"unauthorized">>),
                    {stop, unauthorized, State}
            end;
        _ ->
            _ = send_error_frame(State, <<"rpc_secret_not_configured">>),
            {stop, rpc_secret_not_configured, State}
    end.
-spec handle_request_frame(map(), state()) -> {ok, state()}.
%% Handle one request frame. Overloaded connections answer "overloaded"
%% without executing; well-formed requests (binary id, binary method, map
%% params) are executed in a spawned worker whose result comes back to the
%% loop as {rpc_response, ...}, and the in-flight counter is bumped.
handle_request_frame(Frame, State) ->
    RequestId = request_id_from_frame(Frame),
    Method = maps:get(<<"method">>, Frame, undefined),
    case should_reject_request(Method, State) of
        true ->
            _ =
                send_response_frame(
                    State,
                    RequestId,
                    false,
                    undefined,
                    <<"overloaded">>
                ),
            {ok, State};
        false ->
            case {Method, maps:get(<<"params">>, Frame, undefined)} of
                {MethodName, Params} when is_binary(RequestId), is_binary(MethodName), is_map(Params) ->
                    %% Execute off the connection process so slow methods
                    %% do not block frame processing.
                    Parent = self(),
                    _ = spawn(fun() ->
                        Parent ! {rpc_response, RequestId, execute_method(MethodName, Params)}
                    end),
                    {ok, increment_inflight(State)};
                _ ->
                    %% Malformed request: no worker spawned, inflight unchanged.
                    _ =
                        send_response_frame(
                            State,
                            RequestId,
                            false,
                            undefined,
                            <<"invalid_request">>
                        ),
                    {ok, State}
            end
    end.
-spec should_reject_request(term(), state()) -> boolean().
%% Dispatch methods may use the full in-flight budget; all other methods
%% are capped below it so dispatch traffic always has reserved slots.
should_reject_request(Method, #{inflight := Inflight, max_inflight := MaxInflight}) ->
    Limit =
        case is_dispatch_method(Method) of
            true -> MaxInflight;
            false -> non_dispatch_inflight_limit(MaxInflight)
        end,
    Inflight >= Limit.
-spec non_dispatch_inflight_limit(pos_integer()) -> pos_integer().
%% Budget available to non-dispatch requests: the total minus the reserved
%% dispatch slots, but never less than one.
non_dispatch_inflight_limit(MaxInflight) ->
    max(1, MaxInflight - dispatch_reserve_slots(MaxInflight)).
-spec dispatch_reserve_slots(pos_integer()) -> pos_integer().
%% Reserve a fixed fraction (1/divisor) of the in-flight budget for
%% dispatch traffic, always keeping at least one slot.
dispatch_reserve_slots(MaxInflight) ->
    erlang:max(1, MaxInflight div ?DEFAULT_DISPATCH_RESERVE_DIVISOR).
-spec is_dispatch_method(term()) -> boolean().
%% A method name is a dispatch method when it ends in ".dispatch"
%% (9 bytes). Non-binary terms are never dispatch methods.
is_dispatch_method(Method) when is_binary(Method) ->
    case byte_size(Method) of
        Size when Size >= 9 ->
            %% Negative length reads the last 9 bytes of the binary.
            binary:part(Method, Size, -9) =:= <<".dispatch">>;
        _TooShort ->
            false
    end;
is_dispatch_method(_NonBinary) ->
    false.
-spec execute_method(binary(), map()) -> rpc_result().
%% Run the RPC via the router, normalizing failures: thrown {error, Msg}
%% becomes a wire error binary, call timeouts become <<"timeout">>, and
%% any other exception is logged and reported as <<"internal_error">>.
execute_method(Method, Params) ->
    try
        Result = gateway_rpc_router:execute(Method, Params),
        {ok, Result}
    catch
        throw:{error, Message} ->
            {error, error_binary(Message)};
        exit:timeout ->
            {error, <<"timeout">>};
        %% gen_server:call timeouts exit with {timeout, {gen_server, call, _}}.
        exit:{timeout, _} ->
            {error, <<"timeout">>};
        Class:Reason ->
            logger:error(
                "Gateway TCP RPC method execution failed. method=~ts class=~p reason=~p",
                [Method, Class, Reason]
            ),
            {error, <<"internal_error">>}
    end.
-spec handle_rpc_response(binary(), rpc_result(), state()) -> state().
%% Relay a worker's result to the peer and release its in-flight slot.
handle_rpc_response(RequestId, Result, State) ->
    _ =
        case Result of
            {ok, Value} ->
                send_response_frame(State, RequestId, true, Value, undefined);
            {error, Error} ->
                send_response_frame(State, RequestId, false, undefined, Error)
        end,
    decrement_inflight(State).
-spec send_response_frame(state(), binary(), boolean(), term(), binary() | undefined) -> ok | {error, term()}.
%% Send a response frame: success carries "result", failure carries "error".
send_response_frame(State, RequestId, Ok, Result, Error) ->
    Base = #{<<"type">> => <<"response">>, <<"id">> => RequestId, <<"ok">> => Ok},
    Frame =
        case Ok of
            true -> Base#{<<"result">> => Result};
            false -> Base#{<<"error">> => Error}
        end,
    send_frame(State, Frame).
-spec send_error_frame(state(), binary()) -> ok | {error, term()}.
%% Send a connection-level error frame (protocol violations, auth failures).
send_error_frame(State, Error) ->
    send_frame(State, #{<<"type">> => <<"error">>, <<"error">> => Error}).
-spec send_frame(state(), map()) -> ok | {error, term()}.
%% Serialize the frame to the length-prefixed wire format and write it to
%% the connection socket.
send_frame(#{socket := Socket}, Frame) ->
    gen_tcp:send(Socket, encode_frame(Frame)).
-spec encode_frame(map()) -> binary().
%% Wire format: decimal payload byte-length, a newline, then the JSON payload.
encode_frame(Frame) ->
    Payload = iolist_to_binary(json:encode(Frame)),
    <<(integer_to_binary(byte_size(Payload)))/binary, $\n, Payload/binary>>.
-spec decode_frames(binary(), [map()]) -> {ok, [map()], binary()} | {error, term()}.
%% Pull complete "<length>\n<payload>" frames off the front of Buffer,
%% accumulating decoded maps (Acc is kept reversed). Returns the leftover
%% bytes of a partial frame so the caller can buffer them; a bad length
%% prefix or undecodable payload aborts with {error, Reason}.
decode_frames(Buffer, Acc) ->
    case binary:match(Buffer, <<"\n">>) of
        nomatch ->
            %% No complete header yet; keep everything buffered.
            {ok, lists:reverse(Acc), Buffer};
        {Pos, 1} ->
            LengthBin = binary:part(Buffer, 0, Pos),
            case parse_length(LengthBin) of
                {ok, Length} ->
                    HeaderSize = Pos + 1,
                    RequiredSize = HeaderSize + Length,
                    case byte_size(Buffer) >= RequiredSize of
                        false ->
                            %% Payload not fully received yet.
                            {ok, lists:reverse(Acc), Buffer};
                        true ->
                            Payload = binary:part(Buffer, HeaderSize, Length),
                            RestSize = byte_size(Buffer) - RequiredSize,
                            Rest = binary:part(Buffer, RequiredSize, RestSize),
                            case decode_payload(Payload) of
                                {ok, Frame} ->
                                    decode_frames(Rest, [Frame | Acc]);
                                {error, Reason} ->
                                    {error, Reason}
                            end
                    end;
                {error, Reason} ->
                    {error, Reason}
            end
    end.
-spec decode_payload(binary()) -> {ok, map()} | {error, term()}.
%% Decode one frame payload; the top-level JSON value must be an object.
%% Uses try/of/catch instead of the fragile `catch` form so a decode
%% exception can never be mistaken for a decoded value.
decode_payload(Payload) ->
    try json:decode(Payload) of
        Frame when is_map(Frame) -> {ok, Frame};
        _NotAnObject -> {error, invalid_json}
    catch
        _:_ -> {error, invalid_json}
    end.
-spec parse_length(binary()) -> {ok, non_neg_integer()} | {error, term()}.
%% Parse the decimal length prefix of a frame header. Rejects empty input,
%% non-numeric text, and lengths outside [0, ?MAX_FRAME_BYTES].
parse_length(<<>>) ->
    {error, invalid_frame_length};
parse_length(LengthBin) ->
    try binary_to_integer(LengthBin) of
        Length when Length >= 0, Length =< ?MAX_FRAME_BYTES ->
            {ok, Length};
        _OutOfRange ->
            {error, invalid_frame_length}
    catch
        _:_ ->
            {error, invalid_frame_length}
    end.
-spec secure_compare(binary(), binary()) -> boolean().
%% Constant-time equality for binaries. Lengths are compared first because
%% crypto:hash_equals/2 requires equal-sized inputs.
secure_compare(Left, Right) when is_binary(Left), is_binary(Right) ->
    byte_size(Left) =:= byte_size(Right) andalso crypto:hash_equals(Left, Right).
-spec request_id_from_frame(map()) -> binary().
%% Best-effort extraction of the request id: binaries pass through,
%% integers are rendered as decimal text, anything else becomes <<>>.
request_id_from_frame(Frame) ->
    normalize_request_id(maps:get(<<"id">>, Frame, <<>>)).

%% Coerce a raw "id" value to the binary form used on the wire.
normalize_request_id(Id) when is_binary(Id) -> Id;
normalize_request_id(Id) when is_integer(Id) -> integer_to_binary(Id);
normalize_request_id(_Other) -> <<>>.
-spec increment_inflight(state()) -> state().
%% Account for one more request being processed concurrently.
increment_inflight(#{inflight := Count} = State) ->
    maps:put(inflight, Count + 1, State).
-spec decrement_inflight(state()) -> state().
%% Release an in-flight slot, clamping at zero so a stray response can
%% never drive the counter negative.
decrement_inflight(#{inflight := Count} = State) when Count > 0 ->
    maps:put(inflight, Count - 1, State);
decrement_inflight(State) ->
    State.
-spec error_binary(term()) -> binary().
%% Coerce an arbitrary error term to a binary for the wire: binaries pass
%% through, charlists and atoms are converted, everything else is rendered
%% with ~p.
error_binary(Value) when is_binary(Value) -> Value;
error_binary(Value) when is_atom(Value) -> atom_to_binary(Value, utf8);
error_binary(Value) when is_list(Value) -> unicode:characters_to_binary(Value);
error_binary(Value) -> unicode:characters_to_binary(io_lib:format("~p", [Value])).
-spec protocol_error_binary(term()) -> binary().
%% Map the closed set of framing-error reasons to their wire strings;
%% any other reason is a programmer error (function_clause), as before.
protocol_error_binary(Reason) when
    Reason =:= invalid_json;
    Reason =:= invalid_frame_length;
    Reason =:= input_buffer_limit_exceeded
->
    atom_to_binary(Reason, utf8).
-spec close_socket(inet:socket()) -> ok.
%% Close the socket, swallowing any exception: the peer may already be gone.
close_socket(Socket) ->
    try gen_tcp:close(Socket) of
        _ -> ok
    catch
        _:_ -> ok
    end.
-spec max_inflight() -> pos_integer().
%% In-flight request budget: the configured gateway_http_rpc_max_concurrency
%% when it is a positive integer, otherwise the compiled-in default.
max_inflight() ->
    Configured = fluxer_gateway_env:get(gateway_http_rpc_max_concurrency),
    case is_integer(Configured) andalso Configured > 0 of
        true -> Configured;
        false -> ?DEFAULT_MAX_INFLIGHT
    end.
-spec max_input_buffer_bytes() -> pos_integer().
%% Cap on buffered, undecoded input bytes per connection: the configured
%% gateway_rpc_tcp_max_input_buffer_bytes when it is a positive integer,
%% otherwise the compiled-in default.
max_input_buffer_bytes() ->
    Configured = fluxer_gateway_env:get(gateway_rpc_tcp_max_input_buffer_bytes),
    case is_integer(Configured) andalso Configured > 0 of
        true -> Configured;
        false -> ?DEFAULT_MAX_INPUT_BUFFER_BYTES
    end.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% A frame encoded by encode_frame/1 round-trips through decode_frames/2.
decode_single_frame_test() ->
    Frame = #{<<"type">> => <<"ping">>},
    Encoded = encode_frame(Frame),
    ?assertEqual({ok, [Frame], <<>>}, decode_frames(Encoded, [])).
%% Two concatenated frames decode in order with nothing left over.
decode_multiple_frames_test() ->
    FrameA = #{<<"type">> => <<"ping">>},
    FrameB = #{<<"type">> => <<"pong">>},
    Encoded = <<(encode_frame(FrameA))/binary, (encode_frame(FrameB))/binary>>,
    ?assertEqual({ok, [FrameA, FrameB], <<>>}, decode_frames(Encoded, [])).
%% A truncated frame is buffered untouched until more bytes arrive.
decode_partial_frame_test() ->
    Frame = #{<<"type">> => <<"ping">>},
    Encoded = encode_frame(Frame),
    Prefix = binary:part(Encoded, 0, 3),
    ?assertEqual({ok, [], Prefix}, decode_frames(Prefix, [])).
%% A non-numeric length prefix is a framing error.
invalid_length_test() ->
    ?assertEqual({error, invalid_frame_length}, decode_frames(<<"x\n{}">>, [])).
%% Constant-time comparison: equal, unequal-content, unequal-length cases.
secure_compare_test() ->
    ?assert(secure_compare(<<"abc">>, <<"abc">>)),
    ?assertNot(secure_compare(<<"abc">>, <<"abd">>)),
    ?assertNot(secure_compare(<<"abc">>, <<"abcd">>)).
-endif.

View File

@ -1,108 +0,0 @@
%% Copyright (C) 2026 Fluxer Contributors
%%
%% This file is part of Fluxer.
%%
%% Fluxer is free software: you can redistribute it and/or modify
%% it under the terms of the GNU Affero General Public License as published by
%% the Free Software Foundation, either version 3 of the License, or
%% (at your option) any later version.
%%
%% Fluxer is distributed in the hope that it will be useful,
%% but WITHOUT ANY WARRANTY; without even the implied warranty of
%% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
%% GNU Affero General Public License for more details.
%%
%% You should have received a copy of the GNU Affero General Public License
%% along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
%% Owner of the RPC TCP listen socket. The blocking accept loop runs in a
%% separate linked process (accept_loop/1) so this gen_server stays
%% responsive and can respawn the acceptor if it crashes.
-module(gateway_rpc_tcp_server).
-behaviour(gen_server).
-export([start_link/0, accept_loop/1]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
%% Internal server state.
-type state() :: #{
    %% Socket returned by gen_tcp:listen/2; closed in terminate/2.
    listen_socket := inet:socket(),
    %% Pid of the currently linked acceptor process.
    acceptor_pid := pid(),
    %% TCP port the listener is bound to (kept for logging).
    port := inet:port_number()
}.
%% Start the listener gen_server, registered locally under this module name.
-spec start_link() -> {ok, pid()} | {error, term()}.
start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
%% Open the listen socket on the configured port and spawn the first
%% (linked) acceptor. trap_exit is set so an acceptor crash arrives as an
%% 'EXIT' message in handle_info/2 instead of taking this server down.
-spec init([]) -> {ok, state()} | {stop, term()}.
init([]) ->
    process_flag(trap_exit, true),
    Port = fluxer_gateway_env:get(rpc_tcp_port),
    case gen_tcp:listen(Port, listen_options()) of
        {ok, ListenSocket} ->
            %% Linked on purpose: handle_info/2 restarts it on abnormal exit.
            AcceptorPid = spawn_link(?MODULE, accept_loop, [ListenSocket]),
            logger:info("Gateway TCP RPC listener started on port ~p", [Port]),
            {ok, #{
                listen_socket => ListenSocket,
                acceptor_pid => AcceptorPid,
                port => Port
            }};
        {error, Reason} ->
            %% Failure to bind is fatal for this server; the supervisor
            %% decides whether to retry.
            {stop, {rpc_tcp_listen_failed, Port, Reason}}
    end.
%% No synchronous API is exposed; every call is acknowledged with ok and
%% the state is left untouched.
-spec handle_call(term(), gen_server:from(), state()) -> {reply, ok, state()}.
handle_call(_Call, _Caller, CurrentState) ->
    {reply, ok, CurrentState}.
%% Casts are not part of this server's protocol; they are dropped silently.
-spec handle_cast(term(), state()) -> {noreply, state()}.
handle_cast(_Cast, CurrentState) ->
    {noreply, CurrentState}.
%% Supervise the linked acceptor. normal/shutdown exits are expected (the
%% acceptor spawns its successor, then serves one connection and exits);
%% any other reason is logged and a fresh acceptor is spawned and linked.
%% NOTE(review): the successor spawned inside accept_loop/1 is linked to
%% the *previous* acceptor, not to this server, so after the first normal
%% handoff a later acceptor crash may not reach this clause — verify.
-spec handle_info(term(), state()) -> {noreply, state()}.
handle_info({'EXIT', Pid, Reason}, #{acceptor_pid := Pid, listen_socket := ListenSocket} = State) ->
    case Reason of
        normal ->
            {noreply, State};
        shutdown ->
            {noreply, State};
        _ ->
            logger:error("Gateway TCP RPC acceptor crashed: ~p", [Reason]),
            NewAcceptorPid = spawn_link(?MODULE, accept_loop, [ListenSocket]),
            {noreply, State#{acceptor_pid => NewAcceptorPid}}
    end;
%% All other messages (including 'EXIT' from non-acceptor links) are ignored.
handle_info(_Info, State) ->
    {noreply, State}.
%% Close the listen socket on shutdown; close errors are ignored so
%% teardown always completes.
-spec terminate(term(), state()) -> ok.
terminate(_Reason, #{listen_socket := Listen, port := BoundPort}) ->
    _ = (catch gen_tcp:close(Listen)),
    logger:info("Gateway TCP RPC listener stopped on port ~p", [BoundPort]),
    ok.
%% No state migration is required across code upgrades.
-spec code_change(term(), state(), term()) -> {ok, state()}.
code_change(_FromVsn, CurrentState, _Extra) ->
    {ok, CurrentState}.
%% Blocking accept loop. On each accepted connection it first spawns its
%% successor acceptor, then serves the connection in the current process,
%% so exactly one acceptor is waiting at any time.
%% NOTE(review): the successor is linked to this process rather than to
%% the gen_server that owns the listen socket — confirm that is intended.
-spec accept_loop(inet:socket()) -> ok.
accept_loop(ListenSocket) ->
    case gen_tcp:accept(ListenSocket) of
        {ok, Socket} ->
            _ = spawn_link(?MODULE, accept_loop, [ListenSocket]),
            gateway_rpc_tcp_connection:serve(Socket);
        {error, closed} ->
            %% Listen socket closed (normal shutdown) — stop quietly.
            ok;
        {error, Reason} ->
            %% Transient accept failure: log, back off briefly, retry.
            logger:error("Gateway TCP RPC accept failed: ~p", [Reason]),
            timer:sleep(200),
            accept_loop(ListenSocket)
    end.
%% Socket options for the RPC listener: binary data in passive mode (the
%% connection process does explicit recv) plus TCP tuning flags.
-spec listen_options() -> [gen_tcp:listen_option()].
listen_options() ->
    FramingOpts = [binary, {packet, raw}, {active, false}],
    TuningOpts = [
        {reuseaddr, true},
        {nodelay, true},
        {backlog, 4096},
        {keepalive, true}
    ],
    FramingOpts ++ TuningOpts.

View File

@ -316,6 +316,7 @@ is_fluxer_module(Module) ->
lists:prefix("gateway_http_", ModuleStr) orelse
lists:prefix("session", ModuleStr) orelse
lists:prefix("guild", ModuleStr) orelse
lists:prefix("passive_sync_registry", ModuleStr) orelse
lists:prefix("presence", ModuleStr) orelse
lists:prefix("push", ModuleStr) orelse
lists:prefix("push_dispatcher", ModuleStr) orelse

View File

@ -17,96 +17,72 @@
-module(rpc_client).
-export([
call/1,
call/2,
get_rpc_url/0,
get_rpc_url/1,
get_rpc_headers/0
]).
-export([call/1]).
-define(NATS_RPC_SUBJECT, <<"rpc.api">>).
-define(NATS_RPC_TIMEOUT_MS, 10000).
-type rpc_request() :: map().
-type rpc_response() :: {ok, map()} | {error, term()}.
-type rpc_options() :: map().
-spec call(rpc_request()) -> rpc_response().
call(Request) ->
call(Request, #{}).
case gateway_nats_rpc:get_connection() of
{ok, undefined} ->
{error, not_connected};
{ok, Conn} ->
do_request(Conn, Request);
{error, Reason} ->
{error, {not_connected, Reason}}
end.
-spec call(rpc_request(), rpc_options()) -> rpc_response().
call(Request, _Options) ->
Url = get_rpc_url(),
Headers = get_rpc_headers(),
Body = json:encode(Request),
case gateway_http_client:request(rpc, post, Url, Headers, Body) of
{ok, 200, _RespHeaders, RespBody} ->
handle_success_response(RespBody);
{ok, StatusCode, _RespHeaders, RespBody} ->
handle_error_response(StatusCode, RespBody);
%% Issue a request/reply RPC over NATS and decode the JSON response.
%% Timeout, no_responders and any transport failure all surface unchanged
%% as {error, Reason} — the previous dedicated {error, timeout} and
%% {error, no_responders} clauses were literal duplicates of the
%% catch-all and have been collapsed into it.
-spec do_request(nats:conn(), rpc_request()) -> rpc_response().
do_request(Conn, Request) ->
    Payload = iolist_to_binary(json:encode(Request)),
    case nats:request(Conn, ?NATS_RPC_SUBJECT, Payload, #{timeout => ?NATS_RPC_TIMEOUT_MS}) of
        {ok, {ResponseBin, _MsgOpts}} ->
            handle_nats_response(ResponseBin);
        {error, Reason} ->
            {error, Reason}
    end.
%% Extract the "data" payload from a successful RPC response body,
%% defaulting to an empty map when the key is absent.
-spec handle_success_response(binary()) -> rpc_response().
handle_success_response(RespBody) ->
    Decoded = json:decode(RespBody),
    {ok, maps:get(<<"data">>, Decoded, #{})}.
%% Wrap a non-200 RPC response as a tagged http_error tuple.
-spec handle_error_response(pos_integer(), binary()) -> {error, term()}.
handle_error_response(Status, Body) ->
    {error, {http_error, Status, Body}}.
%% Build the RPC endpoint URL from the configured API host.
-spec get_rpc_url() -> string().
get_rpc_url() ->
    get_rpc_url(fluxer_gateway_env:get(api_host)).
%% Build the RPC endpoint URL for a specific API host value.
-spec get_rpc_url(string() | binary()) -> string().
get_rpc_url(ApiHost) ->
    api_host_base_url(ApiHost) ++ "/_rpc".
%% Normalize an API host into a base URL: coerce to a charlist, ensure an
%% http(s) scheme, and drop any trailing slashes.
-spec api_host_base_url(string() | binary()) -> string().
api_host_base_url(ApiHost) ->
    strip_trailing_slash(normalize_api_host(ensure_string(ApiHost))).
%% Coerce a binary or charlist into a charlist.
-spec ensure_string(binary() | string()) -> string().
ensure_string(Bin) when is_binary(Bin) ->
    binary_to_list(Bin);
ensure_string(Chars) when is_list(Chars) ->
    Chars.
-spec normalize_api_host(string()) -> string().
normalize_api_host(Host) ->
Lower = string:lowercase(Host),
case {has_protocol_prefix(Lower, "http://"), has_protocol_prefix(Lower, "https://")} of
{true, _} -> Host;
{_, true} -> Host;
_ -> "http://" ++ Host
%% Decode a NATS RPC reply. A reply carrying the "_error" marker becomes
%% {error, {rpc_error, Status, Message}} (with 500/"unknown error" as
%% fallbacks); otherwise the "data" payload is returned, defaulting to an
%% empty map.
-spec handle_nats_response(iodata()) -> rpc_response().
handle_nats_response(ResponseBin) ->
    Decoded = json:decode(iolist_to_binary(ResponseBin)),
    case maps:get(<<"_error">>, Decoded, undefined) of
        undefined ->
            {ok, maps:get(<<"data">>, Decoded, #{})};
        _ErrorMarker ->
            Status = maps:get(<<"status">>, Decoded, 500),
            Message = maps:get(<<"message">>, Decoded, <<"unknown error">>),
            {error, {rpc_error, Status, Message}}
    end.
%% True when Str starts with Prefix (caller is expected to lowercase).
-spec has_protocol_prefix(string(), string()) -> boolean().
has_protocol_prefix(Str, Prefix) ->
    string:prefix(Str, Prefix) =/= nomatch.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% Drop every trailing "/" from a URL charlist; "" is returned unchanged.
%% Replaces a hand-rolled lists:last/1 + lists:droplast/1 recursion
%% (quadratic over a run of trailing slashes) with a single linear
%% string:trim/3 pass. Result is identical for all inputs.
-spec strip_trailing_slash(string()) -> string().
strip_trailing_slash(Url) ->
    string:trim(Url, trailing, "/").
%% A reply without the "_error" marker yields {ok, Data}.
handle_nats_response_ok_test() ->
    Response = json:encode(#{
        <<"type">> => <<"session">>,
        <<"data">> => #{<<"user">> => <<"test">>}
    }),
    ?assertEqual({ok, #{<<"user">> => <<"test">>}}, handle_nats_response(Response)).
%% Build the outgoing RPC headers: a bearer Authorization header using the
%% configured secret, passed through gateway_tracing for trace injection.
-spec get_rpc_headers() -> [{binary() | string(), binary() | string()}].
get_rpc_headers() ->
    Secret = fluxer_gateway_env:get(rpc_secret_key),
    Auth = {<<"Authorization">>, <<"Bearer ", Secret/binary>>},
    gateway_tracing:inject_rpc_headers([Auth]).
%% Replies flagged with "_error" surface status and message as rpc_error.
handle_nats_response_error_401_test() ->
    Response = json:encode(#{<<"_error">> => true, <<"status">> => 401, <<"message">> => <<"Unauthorized">>}),
    ?assertEqual({error, {rpc_error, 401, <<"Unauthorized">>}}, handle_nats_response(Response)).
%% Rate-limit (429) responses keep their status and message.
handle_nats_response_error_429_test() ->
    Response = json:encode(#{<<"_error">> => true, <<"status">> => 429, <<"message">> => <<"Rate limited">>}),
    ?assertEqual({error, {rpc_error, 429, <<"Rate limited">>}}, handle_nats_response(Response)).
%% Server-side (500) errors keep their status and message.
handle_nats_response_error_500_test() ->
    Response = json:encode(#{<<"_error">> => true, <<"status">> => 500, <<"message">> => <<"Internal error">>}),
    ?assertEqual({error, {rpc_error, 500, <<"Internal error">>}}, handle_nats_response(Response)).
-endif.

Some files were not shown because too many files have changed in this diff Show More