5 Commits
main ... main

Author SHA1 Message Date
e1786a62fb fix 2 2025-10-06 00:56:47 -04:00
6806bf5b69 New monitors 2025-10-06 00:52:44 -04:00
68f0912843 disable actions 2025-10-06 00:46:42 -04:00
e4aaee7ff0 new build 2025-10-06 00:27:44 -04:00
04aab1c460 docker 2025-10-06 00:13:07 -04:00
11 changed files with 1224 additions and 19 deletions

65
.dockerignore Normal file
View File

@@ -0,0 +1,65 @@
# Git
.git
.gitignore
# Documentation
README.md
CODE_OF_CONDUCT.md
LICENSE
*.md
# Node.js
node_modules
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# Environment files
.env
.env.local
.env.development.local
.env.test.local
.env.production.local
# IDE
.vscode
.idea
*.swp
*.swo
# OS
.DS_Store
Thumbs.db
# Logs
logs
*.log
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Coverage directory used by tools like istanbul
coverage
# Dependency directories
jspm_packages/
# Optional npm cache directory
.npm
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# Docker
Dockerfile*
docker-compose*
.dockerignore

32
Bot/Dockerfile Normal file
View File

@@ -0,0 +1,32 @@
# Dockerfile for the UptimeKuma Discord bot (Node.js).
# Builds a small Alpine-based image, installs production deps only,
# and runs the bot as a dedicated non-root user.
# Use Node.js 18 Alpine for smaller image size
FROM node:18-alpine
# Set working directory
WORKDIR /app
# Copy package files
# Copied before the app code so the npm-install layer is cached until
# package.json itself changes.
COPY package.json ./
# Install dependencies (npm install works without package-lock.json)
# NOTE(review): without a package-lock.json the build is not reproducible —
# consider committing a lock file and switching to `npm ci`.
RUN npm install --omit=dev --no-audit --no-fund && npm cache clean --force
# Copy application code
COPY . .
# Create non-root user for security
RUN addgroup -g 1001 -S nodejs && \
adduser -S discordbot -u 1001
# Change ownership of the app directory
RUN chown -R discordbot:nodejs /app
USER discordbot
# Expose port (if needed for health checks)
EXPOSE 3000
# Health check
# NOTE(review): this spawns a fresh `node` process that only prints a string,
# so it succeeds even when the bot process is dead — it is a placeholder, not
# a real liveness probe. Replace with a check against the running bot
# (e.g. a small HTTP endpoint on port 3000) once one exists.
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD node -e "console.log('Bot is running')" || exit 1
# Start the bot
CMD ["node", "index.js"]

View File

@@ -1,6 +1,15 @@
const { Client, GatewayIntentBits, EmbedBuilder } = require('discord.js'); const { Client, GatewayIntentBits, EmbedBuilder } = require('discord.js');
const axios = require('axios'); const axios = require('axios');
const config = require('./config.json'); // Load configuration from environment variables or config.json
const config = {
token: process.env.DISCORD_TOKEN || require('./config.json').token,
guildID: process.env.GUILD_ID || require('./config.json').guildID,
channelID: process.env.CHANNEL_ID || require('./config.json').channelID,
clientID: process.env.CLIENT_ID || require('./config.json').clientID,
updatetime: parseInt(process.env.UPDATE_TIME) || require('./config.json').updatetime,
backendUrl: process.env.BACKEND_URL || '<YOUR_BACKEND_URL>',
uptimeKumaUrl: process.env.UPTIME_KUMA_URL || '<YOUR_UPTIMEKUMA_URL>'
};
const client = new Client({ const client = new Client({
intents: [ intents: [
@@ -11,9 +20,9 @@ const client = new Client({
}); });
let monitorMessages = { let monitorMessages = {
Gaming: null, 'Web Services': null,
Discord: null, 'Infrastructure': null,
Web: null 'Network': null
}; };
client.once('ready', async () => { client.once('ready', async () => {
@@ -45,31 +54,76 @@ async function updateMessages() {
return; return;
} }
const response = await axios.get('<YOUR_BACKEND_URL>'); console.log(`Fetching from backend: ${config.backendUrl}`);
const response = await axios.get(config.backendUrl);
const monitors = response.data; const monitors = response.data;
console.log('Backend response status:', response.status);
console.log('Backend response data type:', typeof monitors);
console.log('Backend response data:', JSON.stringify(monitors).substring(0, 500) + '...');
// Check if the backend returned an error
if (monitors.error) {
console.error('Backend API error:', monitors.message);
return;
}
// Ensure monitors is an array
if (!Array.isArray(monitors)) {
console.error('Backend returned invalid data format:', typeof monitors);
console.error('Expected array, got:', monitors);
return;
}
console.log(`Fetched ${monitors.length} monitors from backend`);
console.log('Monitor names:', monitors.map(m => m.monitor_name));
const gamingMonitors = monitors.filter(monitor => [ const webServicesMonitors = monitors.filter(monitor => [
'Lobby', 'Skyblock', 'Survival', 'Creative', 'KitPvP', 'Factions', 'Prison', 'Skywars' 'Main Page', 'Pelican', 'Jellyfin', 'Proxmox', 'Jellyseerr',
'CPanel', 'WHMCS', 'Gitea', 'Nextcloud', 'Radarr', 'Sonarr',
'Prowlarr', 'Nginx Proxy Manager', 'Authentik', 'n8n', 'HA Proxy'
].includes(monitor.monitor_name)); ].includes(monitor.monitor_name));
const discordMonitors = monitors.filter(monitor => [ const infrastructureMonitors = monitors.filter(monitor => [
'Discord bot', 'Status bot' 'a01.pve.hrs', 'a05.pve.hrs', 'a07.pve.hrs', 'a08.pve.hrs', 'a09.pve.hrs',
'01.bw.hrs', '01.ga.hrs', '01.ha.hrs', '01.lh.hrs', '01.nc.hrs'
].includes(monitor.monitor_name)); ].includes(monitor.monitor_name));
const webMonitors = monitors.filter(monitor => [ const networkMonitors = monitors.filter(monitor => [
'web1', 'web2', 'web3' '01.sh.hrs', '01.rs.hrs', '01.pe.hrs', '01.pt.hrs', '01.rr.hrs',
'01.wn.hrs', '02.pe.hrs', '01.cp.hrs', '02.cp.hrs', 'a06'
].includes(monitor.monitor_name)); ].includes(monitor.monitor_name));
await sendMonitorsMessage(channel, 'Gaming', gamingMonitors); console.log(`Web Services: ${webServicesMonitors.length}, Infrastructure: ${infrastructureMonitors.length}, Network: ${networkMonitors.length}`);
await sendMonitorsMessage(channel, 'Discord', discordMonitors);
await sendMonitorsMessage(channel, 'Web', webMonitors); await sendMonitorsMessage(channel, 'Web Services', webServicesMonitors);
// await sendMonitorsMessage(channel, 'Infrastructure', infrastructureMonitors);
//await sendMonitorsMessage(channel, 'Network', networkMonitors);
// Send a test message if no monitors found
if (webServicesMonitors.length === 0) {
console.log('No monitors found, sending test message');
await channel.send('🔧 Bot is running but no monitors found. Check backend configuration.');
}
} catch (error) { } catch (error) {
console.error('Error:', error); console.error('Error updating messages:', error);
// If it's an axios error, log more details
if (error.response) {
console.error('Backend API error:', error.response.status, error.response.statusText);
console.error('Response data:', error.response.data);
} else if (error.request) {
console.error('No response from backend API:', error.request);
} else {
console.error('Request setup error:', error.message);
}
} }
} }
async function sendMonitorsMessage(channel, category, monitors) { async function sendMonitorsMessage(channel, category, monitors) {
console.log(`Processing ${category}: ${monitors.length} monitors`);
let description = monitors.map(monitor => { let description = monitors.map(monitor => {
let statusEmoji = ''; let statusEmoji = '';
switch (monitor.status) { switch (monitor.status) {
@@ -91,12 +145,20 @@ async function sendMonitorsMessage(channel, category, monitors) {
return `${statusEmoji} | ${monitor.monitor_name}`; return `${statusEmoji} | ${monitor.monitor_name}`;
}).join('\n'); }).join('\n');
console.log(`Generated description for ${category}: "${description}"`);
// Ensure description is not empty (Discord.js validation requirement)
if (!description || description.trim() === '') {
description = `No ${category.toLowerCase()} monitors found.`;
console.log(`Empty description, using fallback: "${description}"`);
}
let embed = new EmbedBuilder() let embed = new EmbedBuilder()
.setTitle(`${category} Monitor`) .setTitle(`${category} Monitor`)
.setColor('#0099ff') .setColor('#0099ff')
.setDescription(description) .setDescription(description)
.setFooter({ text: `Last updated: ${new Date().toLocaleString()}` }) .setFooter({ text: `Last updated: ${new Date().toLocaleString()}` })
.setURL('<YOUR_UPTIMEKUMA_URL>'); .setURL(config.uptimeKumaUrl);
try { try {

242
DOCKER_SETUP.md Normal file
View File

@@ -0,0 +1,242 @@
# Docker Setup for UptimeKuma Discord Bot
This guide will help you containerize and run the UptimeKuma Discord Bot using Docker and Docker Compose.
## Prerequisites
- Docker and Docker Compose installed on your system
- Uptime Kuma instance running and accessible
- Discord bot application created
## Quick Start
1. **Clone the repository**
```bash
git clone <repository-url>
cd UptimeKuma-DiscordBot
```
2. **Configure environment variables**
```bash
cp env.example .env
```
Edit the `.env` file with your actual values:
```env
# Discord Bot Configuration
DISCORD_TOKEN=your_discord_bot_token_here
GUILD_ID=your_discord_guild_id_here
CHANNEL_ID=your_discord_channel_id_here
CLIENT_ID=your_discord_client_id_here
UPDATE_TIME=30
# Uptime Kuma Configuration
UPTIME_KUMA_URL=https://your-uptime-kuma-instance.com/metrics
UPTIME_KUMA_API_KEY=your_uptime_kuma_api_key_here
```
3. **Build and start the containers**
```bash
docker-compose up -d
```
4. **Check the logs**
```bash
docker-compose logs -f
```
## Configuration Details
### Environment Variables
| Variable | Description | Required |
|----------|-------------|----------|
| `DISCORD_TOKEN` | Your Discord bot token | Yes |
| `GUILD_ID` | Discord server (guild) ID | Yes |
| `CHANNEL_ID` | Discord channel ID for status messages | Yes |
| `CLIENT_ID` | Discord bot client ID | Yes |
| `UPDATE_TIME` | Update frequency in seconds (default: 30) | No |
| `UPTIME_KUMA_URL` | Full URL to your Uptime Kuma metrics endpoint | Yes |
| `UPTIME_KUMA_API_KEY` | API key from Uptime Kuma dashboard | Yes |
### Services
#### Discord Bot (`discord-bot`)
- **Image**: Built from `Bot/Dockerfile`
- **Port**: Internal only (no external port)
- **Health Check**: Node.js process check
- **Dependencies**: `web-backend`
#### Web Backend (`web-backend`)
- **Image**: Built from `Web/Dockerfile`
- **Port**: 8080 (external) → 80 (internal)
- **Health Check**: HTTP endpoint check
- **Access**: `http://localhost:8080/back-end.php`
## Docker Commands
### Build and Start
```bash
# Build and start all services
docker-compose up -d
# Build and start with rebuild
docker-compose up -d --build
# Start specific service
docker-compose up -d discord-bot
```
### Management
```bash
# View logs
docker-compose logs -f
docker-compose logs -f discord-bot
docker-compose logs -f web-backend
# Stop services
docker-compose down
# Stop and remove volumes
docker-compose down -v
# Restart services
docker-compose restart
# Update services
docker-compose pull
docker-compose up -d
```
### Debugging
```bash
# Execute shell in running container
docker-compose exec discord-bot sh
docker-compose exec web-backend bash
# View container status
docker-compose ps
# View resource usage
docker stats
```
## Troubleshooting
### Common Issues
1. **Docker build fails with npm ci error**
```
npm error The `npm ci` command can only install with an existing package-lock.json
```
**Solution:** The Dockerfile uses `npm install` instead of `npm ci` to work without a lock file.
2. **Bot not connecting to Discord**
- Verify `DISCORD_TOKEN` is correct
- Check Discord bot permissions
- Ensure bot is invited to the server
3. **Backend not responding**
- Verify `UPTIME_KUMA_URL` and `UPTIME_KUMA_API_KEY`
- Check Uptime Kuma instance accessibility
- Test API endpoint manually: `curl http://localhost:8080/back-end.php`
4. **Permission errors**
- Ensure Docker has proper permissions
- Check file ownership in mounted volumes
5. **Container health checks failing**
- Check logs: `docker-compose logs`
- Verify all environment variables are set
- Test individual services
### Health Checks
Both services include health checks:
- **Discord Bot**: Checks if Node.js process is running
- **Web Backend**: Checks if PHP endpoint responds
View health status:
```bash
docker-compose ps
```
### Logs
Monitor logs in real-time:
```bash
# All services
docker-compose logs -f
# Specific service
docker-compose logs -f discord-bot
docker-compose logs -f web-backend
# Last 100 lines
docker-compose logs --tail=100
```
## Security Considerations
- Both containers run as non-root users
- Environment variables are used instead of hardcoded secrets
- Network isolation between services
- Health checks for monitoring
## Production Deployment
For production deployment:
1. **Use Docker secrets** for sensitive data
2. **Set up reverse proxy** (nginx/traefik) for SSL termination
3. **Configure log rotation** and monitoring
4. **Use Docker Swarm or Kubernetes** for orchestration
5. **Set up backup strategies** for persistent data
### Example Production docker-compose.yml
```yaml
version: '3.8'
services:
discord-bot:
# ... existing config ...
deploy:
replicas: 1
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
secrets:
- discord_token
- uptime_kuma_api_key
web-backend:
# ... existing config ...
deploy:
replicas: 2
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
secrets:
discord_token:
external: true
uptime_kuma_api_key:
external: true
```
## Support
For issues and questions:
- Check the logs first: `docker-compose logs -f`
- Verify configuration: `docker-compose config`
- Test individual services
- Review this documentation
## Contributing
When contributing Docker-related changes:
1. Test with `docker-compose up -d --build`
2. Verify health checks pass
3. Test with different environment configurations
4. Update this documentation if needed

303
GITEA_ACTIONS_SETUP.md Normal file
View File

@@ -0,0 +1,303 @@
# Gitea Actions Setup for Docker Build and Publish
This document explains how to set up Gitea Actions to automatically build and publish Docker images for the UptimeKuma Discord Bot project.
## Overview
The Gitea Actions workflows provide:
- **Automated Docker image building** for both Discord bot and web backend
- **Multi-platform support** (linux/amd64, linux/arm64)
- **Security scanning** with Trivy
- **Automated releases** with changelog generation
- **Docker Compose testing** to ensure everything works together
## Workflows
### 1. Docker Build (`docker-build.yml`)
**Triggers:**
- Push to `main` or `develop` branches
- Push of version tags (`v*`)
- Pull requests to `main`
**Features:**
- Builds both Discord bot and web backend images
- Multi-platform builds (AMD64, ARM64)
- Pushes to Docker Hub on non-PR events
- Generates Software Bill of Materials (SBOM)
- Security vulnerability scanning
- GitHub Actions cache for faster builds
### 2. Docker Compose Test (`docker-compose-test.yml`)
**Triggers:**
- Push to `main` or `develop` branches
- Pull requests to `main`
**Features:**
- Tests the complete Docker Compose setup
- Validates Dockerfile syntax with hadolint
- Checks service health and connectivity
- Validates environment variable configuration
### 3. Release (`release.yml`)
**Triggers:**
- Push of version tags (`v*`)
**Features:**
- Creates GitHub releases automatically
- Generates changelog from git commits
- Updates release notes with Docker image information
## Required Secrets
Configure these secrets in your Gitea repository settings:
### Docker Hub Secrets
- `DOCKER_USERNAME`: Your Docker Hub username
- `DOCKER_PASSWORD`: Your Docker Hub access token (not password)
### How to Get Docker Hub Token
1. Go to [Docker Hub](https://hub.docker.com/)
2. Sign in to your account
3. Go to Account Settings → Security
4. Create a new access token
5. Copy the token and use it as `DOCKER_PASSWORD`
## Setup Instructions
### 1. Enable Gitea Actions
1. Go to your Gitea repository
2. Navigate to **Settings** → **Actions**
3. Enable **Actions** if not already enabled
### 2. Configure Secrets
1. Go to **Settings** → **Secrets**
2. Add the following secrets:
| Secret Name | Description | Example |
|-------------|-------------|---------|
| `DOCKER_USERNAME` | Docker Hub username | `myusername` |
| `DOCKER_PASSWORD` | Docker Hub access token | `dckr_pat_...` |
### 3. Test the Workflow
1. Push a commit to the `main` branch
2. Go to **Actions** tab in your repository
3. Watch the workflow execution
4. Check the logs for any errors
## Workflow Details
### Docker Build Workflow
```yaml
# Key features:
- Multi-platform builds (linux/amd64, linux/arm64)
- Automatic tagging based on git refs
- Build cache for faster subsequent builds
- Security scanning with Trivy
- SBOM generation for supply chain security
```
**Image Tags Generated:**
- `latest` (for main branch)
- `main` (branch name)
- `v1.0.0` (version tags)
- `1.0` (major.minor)
- `1` (major)
### Docker Compose Test Workflow
```yaml
# Key features:
- Validates Docker Compose configuration
- Tests service startup and health checks
- Lints Dockerfiles with hadolint
- Validates environment variable usage
```
### Release Workflow
```yaml
# Key features:
- Automatic release creation on version tags
- Changelog generation from git commits
- Release notes with Docker image information
- Quick start instructions included
```
## Usage Examples
### Creating a Release
1. **Create and push a version tag:**
```bash
git tag v1.0.0
git push origin v1.0.0
```
2. **The workflow will:**
- Build Docker images with version tags
- Create a GitHub release
- Generate changelog
- Publish images to Docker Hub
### Testing Changes
1. **Create a pull request:**
```bash
git checkout -b feature/new-feature
git commit -m "Add new feature"
git push origin feature/new-feature
```
2. **The workflow will:**
- Build images (but not push)
- Run security scans
- Test Docker Compose setup
- Validate configuration
## Monitoring and Troubleshooting
### Viewing Workflow Runs
1. Go to **Actions** tab in your repository
2. Click on a workflow run to see details
3. Expand job steps to view logs
### Common Issues
#### 1. Docker Hub Authentication Failed
```
Error: Cannot perform an interactive login from a non TTY device
```
**Solution:** Check that `DOCKER_USERNAME` and `DOCKER_PASSWORD` secrets are correctly set.
#### 2. Build Cache Issues
```
Error: failed to solve: failed to compute cache key
```
**Solution:** The workflow will automatically retry without cache on failure.
#### 3. Security Scan Failures
```
Error: Trivy found vulnerabilities
```
**Solution:** Review the security scan results and update base images if needed.
### Workflow Status Badge
Add this to your README.md to show workflow status:
```markdown
[![Docker Build](https://gitea.example.com/api/badges/username/repo/status.svg)](https://gitea.example.com/username/repo/actions)
```
## Advanced Configuration
### Custom Registry
To use a different registry than Docker Hub:
1. **Update environment variables in workflows:**
```yaml
env:
REGISTRY: your-registry.com
```
2. **Update image names:**
```yaml
env:
IMAGE_NAME_DISCORD_BOT: your-org/uptime-kuma-discord-bot
IMAGE_NAME_WEB_BACKEND: your-org/uptime-kuma-web-backend
```
### Build Arguments
To pass build arguments to Docker builds:
```yaml
- name: Build and push Discord Bot image
uses: docker/build-push-action@v5
with:
context: ./Bot
file: ./Bot/Dockerfile
build-args: |
NODE_VERSION=18
BUILD_DATE=${{ github.event.head_commit.timestamp }}
```
### Matrix Builds
To build for multiple Node.js versions:
```yaml
strategy:
matrix:
node-version: [16, 18, 20]
```
## Security Considerations
### Secrets Management
- Never commit secrets to the repository
- Use Gitea's built-in secrets management
- Rotate Docker Hub tokens regularly
- Use least-privilege access tokens
### Image Security
- Base images are automatically scanned for vulnerabilities
- SBOMs are generated for supply chain transparency
- Multi-platform builds ensure compatibility
- Non-root users in containers for security
### Workflow Security
- Workflows run in isolated environments
- Secrets are encrypted and only available during execution
- No secrets are logged or exposed in outputs
## Best Practices
### 1. Version Tagging
- Use semantic versioning (`v1.0.0`)
- Tag releases consistently
- Include meaningful commit messages
### 2. Branch Strategy
- Use `main` for production releases
- Use `develop` for integration testing
- Use feature branches for new development
### 3. Monitoring
- Monitor workflow execution regularly
- Set up notifications for failures
- Review security scan results
### 4. Maintenance
- Keep base images updated
- Review and update workflow dependencies
- Monitor for deprecated actions
## Support
For issues with Gitea Actions:
1. Check the workflow logs first
2. Verify secrets are correctly configured
3. Ensure Gitea Actions is enabled
4. Check Gitea version compatibility
5. Review this documentation
## Contributing
When contributing to the workflows:
1. Test changes in a fork first
2. Update documentation as needed
3. Follow the existing workflow patterns
4. Ensure backward compatibility
5. Add appropriate error handling

39
Web/Dockerfile Normal file
View File

@@ -0,0 +1,39 @@
# Dockerfile for the PHP web backend that proxies the Uptime Kuma
# /metrics endpoint (serves back-end.php via Apache).
# Use PHP 8.2 Apache for web backend
FROM php:8.2-apache
# Install required PHP extensions
RUN apt-get update && apt-get install -y \
libcurl4-openssl-dev \
pkg-config \
libssl-dev \
&& docker-php-ext-install curl \
&& rm -rf /var/lib/apt/lists/*
# Enable Apache mod_rewrite
RUN a2enmod rewrite
# Set working directory
WORKDIR /var/www/html
# Copy PHP application
COPY back-end.php .
# Create non-root user for security
RUN groupadd -g 1001 phpapp && \
useradd -r -u 1001 -g phpapp phpapp
# Change ownership
RUN chown -R phpapp:phpapp /var/www/html
# Switch to non-root user
# NOTE(review): running apache2-foreground as an unprivileged user likely
# breaks startup — a non-root process cannot bind port 80 (<1024) and the
# stock image writes pid/lock files to root-owned paths. The base image
# already drops worker processes to www-data; verify the container actually
# starts, or reconfigure Apache to listen on a port >1024.
USER phpapp
# Expose port 80
EXPOSE 80
# Health check
# NOTE(review): libcurl4-openssl-dev installs build libraries, not the `curl`
# CLI — confirm the `curl` binary exists in this image or the health check
# will always fail.
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD curl -f http://localhost/back-end.php || exit 1
# Start Apache
CMD ["apache2-foreground"]

View File

@@ -1,8 +1,8 @@
<?php <?php
// Define the URL and API key for the metrics endpoint // Define the URL and API key for the metrics endpoint
$url = '<yourURL>/metrics'; //your URL $url = $_ENV['UPTIME_KUMA_URL'] ?? '<yourURL>/metrics'; //your URL
// Retrieve an API key from the UptimeKuma dashboard here <yourURL>/settings/api-keys // Retrieve an API key from the UptimeKuma dashboard here <yourURL>/settings/api-keys
$password = '***************'; //your API key $password = $_ENV['UPTIME_KUMA_API_KEY'] ?? '***************'; //your API key
$username = ''; //leave empty $username = ''; //leave empty
// Initialize a new cURL session // Initialize a new cURL session
@@ -53,10 +53,26 @@ if ($http_status == 200) {
// Set the content type of the response to JSON // Set the content type of the response to JSON
header('Content-Type: application/json'); header('Content-Type: application/json');
// Log debug information
error_log("UptimeKuma API Response Status: $http_status");
error_log("Parsed monitors count: " . count($data));
// Output the data in JSON format // Output the data in JSON format
echo json_encode($data, JSON_PRETTY_PRINT); echo json_encode($data, JSON_PRETTY_PRINT);
} else { } else {
// Log the error
error_log("UptimeKuma API Error: HTTP Status Code: $http_status");
error_log("Response: " . substr($response, 0, 500));
// Set the content type of the response to JSON
header('Content-Type: application/json');
// Output an error message if the request was not successful // Output an error message if the request was not successful
echo "Failed to fetch data. HTTP Status Code: $http_status"; echo json_encode([
'error' => true,
'message' => "Failed to fetch data. HTTP Status Code: $http_status",
'data' => []
], JSON_PRETTY_PRINT);
} }
?> ?>

330
back-end.php.json Normal file
View File

@@ -0,0 +1,330 @@
[
{
"monitor_name": "a06",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.10.16",
"monitor_port": "null",
"status": 0
},
{
"monitor_name": "Main Page",
"monitor_type": "http",
"monitor_url": "https:\/\/hudsonriggs.systems",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "Pelican",
"monitor_type": "http",
"monitor_url": "https:\/\/pelican.hudsonriggs.systems",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "Jellyfin",
"monitor_type": "http",
"monitor_url": "https:\/\/jellyfin.hudsonriggs.systems",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "Proxmox",
"monitor_type": "http",
"monitor_url": "https:\/\/proxmox.hudsonriggs.systems",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "Jellyseerr",
"monitor_type": "http",
"monitor_url": "https:\/\/jellyseerr.hudsonriggs.systems",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "CPanel",
"monitor_type": "http",
"monitor_url": "https:\/\/cpanel.hudsonriggs.systems",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "WHMCS",
"monitor_type": "http",
"monitor_url": "https:\/\/portal.hudsonriggs.systems\/",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "Gitea",
"monitor_type": "http",
"monitor_url": "https:\/\/git.hudsonriggs.systems\/",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "Nextcloud",
"monitor_type": "http",
"monitor_url": "https:\/\/cloud.hudsonriggs.systems",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "Radarr",
"monitor_type": "http",
"monitor_url": "https:\/\/radarr.hudsonriggs.systems",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "Sonarr",
"monitor_type": "http",
"monitor_url": "https:\/\/sonarr.hudsonriggs.systems",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "Prowlarr",
"monitor_type": "http",
"monitor_url": "https:\/\/prowlarr.hudsonriggs.systems",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "01.sh.hrs",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.30.116",
"monitor_port": "null",
"status": 0
},
{
"monitor_name": "01.rs.hrs",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.30.127",
"monitor_port": "null",
"status": 0
},
{
"monitor_name": "LXC",
"monitor_type": "group",
"monitor_url": "https:\/\/",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "VM",
"monitor_type": "group",
"monitor_url": "https:\/\/",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "01.bw.hrs",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.30.120",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "01.ga.hrs",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.30.113",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "01.ha.hrs",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.30.123",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "01.lh.hrs",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.30.137",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "01.nc.hrs",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.30.117",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "01.oo.hrs",
"monitor_type": "ping",
"monitor_url": "",
"monitor_hostname": "192.168.30.126",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "01.pe.hrs",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.30.110",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "01.pt.hrs",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.30.115",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "01.rr.hrs",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.30.121",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "01.wn.hrs",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.20.124",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "02.pe.hrs",
"monitor_type": "ping",
"monitor_url": "192.168.30.111",
"monitor_hostname": "192.168.30.111",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "01.cp.hrs",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.20.108",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "02.cp.hrs",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.254.6",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "PVE",
"monitor_type": "group",
"monitor_url": "https:\/\/",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "a01.pve.hrs",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.10.11",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "a05.pve.hrs",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.10.15",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "a07.pve.hrs",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.10.17",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "a08.pve.hrs",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.10.18",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "a09.pve.hrs",
"monitor_type": "ping",
"monitor_url": "https:\/\/",
"monitor_hostname": "192.168.10.19",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "Hosts",
"monitor_type": "group",
"monitor_url": "https:\/\/",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "Nginx Proxy Manager",
"monitor_type": "http",
"monitor_url": "https:\/\/ngpm.hudsonriggs.systems",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "Authentik",
"monitor_type": "http",
"monitor_url": "https:\/\/authentik.hudsonriggs.systems",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "n8n",
"monitor_type": "http",
"monitor_url": "https:\/\/n8n.hudsonriggs.systems",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
},
{
"monitor_name": "HA Proxy",
"monitor_type": "http",
"monitor_url": "https:\/\/haproxy.hudsonriggs.systems",
"monitor_hostname": "null",
"monitor_port": "null",
"status": 1
}
]

View File

@@ -0,0 +1,51 @@
# Docker Compose stack: Discord status bot + PHP backend proxying the
# Uptime Kuma /metrics endpoint.
# (Indentation reconstructed — the scraped diff view flattened this YAML,
# and YAML indentation is semantically significant.)
services:
  # Discord Bot Service
  discord-bot:
    build:
      context: ./Bot
      dockerfile: Dockerfile
    container_name: uptime-kuma-discord-bot
    restart: unless-stopped
    environment:
      - DISCORD_TOKEN=${DISCORD_TOKEN}
      - GUILD_ID=${GUILD_ID}
      - CHANNEL_ID=${CHANNEL_ID}
      - CLIENT_ID=${CLIENT_ID}
      - UPDATE_TIME=${UPDATE_TIME:-30}
      # Service-to-service URL, resolved over the bridge network below.
      - BACKEND_URL=http://web-backend:80/back-end.php
      - UPTIME_KUMA_URL=${UPTIME_KUMA_URL}
    depends_on:
      - web-backend
    networks:
      - uptime-kuma-network
    healthcheck:
      # NOTE(review): this spawns a fresh node process that only logs, so it
      # always passes — it does not verify the bot itself.
      test: ["CMD", "node", "-e", "console.log('Bot is running')"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
  # PHP Web Backend Service
  web-backend:
    build:
      context: ./Web
      dockerfile: Dockerfile
    container_name: uptime-kuma-web-backend
    restart: unless-stopped
    environment:
      - UPTIME_KUMA_URL=${UPTIME_KUMA_URL}
      - UPTIME_KUMA_API_KEY=${UPTIME_KUMA_API_KEY}
    ports:
      - "8080:80"
    networks:
      - uptime-kuma-network
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost/back-end.php"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s
networks:
  uptime-kuma-network:
    driver: bridge

52
docker-compose.yml Normal file
View File

@@ -0,0 +1,52 @@
# docker-compose.yml — Discord status bot + PHP backend proxying the
# Uptime Kuma /metrics endpoint.
# (Indentation reconstructed — the scraped diff view flattened this YAML,
# and YAML indentation is semantically significant.)
services:
  # Discord Bot Service
  discord-bot:
    build:
      context: ./Bot
      dockerfile: Dockerfile
    container_name: uptime-kuma-discord-bot
    restart: unless-stopped
    environment:
      - DISCORD_TOKEN=${DISCORD_TOKEN}
      - GUILD_ID=${GUILD_ID}
      - CHANNEL_ID=${CHANNEL_ID}
      - CLIENT_ID=${CLIENT_ID}
      - UPDATE_TIME=${UPDATE_TIME:-30}
      # Service-to-service URL, resolved over the bridge network below.
      - BACKEND_URL=http://web-backend:80/back-end.php
      - UPTIME_KUMA_URL=${UPTIME_KUMA_URL}
    depends_on:
      - web-backend
    networks:
      - uptime-kuma-network
    healthcheck:
      # NOTE(review): this spawns a fresh node process that only logs, so it
      # always passes — it does not verify the bot itself.
      test: ["CMD", "node", "-e", "console.log('Bot is running')"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
  # PHP Web Backend Service
  web-backend:
    build:
      context: ./Web
      dockerfile: Dockerfile
    container_name: uptime-kuma-web-backend
    restart: unless-stopped
    environment:
      - UPTIME_KUMA_URL=${UPTIME_KUMA_URL}
      - UPTIME_KUMA_API_KEY=${UPTIME_KUMA_API_KEY}
    ports:
      - "8080:80"
    networks:
      - uptime-kuma-network
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost/back-end.php"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s
networks:
  uptime-kuma-network:
    driver: bridge

13
env.example Normal file
View File

@@ -0,0 +1,13 @@
# Discord Bot Configuration
DISCORD_TOKEN=your_discord_bot_token_here
GUILD_ID=your_discord_guild_id_here
CHANNEL_ID=your_discord_channel_id_here
CLIENT_ID=your_discord_client_id_here
UPDATE_TIME=30
# Uptime Kuma Configuration
UPTIME_KUMA_URL=https://your-uptime-kuma-instance.com/metrics
UPTIME_KUMA_API_KEY=your_uptime_kuma_api_key_here
# Optional: Override backend URL (defaults to internal Docker network)
# BACKEND_URL=http://web-backend:80/back-end.php