diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000..e2c608c2
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,5 @@
+.git
+.github
+*.md
+tests/
+docs/
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index df24dc47..1b1e0d2c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -49,5 +49,4 @@ coverage
 *.sw?
 *.tsbuildinfo
-.vite/*
-
+.vite/*
\ No newline at end of file
diff --git a/README.md b/README.md
index bbd8669d..443f48ff 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 > 🎉 I'm excited to announce that WGDashboard is officially listed on DigitalOcean's Marketplace! For more information, please visit [Host WGDashboard & WireGuard with DigitalOcean](https://docs.wgdashboard.dev/host-wgdashboard-wireguard-with-digitalocean.html) for more information!
 
 > [!NOTE]
-> **Help Wanted 🎉**: Localizing WGDashboard to other languages! If you're willing to help, please visit https://github.com/donaldzou/WGDashboard/issues/397. Many thanks!
+> **Help Wanted 🎉**: Localizing WGDashboard to other languages! If you're willing to help, please visit https://github.com/WGDashboard/WGDashboard/issues/397. Many thanks!
@@ -22,7 +22,7 @@
-
+
diff --git a/assets/legacy/Dockerfile-alpine-old b/assets/legacy/Dockerfile-alpine-old
new file mode 100644
index 00000000..555e0c3a
--- /dev/null
+++ b/assets/legacy/Dockerfile-alpine-old
@@ -0,0 +1,76 @@
+FROM golang:1.24 AS awg-go
+
+RUN git clone https://github.com/WGDashboard/amneziawg-go /awg
+WORKDIR /awg
+RUN go mod download && \
+ go mod verify && \
+ go build -ldflags '-linkmode external -extldflags "-fno-PIC -static"' -v -o /usr/bin
+
+FROM alpine:latest AS awg-tools
+
+RUN apk update && apk add --no-cache \
+ make git build-base linux-headers \
+ && git clone https://github.com/WGDashboard/amneziawg-tools \
+ && cd amneziawg-tools/src \
+ && make \
+ && chmod +x wg*
+
+FROM alpine:latest
+LABEL maintainer="dselen@nerthus.nl"
+
+RUN apk update && apk add --no-cache \
+ iproute2 iptables bash curl wget unzip procps sudo \
+ tzdata wireguard-tools python3 py3-psutil py3-bcrypt openresolv
+
+COPY --from=awg-go /usr/bin/amneziawg-go /usr/bin/amneziawg-go
+COPY --from=awg-tools /amneziawg-tools/src/wg /usr/bin/awg
+COPY --from=awg-tools /amneziawg-tools/src/wg-quick/linux.bash /usr/bin/awg-quick
+
+# Declaring environment variables, change Peernet to an address you like, standard is a 24 bit subnet.
+ARG wg_net="10.0.0.1" \
+ wg_port="51820"
+
+# The following ENV variables are changeable at container runtime because /entrypoint.sh handles them. See compose.yaml for more info.
+ENV TZ="Europe/Amsterdam" \
+ global_dns="9.9.9.9" \
+ wgd_port="10086" \
+ public_ip=""
+
+# WGDASH works much like wg_net does as an ARG, but entrypoint.sh needs it at runtime, so it is exported as an environment variable.
+ENV WGDASH=/opt/wgdashboard
+
+# Doing WireGuard Dashboard installation measures. Modify the git clone command to get the preferred version, with a specific branch for example.
+RUN mkdir /data \
+ && mkdir /configs \
+ && mkdir -p ${WGDASH}/src \
+ && mkdir -p /etc/amnezia/amneziawg
+COPY ./src ${WGDASH}/src
+
+# Generate basic WireGuard interface. Echoing the WireGuard interface config for readability, adjust if you want it for efficiency.
+# Also setting the pipefail option, verbose: https://github.com/hadolint/hadolint/wiki/DL4006.
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+RUN out_adapt=$(ip -o -4 route show to default | awk '{print $NF}') \
+ && echo -e "[Interface]\n\
+Address = ${wg_net}/24\n\
+PrivateKey =\n\
+PostUp = iptables -t nat -I POSTROUTING 1 -s ${wg_net}/24 -o ${out_adapt} -j MASQUERADE\n\
+PostUp = iptables -I FORWARD -i wg0 -o wg0 -j DROP\n\
+PreDown = iptables -t nat -D POSTROUTING -s ${wg_net}/24 -o ${out_adapt} -j MASQUERADE\n\
+PreDown = iptables -D FORWARD -i wg0 -o wg0 -j DROP\n\
+ListenPort = ${wg_port}\n\
+SaveConfig = true\n\
+DNS = ${global_dns}" > /configs/wg0.conf.template \
+ && chmod 600 /configs/wg0.conf.template
+
+# Defining a way for Docker to check the health of the container. In this case: checking the gunicorn process.
+HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
+ CMD sh -c 'pgrep gunicorn > /dev/null && pgrep tail > /dev/null' || exit 1
+
+# Copy the basic entrypoint.sh script.
+COPY ./docker/entrypoint.sh /entrypoint.sh
+
+# Exposing the default WireGuard Dashboard port for web access.
+EXPOSE 10086
+WORKDIR $WGDASH
+
+ENTRYPOINT ["/bin/bash", "/entrypoint.sh"]
\ No newline at end of file
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 8e659de3..c9deca46 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,53 +1,103 @@
-FROM golang:1.24 AS awg-go
+#
+# AWG GOLANG BUILDING STAGE
+# Base: Alpine
+#
-RUN git clone https://github.com/WGDashboard/amneziawg-go /awg
-WORKDIR /awg
+FROM golang:1.25-alpine AS awg-go
+
+RUN apk add --no-cache \
+ git \
+ gcc \
+ musl-dev
+
+# Standard working directory for WGDashboard
+RUN mkdir -p /workspace && \
+ git clone https://github.com/WGDashboard/amneziawg-go /workspace/awg
+
+ENV CGO_ENABLED=1
+
+WORKDIR /workspace/awg
RUN go mod download && \
go mod verify && \
go build -ldflags '-linkmode external -extldflags "-fno-PIC -static"' -v -o /usr/bin
-
+#
+# AWG TOOLS BUILDING STAGE
+# Base: Alpine
+#
FROM alpine:latest AS awg-tools
-RUN apk update && apk add --no-cache \
- make git build-base linux-headers \
- && git clone https://github.com/WGDashboard/amneziawg-tools \
- && cd amneziawg-tools/src \
- && make \
- && chmod +x wg*
+RUN apk add --no-cache \
+ make \
+ git \
+ build-base \
+ linux-headers \
+ ca-certificates
-FROM alpine:latest
+RUN mkdir -p /workspace && \
+ git clone https://github.com/WGDashboard/amneziawg-tools /workspace/awg-tools
+
+WORKDIR /workspace/awg-tools/src
+RUN make && chmod +x wg*
+
+#
+# PIP DEPENDENCY BUILDING
+# Base: Alpine
+#
+FROM python:3.13-alpine AS pip-builder
+
+
+RUN apk add --no-cache \
+ build-base \
+ pkgconfig \
+ python3-dev \
+ libffi-dev \
+ linux-headers \
+ && mkdir -p /opt/wgdashboard/src \
+ && python3 -m venv /opt/wgdashboard/src/venv
+
+COPY ./src/requirements.txt /opt/wgdashboard/src
+RUN . /opt/wgdashboard/src/venv/bin/activate && \
+ pip3 install --upgrade pip && \
+ pip3 install -r /opt/wgdashboard/src/requirements.txt
+
+#
+# WGDashboard RUNNING STAGE
+# Base: Alpine
+#
+FROM python:3.13-alpine AS final
LABEL maintainer="dselen@nerthus.nl"
-RUN apk update && apk add --no-cache \
- iproute2 iptables bash curl wget unzip procps sudo \
- tzdata wireguard-tools python3 py3-psutil py3-bcrypt openresolv
+# Install only the runtime dependencies
+RUN apk add --no-cache \
+ iproute2 iptables \
+ bash curl \
+ wget unzip \
+ procps sudo \
+ tzdata wireguard-tools \
+ openresolv openrc
+# Copy only the final binaries from the builders
COPY --from=awg-go /usr/bin/amneziawg-go /usr/bin/amneziawg-go
-COPY --from=awg-tools /amneziawg-tools/src/wg /usr/bin/awg
-COPY --from=awg-tools /amneziawg-tools/src/wg-quick/linux.bash /usr/bin/awg-quick
+COPY --from=awg-tools /workspace/awg-tools/src/wg /usr/bin/awg
+COPY --from=awg-tools /workspace/awg-tools/src/wg-quick/linux.bash /usr/bin/awg-quick
-# Declaring environment variables, change Peernet to an address you like, standard is a 24 bit subnet.
-ARG wg_net="10.0.0.1" \
- wg_port="51820"
-
-# Following ENV variables are changable on container runtime because /entrypoint.sh handles that. See compose.yaml for more info.
+# Environment variables
+ARG wg_net="10.0.0.1"
+ARG wg_port="51820"
ENV TZ="Europe/Amsterdam" \
global_dns="9.9.9.9" \
wgd_port="10086" \
- public_ip=""
+ public_ip="" \
+ WGDASH=/opt/wgdashboard
-# Using WGDASH -- like wg_net functionally as a ARG command. But it is needed in entrypoint.sh so it needs to be exported as environment variable.
-ENV WGDASH=/opt/wgdashboard
+# Create directories
+RUN mkdir -p /data /configs ${WGDASH}/src /etc/amnezia/amneziawg
-# Doing WireGuard Dashboard installation measures. Modify the git clone command to get the preferred version, with a specific branch for example.
-RUN mkdir /data \
- && mkdir /configs \
- && mkdir -p ${WGDASH}/src \
- && mkdir -p /etc/amnezia/amneziawg
+# Copy app source and prebuilt venv only (no pip cache)
COPY ./src ${WGDASH}/src
+COPY --from=pip-builder /opt/wgdashboard/src/venv /opt/wgdashboard/src/venv
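+# Note: the venv is copied to the same absolute path it was created at in the pip-builder stage,
+# since virtual environments embed absolute paths and would break if relocated.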
-# Generate basic WireGuard interface. Echoing the WireGuard interface config for readability, adjust if you want it for efficiency.
-# Also setting the pipefail option, verbose: https://github.com/hadolint/hadolint/wiki/DL4006.
+# WireGuard interface template
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
RUN out_adapt=$(ip -o -4 route show to default | awk '{print $NF}') \
&& echo -e "[Interface]\n\
@@ -62,15 +112,12 @@ SaveConfig = true\n\
DNS = ${global_dns}" > /configs/wg0.conf.template \
&& chmod 600 /configs/wg0.conf.template
-# Defining a way for Docker to check the health of the container. In this case: checking the gunicorn process.
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD sh -c 'pgrep gunicorn > /dev/null && pgrep tail > /dev/null' || exit 1
-# Copy the basic entrypoint.sh script.
COPY ./docker/entrypoint.sh /entrypoint.sh
-# Exposing the default WireGuard Dashboard port for web access.
EXPOSE 10086
-WORKDIR $WGDASH
+WORKDIR $WGDASH/src
ENTRYPOINT ["/bin/bash", "/entrypoint.sh"]
diff --git a/docker/README.md b/docker/README.md
index 80d22978..25017df1 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -91,12 +91,23 @@ Updating WGDashboard is currently in **alpha** stage. While the update process m
## ⚙️ Environment Variables
-| Variable | Accepted Values | Default | Example | Description |
-|---------------|------------------------------------------|-------------------------|------------------------|-----------------------------------------------------------------------------|
-| `tz` | Timezone | `Europe/Amsterdam` | `America/New_York` | Sets the container's timezone. Useful for accurate logs and scheduling. |
-| `global_dns` | IPv4 and IPv6 addresses | `9.9.9.9` | `8.8.8.8`, `1.1.1.1` | Default DNS for WireGuard clients. |
-| `public_ip` | Public IP address | Retrieved automatically | `253.162.134.73` | Used to generate accurate client configs. Needed if container is NAT’d. |
-| `wgd_port` | Any port that is allowed for the process | `10086` | `443` | This port is used to set the WGDashboard web port. |
+| Variable | Accepted Values | Default | Example | Description |
+| ------------------ | ---------------------------------------- | ----------------------- | --------------------- | ----------------------------------------------------------------------- |
+| `tz` | Timezone | `Europe/Amsterdam` | `America/New_York` | Sets the container's timezone. Useful for accurate logs and scheduling. |
+| `global_dns` | IPv4 and IPv6 addresses | `9.9.9.9` | `8.8.8.8`, `1.1.1.1` | Default DNS for WireGuard clients. |
+| `public_ip` | Public IP address | Retrieved automatically | `253.162.134.73` | Used to generate accurate client configs. Needed if container is NAT’d. |
+| `wgd_port` | Any port that is allowed for the process | `10086` | `443` | This port is used to set the WGDashboard web port. |
+| `username`         | Any non-empty string                      | `-`                     | `admin`               | Username for the WGDashboard web interface account.                      |
+| `password`         | Any non-empty string                      | `-`                     | `s3cr3tP@ss`          | Password for the WGDashboard web interface account (stored hashed).      |
+| `enable_totp`      | `true`, `false`                           | `true`                  | `false`               | Enable TOTP-based two-factor authentication for the account.             |
+| `wg_autostart`     | WireGuard configuration name              | `-`                     | `wg0`                 | WireGuard configuration to auto-start when the container launches.       |
+| `email_server` | SMTP server address | `-` | `smtp.gmail.com` | SMTP server for sending email notifications. |
+| `email_port` | SMTP port number | `-` | `587` | Port for connecting to the SMTP server. |
+| `email_encryption` | `TLS`, `SSL`, etc. | `-` | `TLS` | Encryption method for email communication. |
+| `email_username` | Any non-empty string | `-` | `user@example.com` | Username for SMTP authentication. |
+| `email_password` | Any non-empty string | `-` | `app_password` | Password for SMTP authentication. |
+| `email_from` | Valid email address | `-` | `noreply@example.com` | Email address used as the sender for notifications. |
+| `email_template` | Path to template file | `-` | `your-template` | Custom template for email notifications. |
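+
+For example, a minimal sketch of passing a few of these variables with `docker run` (the values are placeholders, and other required flags such as ports, capabilities and volumes are omitted):
+
+```bash
+docker run -d \
+  -e tz=Europe/Amsterdam \
+  -e wgd_port=10086 \
+  -e username=admin \
+  -e password=changeme \
+  -e enable_totp=false \
+  -e email_server=smtp.example.com \
+  -e email_port=587 \
+  -e email_from=noreply@example.com \
+  yourname/wgdashboard:latest
+```
+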
---
@@ -122,7 +133,7 @@ Examples:
To build from source:
```bash
-git clone https://github.com/donaldzou/WGDashboard.git
+git clone https://github.com/WGDashboard/WGDashboard.git
cd WGDashboard
docker build . -f docker/Dockerfile -t yourname/wgdashboard:latest
```
diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh
index 85427e32..a6da4bf9 100644
--- a/docker/entrypoint.sh
+++ b/docker/entrypoint.sh
@@ -1,10 +1,55 @@
#!/bin/bash
-# Path to the configuration file (exists because of previous function).
config_file="/data/wg-dashboard.ini"
trap 'stop_service' SIGTERM
+# Hash password with bcrypt
+hash_password() {
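+    # Note: the value is interpolated directly into the Python one-liner, so a password containing a single quote will break the command.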
+ python3 -c "import bcrypt; print(bcrypt.hashpw('$1'.encode(), bcrypt.gensalt(12)).decode())"
+}
+
+# Function to set or update section/key/value in the INI file
+set_ini() {
+ local section="$1" key="$2" value="$3"
+ local current_value
+
+ # Add section if it doesn't exist
+ grep -q "^\[${section}\]" "$config_file" \
+ || printf "\n[%s]\n" "${section}" >> "$config_file"
+
+ # Check current value if key exists
+ if grep -q "^[[:space:]]*${key}[[:space:]]*=" "$config_file"; then
+ current_value=$(grep "^[[:space:]]*${key}[[:space:]]*=" "$config_file" | cut -d= -f2- | xargs)
+
+ # Don't display actual value if it's a password field
+ if [[ "$key" == *"password"* ]]; then
+ if [ "$current_value" = "$value" ]; then
+ echo "- $key is already set correctly (value hidden)"
+ return 0
+ fi
+ sed -i "/^\[${section}\]/,/^\[/{s|^[[:space:]]*${key}[[:space:]]*=.*|${key} = ${value}|}" "$config_file"
+ echo "- Updated $key (value hidden)"
+ else
+ if [ "$current_value" = "$value" ]; then
+ echo "- $key is already set correctly ($value)"
+ return 0
+ fi
+ sed -i "/^\[${section}\]/,/^\[/{s|^[[:space:]]*${key}[[:space:]]*=.*|${key} = ${value}|}" "$config_file"
+ echo "- Updated $key to: $value"
+ fi
+ else
+ sed -i "/^\[${section}\]/a ${key} = ${value}" "$config_file"
+
+ # Don't display actual value if it's a password field
+ if [[ "$key" == *"password"* ]]; then
+ echo "- Added new setting $key (value hidden)"
+ else
+ echo "- Added new setting $key: $value"
+ fi
+ fi
+}
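+# Example (illustrative): set_ini Server app_port 10086 ensures a [Server] section exists and
+# that it contains "app_port = 10086", updating the value in place if the key is already present.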
+
stop_service() {
echo "[WGDashboard] Stopping WGDashboard..."
/bin/bash ./wgd.sh stop
@@ -12,79 +57,62 @@ stop_service() {
}
echo "------------------------- START ----------------------------"
-echo "Starting the WireGuard Dashboard Docker container."
+echo "Starting the WGDashboard Docker container."
ensure_installation() {
- # When using a custom directory to store the files, this part moves over and makes sure the installation continues.
echo "Quick-installing..."
-
+
# Make the wgd.sh script executable.
chmod +x "${WGDASH}"/src/wgd.sh
cd "${WGDASH}"/src || exit
-
+
# Github issue: https://github.com/donaldzou/WGDashboard/issues/723
echo "Checking for stale pids..."
if [[ -f ${WGDASH}/src/gunicorn.pid ]]; then
echo "Found stale pid, removing..."
rm ${WGDASH}/src/gunicorn.pid
fi
-
+
# Removing clear shell command from the wgd.sh script to enhance docker logging.
echo "Removing clear command from wgd.sh for better Docker logging."
sed -i '/clear/d' ./wgd.sh
-
- # Create the databases directory if it does not exist yet.
+
+ # Create required directories and links
if [ ! -d "/data/db" ]; then
echo "Creating database dir"
- mkdir /data/db
+ mkdir -p /data/db
fi
-
- # Linking the database on the persistent directory location to where WGDashboard expects.
+
if [ ! -d "${WGDASH}/src/db" ]; then
ln -s /data/db "${WGDASH}/src/db"
fi
-
- # Create the wg-dashboard.ini file if it does not exist yet.
+
if [ ! -f "${config_file}" ]; then
echo "Creating wg-dashboard.ini file"
touch "${config_file}"
fi
-
- # Link the wg-dashboard.ini file from the persistent directory to where WGDashboard expects it.
+
if [ ! -f "${WGDASH}/src/wg-dashboard.ini" ]; then
ln -s "${config_file}" "${WGDASH}/src/wg-dashboard.ini"
fi
# Create the Python virtual environment.
- python3 -m venv "${WGDASH}"/src/venv
. "${WGDASH}/src/venv/bin/activate"
-
- # Due to this pip dependency being available as a system package we can just move it to the venv.
- echo "Moving PIP dependency from ephemerality to runtime environment: psutil"
- mv /usr/lib/python3.12/site-packages/psutil* "${WGDASH}"/src/venv/lib/python3.12/site-packages
-
- # Due to this pip dependency being available as a system package we can just move it to the venv.
- echo "Moving PIP dependency from ephemerality to runtime environment: bcrypt"
- mv /usr/lib/python3.12/site-packages/bcrypt* "${WGDASH}"/src/venv/lib/python3.12/site-packages
-
+
# Use the bash interpreter to install WGDashboard according to the wgd.sh script.
/bin/bash ./wgd.sh install
-
+
echo "Looks like the installation succeeded. Moving on."
-
- # This first step is to ensure the wg0.conf file exists, and if not, then its copied over from the ephemeral container storage.
- # This is done so WGDashboard it works out of the box, it also sets a randomly generated private key.
-
+
+ # Setup WireGuard if needed
if [ ! -f "/etc/wireguard/wg0.conf" ]; then
- echo "Standard wg0 Configuration file not found, grabbing template."
cp -a "/configs/wg0.conf.template" "/etc/wireguard/wg0.conf"
-
+
echo "Setting a secure private key."
-
local privateKey
privateKey=$(wg genkey)
sed -i "s|^PrivateKey *=.*$|PrivateKey = ${privateKey}|g" /etc/wireguard/wg0.conf
-
+
echo "Done setting template."
else
echo "Existing wg0 configuration file found, using that."
@@ -93,63 +121,75 @@ ensure_installation() {
set_envvars() {
printf "\n------------- SETTING ENVIRONMENT VARIABLES ----------------\n"
-
- # Check if the file is empty
+
+ # Check if config file is empty
if [ ! -s "${config_file}" ]; then
- echo "Config file is empty. Creating [Peers] section."
-
- # Create [Peers] section with initial values
- {
- echo "[Peers]"
- echo "peer_global_dns = ${global_dns}"
- echo "remote_endpoint = ${public_ip}"
- echo -e "\n[Server]"
- echo "app_port = ${wgd_port}"
- } > "${config_file}"
-
- else
- echo "Config file is not empty, using pre-existing."
+ echo "Config file is empty. Creating initial structure."
fi
-
- echo "Verifying current variables..."
-
- # Check and update the DNS if it has changed
- current_dns=$(grep "peer_global_dns = " "${config_file}" | awk '{print $NF}')
- if [ "${global_dns}" == "$current_dns" ]; then
- echo "DNS is set correctly, moving on."
-
- else
- echo "Changing default DNS..."
- sed -i "s/^peer_global_dns = .*/peer_global_dns = ${global_dns}/" "${config_file}"
+
+ echo "Checking basic configuration:"
+ set_ini Peers peer_global_dns "${global_dns}"
+
+ if [ -z "${public_ip}" ]; then
+ public_ip=$(curl -s ifconfig.me)
+ echo "Automatically detected public IP: ${public_ip}"
fi
-
- # Checking the current set public IP and changing it if it has changed.
- current_public_ip=$(grep "remote_endpoint = " "${config_file}" | awk '{print $NF}')
- if [ "${public_ip}" == "" ]; then
- default_ip=$(curl -s ifconfig.me)
-
- echo "Trying to fetch the Public-IP using ifconfig.me: ${default_ip}"
- sed -i "s/^remote_endpoint = .*/remote_endpoint = ${default_ip}/" "${config_file}"
- elif [ "${current_public_ip}" != "${public_ip}" ]; then
- sed -i "s/^remote_endpoint = .*/remote_endpoint = ${public_ip}/" "${config_file}"
- else
- echo "Public-IP is correct, moving on."
+
+ set_ini Peers remote_endpoint "${public_ip}"
+ set_ini Server app_port "${wgd_port}"
+
+ # Account settings - process all parameters
+ [[ -n "$username" ]] && echo "Configuring user account:"
+ # Basic account variables
+ [[ -n "$username" ]] && set_ini Account username "${username}"
+
+ if [[ -n "$password" ]]; then
+ echo "- Setting password"
+ set_ini Account password "$(hash_password "${password}")"
fi
-
- # Checking the current WGDashboard web port and changing if needed.
- current_wgd_port=$(grep "app_port = " "${config_file}" | awk '{print $NF}')
- if [ "${current_wgd_port}" == "${wgd_port}" ]; then
- echo "Current WGD port is set correctly, moving on."
- else
- echo "Changing default WGD port..."
- sed -i "s/^app_port = .*/app_port = ${wgd_port}/" "${config_file}"
+
+ # Additional account variables
+ [[ -n "$enable_totp" ]] && set_ini Account enable_totp "${enable_totp}"
+ [[ -n "$totp_verified" ]] && set_ini Account totp_verified "${totp_verified}"
+ [[ -n "$totp_key" ]] && set_ini Account totp_key "${totp_key}"
+
+ # Welcome session
+ [[ -n "$welcome_session" ]] && set_ini Other welcome_session "${welcome_session}"
+ # If username and password are set but welcome_session isn't, disable it
+ if [[ -n "$username" && -n "$password" && -z "$welcome_session" ]]; then
+ set_ini Other welcome_session "false"
fi
+
+ # Autostart WireGuard
+ if [[ -n "$wg_autostart" ]]; then
+ echo "Configuring WireGuard autostart:"
+ set_ini WireGuardConfiguration autostart "${wg_autostart}"
+ fi
+
+ # Email (check if any settings need to be configured)
+ email_vars=("email_server" "email_port" "email_encryption" "email_username" "email_password" "email_from" "email_template")
+ for var in "${email_vars[@]}"; do
+ if [ -n "${!var}" ]; then
+ echo "Configuring email settings:"
+ break
+ fi
+ done
+
+ # Email (iterate through all possible fields)
+ email_fields=("server:email_server" "port:email_port" "encryption:email_encryption"
+ "username:email_username" "email_password:email_password"
+ "send_from:email_from" "email_template:email_template")
+
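+    # Note: "${!var}" is Bash indirect expansion; it yields the value of the variable whose name is stored in $var.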
+ for field_pair in "${email_fields[@]}"; do
+ IFS=: read -r field var <<< "$field_pair"
+ [[ -n "${!var}" ]] && set_ini Email "$field" "${!var}"
+ done
}
-# === CORE SERVICES ===
-start_core() {
+# Start service and monitor logs
+start_and_monitor() {
printf "\n---------------------- STARTING CORE -----------------------\n"
-
+
# Due to some instances complaining about this, making sure its there every time.
mkdir -p /dev/net
mknod /dev/net/tun c 10 200
@@ -157,23 +197,19 @@ start_core() {
# Actually starting WGDashboard
echo "Activating Python venv and executing the WireGuard Dashboard service."
- /bin/bash ./wgd.sh start
-}
-
-ensure_blocking() {
+ bash ./wgd.sh start
+
# Wait a second before continuing, to give the python program some time to get ready.
- sleep 1s
+ sleep 1
echo -e "\nEnsuring container continuation."
-
- # Find and tail the latest error and access logs if they exist
+
+ # Find and monitor log file
local logdir="${WGDASH}/src/log"
-
latestErrLog=$(find "$logdir" -name "error_*.log" -type f -print | sort -r | head -n 1)
-
+
# Only tail the logs if they are found
if [ -n "$latestErrLog" ]; then
tail -f "$latestErrLog" &
-
# Wait for the tail process to end.
wait $!
else
@@ -182,8 +218,7 @@ ensure_blocking() {
fi
}
-# Execute functions for the WireGuard Dashboard services, then set the environment variables
+# Main execution flow
ensure_installation
set_envvars
-start_core
-ensure_blocking
+start_and_monitor
\ No newline at end of file
diff --git a/package-lock.json b/package-lock.json
index 3c274ecf..5c55fc14 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -5,6 +5,7 @@
"packages": {
"": {
"dependencies": {
+ "axios": "^1.9.0",
"marked": "^15.0.7",
"openai": "^4.89.0",
"pinia-plugin-persistedstate": "^4.2.0"
@@ -163,6 +164,17 @@
"integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
"license": "MIT"
},
+ "node_modules/axios": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmmirror.com/axios/-/axios-1.9.0.tgz",
+ "integrity": "sha512-re4CqKTJaURpzbLHtIi6XpDv20/CnpXOtjRY5/CU32L8gU8ek9UIivcfvSWvmKEngmVbrUtPpdDwWDWL7DNHvg==",
+ "license": "MIT",
+ "dependencies": {
+ "follow-redirects": "^1.15.6",
+ "form-data": "^4.0.0",
+ "proxy-from-env": "^1.1.0"
+ }
+ },
"node_modules/braces": {
"version": "3.0.3",
"resolved": "https://registry.npmmirror.com/braces/-/braces-3.0.3.tgz",
@@ -444,6 +456,26 @@
"node": ">=8"
}
},
+ "node_modules/follow-redirects": {
+ "version": "1.15.9",
+ "resolved": "https://registry.npmmirror.com/follow-redirects/-/follow-redirects-1.15.9.tgz",
+ "integrity": "sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==",
+ "funding": [
+ {
+ "type": "individual",
+ "url": "https://github.com/sponsors/RubenVerborgh"
+ }
+ ],
+ "license": "MIT",
+ "engines": {
+ "node": ">=4.0"
+ },
+ "peerDependenciesMeta": {
+ "debug": {
+ "optional": true
+ }
+ }
+ },
"node_modules/form-data": {
"version": "4.0.2",
"resolved": "https://registry.npmmirror.com/form-data/-/form-data-4.0.2.tgz",
@@ -998,6 +1030,12 @@
"integrity": "sha512-hkT3yDPFbs95mNCy1+7qNKC6Pro+/ibzYxtM2iqEigpf0sVw+bg4Zh9/snjsBcf990vfIsg5+1U7VyiyBb3etg==",
"license": "MIT"
},
+ "node_modules/proxy-from-env": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmmirror.com/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
+ "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==",
+ "license": "MIT"
+ },
"node_modules/quansync": {
"version": "0.2.10",
"resolved": "https://registry.npmmirror.com/quansync/-/quansync-0.2.10.tgz",
diff --git a/package.json b/package.json
index f83ed9d3..652e4a95 100644
--- a/package.json
+++ b/package.json
@@ -1,5 +1,6 @@
{
"dependencies": {
+ "axios": "^1.9.0",
"marked": "^15.0.7",
"openai": "^4.89.0",
"pinia-plugin-persistedstate": "^4.2.0"
diff --git a/src/client.py b/src/client.py
new file mode 100644
index 00000000..cc7ac218
--- /dev/null
+++ b/src/client.py
@@ -0,0 +1,232 @@
+import datetime
+
+from tzlocal import get_localzone
+
+from functools import wraps
+
+from flask import Blueprint, render_template, abort, request, Flask, current_app, session, redirect, url_for
+import os
+
+from modules.WireguardConfiguration import WireguardConfiguration
+from modules.DashboardConfig import DashboardConfig
+from modules.Email import EmailSender
+
+
+def ResponseObject(status=True, message=None, data=None, status_code = 200) -> Flask.response_class:
+ response = Flask.make_response(current_app, {
+ "status": status,
+ "message": message,
+ "data": data
+ })
+ response.status_code = status_code
+ response.content_type = "application/json"
+ return response
+
+
+
+from modules.DashboardClients import DashboardClients
+def createClientBlueprint(wireguardConfigurations: dict[WireguardConfiguration], dashboardConfig: DashboardConfig, dashboardClients: DashboardClients):
+
+ client = Blueprint('client', __name__, template_folder=os.path.abspath("./static/dist/WGDashboardClient"))
+ prefix = f'{dashboardConfig.GetConfig("Server", "app_prefix")[1]}/client'
+
+ def login_required(f):
+ @wraps(f)
+ def func(*args, **kwargs):
+ if session.get("Email") is None or session.get("TotpVerified") is None or not session.get("TotpVerified") or session.get("Role") != "client":
+ return ResponseObject(False, "Unauthorized access.", data=None, status_code=401)
+
+ if not dashboardClients.GetClient(session.get("ClientID")):
+ session.clear()
+ return ResponseObject(False, "Unauthorized access.", data=None, status_code=401)
+
+ return f(*args, **kwargs)
+ return func
+
+ @client.before_request
+ def clientBeforeRequest():
+ if not dashboardConfig.GetConfig("Clients", "enable")[1]:
+ abort(404)
+
+ if request.method.lower() == 'options':
+ return ResponseObject(True)
+
+ @client.post(f'{prefix}/api/signup')
+ def ClientAPI_SignUp():
+ data = request.get_json()
+ status, msg = dashboardClients.SignUp(**data)
+ return ResponseObject(status, msg)
+
+ @client.get(f'{prefix}/api/signin/oidc/providers')
+ def ClientAPI_SignIn_OIDC_GetProviders():
+ _, oidc = dashboardConfig.GetConfig("OIDC", "client_enable")
+ if not oidc:
+ return ResponseObject(status=False, message="OIDC is disabled")
+
+ return ResponseObject(data=dashboardClients.OIDC.GetProviders())
+
+ @client.post(f'{prefix}/api/signin/oidc')
+ def ClientAPI_SignIn_OIDC():
+ _, oidc = dashboardConfig.GetConfig("OIDC", "client_enable")
+ if not oidc:
+ return ResponseObject(status=False, message="OIDC is disabled")
+
+ data = request.get_json()
+ status, oidcData = dashboardClients.SignIn_OIDC(**data)
+ if not status:
+ return ResponseObject(status, oidcData)
+
+ session['Email'] = oidcData.get('email')
+ session['Role'] = 'client'
+ session['TotpVerified'] = True
+
+ return ResponseObject()
+
+ @client.post(f'{prefix}/api/signin')
+ def ClientAPI_SignIn():
+ data = request.get_json()
+ status, msg = dashboardClients.SignIn(**data)
+ if status:
+ session['Email'] = data.get('Email')
+ session['Role'] = 'client'
+ session['TotpVerified'] = False
+ return ResponseObject(status, msg)
+
+ @client.post(f'{prefix}/api/resetPassword/generateResetToken')
+ def ClientAPI_ResetPassword_GenerateResetToken():
+ date = datetime.datetime.now(tz=datetime.timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
+
+ emailSender = EmailSender(dashboardConfig)
+ if not emailSender.ready():
+ return ResponseObject(False, "We can't send you an email due to your Administrator has not setup email service. Please contact your administrator.")
+
+ data = request.get_json()
+ email = data.get('Email', None)
+ if not email:
+ return ResponseObject(False, "Please provide a valid Email")
+
+ u = dashboardClients.SignIn_UserExistence(email)
+ if not u:
+ return ResponseObject(False, "Please provide a valid Email")
+
+ token = dashboardClients.GenerateClientPasswordResetToken(u.get('ClientID'))
+
+ status, msg = emailSender.send(
+ email, "[WGDashboard | Client] Reset Password",
+ f"Hi {email}, \n\nIt looks like you're trying to reset your password at {date} \n\nEnter this 6 digits code on the Forgot Password to continue:\n\n{token}\n\nThis code will expire in 30 minutes for your security. If you didn’t request a password reset, you can safely ignore this email—your current password will remain unchanged.\n\nIf you need help, feel free to contact support.\n\nBest regards,\n\nWGDashboard"
+ )
+
+ return ResponseObject(status, msg)
+
+ @client.post(f'{prefix}/api/resetPassword/validateResetToken')
+ def ClientAPI_ResetPassword_ValidateResetToken():
+ data = request.get_json()
+ email = data.get('Email', None)
+ token = data.get('Token', None)
+ if not all([email, token]):
+ return ResponseObject(False, "Please provide a valid Email")
+
+ u = dashboardClients.SignIn_UserExistence(email)
+ if not u:
+ return ResponseObject(False, "Please provide a valid Email")
+
+ return ResponseObject(status=dashboardClients.ValidateClientPasswordResetToken(u.get('ClientID'), token))
+
+ @client.post(f'{prefix}/api/resetPassword')
+ def ClientAPI_ResetPassword():
+ data = request.get_json()
+ email = data.get('Email', None)
+ token = data.get('Token', None)
+ password = data.get('Password', None)
+ confirmPassword = data.get('ConfirmPassword', None)
+ if not all([email, token, password, confirmPassword]):
+ return ResponseObject(False, "Please provide a valid Email")
+
+ u = dashboardClients.SignIn_UserExistence(email)
+ if not u:
+ return ResponseObject(False, "Please provide a valid Email")
+
+ if not dashboardClients.ValidateClientPasswordResetToken(u.get('ClientID'), token):
+ return ResponseObject(False, "Verification code is either invalid or expired")
+
+ status, msg = dashboardClients.ResetClientPassword(u.get('ClientID'), password, confirmPassword)
+
+ dashboardClients.RevokeClientPasswordResetToken(u.get('ClientID'), token)
+
+ return ResponseObject(status, msg)
+
+
+ @client.get(f'{prefix}/api/signout')
+ def ClientAPI_SignOut():
+ if session.get("SignInMethod") == "OIDC":
+ dashboardClients.SignOut_OIDC()
+ session.clear()
+ return ResponseObject(True)
+
+ @client.get(f'{prefix}/api/signin/totp')
+ def ClientAPI_SignIn_TOTP():
+ token = request.args.get('Token', None)
+ if not token:
+ return ResponseObject(False, "Please provide TOTP token")
+
+ status, msg = dashboardClients.SignIn_GetTotp(token)
+ return ResponseObject(status, msg)
+
+ @client.post(f'{prefix}/api/signin/totp')
+ def ClientAPI_SignIn_ValidateTOTP():
+ data = request.get_json()
+ token = data.get('Token', None)
+ userProvidedTotp = data.get('UserProvidedTOTP', None)
+ if not all([token, userProvidedTotp]):
+ return ResponseObject(False, "Please fill in all fields")
+ status, msg = dashboardClients.SignIn_GetTotp(token, userProvidedTotp)
+ if status:
+ if session.get('Email') is None:
+ return ResponseObject(False, "Sign in status is invalid", status_code=401)
+ session['TotpVerified'] = True
+ profile = dashboardClients.GetClientProfile(session.get("ClientID"))
+
+ return ResponseObject(True, data={
+ "Email": session.get('Email'),
+ "Profile": profile
+ })
+ return ResponseObject(status, msg)
+
+ @client.get(prefix)
+ def ClientIndex():
+ return render_template('client.html')
+
+ @client.get(f'{prefix}/api/serverInformation')
+ def ClientAPI_ServerInformation():
+ return ResponseObject(data={
+ "ServerTimezone": str(get_localzone())
+ })
+
+ @client.get(f'{prefix}/api/validateAuthentication')
+ @login_required
+ def ClientAPI_ValidateAuthentication():
+ return ResponseObject(True)
+
+ @client.get(f'{prefix}/api/configurations')
+ @login_required
+ def ClientAPI_Configurations():
+ return ResponseObject(True, data=dashboardClients.GetClientAssignedPeers(session['ClientID']))
+
+ @client.get(f'{prefix}/api/settings/getClientProfile')
+ @login_required
+ def ClientAPI_Settings_GetClientProfile():
+ return ResponseObject(data={
+ "Email": session.get("Email"),
+ "SignInMethod": session.get("SignInMethod"),
+ "Profile": dashboardClients.GetClientProfile(session.get("ClientID"))
+ })
+
+ @client.post(f'{prefix}/api/settings/updatePassword')
+ @login_required
+ def ClientAPI_Settings_UpdatePassword():
+ data = request.get_json()
+ status, message = dashboardClients.UpdateClientPassword(session['ClientID'], **data)
+
+ return ResponseObject(status, message)
+
+ return client
\ No newline at end of file
diff --git a/src/dashboard.py b/src/dashboard.py
index 8f9a454a..fde8c53b 100644
--- a/src/dashboard.py
+++ b/src/dashboard.py
@@ -1,41 +1,45 @@
+import logging
import random, shutil, sqlite3, configparser, hashlib, ipaddress, json, os, secrets, subprocess
-import time, re, urllib.error, uuid, bcrypt, psutil, pyotp, threading
+import time, re, uuid, bcrypt, psutil, pyotp, threading
+import traceback
from uuid import uuid4
from zipfile import ZipFile
from datetime import datetime, timedelta
-from typing import Any
+
+import sqlalchemy
from jinja2 import Template
from flask import Flask, request, render_template, session, send_file
-from json import JSONEncoder
from flask_cors import CORS
from icmplib import ping, traceroute
from flask.json.provider import DefaultJSONProvider
from itertools import islice
-from Utilities import (
- RegexMatch, GetRemoteEndpoint, StringToBoolean,
+
+from sqlalchemy import RowMapping
+
+from modules.Utilities import (
+ RegexMatch, StringToBoolean,
ValidateIPAddressesWithRange, ValidateDNSAddress,
GenerateWireguardPublicKey, GenerateWireguardPrivateKey
)
from packaging import version
from modules.Email import EmailSender
-from modules.Log import Log
from modules.DashboardLogger import DashboardLogger
-from modules.PeerJobLogger import PeerJobLogger
from modules.PeerJob import PeerJob
from modules.SystemStatus import SystemStatus
-SystemStatus = SystemStatus()
+from modules.PeerShareLinks import PeerShareLinks
+from modules.PeerJobs import PeerJobs
+from modules.DashboardConfig import DashboardConfig
+from modules.WireguardConfiguration import WireguardConfiguration
+from modules.AmneziaWireguardConfiguration import AmneziaWireguardConfiguration
-DASHBOARD_VERSION = 'v4.2.5'
+from client import createClientBlueprint
-CONFIGURATION_PATH = os.getenv('CONFIGURATION_PATH', '.')
-DB_PATH = os.path.join(CONFIGURATION_PATH, 'db')
-if not os.path.isdir(DB_PATH):
- os.mkdir(DB_PATH)
-DASHBOARD_CONF = os.path.join(CONFIGURATION_PATH, 'wg-dashboard.ini')
-UPDATE = None
-app = Flask("WGDashboard", template_folder=os.path.abspath("./static/app/dist"))
-app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 5206928
-app.secret_key = secrets.token_urlsafe(32)
+from logging.config import dictConfig
+
+from modules.DashboardClients import DashboardClients
+from modules.DashboardPlugins import DashboardPlugins
+from modules.DashboardWebHooks import DashboardWebHooks
+from modules.NewConfigurationTemplates import NewConfigurationTemplates
class CustomJsonEncoder(DefaultJSONProvider):
def __init__(self, app):
@@ -44,8 +48,13 @@ class CustomJsonEncoder(DefaultJSONProvider):
def default(self, o):
if callable(getattr(o, "toJson", None)):
return o.toJson()
+ if type(o) is RowMapping:
+ return dict(o)
+ if type(o) is datetime:
+ return o.strftime("%Y-%m-%d %H:%M:%S")
return super().default(self)
-app.json = CustomJsonEncoder(app)
+
+
'''
Response Object
@@ -58,1959 +67,151 @@ def ResponseObject(status=True, message=None, data=None, status_code = 200) -> F
})
response.status_code = status_code
response.content_type = "application/json"
- return response
+ return response
-"""
-Peer Jobs
-"""
-class PeerJobs:
- def __init__(self):
- self.Jobs: list[PeerJob] = []
- self.jobdb = sqlite3.connect(os.path.join(CONFIGURATION_PATH, 'db', 'wgdashboard_job.db'),
- check_same_thread=False)
- self.jobdb.row_factory = sqlite3.Row
- self.__createPeerJobsDatabase()
- self.__getJobs()
+'''
+Flask App
+'''
+app = Flask("WGDashboard", template_folder=os.path.abspath("./static/dist/WGDashboardAdmin"))
- def __getJobs(self):
- self.Jobs.clear()
- with self.jobdb:
- jobdbCursor = self.jobdb.cursor()
- jobs = jobdbCursor.execute("SELECT * FROM PeerJobs WHERE ExpireDate IS NULL").fetchall()
- for job in jobs:
- self.Jobs.append(PeerJob(
- job['JobID'], job['Configuration'], job['Peer'], job['Field'], job['Operator'], job['Value'],
- job['CreationDate'], job['ExpireDate'], job['Action']))
-
- def getAllJobs(self, configuration: str = None):
- if configuration is not None:
- with self.jobdb:
- jobdbCursor = self.jobdb.cursor()
- jobs = jobdbCursor.execute(
- f"SELECT * FROM PeerJobs WHERE Configuration = ?", (configuration, )).fetchall()
- j = []
- for job in jobs:
- j.append(PeerJob(
- job['JobID'], job['Configuration'], job['Peer'], job['Field'], job['Operator'], job['Value'],
- job['CreationDate'], job['ExpireDate'], job['Action']))
- return j
- return []
-
- def __createPeerJobsDatabase(self):
- with self.jobdb:
- jobdbCursor = self.jobdb.cursor()
-
- existingTable = jobdbCursor.execute("SELECT name from sqlite_master where type='table'").fetchall()
- existingTable = [t['name'] for t in existingTable]
-
- if "PeerJobs" not in existingTable:
- jobdbCursor.execute('''
- CREATE TABLE PeerJobs (JobID VARCHAR NOT NULL, Configuration VARCHAR NOT NULL, Peer VARCHAR NOT NULL,
- Field VARCHAR NOT NULL, Operator VARCHAR NOT NULL, Value VARCHAR NOT NULL, CreationDate DATETIME,
- ExpireDate DATETIME, Action VARCHAR NOT NULL, PRIMARY KEY (JobID))
- ''')
- self.jobdb.commit()
-
- def toJson(self):
- return [x.toJson() for x in self.Jobs]
-
- def searchJob(self, Configuration: str, Peer: str):
- return list(filter(lambda x: x.Configuration == Configuration and x.Peer == Peer, self.Jobs))
-
- def searchJobById(self, JobID):
- return list(filter(lambda x: x.JobID == JobID, self.Jobs))
-
- def saveJob(self, Job: PeerJob) -> tuple[bool, list] | tuple[bool, str]:
- try:
- with self.jobdb:
- jobdbCursor = self.jobdb.cursor()
- if len(self.searchJobById(Job.JobID)) == 0:
- jobdbCursor.execute('''
- INSERT INTO PeerJobs VALUES (?, ?, ?, ?, ?, ?, strftime('%Y-%m-%d %H:%M:%S','now'), NULL, ?)
- ''', (Job.JobID, Job.Configuration, Job.Peer, Job.Field, Job.Operator, Job.Value, Job.Action,))
- JobLogger.log(Job.JobID, Message=f"Job is created if {Job.Field} {Job.Operator} {Job.Value} then {Job.Action}")
- else:
- currentJob = jobdbCursor.execute('SELECT * FROM PeerJobs WHERE JobID = ?', (Job.JobID, )).fetchone()
- if currentJob is not None:
- jobdbCursor.execute('''
- UPDATE PeerJobs SET Field = ?, Operator = ?, Value = ?, Action = ? WHERE JobID = ?
- ''', (Job.Field, Job.Operator, Job.Value, Job.Action, Job.JobID))
- JobLogger.log(Job.JobID,
- Message=f"Job is updated from if {currentJob['Field']} {currentJob['Operator']} {currentJob['value']} then {currentJob['Action']}; to if {Job.Field} {Job.Operator} {Job.Value} then {Job.Action}")
-
- self.jobdb.commit()
- self.__getJobs()
- return True, list(
- filter(lambda x: x.Configuration == Job.Configuration and x.Peer == Job.Peer and x.JobID == Job.JobID,
- self.Jobs))
- except Exception as e:
- return False, str(e)
-
- def deleteJob(self, Job: PeerJob) -> tuple[bool, list] | tuple[bool, str]:
- try:
- if (len(str(Job.CreationDate))) == 0:
- return False, "Job does not exist"
- with self.jobdb:
- jobdbCursor = self.jobdb.cursor()
- jobdbCursor.execute('''
- UPDATE PeerJobs SET ExpireDate = strftime('%Y-%m-%d %H:%M:%S','now') WHERE JobID = ?
- ''', (Job.JobID,))
- self.jobdb.commit()
- JobLogger.log(Job.JobID, Message=f"Job is removed due to being deleted or finshed.")
- self.__getJobs()
- return True, list(
- filter(lambda x: x.Configuration == Job.Configuration and x.Peer == Job.Peer and x.JobID == Job.JobID,
- self.Jobs))
- except Exception as e:
- return False, str(e)
-
- def updateJobConfigurationName(self, ConfigurationName: str, NewConfigurationName: str) -> tuple[bool, str] | tuple[bool, None]:
- try:
- with self.jobdb:
- jobdbCursor = self.jobdb.cursor()
- jobdbCursor.execute('''
- UPDATE PeerJobs SET Configuration = ? WHERE Configuration = ?
- ''', (NewConfigurationName, ConfigurationName, ))
- self.jobdb.commit()
- self.__getJobs()
- return True, None
- except Exception as e:
- return False, str(e)
-
-
- def runJob(self):
- needToDelete = []
- self.__getJobs()
- for job in self.Jobs:
- c = WireguardConfigurations.get(job.Configuration)
- if c is not None:
- f, fp = c.searchPeer(job.Peer)
- if f:
- if job.Field in ["total_receive", "total_sent", "total_data"]:
- s = job.Field.split("_")[1]
- x: float = getattr(fp, f"total_{s}") + getattr(fp, f"cumu_{s}")
- y: float = float(job.Value)
- else:
- x: datetime = datetime.now()
- y: datetime = datetime.strptime(job.Value, "%Y-%m-%d %H:%M:%S")
- runAction: bool = self.__runJob_Compare(x, y, job.Operator)
- if runAction:
- s = False
- if job.Action == "restrict":
- s = c.restrictPeers([fp.id]).get_json()
- elif job.Action == "delete":
- s = c.deletePeers([fp.id]).get_json()
-
- if s['status'] is True:
- JobLogger.log(job.JobID, s["status"],
- f"Peer {fp.id} from {c.Name} is successfully {job.Action}ed."
- )
- needToDelete.append(job)
- else:
- JobLogger.log(job.JobID, s["status"],
- f"Peer {fp.id} from {c.Name} failed {job.Action}ed."
- )
- else:
- JobLogger.log(job.JobID, False,
- f"Somehow can't find this peer {job.Peer} from {c.Name} failed {job.Action}ed."
- )
- else:
- JobLogger.log(job.JobID, False,
- f"Somehow can't find this peer {job.Peer} from {job.Configuration} failed {job.Action}ed."
- )
- for j in needToDelete:
- self.deleteJob(j)
-
- def __runJob_Compare(self, x: float | datetime, y: float | datetime, operator: str):
- if operator == "eq":
- return x == y
- if operator == "neq":
- return x != y
- if operator == "lgt":
- return x > y
- if operator == "lst":
- return x < y
-
-"""
-Peer Share Link
-"""
-class PeerShareLink:
- def __init__(self, ShareID:str, Configuration: str, Peer: str, ExpireDate: datetime, ShareDate: datetime):
- self.ShareID = ShareID
- self.Peer = Peer
- self.Configuration = Configuration
- self.ShareDate = ShareDate
- self.ExpireDate = ExpireDate
-
-
- def toJson(self):
- return {
- "ShareID": self.ShareID,
- "Peer": self.Peer,
- "Configuration": self.Configuration,
- "ExpireDate": self.ExpireDate
- }
-
-"""
-Peer Share Links
-"""
-class PeerShareLinks:
- def __init__(self):
- self.Links: list[PeerShareLink] = []
- existingTables = sqlSelect("SELECT name FROM sqlite_master WHERE type='table' and name = 'PeerShareLinks'").fetchall()
- if len(existingTables) == 0:
- sqlUpdate(
- """
- CREATE TABLE PeerShareLinks (
- ShareID VARCHAR NOT NULL PRIMARY KEY, Configuration VARCHAR NOT NULL, Peer VARCHAR NOT NULL,
- ExpireDate DATETIME,
- SharedDate DATETIME DEFAULT (datetime('now', 'localtime'))
- )
- """
- )
- self.__getSharedLinks()
- def __getSharedLinks(self):
- self.Links.clear()
- allLinks = sqlSelect("SELECT * FROM PeerShareLinks WHERE ExpireDate IS NULL OR ExpireDate > datetime('now', 'localtime')").fetchall()
- for link in allLinks:
- self.Links.append(PeerShareLink(*link))
-
- def getLink(self, Configuration: str, Peer: str) -> list[PeerShareLink]:
- return list(filter(lambda x : x.Configuration == Configuration and x.Peer == Peer, self.Links))
-
- def getLinkByID(self, ShareID: str) -> list[PeerShareLink]:
- self.__getSharedLinks()
- return list(filter(lambda x : x.ShareID == ShareID, self.Links))
-
- def addLink(self, Configuration: str, Peer: str, ExpireDate: datetime = None) -> tuple[bool, str]:
- try:
- newShareID = str(uuid.uuid4())
- if len(self.getLink(Configuration, Peer)) > 0:
- sqlUpdate("UPDATE PeerShareLinks SET ExpireDate = datetime('now', 'localtime') WHERE Configuration = ? AND Peer = ?", (Configuration, Peer, ))
- sqlUpdate("INSERT INTO PeerShareLinks (ShareID, Configuration, Peer, ExpireDate) VALUES (?, ?, ?, ?)", (newShareID, Configuration, Peer, ExpireDate, ))
- self.__getSharedLinks()
- except Exception as e:
- return False, str(e)
- return True, newShareID
-
- def updateLinkExpireDate(self, ShareID, ExpireDate: datetime = None) -> tuple[bool, str]:
- sqlUpdate("UPDATE PeerShareLinks SET ExpireDate = ? WHERE ShareID = ?;", (ExpireDate, ShareID, ))
- self.__getSharedLinks()
- return True, ""
-
-"""
-WireGuard Configuration
-"""
-class WireguardConfiguration:
- class InvalidConfigurationFileException(Exception):
- def __init__(self, m):
- self.message = m
-
- def __str__(self):
- return self.message
-
- def __init__(self, name: str = None, data: dict = None, backup: dict = None, startup: bool = False, wg: bool = True):
-
-
- self.__parser: configparser.ConfigParser = configparser.RawConfigParser(strict=False)
- self.__parser.optionxform = str
- self.__configFileModifiedTime = None
-
- self.Status: bool = False
- self.Name: str = ""
- self.PrivateKey: str = ""
- self.PublicKey: str = ""
-
- self.ListenPort: str = ""
- self.Address: str = ""
- self.DNS: str = ""
- self.Table: str = ""
- self.MTU: str = ""
- self.PreUp: str = ""
- self.PostUp: str = ""
- self.PreDown: str = ""
- self.PostDown: str = ""
- self.SaveConfig: bool = True
- self.Name = name
- self.Protocol = "wg" if wg else "awg"
- self.configPath = os.path.join(self.__getProtocolPath(), f'{self.Name}.conf') if wg else os.path.join(DashboardConfig.GetConfig("Server", "awg_conf_path")[1], f'{self.Name}.conf')
-
- if name is not None:
- if data is not None and "Backup" in data.keys():
- db = self.__importDatabase(
- os.path.join(
- self.__getProtocolPath(),
- 'WGDashboard_Backup',
- data["Backup"].replace(".conf", ".sql")))
- else:
- self.createDatabase()
-
- self.__parseConfigurationFile()
- self.__initPeersList()
-
- else:
- self.Name = data["ConfigurationName"]
- self.configPath = os.path.join(self.__getProtocolPath(), f'{self.Name}.conf')
-
- for i in dir(self):
- if str(i) in data.keys():
- if isinstance(getattr(self, i), bool):
- setattr(self, i, StringToBoolean(data[i]))
- else:
- setattr(self, i, str(data[i]))
-
- self.__parser["Interface"] = {
- "PrivateKey": self.PrivateKey,
- "Address": self.Address,
- "ListenPort": self.ListenPort,
- "PreUp": f"{self.PreUp}",
- "PreDown": f"{self.PreDown}",
- "PostUp": f"{self.PostUp}",
- "PostDown": f"{self.PostDown}",
- "SaveConfig": "true"
- }
-
- if self.Protocol == 'awg':
- self.__parser["Interface"]["Jc"] = self.Jc
- self.__parser["Interface"]["Jc"] = self.Jc
- self.__parser["Interface"]["Jmin"] = self.Jmin
- self.__parser["Interface"]["Jmax"] = self.Jmax
- self.__parser["Interface"]["S1"] = self.S1
- self.__parser["Interface"]["S2"] = self.S2
- self.__parser["Interface"]["H1"] = self.H1
- self.__parser["Interface"]["H2"] = self.H2
- self.__parser["Interface"]["H3"] = self.H3
- self.__parser["Interface"]["H4"] = self.H4
-
- if "Backup" not in data.keys():
- self.createDatabase()
- with open(self.configPath, "w+") as configFile:
- self.__parser.write(configFile)
- print(f"[WGDashboard] Configuration file {self.configPath} created")
- self.__initPeersList()
-
- if not os.path.exists(os.path.join(self.__getProtocolPath(), 'WGDashboard_Backup')):
- os.mkdir(os.path.join(self.__getProtocolPath(), 'WGDashboard_Backup'))
-
- print(f"[WGDashboard] Initialized Configuration: {name}")
- if self.getAutostartStatus() and not self.getStatus() and startup:
- self.toggleConfiguration()
- print(f"[WGDashboard] Autostart Configuration: {name}")
-
- def __getProtocolPath(self):
- return DashboardConfig.GetConfig("Server", "wg_conf_path")[1] if self.Protocol == "wg" \
- else DashboardConfig.GetConfig("Server", "awg_conf_path")[1]
-
- def __initPeersList(self):
- self.Peers: list[Peer] = []
- self.getPeersList()
- self.getRestrictedPeersList()
-
- def getRawConfigurationFile(self):
- return open(self.configPath, 'r').read()
-
- def updateRawConfigurationFile(self, newRawConfiguration):
- backupStatus, backup = self.backupConfigurationFile()
- if not backupStatus:
- return False, "Cannot create backup"
-
- if self.Status:
- self.toggleConfiguration()
-
- with open(self.configPath, 'w') as f:
- f.write(newRawConfiguration)
-
- status, err = self.toggleConfiguration()
- if not status:
- restoreStatus = self.restoreBackup(backup['filename'])
- print(f"Restore status: {restoreStatus}")
- self.toggleConfiguration()
- return False, err
- return True, None
-
- def __parseConfigurationFile(self):
- with open(self.configPath, 'r') as f:
- original = [l.rstrip("\n") for l in f.readlines()]
+def peerInformationBackgroundThread():
+ global WireguardConfigurations
+ app.logger.info("Background Thread #1 Started")
+ app.logger.info("Background Thread #1 PID:" + str(threading.get_native_id()))
+ delay = 6
+ time.sleep(10)
+ while True:
+ with app.app_context():
try:
- start = original.index("[Interface]")
- # Clean
- for i in range(start, len(original)):
- if original[i] == "[Peer]":
- break
- split = re.split(r'\s*=\s*', original[i], 1)
- if len(split) == 2:
- key = split[0]
- if key in dir(self):
- if isinstance(getattr(self, key), bool):
- setattr(self, key, False)
- else:
- setattr(self, key, "")
-
- # Set
- for i in range(start, len(original)):
- if original[i] == "[Peer]":
- break
- split = re.split(r'\s*=\s*', original[i], 1)
- if len(split) == 2:
- key = split[0]
- value = split[1]
- if key in dir(self):
- if isinstance(getattr(self, key), bool):
- setattr(self, key, StringToBoolean(value))
- else:
- if len(getattr(self, key)) > 0:
- if key not in ["PostUp", "PostDown", "PreUp", "PreDown"]:
- setattr(self, key, f"{getattr(self, key)}, {value}")
- else:
- setattr(self, key, f"{getattr(self, key)}; {value}")
- else:
- setattr(self, key, value)
- except ValueError as e:
- raise self.InvalidConfigurationFileException(
- "[Interface] section not found in " + self.configPath)
- if self.PrivateKey:
- self.PublicKey = self.__getPublicKey()
- self.Status = self.getStatus()
-
- def __dropDatabase(self):
- existingTables = [self.Name, f'{self.Name}_restrict_access', f'{self.Name}_transfer', f'{self.Name}_deleted']
- # existingTables = sqlSelect(f"SELECT name FROM sqlite_master WHERE type='table' AND name LIKE '{self.Name}%'").fetchall()
- for t in existingTables:
- sqlUpdate("DROP TABLE '%s'" % t)
-
- # existingTables = sqlSelect(f"SELECT name FROM sqlite_master WHERE type='table' AND name LIKE '{self.Name}%'").fetchall()
-
- def createDatabase(self, dbName = None):
- if dbName is None:
- dbName = self.Name
-
- existingTables = sqlSelect("SELECT name FROM sqlite_master WHERE type='table'").fetchall()
- existingTables = [t['name'] for t in existingTables]
- if dbName not in existingTables:
- sqlUpdate(
- """
- CREATE TABLE '%s'(
- id VARCHAR NOT NULL, private_key VARCHAR NULL, DNS VARCHAR NULL,
- endpoint_allowed_ip VARCHAR NULL, name VARCHAR NULL, total_receive FLOAT NULL,
- total_sent FLOAT NULL, total_data FLOAT NULL, endpoint VARCHAR NULL,
- status VARCHAR NULL, latest_handshake VARCHAR NULL, allowed_ip VARCHAR NULL,
- cumu_receive FLOAT NULL, cumu_sent FLOAT NULL, cumu_data FLOAT NULL, mtu INT NULL,
- keepalive INT NULL, remote_endpoint VARCHAR NULL, preshared_key VARCHAR NULL,
- PRIMARY KEY (id)
- )
- """ % dbName
- )
- if f'{dbName}_restrict_access' not in existingTables:
- sqlUpdate(
- """
- CREATE TABLE '%s_restrict_access' (
- id VARCHAR NOT NULL, private_key VARCHAR NULL, DNS VARCHAR NULL,
- endpoint_allowed_ip VARCHAR NULL, name VARCHAR NULL, total_receive FLOAT NULL,
- total_sent FLOAT NULL, total_data FLOAT NULL, endpoint VARCHAR NULL,
- status VARCHAR NULL, latest_handshake VARCHAR NULL, allowed_ip VARCHAR NULL,
- cumu_receive FLOAT NULL, cumu_sent FLOAT NULL, cumu_data FLOAT NULL, mtu INT NULL,
- keepalive INT NULL, remote_endpoint VARCHAR NULL, preshared_key VARCHAR NULL,
- PRIMARY KEY (id)
- )
- """ % dbName
- )
- if f'{dbName}_transfer' not in existingTables:
- sqlUpdate(
- """
- CREATE TABLE '%s_transfer' (
- id VARCHAR NOT NULL, total_receive FLOAT NULL,
- total_sent FLOAT NULL, total_data FLOAT NULL,
- cumu_receive FLOAT NULL, cumu_sent FLOAT NULL, cumu_data FLOAT NULL, time DATETIME
- )
- """ % dbName
- )
- if f'{dbName}_deleted' not in existingTables:
- sqlUpdate(
- """
- CREATE TABLE '%s_deleted' (
- id VARCHAR NOT NULL, private_key VARCHAR NULL, DNS VARCHAR NULL,
- endpoint_allowed_ip VARCHAR NULL, name VARCHAR NULL, total_receive FLOAT NULL,
- total_sent FLOAT NULL, total_data FLOAT NULL, endpoint VARCHAR NULL,
- status VARCHAR NULL, latest_handshake VARCHAR NULL, allowed_ip VARCHAR NULL,
- cumu_receive FLOAT NULL, cumu_sent FLOAT NULL, cumu_data FLOAT NULL, mtu INT NULL,
- keepalive INT NULL, remote_endpoint VARCHAR NULL, preshared_key VARCHAR NULL,
- PRIMARY KEY (id)
- )
- """ % dbName
- )
-
- def __dumpDatabase(self):
- for line in sqldb.iterdump():
- if (line.startswith(f"INSERT INTO \"{self.Name}\"")
- or line.startswith(f'INSERT INTO "{self.Name}_restrict_access"')
- or line.startswith(f'INSERT INTO "{self.Name}_transfer"')
- or line.startswith(f'INSERT INTO "{self.Name}_deleted"')
- ):
- yield line
-
- def __importDatabase(self, sqlFilePath) -> bool:
- self.__dropDatabase()
- self.createDatabase()
- if not os.path.exists(sqlFilePath):
- return False
- with open(sqlFilePath, 'r') as f:
- for l in f.readlines():
- l = l.rstrip("\n")
- if len(l) > 0:
- sqlUpdate(l)
- return True
-
- def __getPublicKey(self) -> str:
- return GenerateWireguardPublicKey(self.PrivateKey)[1]
-
- def getStatus(self) -> bool:
- self.Status = self.Name in psutil.net_if_addrs().keys()
- return self.Status
-
- def getAutostartStatus(self):
- s, d = DashboardConfig.GetConfig("WireGuardConfiguration", "autostart")
- return self.Name in d
-
- def getRestrictedPeers(self):
- self.RestrictedPeers = []
- restricted = sqlSelect("SELECT * FROM '%s_restrict_access'" % self.Name).fetchall()
- for i in restricted:
- self.RestrictedPeers.append(Peer(i, self))
-
- def configurationFileChanged(self) :
- mt = os.path.getmtime(self.configPath)
- changed = self.__configFileModifiedTime is None or self.__configFileModifiedTime != mt
- self.__configFileModifiedTime = mt
- return changed
-
- def getPeers(self):
- if self.configurationFileChanged():
- self.Peers = []
- with open(self.configPath, 'r') as configFile:
- p = []
- pCounter = -1
- content = configFile.read().split('\n')
- try:
- peerStarts = content.index("[Peer]")
- content = content[peerStarts:]
- for i in content:
- if not RegexMatch("#(.*)", i) and not RegexMatch(";(.*)", i):
- if i == "[Peer]":
- pCounter += 1
- p.append({})
- p[pCounter]["name"] = ""
- else:
- if len(i) > 0:
- split = re.split(r'\s*=\s*', i, 1)
- if len(split) == 2:
- p[pCounter][split[0]] = split[1]
-
- if RegexMatch("#Name# = (.*)", i):
- split = re.split(r'\s*=\s*', i, 1)
- if len(split) == 2:
- p[pCounter]["name"] = split[1]
-
- for i in p:
- if "PublicKey" in i.keys():
- checkIfExist = sqlSelect("SELECT * FROM '%s' WHERE id = ?" % self.Name,
- ((i['PublicKey']),)).fetchone()
- if checkIfExist is None:
- newPeer = {
- "id": i['PublicKey'],
- "private_key": "",
- "DNS": DashboardConfig.GetConfig("Peers", "peer_global_DNS")[1],
- "endpoint_allowed_ip": DashboardConfig.GetConfig("Peers", "peer_endpoint_allowed_ip")[
- 1],
- "name": i.get("name"),
- "total_receive": 0,
- "total_sent": 0,
- "total_data": 0,
- "endpoint": "N/A",
- "status": "stopped",
- "latest_handshake": "N/A",
- "allowed_ip": i.get("AllowedIPs", "N/A"),
- "cumu_receive": 0,
- "cumu_sent": 0,
- "cumu_data": 0,
- "traffic": [],
- "mtu": DashboardConfig.GetConfig("Peers", "peer_mtu")[1],
- "keepalive": DashboardConfig.GetConfig("Peers", "peer_keep_alive")[1],
- "remote_endpoint": DashboardConfig.GetConfig("Peers", "remote_endpoint")[1],
- "preshared_key": i["PresharedKey"] if "PresharedKey" in i.keys() else ""
- }
- sqlUpdate(
- """
- INSERT INTO '%s'
- VALUES (:id, :private_key, :DNS, :endpoint_allowed_ip, :name, :total_receive, :total_sent,
- :total_data, :endpoint, :status, :latest_handshake, :allowed_ip, :cumu_receive, :cumu_sent,
- :cumu_data, :mtu, :keepalive, :remote_endpoint, :preshared_key);
- """ % self.Name
- , newPeer)
- self.Peers.append(Peer(newPeer, self))
- else:
- sqlUpdate("UPDATE '%s' SET allowed_ip = ? WHERE id = ?" % self.Name,
- (i.get("AllowedIPs", "N/A"), i['PublicKey'],))
- self.Peers.append(Peer(checkIfExist, self))
- except Exception as e:
- if __name__ == '__main__':
- print(f"[WGDashboard] {self.Name} Error: {str(e)}")
- else:
- self.Peers.clear()
- checkIfExist = sqlSelect("SELECT * FROM '%s'" % self.Name).fetchall()
- for i in checkIfExist:
- self.Peers.append(Peer(i, self))
-
- def addPeers(self, peers: list) -> tuple[bool, dict]:
- result = {
- "message": None,
- "peers": []
- }
- try:
- for i in peers:
- newPeer = {
- "id": i['id'],
- "private_key": i['private_key'],
- "DNS": i['DNS'],
- "endpoint_allowed_ip": i['endpoint_allowed_ip'],
- "name": i['name'],
- "total_receive": 0,
- "total_sent": 0,
- "total_data": 0,
- "endpoint": "N/A",
- "status": "stopped",
- "latest_handshake": "N/A",
- "allowed_ip": i.get("allowed_ip", "N/A"),
- "cumu_receive": 0,
- "cumu_sent": 0,
- "cumu_data": 0,
- "traffic": [],
- "mtu": i['mtu'],
- "keepalive": i['keepalive'],
- "remote_endpoint": DashboardConfig.GetConfig("Peers", "remote_endpoint")[1],
- "preshared_key": i["preshared_key"]
- }
- sqlUpdate(
- """
- INSERT INTO '%s'
- VALUES (:id, :private_key, :DNS, :endpoint_allowed_ip, :name, :total_receive, :total_sent,
- :total_data, :endpoint, :status, :latest_handshake, :allowed_ip, :cumu_receive, :cumu_sent,
- :cumu_data, :mtu, :keepalive, :remote_endpoint, :preshared_key);
- """ % self.Name
- , newPeer)
- for p in peers:
- presharedKeyExist = len(p['preshared_key']) > 0
- rd = random.Random()
- uid = str(uuid.UUID(int=rd.getrandbits(128), version=4))
- if presharedKeyExist:
- with open(uid, "w+") as f:
- f.write(p['preshared_key'])
-
- subprocess.check_output(f"{self.Protocol} set {self.Name} peer {p['id']} allowed-ips {p['allowed_ip'].replace(' ', '')}{f' preshared-key {uid}' if presharedKeyExist else ''}",
- shell=True, stderr=subprocess.STDOUT)
- if presharedKeyExist:
- os.remove(uid)
- subprocess.check_output(
- f"{self.Protocol}-quick save {self.Name}", shell=True, stderr=subprocess.STDOUT)
- self.getPeersList()
- for p in peers:
- p = self.searchPeer(p['id'])
- if p[0]:
- result['peers'].append(p[1])
- return True, result
- except Exception as e:
- result['message'] = str(e)
- return False, result
-
- def searchPeer(self, publicKey):
- for i in self.Peers:
- if i.id == publicKey:
- return True, i
- return False, None
-
- def allowAccessPeers(self, listOfPublicKeys):
- if not self.getStatus():
- self.toggleConfiguration()
-
- for i in listOfPublicKeys:
- p = sqlSelect("SELECT * FROM '%s_restrict_access' WHERE id = ?" % self.Name, (i,)).fetchone()
- if p is not None:
- sqlUpdate("INSERT INTO '%s' SELECT * FROM '%s_restrict_access' WHERE id = ?"
- % (self.Name, self.Name,), (p['id'],))
- sqlUpdate("DELETE FROM '%s_restrict_access' WHERE id = ?"
- % self.Name, (p['id'],))
-
- presharedKeyExist = len(p['preshared_key']) > 0
- rd = random.Random()
- uid = str(uuid.UUID(int=rd.getrandbits(128), version=4))
- if presharedKeyExist:
- with open(uid, "w+") as f:
- f.write(p['preshared_key'])
-
- subprocess.check_output(f"{self.Protocol} set {self.Name} peer {p['id']} allowed-ips {p['allowed_ip'].replace(' ', '')}{f' preshared-key {uid}' if presharedKeyExist else ''}",
- shell=True, stderr=subprocess.STDOUT)
- if presharedKeyExist: os.remove(uid)
- else:
- return ResponseObject(False, "Failed to allow access of peer " + i)
- if not self.__wgSave():
- return ResponseObject(False, "Failed to save configuration through WireGuard")
-
- self.getPeers()
- return ResponseObject(True, "Allow access successfully")
-
- def restrictPeers(self, listOfPublicKeys):
- numOfRestrictedPeers = 0
- numOfFailedToRestrictPeers = 0
- if not self.getStatus():
- self.toggleConfiguration()
- for p in listOfPublicKeys:
- found, pf = self.searchPeer(p)
- if found:
- try:
- subprocess.check_output(f"{self.Protocol} set {self.Name} peer {pf.id} remove",
- shell=True, stderr=subprocess.STDOUT)
- sqlUpdate("INSERT INTO '%s_restrict_access' SELECT * FROM '%s' WHERE id = ?" %
- (self.Name, self.Name,), (pf.id,))
- sqlUpdate("UPDATE '%s_restrict_access' SET status = 'stopped' WHERE id = ?" %
- (self.Name,), (pf.id,))
- sqlUpdate("DELETE FROM '%s' WHERE id = ?" % self.Name, (pf.id,))
- numOfRestrictedPeers += 1
- except Exception as e:
- numOfFailedToRestrictPeers += 1
-
- if not self.__wgSave():
- return ResponseObject(False, "Failed to save configuration through WireGuard")
-
- self.getPeers()
-
- if numOfRestrictedPeers == len(listOfPublicKeys):
- return ResponseObject(True, f"Restricted {numOfRestrictedPeers} peer(s)")
- return ResponseObject(False,
- f"Restricted {numOfRestrictedPeers} peer(s) successfully. Failed to restrict {numOfFailedToRestrictPeers} peer(s)")
- pass
-
- def deletePeers(self, listOfPublicKeys):
- numOfDeletedPeers = 0
- numOfFailedToDeletePeers = 0
- if not self.getStatus():
- self.toggleConfiguration()
- for p in listOfPublicKeys:
- found, pf = self.searchPeer(p)
- if found:
- try:
- subprocess.check_output(f"{self.Protocol} set {self.Name} peer {pf.id} remove",
- shell=True, stderr=subprocess.STDOUT)
- sqlUpdate("DELETE FROM '%s' WHERE id = ?" % self.Name, (pf.id,))
- numOfDeletedPeers += 1
- except Exception as e:
- numOfFailedToDeletePeers += 1
-
- if not self.__wgSave():
- return ResponseObject(False, "Failed to save configuration through WireGuard")
-
- self.getPeers()
-
- if numOfDeletedPeers == 0 and numOfFailedToDeletePeers == 0:
- return ResponseObject(False, "No peer(s) to delete found", responseCode=404)
-
- if numOfDeletedPeers == len(listOfPublicKeys):
- return ResponseObject(True, f"Deleted {numOfDeletedPeers} peer(s)")
- return ResponseObject(False,
- f"Deleted {numOfDeletedPeers} peer(s) successfully. Failed to delete {numOfFailedToDeletePeers} peer(s)")
-
- def __wgSave(self) -> tuple[bool, str] | tuple[bool, None]:
- try:
- subprocess.check_output(f"{self.Protocol}-quick save {self.Name}", shell=True, stderr=subprocess.STDOUT)
- return True, None
- except subprocess.CalledProcessError as e:
- return False, str(e)
-
- def getPeersLatestHandshake(self):
- if not self.getStatus():
- self.toggleConfiguration()
- try:
- latestHandshake = subprocess.check_output(f"{self.Protocol} show {self.Name} latest-handshakes",
- shell=True, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError:
- return "stopped"
- latestHandshake = latestHandshake.decode("UTF-8").split()
- count = 0
- now = datetime.now()
- time_delta = timedelta(minutes=2)
- for _ in range(int(len(latestHandshake) / 2)):
- minus = now - datetime.fromtimestamp(int(latestHandshake[count + 1]))
- if minus < time_delta:
- status = "running"
- else:
- status = "stopped"
- if int(latestHandshake[count + 1]) > 0:
- sqlUpdate("UPDATE '%s' SET latest_handshake = ?, status = ? WHERE id= ?" % self.Name
- , (str(minus).split(".", maxsplit=1)[0], status, latestHandshake[count],))
- else:
- sqlUpdate("UPDATE '%s' SET latest_handshake = 'No Handshake', status = ? WHERE id= ?" % self.Name
- , (status, latestHandshake[count],))
- count += 2
-
- def getPeersTransfer(self):
- if not self.getStatus():
- self.toggleConfiguration()
- try:
- data_usage = subprocess.check_output(f"{self.Protocol} show {self.Name} transfer",
- shell=True, stderr=subprocess.STDOUT)
- data_usage = data_usage.decode("UTF-8").split("\n")
- data_usage = [p.split("\t") for p in data_usage]
- for i in range(len(data_usage)):
- if len(data_usage[i]) == 3:
- cur_i = sqlSelect(
- "SELECT total_receive, total_sent, cumu_receive, cumu_sent, status FROM '%s' WHERE id= ? "
- % self.Name, (data_usage[i][0],)).fetchone()
- if cur_i is not None:
- cur_i = dict(cur_i)
- total_sent = cur_i['total_sent']
- total_receive = cur_i['total_receive']
- cur_total_sent = float(data_usage[i][2]) / (1024 ** 3)
- cur_total_receive = float(data_usage[i][1]) / (1024 ** 3)
- cumulative_receive = cur_i['cumu_receive'] + total_receive
- cumulative_sent = cur_i['cumu_sent'] + total_sent
- if total_sent <= cur_total_sent and total_receive <= cur_total_receive:
- total_sent = cur_total_sent
- total_receive = cur_total_receive
- else:
- sqlUpdate(
- "UPDATE '%s' SET cumu_receive = ?, cumu_sent = ?, cumu_data = ? WHERE id = ?" %
- self.Name, (cumulative_receive, cumulative_sent,
- cumulative_sent + cumulative_receive,
- data_usage[i][0],))
- total_sent = 0
- total_receive = 0
- _, p = self.searchPeer(data_usage[i][0])
- if p.total_receive != total_receive or p.total_sent != total_sent:
- sqlUpdate(
- "UPDATE '%s' SET total_receive = ?, total_sent = ?, total_data = ? WHERE id = ?"
- % self.Name, (total_receive, total_sent,
- total_receive + total_sent, data_usage[i][0],))
- except Exception as e:
- print(f"[WGDashboard] {self.Name} Error: {str(e)} {str(e.__traceback__)}")
-
- def getPeersEndpoint(self):
- if not self.getStatus():
- self.toggleConfiguration()
- try:
- data_usage = subprocess.check_output(f"{self.Protocol} show {self.Name} endpoints",
- shell=True, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError:
- return "stopped"
- data_usage = data_usage.decode("UTF-8").split()
- count = 0
- for _ in range(int(len(data_usage) / 2)):
- sqlUpdate("UPDATE '%s' SET endpoint = ? WHERE id = ?" % self.Name
- , (data_usage[count + 1], data_usage[count],))
- count += 2
-
- def toggleConfiguration(self) -> [bool, str]:
- self.getStatus()
- if self.Status:
- try:
- check = subprocess.check_output(f"{self.Protocol}-quick down {self.Name}",
- shell=True, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as exc:
- return False, str(exc.output.strip().decode("utf-8"))
- else:
- try:
- check = subprocess.check_output(f"{self.Protocol}-quick up {self.Name}", shell=True, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as exc:
- return False, str(exc.output.strip().decode("utf-8"))
- self.__parseConfigurationFile()
- self.getStatus()
- return True, None
-
- def getPeersList(self):
- self.getPeers()
- return self.Peers
-
- def getRestrictedPeersList(self) -> list:
- self.getRestrictedPeers()
- return self.RestrictedPeers
-
- def toJson(self):
- self.Status = self.getStatus()
- return {
- "Status": self.Status,
- "Name": self.Name,
- "PrivateKey": self.PrivateKey,
- "PublicKey": self.PublicKey,
- "Address": self.Address,
- "ListenPort": self.ListenPort,
- "PreUp": self.PreUp,
- "PreDown": self.PreDown,
- "PostUp": self.PostUp,
- "PostDown": self.PostDown,
- "SaveConfig": self.SaveConfig,
- "DataUsage": {
- "Total": sum(list(map(lambda x: x.cumu_data + x.total_data, self.Peers))),
- "Sent": sum(list(map(lambda x: x.cumu_sent + x.total_sent, self.Peers))),
- "Receive": sum(list(map(lambda x: x.cumu_receive + x.total_receive, self.Peers)))
- },
- "ConnectedPeers": len(list(filter(lambda x: x.status == "running", self.Peers))),
- "TotalPeers": len(self.Peers),
- "Protocol": self.Protocol,
- "Table": self.Table,
- }
-
- def backupConfigurationFile(self) -> tuple[bool, dict[str, str]]:
- if not os.path.exists(os.path.join(self.__getProtocolPath(), 'WGDashboard_Backup')):
- os.mkdir(os.path.join(self.__getProtocolPath(), 'WGDashboard_Backup'))
- time = datetime.now().strftime("%Y%m%d%H%M%S")
- shutil.copy(
- self.configPath,
- os.path.join(self.__getProtocolPath(), 'WGDashboard_Backup', f'{self.Name}_{time}.conf')
- )
- with open(os.path.join(self.__getProtocolPath(), 'WGDashboard_Backup', f'{self.Name}_{time}.sql'), 'w+') as f:
- for l in self.__dumpDatabase():
- f.write(l + "\n")
-
- return True, {
- "filename": f'{self.Name}_{time}.conf',
- "backupDate": datetime.now().strftime("%Y%m%d%H%M%S")
- }
-
- def getBackups(self, databaseContent: bool = False) -> list[dict[str: str, str: str, str: str]]:
- backups = []
-
- directory = os.path.join(self.__getProtocolPath(), 'WGDashboard_Backup')
- files = [(file, os.path.getctime(os.path.join(directory, file)))
- for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
- files.sort(key=lambda x: x[1], reverse=True)
-
- for f, ct in files:
- if RegexMatch(f"^({self.Name})_(.*)\\.(conf)$", f):
- s = re.search(f"^({self.Name})_(.*)\\.(conf)$", f)
- date = s.group(2)
- d = {
- "filename": f,
- "backupDate": date,
- "content": open(os.path.join(self.__getProtocolPath(), 'WGDashboard_Backup', f), 'r').read()
- }
- if f.replace(".conf", ".sql") in list(os.listdir(directory)):
- d['database'] = True
- if databaseContent:
- d['databaseContent'] = open(os.path.join(self.__getProtocolPath(), 'WGDashboard_Backup', f.replace(".conf", ".sql")), 'r').read()
- backups.append(d)
-
- return backups
-
- def restoreBackup(self, backupFileName: str) -> bool:
- backups = list(map(lambda x : x['filename'], self.getBackups()))
- if backupFileName not in backups:
- return False
- if self.Status:
- self.toggleConfiguration()
- target = os.path.join(self.__getProtocolPath(), 'WGDashboard_Backup', backupFileName)
- targetSQL = os.path.join(self.__getProtocolPath(), 'WGDashboard_Backup', backupFileName.replace(".conf", ".sql"))
- if not os.path.exists(target):
- return False
- targetContent = open(target, 'r').read()
- try:
- with open(self.configPath, 'w') as f:
- f.write(targetContent)
- except Exception as e:
- return False
- self.__parseConfigurationFile()
- self.__dropDatabase()
- self.__importDatabase(targetSQL)
- self.__initPeersList()
- return True
-
- def deleteBackup(self, backupFileName: str) -> bool:
- backups = list(map(lambda x : x['filename'], self.getBackups()))
- if backupFileName not in backups:
- return False
- try:
- os.remove(os.path.join(self.__getProtocolPath(), 'WGDashboard_Backup', backupFileName))
- except Exception as e:
- return False
- return True
-
- def downloadBackup(self, backupFileName: str) -> tuple[bool, str] | tuple[bool, None]:
- backup = list(filter(lambda x : x['filename'] == backupFileName, self.getBackups()))
- if len(backup) == 0:
- return False, None
- zip = f'{str(uuid.UUID(int=random.Random().getrandbits(128), version=4))}.zip'
- with ZipFile(os.path.join('download', zip), 'w') as zipF:
- zipF.write(
- os.path.join(self.__getProtocolPath(), 'WGDashboard_Backup', backup[0]['filename']),
- os.path.basename(os.path.join(self.__getProtocolPath(), 'WGDashboard_Backup', backup[0]['filename']))
- )
- if backup[0]['database']:
- zipF.write(
- os.path.join(self.__getProtocolPath(), 'WGDashboard_Backup', backup[0]['filename'].replace('.conf', '.sql')),
- os.path.basename(os.path.join(self.__getProtocolPath(), 'WGDashboard_Backup', backup[0]['filename'].replace('.conf', '.sql')))
- )
-
- return True, zip
-
- def updateConfigurationSettings(self, newData: dict) -> tuple[bool, str]:
- if self.Status:
- self.toggleConfiguration()
- original = []
- dataChanged = False
- with open(self.configPath, 'r') as f:
- original = [l.rstrip("\n") for l in f.readlines()]
- allowEdit = ["Address", "PreUp", "PostUp", "PreDown", "PostDown", "ListenPort", "Table"]
- if self.Protocol == 'awg':
- allowEdit += ["Jc", "Jmin", "Jmax", "S1", "S2", "H1", "H2", "H3", "H4"]
- start = original.index("[Interface]")
- try:
- end = original.index("[Peer]")
- except ValueError as e:
- end = len(original)
- new = ["[Interface]"]
- peerFound = False
- for line in range(start, end):
- split = re.split(r'\s*=\s*', original[line], 1)
- if len(split) == 2:
- if split[0] not in allowEdit:
- new.append(original[line])
- for key in allowEdit:
- new.insert(1, f"{key} = {str(newData[key]).strip()}")
- new.append("")
- for line in range(end, len(original)):
- new.append(original[line])
- self.backupConfigurationFile()
- with open(self.configPath, 'w') as f:
- f.write("\n".join(new))
-
- status, msg = self.toggleConfiguration()
- if not status:
- return False, msg
- for i in allowEdit:
- if isinstance(getattr(self, i), bool):
- setattr(self, i, _strToBool(newData[i]))
- else:
- setattr(self, i, str(newData[i]))
- return True, ""
-
- def deleteConfiguration(self):
- if self.getStatus():
- self.toggleConfiguration()
- os.remove(self.configPath)
- self.__dropDatabase()
- return True
-
- def renameConfiguration(self, newConfigurationName) -> tuple[bool, str]:
- if newConfigurationName in WireguardConfigurations.keys():
- return False, "Configuration name already exist"
- try:
- if self.getStatus():
- self.toggleConfiguration()
- self.createDatabase(newConfigurationName)
- sqlUpdate(f'INSERT INTO "{newConfigurationName}" SELECT * FROM "{self.Name}"')
- sqlUpdate(f'INSERT INTO "{newConfigurationName}_restrict_access" SELECT * FROM "{self.Name}_restrict_access"')
- sqlUpdate(f'INSERT INTO "{newConfigurationName}_deleted" SELECT * FROM "{self.Name}_deleted"')
- sqlUpdate(f'INSERT INTO "{newConfigurationName}_transfer" SELECT * FROM "{self.Name}_transfer"')
- AllPeerJobs.updateJobConfigurationName(self.Name, newConfigurationName)
- shutil.copy(
- self.configPath,
- os.path.join(self.__getProtocolPath(), f'{newConfigurationName}.conf')
- )
- self.deleteConfiguration()
- except Exception as e:
- return False, str(e)
- return True, None
-
- def getNumberOfAvailableIP(self):
- if len(self.Address) < 0:
- return False, None
- existedAddress = set()
- availableAddress = {}
- for p in self.Peers + self.getRestrictedPeersList():
- peerAllowedIP = p.allowed_ip.split(',')
- for pip in peerAllowedIP:
- ppip = pip.strip().split('/')
- if len(ppip) == 2:
- try:
- check = ipaddress.ip_network(ppip[0])
- existedAddress.add(check)
- except Exception as e:
- print(f"[WGDashboard] Error: {self.Name} peer {p.id} have invalid ip")
- configurationAddresses = self.Address.split(',')
- for ca in configurationAddresses:
- ca = ca.strip()
- caSplit = ca.split('/')
- try:
- if len(caSplit) == 2:
- network = ipaddress.ip_network(ca, False)
- existedAddress.add(ipaddress.ip_network(caSplit[0]))
- availableAddress[ca] = network.num_addresses
- for p in existedAddress:
- if p.version == network.version and p.subnet_of(network):
- availableAddress[ca] -= 1
+ curKeys = list(WireguardConfigurations.keys())
+ for name in curKeys:
+ if name in WireguardConfigurations.keys() and WireguardConfigurations.get(name) is not None:
+ c = WireguardConfigurations.get(name)
+ if c.getStatus():
+ c.getPeersLatestHandshake()
+ c.getPeersTransfer()
+ c.getPeersEndpoint()
+ c.getPeers()
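+                    # Every sixth pass (roughly once a minute with the 10-second sleep below), also persist peer traffic and endpoint history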
+ if delay == 6:
+ c.logPeersTraffic()
+ c.logPeersHistoryEndpoint()
+ c.getRestrictedPeersList()
except Exception as e:
- print(e)
- print(f"[WGDashboard] Error: Failed to parse IP address {ca} from {self.Name}")
- return True, availableAddress
-
- def getAvailableIP(self, threshold = 255):
- if len(self.Address) < 0:
- return False, None
- existedAddress = set()
- availableAddress = {}
- for p in self.Peers + self.getRestrictedPeersList():
- peerAllowedIP = p.allowed_ip.split(',')
- for pip in peerAllowedIP:
- ppip = pip.strip().split('/')
- if len(ppip) == 2:
- try:
- check = ipaddress.ip_network(ppip[0])
- existedAddress.add(check.compressed)
- except Exception as e:
- print(f"[WGDashboard] Error: {self.Name} peer {p.id} have invalid ip")
- configurationAddresses = self.Address.split(',')
- for ca in configurationAddresses:
- ca = ca.strip()
- caSplit = ca.split('/')
+            app.logger.error(f"[WGDashboard] Background Thread #1 Error: {str(e)}")
+
+ if delay == 6:
+ delay = 1
+ else:
+ delay += 1
+ time.sleep(10)
+
+def peerJobScheduleBackgroundThread():
+ with app.app_context():
+        app.logger.info("Background Thread #2 Started")
+        app.logger.info("Background Thread #2 PID: " + str(threading.get_native_id()))
+ time.sleep(10)
+ while True:
try:
- if len(caSplit) == 2:
- network = ipaddress.ip_network(ca, False)
- existedAddress.add(ipaddress.ip_network(caSplit[0]).compressed)
- if threshold == -1:
- availableAddress[ca] = filter(lambda ip : ip not in existedAddress,
- map(lambda iph : ipaddress.ip_network(iph).compressed, network.hosts()))
- else:
- availableAddress[ca] = list(islice(filter(lambda ip : ip not in existedAddress,
- map(lambda iph : ipaddress.ip_network(iph).compressed, network.hosts())), threshold))
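+                # Run any peer jobs that are due, then wait three minutes before checking again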
+ AllPeerJobs.runJob()
+ time.sleep(180)
except Exception as e:
- print(e)
- print(f"[WGDashboard] Error: Failed to parse IP address {ca} from {self.Name}")
- print("Generated IP")
- return True, availableAddress
+            app.logger.error(f"Background Thread #2 Error: {str(e)}")
- def getRealtimeTrafficUsage(self):
- stats = psutil.net_io_counters(pernic=True, nowrap=True)
- if self.Name in stats.keys():
- stat = stats[self.Name]
- recv1 = stat.bytes_recv
- sent1 = stat.bytes_sent
- time.sleep(1)
- stats = psutil.net_io_counters(pernic=True, nowrap=True)
- if self.Name in stats.keys():
- stat = stats[self.Name]
- recv2 = stat.bytes_recv
- sent2 = stat.bytes_sent
- net_in = round((recv2 - recv1) / 1024 / 1024, 3)
- net_out = round((sent2 - sent1) / 1024 / 1024, 3)
- return {
- "sent": net_out,
- "recv": net_in
- }
- else:
- return { "sent": 0, "recv": 0 }
- else:
- return { "sent": 0, "recv": 0 }
-
-"""
-AmneziaWG Configuration
-"""
-class AmneziaWireguardConfiguration(WireguardConfiguration):
- def __init__(self, name: str = None, data: dict = None, backup: dict = None, startup: bool = False):
- self.Jc = ""
- self.Jmin = ""
- self.Jmax = ""
- self.S1 = ""
- self.S2 = ""
- self.H1 = ""
- self.H2 = ""
- self.H3 = ""
- self.H4 = ""
-
- super().__init__(name, data, backup, startup, wg=False)
+def gunicornConfig():
+ _, app_ip = DashboardConfig.GetConfig("Server", "app_ip")
+ _, app_port = DashboardConfig.GetConfig("Server", "app_port")
+ return app_ip, app_port
- def toJson(self):
- self.Status = self.getStatus()
- return {
- "Status": self.Status,
- "Name": self.Name,
- "PrivateKey": self.PrivateKey,
- "PublicKey": self.PublicKey,
- "Address": self.Address,
- "ListenPort": self.ListenPort,
- "PreUp": self.PreUp,
- "PreDown": self.PreDown,
- "PostUp": self.PostUp,
- "PostDown": self.PostDown,
- "SaveConfig": self.SaveConfig,
- "DataUsage": {
- "Total": sum(list(map(lambda x: x.cumu_data + x.total_data, self.Peers))),
- "Sent": sum(list(map(lambda x: x.cumu_sent + x.total_sent, self.Peers))),
- "Receive": sum(list(map(lambda x: x.cumu_receive + x.total_receive, self.Peers)))
- },
- "ConnectedPeers": len(list(filter(lambda x: x.status == "running", self.Peers))),
- "TotalPeers": len(self.Peers),
- "Table": self.Table,
- "Protocol": self.Protocol,
- "Jc": self.Jc,
- "Jmin": self.Jmin,
- "Jmax": self.Jmax,
- "S1": self.S1,
- "S2": self.S2,
- "H1": self.H1,
- "H2": self.H2,
- "H3": self.H3,
- "H4": self.H4
- }
+def ProtocolsEnabled() -> list[str]:
+ from shutil import which
+ protocols = []
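+    # A protocol is treated as enabled only when both its CLI tool and its *-quick helper are on PATH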
+ if which('awg') is not None and which('awg-quick') is not None:
+ protocols.append("awg")
+ if which('wg') is not None and which('wg-quick') is not None:
+ protocols.append("wg")
+ return protocols
- def createDatabase(self, dbName = None):
- if dbName is None:
- dbName = self.Name
-
- existingTables = sqlSelect("SELECT name FROM sqlite_master WHERE type='table'").fetchall()
- existingTables = [t['name'] for t in existingTables]
- if dbName not in existingTables:
- sqlUpdate(
- """
- CREATE TABLE '%s'(
- id VARCHAR NOT NULL, private_key VARCHAR NULL, DNS VARCHAR NULL, advanced_security VARCHAR NULL,
- endpoint_allowed_ip VARCHAR NULL, name VARCHAR NULL, total_receive FLOAT NULL,
- total_sent FLOAT NULL, total_data FLOAT NULL, endpoint VARCHAR NULL,
- status VARCHAR NULL, latest_handshake VARCHAR NULL, allowed_ip VARCHAR NULL,
- cumu_receive FLOAT NULL, cumu_sent FLOAT NULL, cumu_data FLOAT NULL, mtu INT NULL,
- keepalive INT NULL, remote_endpoint VARCHAR NULL, preshared_key VARCHAR NULL,
- PRIMARY KEY (id)
- )
- """ % dbName
- )
-
- if f'{dbName}_restrict_access' not in existingTables:
- sqlUpdate(
- """
- CREATE TABLE '%s_restrict_access' (
- id VARCHAR NOT NULL, private_key VARCHAR NULL, DNS VARCHAR NULL, advanced_security VARCHAR NULL,
- endpoint_allowed_ip VARCHAR NULL, name VARCHAR NULL, total_receive FLOAT NULL,
- total_sent FLOAT NULL, total_data FLOAT NULL, endpoint VARCHAR NULL,
- status VARCHAR NULL, latest_handshake VARCHAR NULL, allowed_ip VARCHAR NULL,
- cumu_receive FLOAT NULL, cumu_sent FLOAT NULL, cumu_data FLOAT NULL, mtu INT NULL,
- keepalive INT NULL, remote_endpoint VARCHAR NULL, preshared_key VARCHAR NULL,
- PRIMARY KEY (id)
- )
- """ % dbName
- )
- if f'{dbName}_transfer' not in existingTables:
- sqlUpdate(
- """
- CREATE TABLE '%s_transfer' (
- id VARCHAR NOT NULL, total_receive FLOAT NULL,
- total_sent FLOAT NULL, total_data FLOAT NULL,
- cumu_receive FLOAT NULL, cumu_sent FLOAT NULL, cumu_data FLOAT NULL, time DATETIME
- )
- """ % dbName
- )
- if f'{dbName}_deleted' not in existingTables:
- sqlUpdate(
- """
- CREATE TABLE '%s_deleted' (
- id VARCHAR NOT NULL, private_key VARCHAR NULL, DNS VARCHAR NULL, advanced_security VARCHAR NULL,
- endpoint_allowed_ip VARCHAR NULL, name VARCHAR NULL, total_receive FLOAT NULL,
- total_sent FLOAT NULL, total_data FLOAT NULL, endpoint VARCHAR NULL,
- status VARCHAR NULL, latest_handshake VARCHAR NULL, allowed_ip VARCHAR NULL,
- cumu_receive FLOAT NULL, cumu_sent FLOAT NULL, cumu_data FLOAT NULL, mtu INT NULL,
- keepalive INT NULL, remote_endpoint VARCHAR NULL, preshared_key VARCHAR NULL,
- PRIMARY KEY (id)
- )
- """ % dbName
- )
-
- def getPeers(self):
- if self.configurationFileChanged():
- self.Peers = []
- with open(self.configPath, 'r') as configFile:
- p = []
- pCounter = -1
- content = configFile.read().split('\n')
+def InitWireguardConfigurationsList(startup: bool = False):
+ if os.path.exists(DashboardConfig.GetConfig("Server", "wg_conf_path")[1]):
+ confs = os.listdir(DashboardConfig.GetConfig("Server", "wg_conf_path")[1])
+ confs.sort()
+ for i in confs:
+ if RegexMatch("^(.{1,}).(conf)$", i):
+ i = i.replace('.conf', '')
try:
- peerStarts = content.index("[Peer]")
- content = content[peerStarts:]
- for i in content:
- if not RegexMatch("#(.*)", i) and not RegexMatch(";(.*)", i):
- if i == "[Peer]":
- pCounter += 1
- p.append({})
- p[pCounter]["name"] = ""
- else:
- if len(i) > 0:
- split = re.split(r'\s*=\s*', i, 1)
- if len(split) == 2:
- p[pCounter][split[0]] = split[1]
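+                    # Reload an already-registered configuration only when its file changed on disk; otherwise register it for the first time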
+ if i in WireguardConfigurations.keys():
+ if WireguardConfigurations[i].configurationFileChanged():
+ with app.app_context():
+ WireguardConfigurations[i] = WireguardConfiguration(DashboardConfig, AllPeerJobs, AllPeerShareLinks, DashboardWebHooks, i)
+ else:
+ with app.app_context():
+ WireguardConfigurations[i] = WireguardConfiguration(DashboardConfig, AllPeerJobs, AllPeerShareLinks, DashboardWebHooks, i, startup=startup)
+ except WireguardConfiguration.InvalidConfigurationFileException as e:
+                    app.logger.error(f"{i} has an invalid configuration file.")
- if RegexMatch("#Name# = (.*)", i):
- split = re.split(r'\s*=\s*', i, 1)
- if len(split) == 2:
- p[pCounter]["name"] = split[1]
-
- for i in p:
- if "PublicKey" in i.keys():
- checkIfExist = sqlSelect("SELECT * FROM '%s' WHERE id = ?" % self.Name,
- ((i['PublicKey']),)).fetchone()
- if checkIfExist is None:
- newPeer = {
- "id": i['PublicKey'],
- "advanced_security": i.get('AdvancedSecurity', 'off'),
- "private_key": "",
- "DNS": DashboardConfig.GetConfig("Peers", "peer_global_DNS")[1],
- "endpoint_allowed_ip": DashboardConfig.GetConfig("Peers", "peer_endpoint_allowed_ip")[
- 1],
- "name": i.get("name"),
- "total_receive": 0,
- "total_sent": 0,
- "total_data": 0,
- "endpoint": "N/A",
- "status": "stopped",
- "latest_handshake": "N/A",
- "allowed_ip": i.get("AllowedIPs", "N/A"),
- "cumu_receive": 0,
- "cumu_sent": 0,
- "cumu_data": 0,
- "traffic": [],
- "mtu": DashboardConfig.GetConfig("Peers", "peer_mtu")[1],
- "keepalive": DashboardConfig.GetConfig("Peers", "peer_keep_alive")[1],
- "remote_endpoint": DashboardConfig.GetConfig("Peers", "remote_endpoint")[1],
- "preshared_key": i["PresharedKey"] if "PresharedKey" in i.keys() else ""
- }
- sqlUpdate(
- """
- INSERT INTO '%s'
- VALUES (:id, :private_key, :DNS, :advanced_security, :endpoint_allowed_ip, :name, :total_receive, :total_sent,
- :total_data, :endpoint, :status, :latest_handshake, :allowed_ip, :cumu_receive, :cumu_sent,
- :cumu_data, :mtu, :keepalive, :remote_endpoint, :preshared_key);
- """ % self.Name
- , newPeer)
- self.Peers.append(AmneziaWGPeer(newPeer, self))
- else:
- sqlUpdate("UPDATE '%s' SET allowed_ip = ? WHERE id = ?" % self.Name,
- (i.get("AllowedIPs", "N/A"), i['PublicKey'],))
- self.Peers.append(AmneziaWGPeer(checkIfExist, self))
- except Exception as e:
- if __name__ == '__main__':
- print(f"[WGDashboard] {self.Name} Error: {str(e)}")
- else:
- self.Peers.clear()
- checkIfExist = sqlSelect("SELECT * FROM '%s'" % self.Name).fetchall()
- for i in checkIfExist:
- self.Peers.append(AmneziaWGPeer(i, self))
-
- def addPeers(self, peers: list) -> tuple[bool, dict]:
- result = {
- "message": None,
- "peers": []
- }
- try:
- for i in peers:
- newPeer = {
- "id": i['id'],
- "private_key": i['private_key'],
- "DNS": i['DNS'],
- "endpoint_allowed_ip": i['endpoint_allowed_ip'],
- "name": i['name'],
- "total_receive": 0,
- "total_sent": 0,
- "total_data": 0,
- "endpoint": "N/A",
- "status": "stopped",
- "latest_handshake": "N/A",
- "allowed_ip": i.get("allowed_ip", "N/A"),
- "cumu_receive": 0,
- "cumu_sent": 0,
- "cumu_data": 0,
- "traffic": [],
- "mtu": i['mtu'],
- "keepalive": i['keepalive'],
- "remote_endpoint": DashboardConfig.GetConfig("Peers", "remote_endpoint")[1],
- "preshared_key": i["preshared_key"],
- "advanced_security": i['advanced_security']
- }
- sqlUpdate(
- """
- INSERT INTO '%s'
- VALUES (:id, :private_key, :DNS, :advanced_security, :endpoint_allowed_ip, :name, :total_receive, :total_sent,
- :total_data, :endpoint, :status, :latest_handshake, :allowed_ip, :cumu_receive, :cumu_sent,
- :cumu_data, :mtu, :keepalive, :remote_endpoint, :preshared_key);
- """ % self.Name
- , newPeer)
- for p in peers:
- presharedKeyExist = len(p['preshared_key']) > 0
- rd = random.Random()
- uid = str(uuid.UUID(int=rd.getrandbits(128), version=4))
- if presharedKeyExist:
- with open(uid, "w+") as f:
- f.write(p['preshared_key'])
-
- subprocess.check_output(
- f"{self.Protocol} set {self.Name} peer {p['id']} allowed-ips {p['allowed_ip'].replace(' ', '')}{f' preshared-key {uid}' if presharedKeyExist else ''}",
- shell=True, stderr=subprocess.STDOUT)
- if presharedKeyExist:
- os.remove(uid)
- subprocess.check_output(
- f"{self.Protocol}-quick save {self.Name}", shell=True, stderr=subprocess.STDOUT)
- self.getPeersList()
- for p in peers:
- p = self.searchPeer(p['id'])
- if p[0]:
- result['peers'].append(p[1])
- return True, result
- except Exception as e:
- result['message'] = str(e)
- return False, result
-
- def getRestrictedPeers(self):
- self.RestrictedPeers = []
- restricted = sqlSelect("SELECT * FROM '%s_restrict_access'" % self.Name).fetchall()
- for i in restricted:
- self.RestrictedPeers.append(AmneziaWGPeer(i, self))
-
-"""
-Peer
-"""
-class Peer:
- def __init__(self, tableData, configuration: WireguardConfiguration):
- self.configuration = configuration
- self.id = tableData["id"]
- self.private_key = tableData["private_key"]
- self.DNS = tableData["DNS"]
- self.endpoint_allowed_ip = tableData["endpoint_allowed_ip"]
- self.name = tableData["name"]
- self.total_receive = tableData["total_receive"]
- self.total_sent = tableData["total_sent"]
- self.total_data = tableData["total_data"]
- self.endpoint = tableData["endpoint"]
- self.status = tableData["status"]
- self.latest_handshake = tableData["latest_handshake"]
- self.allowed_ip = tableData["allowed_ip"]
- self.cumu_receive = tableData["cumu_receive"]
- self.cumu_sent = tableData["cumu_sent"]
- self.cumu_data = tableData["cumu_data"]
- self.mtu = tableData["mtu"]
- self.keepalive = tableData["keepalive"]
- self.remote_endpoint = tableData["remote_endpoint"]
- self.preshared_key = tableData["preshared_key"]
- self.jobs: list[PeerJob] = []
- self.ShareLink: list[PeerShareLink] = []
- self.getJobs()
- self.getShareLink()
-
- def toJson(self):
- self.getJobs()
- self.getShareLink()
- return self.__dict__
-
- def __repr__(self):
- return str(self.toJson())
-
- def updatePeer(self, name: str, private_key: str,
- preshared_key: str,
- dns_addresses: str, allowed_ip: str, endpoint_allowed_ip: str, mtu: int,
- keepalive: int) -> ResponseObject:
- if not self.configuration.getStatus():
- self.configuration.toggleConfiguration()
-
- existingAllowedIps = [item for row in list(
- map(lambda x: [q.strip() for q in x.split(',')],
- map(lambda y: y.allowed_ip,
- list(filter(lambda k: k.id != self.id, self.configuration.getPeersList()))))) for item in row]
-
- if allowed_ip in existingAllowedIps:
- return ResponseObject(False, "Allowed IP already taken by another peer")
- if not ValidateIPAddressesWithRange(endpoint_allowed_ip):
- return ResponseObject(False, f"Endpoint Allowed IPs format is incorrect")
- if len(dns_addresses) > 0 and not ValidateDNSAddress(dns_addresses):
- return ResponseObject(False, f"DNS format is incorrect")
- if mtu < 0 or mtu > 1460:
- return ResponseObject(False, "MTU format is not correct")
- if keepalive < 0:
- return ResponseObject(False, "Persistent Keepalive format is not correct")
- if len(private_key) > 0:
- pubKey = GenerateWireguardPublicKey(private_key)
- if not pubKey[0] or pubKey[1] != self.id:
- return ResponseObject(False, "Private key does not match with the public key")
- try:
- rd = random.Random()
- uid = str(uuid.UUID(int=rd.getrandbits(128), version=4))
- pskExist = len(preshared_key) > 0
-
- if pskExist:
- with open(uid, "w+") as f:
- f.write(preshared_key)
- newAllowedIPs = allowed_ip.replace(" ", "")
- updateAllowedIp = subprocess.check_output(
- f"{self.configuration.Protocol} set {self.configuration.Name} peer {self.id} allowed-ips {newAllowedIPs} {f'preshared-key {uid}' if pskExist else 'preshared-key /dev/null'}",
- shell=True, stderr=subprocess.STDOUT)
-
- if pskExist: os.remove(uid)
- if len(updateAllowedIp.decode().strip("\n")) != 0:
- return ResponseObject(False,
- "Update peer failed when updating Allowed IPs")
- saveConfig = subprocess.check_output(f"{self.configuration.Protocol}-quick save {self.configuration.Name}",
- shell=True, stderr=subprocess.STDOUT)
- if f"wg showconf {self.configuration.Name}" not in saveConfig.decode().strip('\n'):
- return ResponseObject(False,
- "Update peer failed when saving the configuration")
- sqlUpdate(
- '''UPDATE '%s' SET name = ?, private_key = ?, DNS = ?, endpoint_allowed_ip = ?, mtu = ?,
- keepalive = ?, preshared_key = ? WHERE id = ?''' % self.configuration.Name,
- (name, private_key, dns_addresses, endpoint_allowed_ip, mtu,
- keepalive, preshared_key, self.id,)
- )
- return ResponseObject()
- except subprocess.CalledProcessError as exc:
- return ResponseObject(False, exc.output.decode("UTF-8").strip())
-
- def downloadPeer(self) -> dict[str, str]:
- filename = self.name
- if len(filename) == 0:
- filename = "UntitledPeer"
- filename = "".join(filename.split(' '))
- filename = f"{filename}"
- illegal_filename = [".", ",", "/", "?", "<", ">", "\\", ":", "*", '|' '\"', "com1", "com2", "com3",
- "com4", "com5", "com6", "com7", "com8", "com9", "lpt1", "lpt2", "lpt3", "lpt4",
- "lpt5", "lpt6", "lpt7", "lpt8", "lpt9", "con", "nul", "prn"]
- for i in illegal_filename:
- filename = filename.replace(i, "")
-
- finalFilename = ""
- for i in filename:
- if re.match("^[a-zA-Z0-9_=+.-]$", i):
- finalFilename += i
-
- peerConfiguration = f'''[Interface]
-PrivateKey = {self.private_key}
-Address = {self.allowed_ip}
-MTU = {str(self.mtu)}
-'''
- if len(self.DNS) > 0:
- peerConfiguration += f"DNS = {self.DNS}\n"
-
- peerConfiguration += f'''
-[Peer]
-PublicKey = {self.configuration.PublicKey}
-AllowedIPs = {self.endpoint_allowed_ip}
-Endpoint = {DashboardConfig.GetConfig("Peers", "remote_endpoint")[1]}:{self.configuration.ListenPort}
-PersistentKeepalive = {str(self.keepalive)}
-'''
- if len(self.preshared_key) > 0:
- peerConfiguration += f"PresharedKey = {self.preshared_key}\n"
- return {
- "fileName": finalFilename,
- "file": peerConfiguration
- }
-
- def getJobs(self):
- self.jobs = AllPeerJobs.searchJob(self.configuration.Name, self.id)
-
- def getShareLink(self):
- self.ShareLink = AllPeerShareLinks.getLink(self.configuration.Name, self.id)
-
- def resetDataUsage(self, type):
- try:
- if type == "total":
- sqlUpdate("UPDATE '%s' SET total_data = 0, cumu_data = 0, total_receive = 0, cumu_receive = 0, total_sent = 0, cumu_sent = 0 WHERE id = ?" % self.configuration.Name, (self.id, ))
- self.total_data = 0
- self.total_receive = 0
- self.total_sent = 0
- self.cumu_data = 0
- self.cumu_sent = 0
- self.cumu_receive = 0
- elif type == "receive":
- sqlUpdate("UPDATE '%s' SET total_receive = 0, cumu_receive = 0 WHERE id = ?" % self.configuration.Name, (self.id, ))
- self.cumu_receive = 0
- self.total_receive = 0
- elif type == "sent":
- sqlUpdate("UPDATE '%s' SET total_sent = 0, cumu_sent = 0 WHERE id = ?" % self.configuration.Name, (self.id, ))
- self.cumu_sent = 0
- self.total_sent = 0
- else:
- return False
- except Exception as e:
- print(e)
- return False
-
- return True
-
-class AmneziaWGPeer(Peer):
- def __init__(self, tableData, configuration: AmneziaWireguardConfiguration):
- self.advanced_security = tableData["advanced_security"]
- super().__init__(tableData, configuration)
-
- def downloadPeer(self) -> dict[str, str]:
- filename = self.name
- if len(filename) == 0:
- filename = "UntitledPeer"
- filename = "".join(filename.split(' '))
- filename = f"{filename}_{self.configuration.Name}"
- illegal_filename = [".", ",", "/", "?", "<", ">", "\\", ":", "*", '|' '\"', "com1", "com2", "com3",
- "com4", "com5", "com6", "com7", "com8", "com9", "lpt1", "lpt2", "lpt3", "lpt4",
- "lpt5", "lpt6", "lpt7", "lpt8", "lpt9", "con", "nul", "prn"]
- for i in illegal_filename:
- filename = filename.replace(i, "")
-
- finalFilename = ""
- for i in filename:
- if re.match("^[a-zA-Z0-9_=+.-]$", i):
- finalFilename += i
-
- peerConfiguration = f'''[Interface]
-PrivateKey = {self.private_key}
-Address = {self.allowed_ip}
-MTU = {str(self.mtu)}
-Jc = {self.configuration.Jc}
-Jmin = {self.configuration.Jmin}
-Jmax = {self.configuration.Jmax}
-S1 = {self.configuration.S1}
-S2 = {self.configuration.S2}
-H1 = {self.configuration.H1}
-H2 = {self.configuration.H2}
-H3 = {self.configuration.H3}
-H4 = {self.configuration.H4}
-'''
- if len(self.DNS) > 0:
- peerConfiguration += f"DNS = {self.DNS}\n"
- peerConfiguration += f'''
-[Peer]
-PublicKey = {self.configuration.PublicKey}
-AllowedIPs = {self.endpoint_allowed_ip}
-Endpoint = {DashboardConfig.GetConfig("Peers", "remote_endpoint")[1]}:{self.configuration.ListenPort}
-PersistentKeepalive = {str(self.keepalive)}
-'''
- if len(self.preshared_key) > 0:
- peerConfiguration += f"PresharedKey = {self.preshared_key}\n"
- return {
- "fileName": finalFilename,
- "file": peerConfiguration
- }
-
- def updatePeer(self, name: str, private_key: str,
- preshared_key: str,
- dns_addresses: str, allowed_ip: str, endpoint_allowed_ip: str, mtu: int,
- keepalive: int, advanced_security: str) -> ResponseObject:
- if not self.configuration.getStatus():
- self.configuration.toggleConfiguration()
-
- existingAllowedIps = [item for row in list(
- map(lambda x: [q.strip() for q in x.split(',')],
- map(lambda y: y.allowed_ip,
- list(filter(lambda k: k.id != self.id, self.configuration.getPeersList()))))) for item in row]
-
- if allowed_ip in existingAllowedIps:
- return ResponseObject(False, "Allowed IP already taken by another peer")
- if not ValidateIPAddressesWithRange(endpoint_allowed_ip):
- return ResponseObject(False, f"Endpoint Allowed IPs format is incorrect")
- if len(dns_addresses) > 0 and not ValidateDNSAddress(dns_addresses):
- return ResponseObject(False, f"DNS format is incorrect")
- if mtu < 0 or mtu > 1460:
- return ResponseObject(False, "MTU format is not correct")
- if keepalive < 0:
- return ResponseObject(False, "Persistent Keepalive format is not correct")
- if advanced_security != "on" and advanced_security != "off":
- return ResponseObject(False, "Advanced Security can only be on or off")
- if len(private_key) > 0:
- pubKey = GenerateWireguardPublicKey(private_key)
- if not pubKey[0] or pubKey[1] != self.id:
- return ResponseObject(False, "Private key does not match with the public key")
- try:
- rd = random.Random()
- uid = str(uuid.UUID(int=rd.getrandbits(128), version=4))
- pskExist = len(preshared_key) > 0
-
- if pskExist:
- with open(uid, "w+") as f:
- f.write(preshared_key)
- newAllowedIPs = allowed_ip.replace(" ", "")
- updateAllowedIp = subprocess.check_output(
- f"{self.configuration.Protocol} set {self.configuration.Name} peer {self.id} allowed-ips {newAllowedIPs} {f'preshared-key {uid}' if pskExist else 'preshared-key /dev/null'}",
- shell=True, stderr=subprocess.STDOUT)
-
- if pskExist: os.remove(uid)
-
- if len(updateAllowedIp.decode().strip("\n")) != 0:
- return ResponseObject(False,
- "Update peer failed when updating Allowed IPs")
- saveConfig = subprocess.check_output(f"{self.configuration.Protocol}-quick save {self.configuration.Name}",
- shell=True, stderr=subprocess.STDOUT)
- if f"wg showconf {self.configuration.Name}" not in saveConfig.decode().strip('\n'):
- return ResponseObject(False,
- "Update peer failed when saving the configuration")
- sqlUpdate(
- '''UPDATE '%s' SET name = ?, private_key = ?, DNS = ?, endpoint_allowed_ip = ?, mtu = ?,
- keepalive = ?, preshared_key = ?, advanced_security = ? WHERE id = ?''' % self.configuration.Name,
- (name, private_key, dns_addresses, endpoint_allowed_ip, mtu,
- keepalive, preshared_key, advanced_security, self.id,)
- )
- return ResponseObject()
- except subprocess.CalledProcessError as exc:
- return ResponseObject(False, exc.output.decode("UTF-8").strip())
-
-"""
-Dashboard API Key
-"""
-class DashboardAPIKey:
- def __init__(self, Key: str, CreatedAt: str, ExpiredAt: str):
- self.Key = Key
- self.CreatedAt = CreatedAt
- self.ExpiredAt = ExpiredAt
-
- def toJson(self):
- return self.__dict__
-
-"""
-Dashboard Configuration
-"""
-class DashboardConfig:
-
- def __init__(self):
- if not os.path.exists(DASHBOARD_CONF):
- open(DASHBOARD_CONF, "x")
- self.__config = configparser.RawConfigParser(strict=False)
- self.__config.read_file(open(DASHBOARD_CONF, "r+"))
- self.hiddenAttribute = ["totp_key", "auth_req"]
- self.__default = {
- "Account": {
- "username": "admin",
- "password": "admin",
- "enable_totp": "false",
- "totp_verified": "false",
- "totp_key": pyotp.random_base32()
- },
- "Server": {
- "wg_conf_path": "/etc/wireguard",
- "awg_conf_path": "/etc/amnezia/amneziawg",
- "app_prefix": "",
- "app_ip": "0.0.0.0",
- "app_port": "10086",
- "auth_req": "true",
- "version": DASHBOARD_VERSION,
- "dashboard_refresh_interval": "60000",
- "dashboard_peer_list_display": "grid",
- "dashboard_sort": "status",
- "dashboard_theme": "dark",
- "dashboard_api_key": "false",
- "dashboard_language": "en"
- },
- "Peers": {
- "peer_global_DNS": "1.1.1.1",
- "peer_endpoint_allowed_ip": "0.0.0.0/0",
- "peer_display_mode": "grid",
- "remote_endpoint": GetRemoteEndpoint(),
- "peer_MTU": "1420",
- "peer_keep_alive": "21"
- },
- "Other": {
- "welcome_session": "true"
- },
- "Database":{
- "type": "sqlite"
- },
- "Email":{
- "server": "",
- "port": "",
- "encryption": "",
- "username": "",
- "email_password": "",
- "send_from": "",
- "email_template": ""
- },
- "WireGuardConfiguration": {
- "autostart": ""
- }
- }
-
- for section, keys in self.__default.items():
- for key, value in keys.items():
- exist, currentData = self.GetConfig(section, key)
- if not exist:
- self.SetConfig(section, key, value, True)
- self.__createAPIKeyTable()
- self.DashboardAPIKeys = self.__getAPIKeys()
- self.APIAccessed = False
- self.SetConfig("Server", "version", DASHBOARD_VERSION)
-
- def __createAPIKeyTable(self):
- existingTable = sqlSelect("SELECT name FROM sqlite_master WHERE type='table' AND name = 'DashboardAPIKeys'").fetchall()
- if len(existingTable) == 0:
- sqlUpdate("CREATE TABLE DashboardAPIKeys (Key VARCHAR NOT NULL PRIMARY KEY, CreatedAt DATETIME NOT NULL DEFAULT (datetime('now', 'localtime')), ExpiredAt VARCHAR)")
-
- def __getAPIKeys(self) -> list[DashboardAPIKey]:
- keys = sqlSelect("SELECT * FROM DashboardAPIKeys WHERE ExpiredAt IS NULL OR ExpiredAt > datetime('now', 'localtime') ORDER BY CreatedAt DESC").fetchall()
- fKeys = []
- for k in keys:
-
- fKeys.append(DashboardAPIKey(*k))
- return fKeys
-
- def createAPIKeys(self, ExpiredAt = None):
- newKey = secrets.token_urlsafe(32)
- sqlUpdate('INSERT INTO DashboardAPIKeys (Key, ExpiredAt) VALUES (?, ?)', (newKey, ExpiredAt,))
-
- self.DashboardAPIKeys = self.__getAPIKeys()
-
- def deleteAPIKey(self, key):
- sqlUpdate("UPDATE DashboardAPIKeys SET ExpiredAt = datetime('now', 'localtime') WHERE Key = ?", (key, ))
- self.DashboardAPIKeys = self.__getAPIKeys()
-
- def __configValidation(self, section : str, key: str, value: Any) -> [bool, str]:
- if (type(value) is str and len(value) == 0
- and section not in ['Email', 'WireGuardConfiguration'] and
- (section == 'Peer' and key == 'peer_global_dns')):
- return False, "Field cannot be empty!"
- if section == "Peers" and key == "peer_global_dns" and len(value) > 0:
- return ValidateDNSAddress(value)
- if section == "Peers" and key == "peer_endpoint_allowed_ip":
- value = value.split(",")
- for i in value:
- i = i.strip()
+ if "awg" in ProtocolsEnabled():
+ confs = os.listdir(DashboardConfig.GetConfig("Server", "awg_conf_path")[1])
+ confs.sort()
+ for i in confs:
+ if RegexMatch("^(.{1,}).(conf)$", i):
+ i = i.replace('.conf', '')
try:
- ipaddress.ip_network(i, strict=False)
- except Exception as e:
- return False, str(e)
- if section == "Server" and key == "wg_conf_path":
- if not os.path.exists(value):
- return False, f"{value} is not a valid path"
- if section == "Account" and key == "password":
- if self.GetConfig("Account", "password")[0]:
- if not self.__checkPassword(
- value["currentPassword"], self.GetConfig("Account", "password")[1].encode("utf-8")):
- return False, "Current password does not match."
- if value["newPassword"] != value["repeatNewPassword"]:
- return False, "New passwords does not match"
- return True, ""
+ if i in WireguardConfigurations.keys():
+ if WireguardConfigurations[i].configurationFileChanged():
+ with app.app_context():
+ WireguardConfigurations[i] = AmneziaWireguardConfiguration(DashboardConfig, AllPeerJobs, AllPeerShareLinks, DashboardWebHooks, i)
+ else:
+ with app.app_context():
+ WireguardConfigurations[i] = AmneziaWireguardConfiguration(DashboardConfig, AllPeerJobs, AllPeerShareLinks, DashboardWebHooks, i, startup=startup)
+ except WireguardConfiguration.InvalidConfigurationFileException as e:
+                    app.logger.error(f"{i} has an invalid configuration file.")
- def generatePassword(self, plainTextPassword: str):
- return bcrypt.hashpw(plainTextPassword.encode("utf-8"), bcrypt.gensalt())
+def startThreads():
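+    # Both workers run as daemon threads so they terminate with the main process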
+ bgThread = threading.Thread(target=peerInformationBackgroundThread, daemon=True)
+ bgThread.start()
+ scheduleJobThread = threading.Thread(target=peerJobScheduleBackgroundThread, daemon=True)
+ scheduleJobThread.start()
- def __checkPassword(self, plainTextPassword: str, hashedPassword: bytes):
- return bcrypt.checkpw(plainTextPassword.encode("utf-8"), hashedPassword)
-
- def SetConfig(self, section: str, key: str, value: any, init: bool = False) -> [bool, str]:
- if key in self.hiddenAttribute and not init:
- return False, None
-
- if not init:
- valid, msg = self.__configValidation(section, key, value)
- if not valid:
- return False, msg
-
- if section == "Account" and key == "password":
- if not init:
- value = self.generatePassword(value["newPassword"]).decode("utf-8")
- else:
- value = self.generatePassword(value).decode("utf-8")
-
- if section == "Email" and key == "email_template":
- value = value.encode('unicode_escape').decode('utf-8')
-
- if section == "Server" and key == "wg_conf_path":
- if not os.path.exists(value):
- return False, "Path does not exist"
-
- if section not in self.__config:
- if init:
- self.__config[section] = {}
- else:
- return False, "Section does not exist"
-
- if ((key not in self.__config[section].keys() and init) or
- (key in self.__config[section].keys())):
- if type(value) is bool:
- if value:
- self.__config[section][key] = "true"
- else:
- self.__config[section][key] = "false"
- elif type(value) in [int, float]:
- self.__config[section][key] = str(value)
- elif type(value) is list:
- self.__config[section][key] = "||".join(value).strip("||")
- else:
- self.__config[section][key] = value
- return self.SaveConfig(), ""
- else:
- return False, f"{key} does not exist under {section}"
- return True, ""
-
- def SaveConfig(self) -> bool:
- try:
- with open(DASHBOARD_CONF, "w+", encoding='utf-8') as configFile:
- self.__config.write(configFile)
- return True
- except Exception as e:
- return False
-
- def GetConfig(self, section, key) -> [bool, any]:
- if section not in self.__config:
- return False, None
-
- if key not in self.__config[section]:
- return False, None
-
- if section == "Email" and key == "email_template":
- return True, self.__config[section][key].encode('utf-8').decode('unicode_escape')
-
- if section == "WireGuardConfiguration" and key == "autostart":
- return True, list(filter(lambda x: len(x) > 0, self.__config[section][key].split("||")))
-
- if self.__config[section][key] in ["1", "yes", "true", "on"]:
- return True, True
-
- if self.__config[section][key] in ["0", "no", "false", "off"]:
- return True, False
-
-
- return True, self.__config[section][key]
-
- def toJson(self) -> dict[str, dict[Any, Any]]:
- the_dict = {}
-
- for section in self.__config.sections():
- the_dict[section] = {}
- for key, val in self.__config.items(section):
- if key not in self.hiddenAttribute:
- the_dict[section][key] = self.GetConfig(section, key)[1]
- return the_dict
+dictConfig({
+ 'version': 1,
+ 'formatters': {'default': {
+ 'format': '[%(asctime)s] [%(levelname)s] in [%(module)s] %(message)s',
+ }},
+ 'root': {
+ 'level': 'INFO'
+ }
+})
-"""
-Database Connection Functions
-"""
+WireguardConfigurations: dict[str, WireguardConfiguration] = {}
+CONFIGURATION_PATH = os.getenv('CONFIGURATION_PATH', '.')
-sqldb = sqlite3.connect(os.path.join(CONFIGURATION_PATH, 'db', 'wgdashboard.db'), check_same_thread=False)
-sqldb.row_factory = sqlite3.Row
+app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 5206928
+app.secret_key = secrets.token_urlsafe(32)
+app.json = CustomJsonEncoder(app)
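+# Shared singletons are created inside the app context before InitWireguardConfigurationsList runs, since each WireguardConfiguration needs DashboardConfig, the peer job/share-link stores and DashboardWebHooks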
+with app.app_context():
+ SystemStatus = SystemStatus()
+ DashboardConfig = DashboardConfig()
+ EmailSender = EmailSender(DashboardConfig)
+ AllPeerShareLinks: PeerShareLinks = PeerShareLinks(DashboardConfig, WireguardConfigurations)
+ AllPeerJobs: PeerJobs = PeerJobs(DashboardConfig, WireguardConfigurations)
+ DashboardLogger: DashboardLogger = DashboardLogger()
+ DashboardPlugins: DashboardPlugins = DashboardPlugins(app, WireguardConfigurations)
+ DashboardWebHooks: DashboardWebHooks = DashboardWebHooks(DashboardConfig)
+ NewConfigurationTemplates: NewConfigurationTemplates = NewConfigurationTemplates()
+ InitWireguardConfigurationsList(startup=True)
+ DashboardClients: DashboardClients = DashboardClients(WireguardConfigurations)
+ app.register_blueprint(createClientBlueprint(WireguardConfigurations, DashboardConfig, DashboardClients))
-def sqlSelect(statement: str, paramters: tuple = ()) -> sqlite3.Cursor:
- result = []
- try:
- cursor = sqldb.cursor()
- result = cursor.execute(statement, paramters)
- except Exception as error:
- print("[WGDashboard] SQLite Error:" + str(error) + " | Statement: " + statement)
- return result
-
-def sqlUpdate(statement: str, paramters: tuple = ()) -> sqlite3.Cursor:
- sqldb = sqlite3.connect(os.path.join(CONFIGURATION_PATH, 'db', 'wgdashboard.db'))
- sqldb.row_factory = sqlite3.Row
- cursor = sqldb.cursor()
- with sqldb:
- cursor = sqldb.cursor()
- try:
- statement = statement.rstrip(';')
- s = f'BEGIN TRANSACTION;{statement};END TRANSACTION;'
- cursor.execute(statement, paramters)
- # sqldb.commit()
- except Exception as error:
- print("[WGDashboard] SQLite Error:" + str(error) + " | Statement: " + statement)
- sqldb.close()
-
-DashboardConfig = DashboardConfig()
-EmailSender = EmailSender(DashboardConfig)
_, APP_PREFIX = DashboardConfig.GetConfig("Server", "app_prefix")
cors = CORS(app, resources={rf"{APP_PREFIX}/api/*": {
"origins": "*",
"methods": "DELETE, POST, GET, OPTIONS",
"allow_headers": ["Content-Type", "wg-dashboard-apikey"]
}})
+_, app_ip = DashboardConfig.GetConfig("Server", "app_ip")
+_, app_port = DashboardConfig.GetConfig("Server", "app_port")
+_, WG_CONF_PATH = DashboardConfig.GetConfig("Server", "wg_conf_path")
'''
API Routes
@@ -2051,12 +252,13 @@ def auth_req():
else:
DashboardConfig.APIAccessed = False
whiteList = [
- '/static/', 'validateAuthentication', 'authenticate',
+ '/static/', 'validateAuthentication', 'authenticate', 'getDashboardConfiguration',
'getDashboardTheme', 'getDashboardVersion', 'sharePeer/get', 'isTotpEnabled', 'locale',
- '/fileDownload'
+ '/fileDownload',
+ '/client'
]
- if ("username" not in session
+ if (("username" not in session or session.get("role") != "admin")
and (f"{(APP_PREFIX if len(APP_PREFIX) > 0 else '')}/" != request.path
and f"{(APP_PREFIX if len(APP_PREFIX) > 0 else '')}" != request.path)
and len(list(filter(lambda x : x not in request.path, whiteList))) == len(whiteList)
@@ -2094,6 +296,7 @@ def API_AuthenticateLogin():
if DashboardConfig.APIAccessed:
authToken = hashlib.sha256(f"{request.headers.get('wg-dashboard-apikey')}{datetime.now()}".encode()).hexdigest()
+ session['role'] = 'admin'
session['username'] = authToken
resp = ResponseObject(True, DashboardConfig.GetConfig("Other", "welcome_session")[1])
resp.set_cookie("authToken", authToken)
@@ -2111,6 +314,7 @@ def API_AuthenticateLogin():
and ((totpEnabled and totpValid) or not totpEnabled)
):
authToken = hashlib.sha256(f"{data['username']}{datetime.now()}".encode()).hexdigest()
+ session['role'] = 'admin'
session['username'] = authToken
resp = ResponseObject(True, DashboardConfig.GetConfig("Other", "welcome_session")[1])
resp.set_cookie("authToken", authToken)
@@ -2130,12 +334,40 @@ def API_SignOut():
session.clear()
return resp
-@app.route(f'{APP_PREFIX}/api/getWireguardConfigurations', methods=["GET"])
+@app.get(f'{APP_PREFIX}/api/getWireguardConfigurations')
def API_getWireguardConfigurations():
InitWireguardConfigurationsList()
return ResponseObject(data=[wc for wc in WireguardConfigurations.values()])
-@app.route(f'{APP_PREFIX}/api/addWireguardConfiguration', methods=["POST"])
+@app.get(f'{APP_PREFIX}/api/newConfigurationTemplates')
+def API_NewConfigurationTemplates():
+ return ResponseObject(data=NewConfigurationTemplates.GetTemplates())
+
+@app.get(f'{APP_PREFIX}/api/newConfigurationTemplates/createTemplate')
+def API_NewConfigurationTemplates_CreateTemplate():
+ return ResponseObject(data=NewConfigurationTemplates.CreateTemplate().model_dump())
+
+@app.post(f'{APP_PREFIX}/api/newConfigurationTemplates/updateTemplate')
+def API_NewConfigurationTemplates_UpdateTemplate():
+ data = request.get_json()
+ template = data.get('Template', None)
+ if not template:
+        return ResponseObject(False, "Please provide a template")
+
+ status, msg = NewConfigurationTemplates.UpdateTemplate(template)
+ return ResponseObject(status, msg)
+
+@app.post(f'{APP_PREFIX}/api/newConfigurationTemplates/deleteTemplate')
+def API_NewConfigurationTemplates_DeleteTemplate():
+ data = request.get_json()
+ template = data.get('Template', None)
+ if not template:
+        return ResponseObject(False, "Please provide a template")
+
+ status, msg = NewConfigurationTemplates.DeleteTemplate(template)
+ return ResponseObject(status, msg)
+
+@app.post(f'{APP_PREFIX}/api/addWireguardConfiguration')
def API_addWireguardConfiguration():
data = request.get_json()
requiredKeys = [
@@ -2184,9 +416,13 @@ def API_addWireguardConfiguration():
os.path.join(path[protocol], 'WGDashboard_Backup', data["Backup"]),
os.path.join(path[protocol], f'{data["ConfigurationName"]}.conf')
)
- WireguardConfigurations[data['ConfigurationName']] = WireguardConfiguration(data=data, name=data['ConfigurationName']) if protocol == 'wg' else AmneziaWireguardConfiguration(data=data, name=data['ConfigurationName'])
+ WireguardConfigurations[data['ConfigurationName']] = (
+            WireguardConfiguration(DashboardConfig, AllPeerJobs, AllPeerShareLinks, DashboardWebHooks, data=data, name=data['ConfigurationName'])) if protocol == 'wg' else (
+ AmneziaWireguardConfiguration(DashboardConfig, AllPeerJobs, AllPeerShareLinks, DashboardWebHooks, data=data, name=data['ConfigurationName']))
else:
- WireguardConfigurations[data['ConfigurationName']] = WireguardConfiguration(data=data) if data.get('Protocol') == 'wg' else AmneziaWireguardConfiguration(data=data)
+ WireguardConfigurations[data['ConfigurationName']] = (
+ WireguardConfiguration(DashboardConfig, AllPeerJobs, AllPeerShareLinks, DashboardWebHooks, data=data)) if data.get('Protocol') == 'wg' else (
+ AmneziaWireguardConfiguration(DashboardConfig, AllPeerJobs, AllPeerShareLinks, DashboardWebHooks, data=data))
return ResponseObject()
@app.get(f'{APP_PREFIX}/api/toggleWireguardConfiguration')
@@ -2213,6 +449,21 @@ def API_updateWireguardConfiguration():
return ResponseObject(status, message=msg, data=WireguardConfigurations[name])
+@app.post(f'{APP_PREFIX}/api/updateWireguardConfigurationInfo')
+def API_updateWireguardConfigurationInfo():
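+    # Expected JSON body: {"Name": <configuration>, "Key": <info field>, "Value": <new value>}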
+ data = request.get_json()
+ name = data.get('Name')
+ key = data.get('Key')
+ value = data.get('Value')
+    if not all([name, key]):
+        return ResponseObject(status=False, message="Please provide the configuration name and key")
+ if name not in WireguardConfigurations.keys():
+ return ResponseObject(False, "Configuration does not exist", status_code=404)
+
+ status, msg, key = WireguardConfigurations[name].updateConfigurationInfo(key, value)
+
+ return ResponseObject(status=status, message=msg, data=key)
+
@app.get(f'{APP_PREFIX}/api/getWireguardConfigurationRawFile')
def API_GetWireguardConfigurationRawFile():
configurationName = request.args.get('configurationName')
@@ -2245,9 +496,11 @@ def API_deleteWireguardConfiguration():
data = request.get_json()
if "ConfigurationName" not in data.keys() or data.get("ConfigurationName") is None or data.get("ConfigurationName") not in WireguardConfigurations.keys():
return ResponseObject(False, "Please provide the configuration name you want to delete", status_code=404)
- status = WireguardConfigurations[data.get("ConfigurationName")].deleteConfiguration()
- if status:
- WireguardConfigurations.pop(data.get("ConfigurationName"))
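+    # Remove the configuration from the active list first; restore it only if deletion fails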
+ rp = WireguardConfigurations.pop(data.get("ConfigurationName"))
+
+ status = rp.deleteConfiguration()
+ if not status:
+ WireguardConfigurations[data.get("ConfigurationName")] = rp
return ResponseObject(status)
@app.post(f'{APP_PREFIX}/api/renameWireguardConfiguration')
@@ -2258,11 +511,17 @@ def API_renameWireguardConfiguration():
if (k not in data.keys() or data.get(k) is None or len(data.get(k)) == 0 or
(k == "ConfigurationName" and data.get(k) not in WireguardConfigurations.keys())):
return ResponseObject(False, "Please provide the configuration name you want to rename", status_code=404)
-
- status, message = WireguardConfigurations[data.get("ConfigurationName")].renameConfiguration(data.get("NewConfigurationName"))
+
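+    # Refuse to rename onto an existing configuration name, and pop the configuration so a failed rename can be rolled back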
+ if data.get("NewConfigurationName") in WireguardConfigurations.keys():
+        return ResponseObject(False, "Configuration name already exists", status_code=400)
+
+ rc = WireguardConfigurations.pop(data.get("ConfigurationName"))
+
+ status, message = rc.renameConfiguration(data.get("NewConfigurationName"))
if status:
- WireguardConfigurations.pop(data.get("ConfigurationName"))
- WireguardConfigurations[data.get("NewConfigurationName")] = WireguardConfiguration(data.get("NewConfigurationName"))
+ WireguardConfigurations[data.get("NewConfigurationName")] = (WireguardConfiguration(DashboardConfig, AllPeerJobs, AllPeerShareLinks, DashboardWebHooks, data.get("NewConfigurationName")) if rc.Protocol == 'wg' else AmneziaWireguardConfiguration(DashboardConfig, AllPeerJobs, AllPeerShareLinks, DashboardWebHooks, data.get("NewConfigurationName")))
+ else:
+ WireguardConfigurations[data.get("ConfigurationName")] = rc
return ResponseObject(status, message)
@app.get(f'{APP_PREFIX}/api/getWireguardConfigurationRealtimeTraffic')
@@ -2293,29 +552,30 @@ def API_getAllWireguardConfigurationBackup():
for protocol in ProtocolsEnabled():
directory = os.path.join(DashboardConfig.GetConfig("Server", f"{protocol}_conf_path")[1], 'WGDashboard_Backup')
- files = [(file, os.path.getctime(os.path.join(directory, file)))
- for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
- files.sort(key=lambda x: x[1], reverse=True)
-
- for f, ct in files:
- if RegexMatch(r"^(.*)_(.*)\.(conf)$", f):
- s = re.search(r"^(.*)_(.*)\.(conf)$", f)
- name = s.group(1)
- if name not in existingConfiguration:
- if name not in data['NonExistingConfigurations'].keys():
- data['NonExistingConfigurations'][name] = []
-
- date = s.group(2)
- d = {
- "protocol": protocol,
- "filename": f,
- "backupDate": date,
- "content": open(os.path.join(DashboardConfig.GetConfig("Server", f"{protocol}_conf_path")[1], 'WGDashboard_Backup', f), 'r').read()
- }
- if f.replace(".conf", ".sql") in list(os.listdir(directory)):
- d['database'] = True
- d['databaseContent'] = open(os.path.join(DashboardConfig.GetConfig("Server", f"{protocol}_conf_path")[1], 'WGDashboard_Backup', f.replace(".conf", ".sql")), 'r').read()
- data['NonExistingConfigurations'][name].append(d)
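+        # Only scan protocols whose WGDashboard_Backup directory actually exists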
+ if os.path.exists(directory):
+ files = [(file, os.path.getctime(os.path.join(directory, file)))
+ for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
+ files.sort(key=lambda x: x[1], reverse=True)
+
+ for f, ct in files:
+ if RegexMatch(r"^(.*)_(.*)\.(conf)$", f):
+ s = re.search(r"^(.*)_(.*)\.(conf)$", f)
+ name = s.group(1)
+ if name not in existingConfiguration:
+ if name not in data['NonExistingConfigurations'].keys():
+ data['NonExistingConfigurations'][name] = []
+
+ date = s.group(2)
+ d = {
+ "protocol": protocol,
+ "filename": f,
+ "backupDate": date,
+ "content": open(os.path.join(DashboardConfig.GetConfig("Server", f"{protocol}_conf_path")[1], 'WGDashboard_Backup', f), 'r').read()
+ }
+ if f.replace(".conf", ".sql") in list(os.listdir(directory)):
+ d['database'] = True
+ d['databaseContent'] = open(os.path.join(DashboardConfig.GetConfig("Server", f"{protocol}_conf_path")[1], 'WGDashboard_Backup', f.replace(".conf", ".sql")), 'r').read()
+ data['NonExistingConfigurations'][name].append(d)
return ResponseObject(data=data)
@app.get(f'{APP_PREFIX}/api/createWireguardConfigurationBackup')
@@ -2387,7 +647,7 @@ def API_updateDashboardConfigurationItem():
if data['key'] == 'wg_conf_path':
WireguardConfigurations.clear()
WireguardConfigurations.clear()
- InitWireguardConfigurationsList()
+ InitWireguardConfigurationsList()
return ResponseObject(True, data=DashboardConfig.GetConfig(data["section"], data["key"])[1])
@app.get(f'{APP_PREFIX}/api/getDashboardAPIKeys')
@@ -2439,11 +699,17 @@ def API_updatePeerSettings(configName):
foundPeer, peer = wireguardConfig.searchPeer(id)
if foundPeer:
if wireguardConfig.Protocol == 'wg':
- return peer.updatePeer(name, private_key, preshared_key, dns_addresses,
+ status, msg = peer.updatePeer(name, private_key, preshared_key, dns_addresses,
allowed_ip, endpoint_allowed_ip, mtu, keepalive)
-
- return peer.updatePeer(name, private_key, preshared_key, dns_addresses,
- allowed_ip, endpoint_allowed_ip, mtu, keepalive, "off")
+ else:
+ status, msg = peer.updatePeer(name, private_key, preshared_key, dns_addresses,
+ allowed_ip, endpoint_allowed_ip, mtu, keepalive, "off")
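+            # Refresh the peer list from the interface, then notify any registered web hooks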
+ wireguardConfig.getPeers()
+ DashboardWebHooks.RunWebHook('peer_updated', {
+ "configuration": wireguardConfig.Name,
+ "peers": [id]
+ })
+ return ResponseObject(status, msg)
return ResponseObject(False, "Peer does not exist")
@@ -2474,7 +740,16 @@ def API_deletePeers(configName: str) -> ResponseObject:
if len(peers) == 0:
return ResponseObject(False, "Please specify one or more peers", status_code=400)
configuration = WireguardConfigurations.get(configName)
- return configuration.deletePeers(peers)
+ status, msg = configuration.deletePeers(peers, AllPeerJobs, AllPeerShareLinks)
+
+    # Remove any client-to-peer assignments that referenced the deleted peers
+
+ for p in peers:
+ assignments = DashboardClients.DashboardClientsPeerAssignment.GetAssignedClients(configName, p)
+ for c in assignments:
+ DashboardClients.DashboardClientsPeerAssignment.UnassignClients(c.AssignmentID)
+
+ return ResponseObject(status, msg)
return ResponseObject(False, "Configuration does not exist", status_code=404)
@@ -2486,7 +761,8 @@ def API_restrictPeers(configName: str) -> ResponseObject:
if len(peers) == 0:
return ResponseObject(False, "Please specify one or more peers")
configuration = WireguardConfigurations.get(configName)
- return configuration.restrictPeers(peers)
+ status, msg = configuration.restrictPeers(peers)
+ return ResponseObject(status, msg)
return ResponseObject(False, "Configuration does not exist", status_code=404)
@app.post(f'{APP_PREFIX}/api/sharePeer/create')
@@ -2503,7 +779,7 @@ def API_sharePeer_create():
"This peer is already sharing. Please view data for shared link.",
data=activeLink[0]
)
- status, message = AllPeerShareLinks.addLink(Configuration, Peer, ExpireDate)
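+    # ExpireDate is expected in "%Y-%m-%d %H:%M:%S" format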
+    status, message = AllPeerShareLinks.addLink(Configuration, Peer, datetime.strptime(ExpireDate, "%Y-%m-%d %H:%M:%S") if ExpireDate else None)
if not status:
return ResponseObject(status, message)
return ResponseObject(data=AllPeerShareLinks.getLinkByID(message))
@@ -2514,13 +790,13 @@ def API_sharePeer_update():
ShareID: str = data.get("ShareID")
ExpireDate: str = data.get("ExpireDate")
- if ShareID is None:
+ if not all([ShareID, ExpireDate]):
-        return ResponseObject(False, "Please specify ShareID")
+        return ResponseObject(False, "Please specify both ShareID and ExpireDate")
if len(AllPeerShareLinks.getLinkByID(ShareID)) == 0:
return ResponseObject(False, "ShareID does not exist")
- status, message = AllPeerShareLinks.updateLinkExpireDate(ShareID, ExpireDate)
+ status, message = AllPeerShareLinks.updateLinkExpireDate(ShareID, datetime.strptime(ExpireDate, "%Y-%m-%d %H:%M:%S"))
if not status:
return ResponseObject(status, message)
return ResponseObject(data=AllPeerShareLinks.getLinkByID(ShareID))
@@ -2552,14 +828,16 @@ def API_allowAccessPeers(configName: str) -> ResponseObject:
if len(peers) == 0:
return ResponseObject(False, "Please specify one or more peers")
configuration = WireguardConfigurations.get(configName)
- return configuration.allowAccessPeers(peers)
+ status, msg = configuration.allowAccessPeers(peers)
+ return ResponseObject(status, msg)
return ResponseObject(False, "Configuration does not exist")
@app.post(f'{APP_PREFIX}/api/addPeers/
=0;){const s=r[0];for(let u=0;u1?2-l:l,l=this._easing(Math.min(1,Math.max(0,l))),this._target[n]=this._fn(o,a,l)}wait(){const t=this._promises||(this._promises=[]);return new Promise((e,s)=>{t.push({res:e,rej:s})})}_notify(t){const e=t?"res":"rej",s=this._promises||[];for(let n=0;nc.box.fullSize),!0),s=Wt(Ht(t,"left"),!0),n=Wt(Ht(t,"right")),o=Wt(Ht(t,"top"),!0),r=Wt(Ht(t,"bottom")),a=ts(t,"x"),l=ts(t,"y");return{fullSize:e,leftAndTop:s.concat(o),rightAndBottom:n.concat(l).concat(r).concat(a),chartArea:Ht(t,"chartArea"),vertical:s.concat(n).concat(l),horizontal:o.concat(r).concat(a)}}function es(i,t,e,s){return Math.max(i[e],t[e])+Math.max(i[s],t[s])}function xn(i,t){i.top=Math.max(i.top,t.top),i.left=Math.max(i.left,t.left),i.bottom=Math.max(i.bottom,t.bottom),i.right=Math.max(i.right,t.right)}function ba(i,t,e,s){const{pos:n,box:o}=e,r=i.maxPadding;if(!O(n)){e.size&&(i[n]-=e.size);const d=s[e.stack]||{size:0,count:1};d.size=Math.max(d.size,e.horizontal?o.height:o.width),e.size=d.size/d.count,i[n]+=e.size}o.getPadding&&xn(r,o.getPadding());const a=Math.max(0,t.outerWidth-es(r,i,"left","right")),l=Math.max(0,t.outerHeight-es(r,i,"top","bottom")),c=a!==i.w,h=l!==i.h;return i.w=a,i.h=l,e.horizontal?{same:c,other:h}:{same:h,other:c}}function _a(i){const t=i.maxPadding;function e(s){const n=Math.max(t[s]-i[s],0);return i[s]+=n,n}i.y+=e("top"),i.x+=e("left"),e("right"),e("bottom")}function xa(i,t){const e=t.maxPadding;function s(n){const o={left:0,top:0,right:0,bottom:0};return n.forEach(r=>{o[r]=Math.max(t[r],e[r])}),o}return s(i?["left","right"]:["top","bottom"])}function $t(i,t,e,s){const n=[];let o,r,a,l,c,h;for(o=0,r=i.length,c=0;oi==="left"?"right":i==="right"?"left":i,os=(i,t,e)=>t==="top"||t==="left"?i[t]+e:i[t]-e,rs=(i,t)=>Math.min(t||i,i);function as(i,t){const e=[],s=i.length/t,n=i.length;let o=0;for(;o({width:r[M]||0,height:a[M]||0});return{first:k(0),last:k(e-1),widest:k(S),highest:k(w),widths:r,heights:a}}getLabelForValue(t){return t}getPixelForValue(t,e){return NaN}getValueForPixel(t){}getPixelForTick(t){const e=this.ticks;return t<0||t>e.length-1?null:this.getPixelForValue(e[t].value)}getPixelForDecimal(t){this._reversePixels&&(t=1-t);const e=this._startPixel+t*this._length;return Co(this._alignToPixels?bt(this.chart,e,0):e)}getDecimalForPixel(t){const e=(t-this._startPixel)/this._length;return this._reversePixels?1-e:e}getBasePixel(){return this.getPixelForValue(this.getBaseValue())}getBaseValue(){const{min:t,max:e}=this;return t<0&&e<0?e:t>0&&e>0?t:0}getContext(t){const e=this.ticks||[];if(t>=0&&te.length&&delete this._stacks,t.forEach((s,n)=>{e.filter(o=>o===s._dataset).length===0&&this._destroyDatasetMeta(n)})}buildOrUpdateControllers(){const t=[],e=this.data.datasets;let s,n;for(this._removeUnreferencedMetasets(),s=0,n=e.length;s=0;--e)this._drawDataset(t[e]);this.notifyPlugins("afterDatasetsDraw")}_drawDataset(t){const e=this.ctx,s=t._clip,n=!s.disabled,o=ml(t,this.chartArea),r={meta:t,index:t.index,cancelable:!0};this.notifyPlugins("beforeDatasetDraw",r)!==!1&&(n&&Te(e,{left:s.left===!1?0:o.left-s.left,right:s.right===!1?this.width:o.right+s.right,top:s.top===!1?0:o.top-s.top,bottom:s.bottom===!1?this.height:o.bottom+s.bottom}),t.controller.draw(),n&&Le(e),r.cancelable=!1,this.notifyPlugins("afterDatasetDraw",r))}isPointInArea(t){return te(t,this.chartArea,this._minPadding)}getElementsAtEventForMode(t,e,s,n){const o=fa.modes[e];return typeof o=="function"?o(this,t,s,n):[]}getDatasetMeta(t){const e=this.data.datasets[t],s=this._metasets;let 
n=s.filter(o=>o&&o._dataset===e).pop();return n||(n={type:null,data:[],dataset:null,controller:null,hidden:null,xAxisID:null,yAxisID:null,order:e&&e.order||0,index:t,_dataset:e,_parsed:[],_sorted:!1},s.push(n)),n}getContext(){return this.$context||(this.$context=Mt(null,{chart:this,type:"chart"}))}getVisibleDatasetCount(){return this.getSortedVisibleDatasetMetas().length}isDatasetVisible(t){const e=this.data.datasets[t];if(!e)return!1;const s=this.getDatasetMeta(t);return typeof s.hidden=="boolean"?!s.hidden:!e.hidden}setDatasetVisibility(t,e){const s=this.getDatasetMeta(t);s.hidden=!e}toggleDataVisibility(t){this._hiddenIndices[t]=!this._hiddenIndices[t]}getDataVisibility(t){return!this._hiddenIndices[t]}_updateVisibility(t,e,s){const n=s?"show":"hide",o=this.getDatasetMeta(t),r=o.controller._resolveAnimations(void 0,n);Qt(e)?(o.data[e].hidden=!s,this.update()):(this.setDatasetVisibility(t,s),r.update(o,{visible:s}),this.update(a=>a.datasetIndex===t?n:void 0))}hide(t,e){this._updateVisibility(t,e,!1)}show(t,e){this._updateVisibility(t,e,!0)}_destroyDatasetMeta(t){const e=this._metasets[t];e&&e.controller&&e.controller._destroy(),delete this._metasets[t]}_stop(){let t,e;for(this.stop(),ot.remove(this),t=0,e=this.data.datasets.length;ti.height-s/2?"bottom":"center"}function nc(i,t,e,s){const{x:n,width:o}=s,r=e.caretSize+e.caretPadding;if(i==="left"&&n+o+r>t.width||i==="right"&&n-o-r<0)return!0}function oc(i,t,e,s){const{x:n,width:o}=e,{width:r,chartArea:{left:a,right:l}}=i;let c="center";return s==="center"?c=n<=(a+l)/2?"left":"right":n<=o/2?c="left":n>=r-o/2&&(c="right"),nc(c,i,t,e)&&(c="center"),c}function Ds(i,t,e){const s=e.yAlign||t.yAlign||sc(i,e);return{xAlign:e.xAlign||t.xAlign||oc(i,t,e,s),yAlign:s}}function rc(i,t){let{x:e,width:s}=i;return t==="right"?e-=s:t==="center"&&(e-=s/2),e}function ac(i,t,e){let{y:s,height:n}=i;return t==="top"?s+=e:t==="bottom"?s-=n+e:s-=n/2,s}function Os(i,t,e,s){const{caretSize:n,caretPadding:o,cornerRadius:r}=i,{xAlign:a,yAlign:l}=e,c=n+o,{topLeft:h,topRight:d,bottomLeft:f,bottomRight:u}=Tt(r);let p=rc(t,a);const g=ac(t,l,c);return l==="center"?a==="left"?p+=c:a==="right"&&(p-=c):a==="left"?p-=Math.max(h,f)+n:a==="right"&&(p+=Math.max(d,u)+n),{x:Y(p,0,s.width-t.width),y:Y(g,0,s.height-t.height)}}function ye(i,t,e){const s=q(e.padding);return t==="center"?i.x+i.width/2:t==="right"?i.x+i.width-s.right:i.x+s.left}function Cs(i){return tt([],rt(i))}function lc(i,t,e){return Mt(i,{tooltip:t,tooltipItems:e,type:"tooltip"})}function Ts(i,t){const e=t&&t.dataset&&t.dataset.tooltip&&t.dataset.tooltip.callbacks;return e?i.override(e):i}const In={beforeTitle:nt,title(i){if(i.length>0){const t=i[0],e=t.chart.data.labels,s=e?e.length:0;if(this&&this.options&&this.options.mode==="dataset")return t.dataset.label||"";if(t.label)return t.label;if(s>0&&t.dataIndex"u"?In[t].call(e,s):n}class Ls extends ut{static positioners=Yt;constructor(t){super(),this.opacity=0,this._active=[],this._eventPosition=void 0,this._size=void 0,this._cachedAnimations=void 0,this._tooltipItems=[],this.$animations=void 0,this.$context=void 0,this.chart=t.chart,this.options=t.options,this.dataPoints=void 0,this.title=void 0,this.beforeBody=void 0,this.body=void 0,this.afterBody=void 0,this.footer=void 0,this.xAlign=void 0,this.yAlign=void 0,this.x=void 0,this.y=void 0,this.height=void 0,this.width=void 0,this.caretX=void 0,this.caretY=void 0,this.labelColors=void 0,this.labelPointStyles=void 0,this.labelTextColors=void 0}initialize(t){this.options=t,this._cachedAnimations=void 
0,this.$context=void 0}_resolveAnimations(){const t=this._cachedAnimations;if(t)return t;const e=this.chart,s=this.options.setContext(this.getContext()),n=s.enabled&&e.options.animation&&s.animations,o=new pn(this.chart,n);return n._cacheable&&(this._cachedAnimations=Object.freeze(o)),o}getContext(){return this.$context||(this.$context=lc(this.chart.getContext(),this,this._tooltipItems))}getTitle(t,e){const{callbacks:s}=e,n=j(s,"beforeTitle",this,t),o=j(s,"title",this,t),r=j(s,"afterTitle",this,t);let a=[];return a=tt(a,rt(n)),a=tt(a,rt(o)),a=tt(a,rt(r)),a}getBeforeBody(t,e){return Cs(j(e.callbacks,"beforeBody",this,t))}getBody(t,e){const{callbacks:s}=e,n=[];return L(t,o=>{const r={before:[],lines:[],after:[]},a=Ts(s,o);tt(r.before,rt(j(a,"beforeLabel",this,o))),tt(r.lines,j(a,"label",this,o)),tt(r.after,rt(j(a,"afterLabel",this,o))),n.push(r)}),n}getAfterBody(t,e){return Cs(j(e.callbacks,"afterBody",this,t))}getFooter(t,e){const{callbacks:s}=e,n=j(s,"beforeFooter",this,t),o=j(s,"footer",this,t),r=j(s,"afterFooter",this,t);let a=[];return a=tt(a,rt(n)),a=tt(a,rt(o)),a=tt(a,rt(r)),a}_createItems(t){const e=this._active,s=this.chart.data,n=[],o=[],r=[];let a=[],l,c;for(l=0,c=e.length;l0&&e.stroke()}_updateAnimationTarget(t){const e=this.chart,s=this.$animations,n=s&&s.x,o=s&&s.y;if(n||o){const r=Yt[t.position].call(this,this._active,this._eventPosition);if(!r)return;const a=this._size=Ps(this,t),l=Object.assign({},r,this._size),c=Ds(e,t,l),h=Os(t,l,c,e);(n._to!==h.x||o._to!==h.y)&&(this.xAlign=c.xAlign,this.yAlign=c.yAlign,this.width=a.width,this.height=a.height,this.caretX=r.x,this.caretY=r.y,this._resolveAnimations().update(this,h))}}_willRender(){return!!this.opacity}draw(t){const e=this.options.setContext(this.getContext());let s=this.opacity;if(!s)return;this._updateAnimationTarget(e);const n={width:this.width,height:this.height},o={x:this.x,y:this.y};s=Math.abs(s)<.001?0:s;const r=q(e.padding),a=this.title.length||this.beforeBody.length||this.body.length||this.afterBody.length||this.footer.length;e.enabled&&a&&(t.save(),t.globalAlpha=s,this.drawBackground(o,t,n,e),hn(t,e.textDirection),o.y+=r.top,this.drawTitle(o,t,e),this.drawBody(o,t,e),this.drawFooter(o,t,e),dn(t,e.textDirection),t.restore())}getActiveElements(){return this._active||[]}setActiveElements(t,e){const s=this._active,n=t.map(({datasetIndex:a,index:l})=>{const c=this.chart.getDatasetMeta(a);if(!c)throw new Error("Cannot find a dataset at index "+a);return{datasetIndex:a,element:c.data[l],index:l}}),o=!Se(s,n),r=this._positionChanged(n,e);(o||r)&&(this._active=n,this._eventPosition=e,this._ignoreReplayEvents=!0,this.update(!0))}handleEvent(t,e,s=!0){if(e&&this._ignoreReplayEvents)return!1;this._ignoreReplayEvents=!1;const n=this.options,o=this._active||[],r=this._getActiveElements(t,o,e,s),a=this._positionChanged(r,t),l=e||!Se(r,o)||a;return l&&(this._active=r,(n.enabled||n.external)&&(this._eventPosition={x:t.x,y:t.y},this.update(!0,e))),l}_getActiveElements(t,e,s,n){const o=this.options;if(t.type==="mouseout")return[];if(!n)return e.filter(a=>this.chart.data.datasets[a.datasetIndex]&&this.chart.getDatasetMeta(a.datasetIndex).controller.getParsed(a.index)!==void 0);const r=this.chart.getElementsAtEventForMode(t,o.mode,o,s);return o.reverse&&r.reverse(),r}_positionChanged(t,e){const{caretX:s,caretY:n,options:o}=this,r=Yt[o.position].call(this,t,e);return r!==!1&&(s!==r.x||n!==r.y)}}var Lc={id:"tooltip",_element:Ls,positioners:Yt,afterInit(i,t,e){e&&(i.tooltip=new 
Ls({chart:i,options:e}))},beforeUpdate(i,t,e){i.tooltip&&i.tooltip.initialize(e)},reset(i,t,e){i.tooltip&&i.tooltip.initialize(e)},afterDraw(i){const t=i.tooltip;if(t&&t._willRender()){const e={tooltip:t};if(i.notifyPlugins("beforeTooltipDraw",{...e,cancelable:!0})===!1)return;t.draw(i.ctx),i.notifyPlugins("afterTooltipDraw",e)}},afterEvent(i,t){if(i.tooltip){const e=t.replay;i.tooltip.handleEvent(t.event,e,t.inChartArea)&&(t.changed=!0)}},defaults:{enabled:!0,external:null,position:"average",backgroundColor:"rgba(0,0,0,0.8)",titleColor:"#fff",titleFont:{weight:"bold"},titleSpacing:2,titleMarginBottom:6,titleAlign:"left",bodyColor:"#fff",bodySpacing:2,bodyFont:{},bodyAlign:"left",footerColor:"#fff",footerSpacing:2,footerMarginTop:6,footerFont:{weight:"bold"},footerAlign:"left",padding:6,caretPadding:2,caretSize:5,cornerRadius:6,boxHeight:(i,t)=>t.bodyFont.size,boxWidth:(i,t)=>t.bodyFont.size,multiKeyBackground:"#fff",displayColors:!0,boxPadding:0,borderColor:"rgba(0,0,0,0)",borderWidth:0,animation:{duration:400,easing:"easeOutQuart"},animations:{numbers:{type:"number",properties:["x","y","width","height","caretX","caretY"]},opacity:{easing:"linear",duration:200}},callbacks:In},defaultRoutes:{bodyFont:"font",footerFont:"font",titleFont:"font"},descriptors:{_scriptable:i=>i!=="filter"&&i!=="itemSort"&&i!=="external",_indexable:!1,callbacks:{_scriptable:!1,_indexable:!1},animation:{_fallback:!1},animations:{_fallback:"animation"}},additionalOptionScopes:["interaction"]};const cc=(i,t,e,s)=>(typeof t=="string"?(e=i.push(t)-1,s.unshift({index:e,label:t})):isNaN(t)&&(e=null),e);function hc(i,t,e,s){const n=i.indexOf(t);if(n===-1)return cc(i,t,e,s);const o=i.lastIndexOf(t);return n!==o?e:n}const dc=(i,t)=>i===null?null:Y(Math.round(i),0,t);function As(i){const t=this.getLabels();return i>=0&&i+g)}getLabelForValue(t){const e=this._adapter,s=this.options.time;return s.tooltipFormat?e.format(t,s.tooltipFormat):e.format(t,s.displayFormats.datetime)}format(t,e){const n=this.options.time.displayFormats,o=this._unit,r=e||n[o];return this._adapter.format(t,r)}_tickFormatFunction(t,e,s,n){const o=this.options,r=o.ticks.callback;if(r)return F(r,[t,e,s],this);const a=o.time.displayFormats,l=this._unit,c=this._majorUnit,h=l&&a[l],d=c&&a[c],f=s[e],u=c&&d&&f&&f.major;return this._adapter.format(t,n||(u?d:h))}generateTickLabels(t){let e,s,n;for(e=0,s=t.length;e0?a:1}getDataTimestamps(){let t=this._cache.data||[],e,s;if(t.length)return t;const n=this.getMatchingVisibleMetas();if(this._normalized&&n.length)return this._cache.data=n[0].controller.getAllParsedValues(this);for(e=0,s=n.length;e=i[s].pos&&t<=i[n].pos&&({lo:s,hi:n}=kt(i,"pos",t)),{pos:o,time:a}=i[s],{pos:r,time:l}=i[n]):(t>=i[s].time&&t<=i[n].time&&({lo:s,hi:n}=kt(i,"time",t)),{time:o,pos:a}=i[s],{time:r,pos:l}=i[n]);const c=r-o;return c?a+(l-a)*(t-o)/c:a}class Ic extends Hs{static id="timeseries";static defaults=Hs.defaults;constructor(t){super(t),this._table=[],this._minPos=void 0,this._tableRange=void 0}initOffsets(){const t=this._getTimestampsForTable(),e=this._table=this.buildLookupTable(t);this._minPos=ve(e,this.min),this._tableRange=ve(e,this.max)-this._minPos,super.initOffsets(t)}buildLookupTable(t){const{min:e,max:s}=this,n=[],o=[];let r,a,l,c,h;for(r=0,a=t.length;r=e&&c<=s&&n.push(c);if(n.length<2)return[{time:e,pos:0},{time:s,pos:1}];for(r=0,a=n.length;rn-o)}_getTimestampsForTable(){let t=this._cache.all||[];if(t.length)return t;const e=this.getDataTimestamps(),s=this.getLabelTimestamps();return 
e.length&&s.length?t=this.normalize(e.concat(s)):t=e.length?e:s,t=this._cache.all=t,t}getDecimalForValue(t){return(ve(this._table,t)-this._minPos)/this._tableRange}getValueForPixel(t){const e=this._offsets,s=this.getDecimalForPixel(t)/e.factor-e.end;return ve(this._table,s*this._tableRange+this._minPos,!0)}}const Rn={data:{type:Object,required:!0},options:{type:Object,default:()=>({})},plugins:{type:Array,default:()=>[]},datasetIdKey:{type:String,default:"label"},updateMode:{type:String,default:void 0}},bc={ariaLabel:{type:String},ariaDescribedby:{type:String}},_c={type:{type:String,required:!0},destroyDelay:{type:Number,default:0},...Rn,...bc},xc=Hn[0]==="2"?(i,t)=>Object.assign(i,{attrs:t}):(i,t)=>Object.assign(i,t);function Ct(i){return Ns(i)?Xe(i):i}function yc(i){let t=arguments.length>1&&arguments[1]!==void 0?arguments[1]:i;return Ns(t)?new Proxy(i,{}):i}function vc(i,t){const e=i.options;e&&t&&Object.assign(e,t)}function zn(i,t){i.labels=t}function En(i,t,e){const s=[];i.datasets=t.map(n=>{const o=i.datasets.find(r=>r[e]===n[e]);return!o||!n.data||s.includes(o)?{...n}:(s.push(o),Object.assign(o,n),o)})}function kc(i,t){const e={labels:[],datasets:[]};return zn(e,i.labels),En(e,i.datasets,t),e}const wc=Ws({props:_c,setup(i,t){let{expose:e,slots:s}=t;const n=Wn(null),o=Vs(null);e({chart:o});const r=()=>{if(!n.value)return;const{type:c,data:h,options:d,plugins:f,datasetIdKey:u}=i,p=kc(h,u),g=yc(p,h);o.value=new bi(n.value,{type:c,data:g,options:{...d},plugins:f})},a=()=>{const c=Xe(o.value);c&&(i.destroyDelay>0?setTimeout(()=>{c.destroy(),o.value=null},i.destroyDelay):(c.destroy(),o.value=null))},l=c=>{c.update(i.updateMode)};return Vn(r),Nn(a),jn([()=>i.options,()=>i.data],(c,h)=>{let[d,f]=c,[u,p]=h;const g=Xe(o.value);if(!g)return;let m=!1;if(d){const b=Ct(d),_=Ct(u);b&&b!==_&&(vc(g,b),m=!0)}if(f){const b=Ct(f.labels),_=Ct(p.labels),y=Ct(f.datasets),v=Ct(p.datasets);b!==_&&(zn(g.config.data,b),m=!0),y&&y!==v&&(En(g.config.data,y,i.datasetIdKey),m=!0)}m&&$n(()=>{l(g)})},{deep:!0}),()=>Ue("canvas",{role:"img",ariaLabel:i.ariaLabel,ariaDescribedby:i.ariaDescribedby,ref:n},[Ue("p",{},[s.default?s.default():""])])}});function Bn(i,t){return bi.register(t),Ws({props:Rn,setup(e,s){let{expose:n}=s;const o=Vs(null),r=a=>{o.value=a?.chart};return n({chart:o}),()=>Ue(wc,xc({ref:r},{type:i,...e}))}})}const Rc=Bn("bar",oa),zc=Bn("line",ra);export{Dc as B,bi as C,_i as L,Pc as P,oa as a,ra as b,Fc as c,Tc as d,Lc as e,Ac as f,zc as g,Rc as h,Oc as i,Cc as p};
diff --git a/src/static/app/dist/assets/index-Cor6obBz.js b/src/static/app/dist/assets/index-Cor6obBz.js
deleted file mode 100644
index 8df71b5e..00000000
--- a/src/static/app/dist/assets/index-Cor6obBz.js
+++ /dev/null
@@ -1 +0,0 @@
-import{r as M,o as H,a as o,c,b as e,d as s,e as v,f as b,t as h,_ as S,D as y,w as r,T as w,n as k,u as L,W as T,g as G,G as N,F as A,h as D,i as x,j as $,k as f,l as W,S as z}from"./index-oBQzjt8-.js";import{L as u}from"./localeText-CuybU_0U.js";import{M as V}from"./message-Buu3PA8w.js";import"./dayjs.min-BHDUvWAB.js";const O={class:"peerSettingContainer w-100 h-100 position-absolute top-0 start-0 overflow-y-scroll"},U={class:"container d-flex h-100 w-100"},j={class:"m-auto modal-dialog-centered dashboardModal"},B={class:"card rounded-3 shadow flex-grow-1"},R={class:"card-header bg-transparent d-flex align-items-center gap-2 border-0 p-4 pb-2"},F={class:"mb-0"},I={class:"card-body px-4 pb-4 d-flex flex-column gap-2"},q={class:"card text-decoration-none",target:"_blank",role:"button",href:"https://discord.gg/72TwzjeuWm"},E={class:"card-body d-flex gap-4 align-items-center"},J={class:"d-flex align-items-center"},P={class:"badge rounded-pill text-bg-primary ms-2"},Y={key:0,class:"spinner-border spinner-border-sm",style:{width:"0.7rem",height:"0.7rem"}},K={key:1},Q={class:"text-muted"},X={class:"card text-decoration-none",href:"https://donaldzou.github.io/WGDashboard-Documentation/",target:"_blank"},Z={class:"card-body d-flex gap-4 align-items-center"},ee={class:"mb-0"},te={class:"text-muted"},se={__name:"helpModal",setup(i){const t=M(!0),g=M(void 0);return H(()=>{t.value=!0,fetch("https://discord.com/api/guilds/1276818723637956628/widget.json").then(d=>d.json()).then(d=>{g.value=d,t.value=!1}).catch(()=>{t.value=!1})}),(d,a)=>(o(),c("div",O,[e("div",U,[e("div",j,[e("div",B,[e("div",R,[e("h4",F,[s(u,{t:"Help"})]),e("button",{type:"button",class:"btn-close ms-auto",onClick:a[0]||(a[0]=l=>d.$emit("close"))})]),e("div",I,[e("a",q,[e("div",E,[a[3]||(a[3]=e("h1",{class:"mb-0"},[e("i",{class:"bi bi-discord"})],-1)),e("div",null,[e("div",J,[a[2]||(a[2]=e("h5",{class:"mb-0"}," Discord Server ",-1)),e("span",P,[t.value?(o(),c("span",Y)):v("",!0),g.value!==void 0&&!t.value?(o(),c("span",K,[a[1]||(a[1]=e("i",{class:"bi bi-person-fill me-2"},null,-1)),b(h(g.value.presence_count)+" Online ",1)])):v("",!0)])]),e("small",Q,[s(u,{t:"Join our Discord server for quick help or chat about WGDashboard!"})])])])]),e("a",X,[e("div",Z,[a[4]||(a[4]=e("h1",{class:"mb-0"},[e("i",{class:"bi bi-hash"})],-1)),e("div",null,[e("h5",ee,[s(u,{t:"Official Documentation"})]),e("small",te,[s(u,{t:"Official documentation contains User Guides and more..."})])])])])])])])])]))}},oe={key:"header",class:"shadow"},ae={class:"p-3 d-flex gap-2 flex-column"},ne={class:"d-flex text-body"},ie={class:"d-flex flex-column align-items-start gap-1"},le={class:"mb-0"},re={class:"mb-0"},de={class:"list-group"},ce={href:"https://donaldzou.github.io/WGDashboard-Documentation/",target:"_blank",class:"list-group-item list-group-item-action d-flex align-items-center"},ue={target:"_blank",role:"button",href:"https://discord.gg/72TwzjeuWm",class:"list-group-item list-group-item-action d-flex align-items-center"},me={__name:"agentModal",emits:["close"],setup(i,{emit:t}){const g=t,d=y();return(a,l)=>(o(),c("div",{class:k(["agentContainer m-2 rounded-3 d-flex flex-column text-body",{enabled:L(d).HelpAgent.Enable}])},[s(w,{name:"agent-message"},{default:r(()=>[e("div",oe,[e("div",ae,[e("div",ne,[e("div",ie,[e("h5",le,[s(u,{t:"Help"})])]),e("a",{role:"button",class:"ms-auto text-body",onClick:l[0]||(l[0]=n=>g("close"))},l[1]||(l[1]=[e("h5",{class:"mb-0"},[e("i",{class:"bi bi-x-lg"})],-1)]))]),e("p",re,[s(u,{t:"You can visit our: 
"})]),e("div",de,[e("a",ce,[l[2]||(l[2]=e("i",{class:"bi bi-book-fill"},null,-1)),s(u,{class:"ms-auto",t:"Official Documentation"})]),e("a",ue,[l[3]||(l[3]=e("i",{class:"bi bi-discord"},null,-1)),s(u,{class:"ms-auto",t:"Discord Server"})])])])])]),_:1})],2))}},ge=S(me,[["__scopeId","data-v-44ff028e"]]),_e={name:"navbar",components:{HelpModal:se,LocaleText:u,AgentModal:ge},setup(){const i=T(),t=y();return{wireguardConfigurationsStore:i,dashboardConfigurationStore:t}},data(){return{updateAvailable:!1,updateMessage:"Checking for update...",updateUrl:"",openHelpModal:!1,openAgentModal:!1}},computed:{getActiveCrossServer(){if(this.dashboardConfigurationStore.ActiveServerConfiguration)return new URL(this.dashboardConfigurationStore.CrossServerConfiguration.ServerList[this.dashboardConfigurationStore.ActiveServerConfiguration].host)}},mounted(){G("/api/getDashboardUpdate",{},i=>{i.status?(i.data&&(this.updateAvailable=!0,this.updateUrl=i.data),this.updateMessage=i.message):(this.updateMessage=N("Failed to check available update"),console.log(`Failed to get update: ${i.message}`))})}},fe=["data-bs-theme"],pe={id:"sidebarMenu",class:"bg-body-tertiary sidebar border h-100 rounded-3 shadow overflow-y-scroll"},be={class:"sidebar-sticky"},he={class:"text-white text-center m-0 py-3 mb-2 btn-brand"},ve={key:0,class:"ms-auto"},xe={class:"nav flex-column px-2 gap-1"},Ce={class:"nav-item"},ke={class:"nav-item"},$e={class:"nav-item"},Se={class:"sidebar-heading px-3 mt-3 mb-1 text-muted text-center"},ye={class:"nav flex-column px-2 gap-1"},Me={class:"nav-item"},we={class:"sidebar-heading px-3 mt-3 mb-1 text-muted text-center"},Ae={class:"nav flex-column px-2 gap-1"},De={class:"nav-item"},He={class:"nav-item"},Le={class:"nav-item"},Te={class:"nav flex-column px-2 mb-3"},Ge={class:"nav-item"},Ne={class:"nav-item",style:{"font-size":"0.8rem"}},We=["href"],ze={class:"nav-link text-muted rounded-3"},Ve={key:1,class:"nav-link text-muted rounded-3"};function Oe(i,t,g,d,a,l){const n=f("LocaleText"),m=f("RouterLink"),C=f("HelpModal"),p=f("AgentModal");return o(),c("div",{class:k(["col-md-3 col-lg-2 d-md-block p-2 navbar-container",{active:this.dashboardConfigurationStore.ShowNavBar}]),"data-bs-theme":d.dashboardConfigurationStore.Configuration.Server.dashboard_theme},[e("nav",pe,[e("div",be,[e("div",he,[t[5]||(t[5]=e("h5",{class:"mb-0"}," WGDashboard ",-1)),l.getActiveCrossServer!==void 0?(o(),c("small",ve,[t[4]||(t[4]=e("i",{class:"bi bi-hdd-rack-fill me-2"},null,-1)),b(h(l.getActiveCrossServer.host),1)])):v("",!0)]),e("ul",xe,[e("li",Ce,[s(m,{class:"nav-link rounded-3",to:"/","exact-active-class":"active"},{default:r(()=>[t[6]||(t[6]=e("i",{class:"bi bi-house me-2"},null,-1)),s(n,{t:"Home"})]),_:1})]),e("li",ke,[s(m,{class:"nav-link rounded-3",to:"/settings","exact-active-class":"active"},{default:r(()=>[t[7]||(t[7]=e("i",{class:"bi bi-gear me-2"},null,-1)),s(n,{t:"Settings"})]),_:1})]),e("li",$e,[e("a",{class:"nav-link rounded-3",role:"button",onClick:t[0]||(t[0]=_=>a.openAgentModal=!0)},[t[8]||(t[8]=e("i",{class:"bi bi-question-circle me-2"},null,-1)),s(n,{t:"Help"})])])]),t[11]||(t[11]=e("hr",{class:"text-body my-2"},null,-1)),e("h6",Se,[s(n,{t:"WireGuard Configurations"})]),e("ul",ye,[(o(!0),c(A,null,D(this.wireguardConfigurationsStore.Configurations,_=>(o(),c("li",Me,[s(m,{to:"/configuration/"+_.Name+"/peers",class:"nav-link nav-conf-link rounded-3","active-class":"active"},{default:r(()=>[e("span",{class:k(["dot me-2",{active:_.Status}])},null,2),b(" 
"+h(_.Name),1)]),_:2},1032,["to"])]))),256))]),t[12]||(t[12]=e("hr",{class:"text-body my-2"},null,-1)),e("h6",we,[s(n,{t:"Tools"})]),e("ul",Ae,[e("li",De,[s(m,{to:"/system_status",class:"nav-link rounded-3","active-class":"active"},{default:r(()=>[s(n,{t:"System Status"})]),_:1})]),e("li",He,[s(m,{to:"/ping",class:"nav-link rounded-3","active-class":"active"},{default:r(()=>[s(n,{t:"Ping"})]),_:1})]),e("li",Le,[s(m,{to:"/traceroute",class:"nav-link rounded-3","active-class":"active"},{default:r(()=>[s(n,{t:"Traceroute"})]),_:1})])]),t[13]||(t[13]=e("hr",{class:"text-body my-2"},null,-1)),e("ul",Te,[e("li",Ge,[e("a",{class:"nav-link text-danger rounded-3",onClick:t[1]||(t[1]=_=>this.dashboardConfigurationStore.signOut()),role:"button",style:{"font-weight":"bold"}},[t[9]||(t[9]=e("i",{class:"bi bi-box-arrow-left me-2"},null,-1)),s(n,{t:"Sign Out"})])]),e("li",Ne,[this.updateAvailable?(o(),c("a",{key:0,href:this.updateUrl,class:"text-decoration-none rounded-3",target:"_blank"},[e("small",ze,[s(n,{t:this.updateMessage},null,8,["t"]),t[10]||(t[10]=b(" (")),s(n,{t:"Current Version:"}),b(" "+h(d.dashboardConfigurationStore.Configuration.Server.version)+") ",1)])],8,We)):(o(),c("small",Ve,[s(n,{t:this.updateMessage},null,8,["t"]),b(" ("+h(d.dashboardConfigurationStore.Configuration.Server.version)+") ",1)]))])])])]),s($,{name:"zoom"},{default:r(()=>[this.openHelpModal?(o(),x(C,{key:0,onClose:t[2]||(t[2]=_=>{a.openHelpModal=!1})})):v("",!0)]),_:1}),s($,{name:"slideIn"},{default:r(()=>[this.openAgentModal?(o(),x(p,{key:0,onClose:t[3]||(t[3]=_=>a.openAgentModal=!1)})):v("",!0)]),_:1})],10,fe)}const Ue=S(_e,[["render",Oe],["__scopeId","data-v-58e71749"]]),je={name:"index",components:{Message:V,Navbar:Ue},async setup(){return{dashboardConfigurationStore:y()}},computed:{getMessages(){return this.dashboardConfigurationStore.Messages.filter(i=>i.show)}}},Be=["data-bs-theme"],Re={class:"row h-100"},Fe={class:"col-md-9 col-lg-10 overflow-y-scroll mb-0 pt-2"},Ie={class:"messageCentre text-body position-absolute d-flex"};function qe(i,t,g,d,a,l){const n=f("Navbar"),m=f("RouterView"),C=f("Message");return o(),c("div",{class:"container-fluid flex-grow-1 main","data-bs-theme":this.dashboardConfigurationStore.Configuration.Server.dashboard_theme},[e("div",Re,[s(n),e("main",Fe,[(o(),x(z,null,{default:r(()=>[s(m,null,{default:r(({Component:p})=>[s($,{name:"fade2",mode:"out-in",appear:""},{default:r(()=>[(o(),x(W(p)))]),_:2},1024)]),_:1})]),_:1})),e("div",Ie,[s(w,{name:"message",tag:"div",class:"position-relative flex-sm-grow-0 flex-grow-1 d-flex align-items-end ms-sm-auto flex-column gap-2"},{default:r(()=>[(o(!0),c(A,null,D(l.getMessages.slice().reverse(),p=>(o(),x(C,{message:p,key:p.id},null,8,["message"]))),128))]),_:1})])])])],8,Be)}const Ke=S(je,[["render",qe],["__scopeId","data-v-0c6a5068"]]);export{Ke as default};
diff --git a/src/static/app/dist/assets/index-DFl-XeJT.css b/src/static/app/dist/assets/index-DFl-XeJT.css
deleted file mode 100644
index ed6c6c75..00000000
--- a/src/static/app/dist/assets/index-DFl-XeJT.css
+++ /dev/null
@@ -1,15 +0,0 @@
-@charset "UTF-8";*{font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Helvetica,Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji",Segoe UI Symbol}.dp__input{font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Helvetica,Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji",Segoe UI Symbol!important}::-webkit-scrollbar{display:none}.codeFont{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace}.feather{width:16px;height:16px;vertical-align:text-bottom}.btn-primary{font-weight:700}#app{width:100%;height:100vh}@supports (height: 100dvh){#app{height:100dvh}}@property --brandColor1{syntax: ">>8&255,y>>>16&255,y>>>24&255)}function D(b,y){b.push(y&255,y>>>8&255)}function P(b,y){for(var S=0;S=this.getLength())return;const e=this.array_[t];return this.array_.splice(t,1),this.updateLength_(),this.dispatchEvent(new gn(wt.REMOVE,e,t)),e}setAt(t,e){const i=this.getLength();if(t>=i){this.insertAt(t,e);return}if(t<0)throw new Error("Index out of bounds: "+t);this.unique_&&this.assertUnique_(e,t);const s=this.array_[t];this.array_[t]=e,this.dispatchEvent(new gn(wt.REMOVE,s,t)),this.dispatchEvent(new gn(wt.ADD,e,t))}updateLength_(){this.set(Nr.LENGTH,this.array_.length)}assertUnique_(t,e){for(let i=0,s=this.array_.length;i1?(e=s,i=r):l>0&&(e+=o*l,i+=a*l)}return hi(n,t,e,i)}function hi(n,t,e,i){const s=e-n,r=i-t;return s*s+r*r}function fl(n){const t=n.length;for(let i=0;ipn?o=pn:o<-pn&&(o=-pn),t[r+1]=o}return t}function yl(n,t,e,i){const s=n.length;e=e>1?e:2,i=i??e,t===void 0&&(e>2?t=n.slice():t=new Array(s));for(let r=0;rs&&(l=l|ht.RIGHT),ar&&(l=l|ht.ABOVE),l===ht.UNKNOWN&&(l=ht.INTERSECTING),l}function Ft(){return[1/0,1/0,-1/0,-1/0]}function Se(n,t,e,i,s){return s?(s[0]=n,s[1]=t,s[2]=e,s[3]=i,s):[n,t,e,i]}function Bn(n){return Se(1/0,1/0,-1/0,-1/0,n)}function Yo(n,t){const e=n[0],i=n[1];return Se(e,i,e,i,t)}function $s(n,t,e,i,s){const r=Bn(s);return Zo(r,n,t,e,i)}function Yi(n,t){return n[0]==t[0]&&n[2]==t[2]&&n[1]==t[1]&&n[3]==t[3]}function Tl(n,t){return t[0]Math.max(s,Pn(n,r)),0);return e[t]=i,i}function Zh(n,t){const e=[],i=[],s=[];let r=0,o=0,a=0,l=0;for(let c=0,h=t.length;c<=h;c+=2){const u=t[c];if(u===`
-`||c===h){r=Math.max(r,o),s.push(o),o=0,a+=l,l=0;continue}const d=t[c+1]||n.font,f=Pn(d,u);e.push(f),o+=f;const g=Yh(d);i.push(g),l=Math.max(l,g)}return{width:r,height:a,widths:e,heights:i,lineWidths:s}}function Kh(n,t,e,i,s,r,o,a,l,c,h){n.save(),e!==1&&(n.globalAlpha===void 0?n.globalAlpha=u=>u.globalAlpha*=e:n.globalAlpha*=e),t&&n.transform.apply(n,t),i.contextInstructions?(n.translate(l,c),n.scale(h[0],h[1]),Bh(i,n)):h[0]<0||h[1]<0?(n.translate(l,c),n.scale(h[0],h[1]),n.drawImage(i,s,r,o,a,0,0,o,a)):n.drawImage(i,s,r,o,a,l,c,o*h[0],a*h[1]),n.restore()}function Bh(n,t){const e=n.contextInstructions;for(let i=0,s=e.length;ithis.imageState_=Y.LOADED),this.render()}clone(){const t=this.getScale(),e=new es({fill:this.getFill()?this.getFill().clone():void 0,points:this.getPoints(),radius:this.getRadius(),radius2:this.getRadius2(),angle:this.getAngle(),stroke:this.getStroke()?this.getStroke().clone():void 0,rotation:this.getRotation(),rotateWithView:this.getRotateWithView(),scale:Array.isArray(t)?t.slice():t,displacement:this.getDisplacement().slice(),declutterMode:this.getDeclutterMode()});return e.setOpacity(this.getOpacity()),e}getAnchor(){const t=this.size_,e=this.getDisplacement(),i=this.getScaleArray();return[t[0]/2-e[0]/i[0],t[1]/2+e[1]/i[1]]}getAngle(){return this.angle_}getFill(){return this.fill_}setFill(t){this.fill_=t,this.render()}getHitDetectionImage(){return this.hitDetectionCanvas_||(this.hitDetectionCanvas_=this.createHitDetectionCanvas_(this.renderOptions_)),this.hitDetectionCanvas_}getImage(t){let e=this.canvases_[t];if(!e){const i=this.renderOptions_,s=rt(i.size*t,i.size*t);this.draw_(i,s,t),e=s.canvas,this.canvases_[t]=e}return e}getPixelRatio(t){return t}getImageSize(){return this.size_}getImageState(){return this.imageState_}getOrigin(){return this.origin_}getPoints(){return this.points_}getRadius(){return this.radius}getRadius2(){return this.radius2_}getSize(){return this.size_}getStroke(){return this.stroke_}setStroke(t){this.stroke_=t,this.render()}listenImageChange(t){}load(){}unlistenImageChange(t){}calculateLineJoinSize_(t,e,i){if(e===0||this.points_===1/0||t!=="bevel"&&t!=="miter")return e;let s=this.radius,r=this.radius2_===void 0?s:this.radius2_;if(si(r)<=s(r);case C.GreaterThan:return r=>i(r)>s(r);case C.GreaterThanOrEqualTo:return r=>i(r)>=s(r);default:throw new Error(`Unsupported comparison operator ${e}`)}}function uc(n,t){const e=n.operator,i=n.args.length,s=new Array(i);for(let r=0;r{for(let o=0;o{for(let o=0;o{const o=s[0](r),a=s[1](r),l=s[2](r);return o>=a&&o<=l};case C.In:return r=>{const o=s[0](r);for(let a=1;a!s[0](r);default:throw new Error(`Unsupported logical operator ${e}`)}}function dc(n,t){const e=n.operator,i=n.args.length,s=new Array(i);for(let r=0;r{let o=1;for(let a=0;as[0](r)/s[1](r);case C.Add:return r=>{let o=0;for(let a=0;as[0](r)-s[1](r);case C.Clamp:return r=>{const o=s[0](r),a=s[1](r);if(ol?l:o};case C.Mod:return r=>s[0](r)%s[1](r);case C.Pow:return r=>Math.pow(s[0](r),s[1](r));case C.Abs:return r=>Math.abs(s[0](r));case C.Floor:return r=>Math.floor(s[0](r));case C.Ceil:return r=>Math.ceil(s[0](r));case C.Round:return r=>Math.round(s[0](r));case C.Sin:return r=>Math.sin(s[0](r));case C.Cos:return r=>Math.cos(s[0](r));case C.Atan:return i===2?r=>Math.atan2(s[0](r),s[1](r)):r=>Math.atan(s[0](r));case C.Sqrt:return r=>Math.sqrt(s[0](r));default:throw new Error(`Unsupported numeric operator ${e}`)}}function fc(n,t){const e=n.args.length,i=new Array(e);for(let s=0;s{this.clickTimeoutId_=void 0;const i=new 
we(nt.SINGLECLICK,this.map_,t);this.dispatchEvent(i)},250)}updateActivePointers_(t){const e=t,i=e.pointerId;if(e.type==nt.POINTERUP||e.type==nt.POINTERCANCEL){delete this.trackedTouches_[i];for(const s in this.trackedTouches_)if(this.trackedTouches_[s].target!==e.target){delete this.trackedTouches_[s];break}}else(e.type==nt.POINTERDOWN||e.type==nt.POINTERMOVE)&&(this.trackedTouches_[i]=e);this.activePointers_=Object.values(this.trackedTouches_)}handlePointerUp_(t){this.updateActivePointers_(t);const e=new we(nt.POINTERUP,this.map_,t,void 0,void 0,this.activePointers_);this.dispatchEvent(e),this.emulateClicks_&&!e.defaultPrevented&&!this.dragging_&&this.isMouseActionButton_(t)&&this.emulateClick_(this.down_),this.activePointers_.length===0&&(this.dragListenerKeys_.forEach(tt),this.dragListenerKeys_.length=0,this.dragging_=!1,this.down_=null)}isMouseActionButton_(t){return t.button===0}handlePointerDown_(t){this.emulateClicks_=this.activePointers_.length===0,this.updateActivePointers_(t);const e=new we(nt.POINTERDOWN,this.map_,t,void 0,void 0,this.activePointers_);if(this.dispatchEvent(e),this.down_=new PointerEvent(t.type,t),Object.defineProperty(this.down_,"target",{writable:!1,value:t.target}),this.dragListenerKeys_.length===0){const i=this.map_.getOwnerDocument();this.dragListenerKeys_.push(V(i,nt.POINTERMOVE,this.handlePointerMove_,this),V(i,nt.POINTERUP,this.handlePointerUp_,this),V(this.element_,nt.POINTERCANCEL,this.handlePointerUp_,this)),this.element_.getRootNode&&this.element_.getRootNode()!==i&&this.dragListenerKeys_.push(V(this.element_.getRootNode(),nt.POINTERUP,this.handlePointerUp_,this))}}handlePointerMove_(t){if(this.isMoving_(t)){this.updateActivePointers_(t),this.dragging_=!0;const e=new we(nt.POINTERDRAG,this.map_,t,this.dragging_,void 0,this.activePointers_);this.dispatchEvent(e)}}relayMoveEvent_(t){this.originalPointerMoveEvent_=t;const e=!!(this.down_&&this.isMoving_(t));this.dispatchEvent(new we(nt.POINTERMOVE,this.map_,t,e))}handleTouchMove_(t){const e=this.originalPointerMoveEvent_;(!e||e.defaultPrevented)&&(typeof t.cancelable!="boolean"||t.cancelable===!0)&&t.preventDefault()}isMoving_(t){return this.dragging_||Math.abs(t.clientX-this.down_.clientX)>this.moveTolerance_||Math.abs(t.clientY-this.down_.clientY)>this.moveTolerance_}disposeInternal(){this.relayedListenerKey_&&(tt(this.relayedListenerKey_),this.relayedListenerKey_=null),this.element_.removeEventListener(z.TOUCHMOVE,this.boundHandleTouchMove_),this.pointerdownListenerKey_&&(tt(this.pointerdownListenerKey_),this.pointerdownListenerKey_=null),this.dragListenerKeys_.forEach(tt),this.dragListenerKeys_.length=0,this.element_=null,super.disposeInternal()}}const Ce={POSTRENDER:"postrender",MOVESTART:"movestart",MOVEEND:"moveend",LOADSTART:"loadstart",LOADEND:"loadend"},_t={LAYERGROUP:"layergroup",SIZE:"size",TARGET:"target",VIEW:"view"},kn=1/0;class Nc{constructor(t,e){this.priorityFunction_=t,this.keyFunction_=e,this.elements_=[],this.priorities_=[],this.queuedElements_={}}clear(){this.elements_.length=0,this.priorities_.length=0,nn(this.queuedElements_)}dequeue(){const t=this.elements_,e=this.priorities_,i=t[0];t.length==1?(t.length=0,e.length=0):(t[0]=t.pop(),e[0]=e.pop(),this.siftUp_(0));const s=this.keyFunction_(i);return delete this.queuedElements_[s],i}enqueue(t){j(!(this.keyFunction_(t)in this.queuedElements_),"Tried to enqueue an `element` that was already added to the queue");const e=this.priorityFunction_(t);return 
e!=kn?(this.elements_.push(t),this.priorities_.push(e),this.queuedElements_[this.keyFunction_(t)]=!0,this.siftDown_(0,this.elements_.length-1),!0):!1}getCount(){return this.elements_.length}getLeftChildIndex_(t){return t*2+1}getRightChildIndex_(t){return t*2+2}getParentIndex_(t){return t-1>>1}heapify_(){let t;for(t=(this.elements_.length>>1)-1;t>=0;t--)this.siftUp_(t)}isEmpty(){return this.elements_.length===0}isKeyQueued(t){return t in this.queuedElements_}isQueued(t){return this.isKeyQueued(this.keyFunction_(t))}siftUp_(t){const e=this.elements_,i=this.priorities_,s=e.length,r=e[t],o=i[t],a=t;for(;t>1;){const l=this.getLeftChildIndex_(t),c=this.getRightChildIndex_(t),h=ct;){const a=this.getParentIndex_(e);if(s[a]>o)i[e]=i[a],s[e]=s[a],e=a;else break}i[e]=r,s[e]=o}reprioritize(){const t=this.priorityFunction_,e=this.elements_,i=this.priorities_;let s=0;const r=e.length;let o,a,l;for(a=0;a