mirror of https://github.com/giongto35/cloud-game.git
synced 2026-01-23 02:34:42 +00:00
parent 66a13943bc
commit fb8ee791df
15 changed files with 283 additions and 269 deletions
.github/workflows/cd/_provider/digital-ocean/run.sh (vendored, new file, +4)
@@ -0,0 +1,4 @@
#!/bin/sh

ufw disable 2> /dev/null;
source /etc/profile;
.github/workflows/cd/_provider/oracle-cloud/run-once.sh (vendored, new file, +15)
@@ -0,0 +1,15 @@
#!/bin/bash
#sudo

# open all firewall policies and flush existing rules
iptables -P INPUT ACCEPT
iptables -P OUTPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -F

iptables --flush

# persist the rules; install iptables-persistent if its save tool is missing
which "netfilter-persistent" > /dev/null 2>&1
if [ $? != 0 ]; then
  apt-get install -y iptables-persistent
fi
netfilter-persistent save
.github/workflows/cd/cloudretro.io/coordinator.env (vendored, new file, +4)
@@ -0,0 +1,4 @@
CLOUD_GAME_COORDINATOR_SERVER_ADDRESS=:80
CLOUD_GAME_COORDINATOR_SERVER_HTTPS=true
CLOUD_GAME_COORDINATOR_SERVER_TLS_DOMAIN=cloudretro.io
CLOUD_GAME_ENVIRONMENT=prod
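A note on naming: these CLOUD_GAME_* variables appear to mirror the YAML config paths (an assumption inferred from the Server struct change later in this commit, not something this diff states):

  # assumed convention: CLOUD_GAME_ prefix + upper-cased, underscore-joined config path
  CLOUD_GAME_COORDINATOR_SERVER_TLS_DOMAIN=cloudretro.io  # ~ coordinator.server.tls.domain
  CLOUD_GAME_ENVIRONMENT=prod                             # ~ environment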
.github/workflows/cd/cloudretro.io/script.env (vendored, new file, +5)
@@ -0,0 +1,5 @@
COORDINATORS="167.172.70.98 cloudretro.io"
DO_ADDRESS_LIST="cloud-gaming cloud-gaming-eu cloud-gaming-usw"
IP_LIST=47.244.229.182
SPLIT_HOSTS=1
WORKERS=${WORKERS:-5}
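deploy-app.sh imports this file with set -a, so every assignment becomes an exported environment variable for the rest of the run; a minimal sketch of that mechanism:

  set -a              # auto-export every variable assigned from now on
  source script.env   # COORDINATORS, IP_LIST, WORKERS, etc. land in the environment
  set +a              # back to normal assignment semantics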
.github/workflows/cd/cloudretro.io/worker.env (vendored, new file, +7)
@@ -0,0 +1,7 @@
CLOUD_GAME_WORKER_NETWORK_COORDINATORADDRESS=cloudretro.io
CLOUD_GAME_WORKER_NETWORK_PUBLICADDRESS=cloudretro.io
CLOUD_GAME_WORKER_NETWORK_SECURE=true
CLOUD_GAME_WORKER_SERVER_ADDRESS=:80
CLOUD_GAME_WORKER_SERVER_HTTPS=true
CLOUD_GAME_WORKER_SERVER_TLS_ADDRESS=:443
CLOUD_GAME_WORKER_SERVER_TLS_DOMAIN=cloudretro.io
.github/workflows/cd/deploy-app.sh (vendored, new file, +200)
@@ -0,0 +1,200 @@
#!/bin/bash

# parse command-line arguments
for arg in "$@"
do
  case $arg in
    -e=*|--env-dir=*)
      ENV_DIR="${arg#*=}"
      shift # Remove --env-dir= from processing
      ;;
    -p=*|--provider-dir=*)
      PROVIDER_DIR="${arg#*=}"
      shift
      ;;
    -s=*|--ssh-key=*)
      SSH_KEY="${arg#*=}"
      shift # Remove --ssh-key= from processing
      ;;
    *)
      REST_ARGUMENTS+=("$arg")
      shift # Remove generic argument from processing
      ;;
  esac
done

# Environment merging
#
# Import the optional script.env file.
# This file contains script runtime params.
if [[ ! -z "${ENV_DIR}" ]]; then
  f="$ENV_DIR/script.env"
  if [[ -e "$f" ]]
  then
    echo $'\n'"script.env:"
    cat "$f"
    set -a
    source $f
    set +a
    echo ""
  fi
fi

# ^._.^
REQUIRED_PACKAGES="cat jq ssh"

# Deployment addresses
#
# The choice of the deployed app is as follows:
#   - by default, the worker app is deployed onto each server in the IP_LIST list
#   - if the current address is in the COORDINATORS list, the coordinator app is deployed instead
#
# a list of machines to deploy to
IP_LIST=${IP_LIST:-}
# a list of addresses from IP_LIST that should run only the coordinator app
COORDINATORS=${COORDINATORS:-}

# Digital Ocean operations
#DO_TOKEN
DO_ADDRESS_LIST=${DO_ADDRESS_LIST:-}
DO_API_ENDPOINT=${DO_API_ENDPOINT:-"https://api.digitalocean.com/v2/droplets?tag_name="}

LOCAL_WORK_DIR=${LOCAL_WORK_DIR:-"./.github/workflows/cd"}
REMOTE_WORK_DIR=${REMOTE_WORK_DIR:-"/cloud-game"}
DOCKER_IMAGE_TAG=${DOCKER_IMAGE_TAG:-latest}
echo "Docker tag: $DOCKER_IMAGE_TAG"
# the total number of worker replicas to deploy
WORKERS=${WORKERS:-5}

echo "Starting deployment"

if [[ ! -z "${DO_TOKEN}" ]]; then
  REQUIRED_PACKAGES+=" curl"
fi

for pkg in $REQUIRED_PACKAGES; do
  which $pkg > /dev/null 2>&1
  if [ $? -ne 0 ]; then
    echo "Required package: $pkg is not installed"
    echo "Please run: sudo apt-get -qq update && sudo apt-get -qq install -y $REQUIRED_PACKAGES"
    exit 1
  fi
done

if [[ ! -z "${DO_TOKEN}" ]]; then
  for tag in $DO_ADDRESS_LIST; do
    echo "$tag processing..."
    call=$(curl -Ss -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DO_TOKEN" $DO_API_ENDPOINT$tag)
    res=$?
    if test "$res" == "0"; then
      IP_LIST+=$(echo "$call" | jq -r -j \
        ".droplets[] | .networks.v4[] | select(.type | contains(\"public\")).ip_address, \" \"")
    else
      echo "curl failed with the code [$res]"
    fi
  done
fi
echo "IPs:" $IP_LIST

# Run command builder
#
# By default it runs docker-compose with both the coordinator and worker apps.
# With the SPLIT_HOSTS parameter specified, it runs only the coordinator app
# if the current server address is found in the COORDINATORS variable, otherwise
# it runs just the worker app.
#
# flags
deploy_coordinator=1
deploy_worker=1
# build run command
cmd="ZONE=\$zone docker-compose up -d --remove-orphans --scale worker=\${workers:-$WORKERS}"
if [ ! -z "$SPLIT_HOSTS" ]; then
  cmd+=" worker"
  deploy_coordinator=0
  deploy_worker=1
fi

for ip in $IP_LIST; do
  echo "Processing $ip"
  if ! ssh-keygen -q -F $ip &>/dev/null; then
    echo "Adding new host to the known_hosts file"
    ssh-keyscan $ip >> ~/.ssh/known_hosts
  fi

  # override run command
  if [ ! -z "$SPLIT_HOSTS" ]; then
    for addr in $COORDINATORS; do
      if [ "$ip" == $addr ]; then
        cmd="docker-compose up -d --remove-orphans coordinator"
        deploy_coordinator=1
        deploy_worker=0
        break
      fi
    done
  fi

  run="#!/bin/bash"$'\n'
  # add a custom run script
  if [[ ! -z "${PROVIDER_DIR}" ]]; then
    f=$PROVIDER_DIR/run.sh
    if [[ -e "$f" ]]; then
      echo "A custom run script has been found"
      run+=$(tail -n +2 $f)$'\n'
    fi
  fi
  run+="IMAGE_TAG=$DOCKER_IMAGE_TAG APP_DIR=$REMOTE_WORK_DIR $cmd"

  echo ""
  echo "run.sh:"$'\n'"$run"
  echo ""

  compose_src=$(cat $LOCAL_WORK_DIR/docker-compose.yml)

  # build the Docker container env file
  run_env=""
  if [[ ! -z "${ENV_DIR}" ]]; then
    if [ $deploy_coordinator == 1 ]; then
      env_f=$ENV_DIR/coordinator.env
      if [[ -e "$env_f" ]]; then
        echo "Merge coordinator .env -> run.env"
        run_env+=$(cat $env_f)$'\n'
      fi
    fi
    if [ $deploy_worker == 1 ]; then
      env_f=$ENV_DIR/worker.env
      if [[ -e "$env_f" ]]; then
        echo "Merge worker .env -> run.env"
        run_env+=$(cat $env_f)
      fi
    fi
  fi
  echo $'\n'"run.env:"$'\n'"$run_env"$'\n'

  # optional ssh key param
  ssh_i=""
  if [[ ! -z "${SSH_KEY}" ]]; then
    ssh_i="-i ${SSH_KEY}"
  fi

  # !to add docker-compose install / warning

  # a custom run-once script, executed on the remote host before deployment
  if [[ ! -z "${PROVIDER_DIR}" ]]; then
    f=$PROVIDER_DIR/run-once.sh
    if [[ -e "$f" ]]; then
      echo "A custom run once script has been found"
      echo $'\n'"run-once.sh:"$'\n'"$(cat $f)"$'\n'
      ssh ubuntu@$ip -t ${ssh_i:-} sudo sh < $f
    fi
  fi

  ssh ubuntu@$ip ${ssh_i:-} "\
    mkdir -p $REMOTE_WORK_DIR; \
    cd $REMOTE_WORK_DIR; \
    echo '$compose_src' > ./docker-compose.yml; \
    echo '$run_env' > ./run.env; \
    docker-compose pull; \
    echo '$run' > ./run.sh; \
    chmod +x ./run.sh; \
    ./run.sh"
done
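For reference, a hypothetical local invocation of this script (the token and key path are placeholders, not values from this commit):

  # fetch droplet IPs via the DO API, then deploy coordinator/worker per host
  DO_TOKEN=<do-api-token> DOCKER_IMAGE_TAG=latest \
    ./deploy-app.sh \
      --env-dir=./cloudretro.io \
      --provider-dir=./_provider/digital-ocean \
      --ssh-key=$HOME/.ssh/id_ed25519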
.github/workflows/cd/docker-compose.yml (vendored, new file, +30)
@@ -0,0 +1,30 @@
version: "3"

x-params:
  &default-params
  env_file: run.env
  image: ghcr.io/giongto35/cloud-game/cloud-game:${IMAGE_TAG:-latest}
  network_mode: "host"
  privileged: true
  restart: always

services:

  coordinator:
    <<: *default-params
    command: coordinator --v=5
    volumes:
      - ${APP_DIR:-/cloud-game}/cache:/usr/local/share/cloud-game/assets/cache
      - ${APP_DIR:-/cloud-game}/games:/usr/local/share/cloud-game/assets/games

  worker:
    <<: *default-params
    environment:
      - MESA_GL_VERSION_OVERRIDE=3.3
    entrypoint: [ "/bin/sh", "-c", "xvfb-run -a $$@", "" ]
    command: worker --v=5 --zone=${ZONE:-}
    volumes:
      - ${APP_DIR:-/cloud-game}/cache:/usr/local/share/cloud-game/assets/cache
      - ${APP_DIR:-/cloud-game}/cores:/usr/local/share/cloud-game/assets/cores
      - ${APP_DIR:-/cloud-game}/games:/usr/local/share/cloud-game/assets/games
      - ${APP_DIR:-/cloud-game}/home:/root/.cr
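On a worker host, the run.sh generated by deploy-app.sh boils down to a single command against this compose file; a sketch with illustrative values (the zone and replica count are not from this commit):

  ZONE=eu IMAGE_TAG=latest APP_DIR=/cloud-game \
    docker-compose up -d --remove-orphans --scale worker=5 worker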
.github/workflows/deploy.yml (vendored, 8 changes)
@@ -18,7 +18,9 @@ jobs:
       - name: Deploy to all servers
         env:
           USERNAME: ${{ github.repository_owner }}
           PASSWORD: ${{ secrets.GITHUB_TOKEN }}
+          DO_TOKEN: ${{ secrets.DO_TOKEN }}
-        run: ./.github/workflows/redeploy/redeploy.sh
+        run: |
+          cd ./.github/workflows/cd
+          ./deploy-app.sh \
+            --env-dir=./cloudretro.io \
+            --provider-dir=./_provider/digital-ocean
.github/workflows/redeploy/config.yaml (vendored, deleted, -192)
@@ -1,192 +0,0 @@
# Application configuration file
#

# application environment (dev, staging, prod)
environment: prod

coordinator:
  # address if the server want to connect directly to debug
  debugHost:
  # games library
  library:
    # some directory which is gonna be the root folder for the library
    # where games are stored
    basePath: assets/games
    # an explicit list of supported file extensions
    # which overrides Libretro emulator ROMs configs
    supported:
    # a list of ignored words in the ROM filenames
    ignored:
      - neogeo
      - pgm
    # print some additional info
    verbose: true
    # enable library directory live reload
    # (experimental)
    watchMode: false
  monitoring:
    port: 6601
    # enable Go profiler HTTP server
    profilingEnabled: false
    metricEnabled: false
    urlPrefix: /coordinator
  # the public domain of the coordinator
  publicDomain: cloudretro.io
  # specify the worker address that the client can ping (with protocol and port)
  pingServer:
  # HTTP(S) server config
  server:
    port: 80
    httpsPort: 443
    httpsKey:
    httpsChain:

worker:
  network:
    # a coordinator address to connect to
    coordinatorAddress: cloudretro.io
    # ISO Alpha-2 country code to group workers by zones
    zone:
  monitoring:
    # monitoring server port
    port: 6601
    profilingEnabled: false
    # monitoring server URL prefix
    metricEnabled: true
    urlPrefix: /worker
  server:
    port: 80
    httpsPort: 443
    httpsKey:
    httpsChain:

emulator:
  # set output viewport scale factor
  scale: 1

  aspectRatio:
    # enable aspect ratio changing
    # (experimental)
    keep: false
    # recalculate emulator game frame size to the given WxH
    width: 320
    height: 240

  # save directory for emulator states
  # special tag {user} will be replaced with current user's home dir
  storage: "{user}/.cr/save"

  libretro:
    cores:
      paths:
        libs: assets/cores
        configs: assets/cores
      # Config params for Libretro cores repository,
      # available types are:
      #   - buildbot (the default Libretro nightly repository)
      #   - github (GitHub raw repository with a similar structure to buildbot)
      #   - raw (just a link to a zip file extracted as is)
      repo:
        # enable auto-download for the list of cores (list->lib)
        sync: true
        main:
          type: buildbot
          url: https://buildbot.libretro.com/nightly
          # if repo has file compression
          compression: zip
        # a secondary repo to use i.e. for not found in the main cores
        secondary:
          type: github
          url: https://github.com/sergystepanov/libretro-spiegel/blob/main
          compression: zip
      # Libretro core configuration
      #
      # Available config params:
      #   - lib (string)
      #   - config (string)
      #   - roms ([]string)
      #   - width (int)
      #   - height (int)
      #   - ratio (float)
      #   - isGlAllowed (bool)
      #   - usesLibCo (bool)
      #   - hasMultitap (bool)
      list:
        gba:
          lib: mgba_libretro
          roms: [ "gba", "gbc" ]
          width: 240
          height: 160
        pcsx:
          lib: pcsx_rearmed_libretro
          config: pcsx_rearmed_libretro.cfg
          roms: [ "cue" ]
          width: 350
          height: 240
        nes:
          lib: nestopia_libretro
          roms: [ "nes" ]
          width: 256
          height: 240
        snes:
          lib: snes9x_libretro
          roms: [ "smc", "sfc", "swc", "fig", "bs" ]
          width: 256
          height: 224
          hasMultitap: true
        n64:
          lib: mupen64plus_next_libretro
          config: mupen64plus_next_libretro.cfg
          roms: [ "n64", "v64", "z64" ]
          width: 320
          height: 240
          isGlAllowed: true
          usesLibCo: true

encoder:
  audio:
    channels: 2
    # audio frame duration needed for WebRTC (Opus)
    frame: 20
    frequency: 48000
  video:
    # h264, vpx (VP8)
    codec: h264
    # see: https://trac.ffmpeg.org/wiki/Encode/H.264
    h264:
      # Constant Rate Factor (CRF) 0-51 (default: 23)
      crf: 17
      # ultrafast, superfast, veryfast, faster, fast, medium, slow, slower, veryslow, placebo
      preset: veryfast
      # baseline, main, high, high10, high422, high444
      profile: main
      # film, animation, grain, stillimage, psnr, ssim, fastdecode, zerolatency
      tune: zerolatency
      # 0-3
      logLevel: 0
    # see: https://www.webmproject.org/docs/encoder-parameters
    vpx:
      # target bitrate (KBit/s)
      bitrate: 1200
      # force keyframe interval
      keyframeInterval: 5

# run without a game
# (experimental)
withoutGame: false

webrtc:
  # turn off default Pion interceptors for performance reasons
  # (experimental)
  disableDefaultInterceptors:
  # a list of STUN/TURN servers for the client
  iceServers:
    - url: stun:stun.l.google.com:19302
  # instead of random unlimited port range for
  # WebRTC UDP connections, these params
  # define ICE candidates port range explicitly
  icePorts:
    min:
    max:
  # override ICE candidate IP, see: https://github.com/pion/webrtc/issues/835,
  # can be used for Docker bridged network internal IP override
  iceIpMap:
.github/workflows/redeploy/redeploy.sh (vendored, deleted, -21)
@@ -1,21 +0,0 @@
#! /bin/bash

iplist="47.244.229.182"
for tagName in cloud-gaming cloud-gaming-eu cloud-gaming-usw; do
  echo "scanning: $tagName"
  regional_iplist=$(curl -X GET -H "Content-Type: application/json" -H "Authorization: Bearer "$DO_TOKEN "https://api.digitalocean.com/v2/droplets?tag_name=$tagName" | jq -r ".droplets[]" | jq -r ".networks.v4[0].ip_address")

  for ip_address in $regional_iplist
  do
    iplist+=" $ip_address"
  done
done

echo "iplist "$iplist

for ip_address in $iplist
do
  .github/workflows/redeploy/redeploy_specific.sh $ip_address
done

echo 'done'
.github/workflows/redeploy/redeploy_specific.sh (vendored, deleted, -34)
@@ -1,34 +0,0 @@
#! /bin/bash

iplist=$1

for ip_address in $iplist
do
  echo $ip_address
  ssh-keyscan -H $ip_address >> ~/.ssh/known_hosts
  sleep 2

  if [ "$ip_address" == "167.172.70.98" ] || [ "$ip_address" == "cloudretro.io" ]
  then
    launchcommand="coordinator > /tmp/startup.log"
  else
    launchcommand="Xvfb :99 & worker --coordinatorhost cloudretro.io --zone \$zone > /tmp/startup.log"
  fi

  ssh root@$ip_address "mkdir -p /cloud-game/configs"
  rsync ./.github/workflows/redeploy/config.yaml root@$ip_address:/cloud-game/configs/config.yaml
  run_content="'#! /bin/bash
  echo $PASSWORD | docker login https://docker.pkg.github.com --username $USERNAME --password-stdin;
  ufw disable;
  docker system prune -f;
  source /etc/profile;
  docker pull docker.pkg.github.com/giongto35/cloud-game/cloud-game:latest;
  docker rm cloud-game -f;
  docker run --privileged -d --network=host --env DISPLAY=:99 --env MESA_GL_VERSION_OVERRIDE=3.3 -v cores:/usr/local/share/cloud-game/assets/cores -v /cloud-game/configs:/usr/local/share/cloud-game/configs -v /cloud-game/games:/usr/local/share/cloud-game/assets/games -v /cloud-game/cache:/usr/local/share/cloud-game/assets/cache --name cloud-game docker.pkg.github.com/giongto35/cloud-game/cloud-game bash -c \"$launchcommand\"'"

  ssh root@$ip_address "echo $run_content > ~/run.sh"
  ssh root@$ip_address "chmod +x run.sh; ./run.sh"

done

echo 'done'
configs/config.yaml (vendored, 3 changes)
@@ -39,7 +39,6 @@ coordinator:
     # Letsencrypt or self cert config
     tls:
       address: :443
-      letsencryptUrl:
       # allowed host name
       domain:
       # if both are set then will use certs
@@ -79,8 +78,6 @@ worker:
     # LetsEncrypt config
     # allowed host name
     domain:
-    # if empty will use URL from Go
-    letsencryptUrl:
     # Own certs config
     httpsCert:
     httpsKey:
@@ -11,11 +11,10 @@ type Server struct {
 	Address string
 	Https   bool
 	Tls     struct {
-		Address        string
-		Domain         string
-		LetsencryptUrl string
-		HttpsKey       string
-		HttpsCert      string
+		Address   string
+		Domain    string
+		HttpsKey  string
+		HttpsCert string
 	}
 }
@@ -1,21 +1,20 @@
 package httpx
 
-import (
-	"golang.org/x/crypto/acme"
-	"golang.org/x/crypto/acme/autocert"
-)
+import "golang.org/x/crypto/acme/autocert"
 
 type TLS struct {
 	CertManager *autocert.Manager
 }
 
-func NewTLSConfig(domain string) *TLS {
-	return &TLS{
-		CertManager: &autocert.Manager{
-			Prompt:     autocert.AcceptTOS,
-			HostPolicy: autocert.HostWhitelist(domain),
-			Cache:      autocert.DirCache("assets/cache"),
-			Client:     &acme.Client{DirectoryURL: acme.LetsEncryptURL},
-		},
-	}
-}
+func NewTLSConfig(host string) *TLS {
+	tls := TLS{
+		CertManager: &autocert.Manager{
+			Prompt: autocert.AcceptTOS,
+			Cache:  autocert.DirCache("assets/cache"),
+		},
+	}
+	if host != "" {
+		tls.CertManager.HostPolicy = autocert.HostWhitelist(host)
+	}
+	return &tls
+}
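With this change the cert manager drops the explicit ACME client (Let's Encrypt is autocert's default directory anyway) and only restricts hosts when a domain is configured. A quick hedged way to eyeball the certificate such a server actually serves (domain illustrative):

  echo | openssl s_client -connect cloudretro.io:443 -servername cloudretro.io 2>/dev/null \
    | openssl x509 -noout -issuer -subject -dates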
scripts/redeploy.sh (vendored, deleted, -1)
@@ -1 +0,0 @@
echo "test"