cloud-game/pkg/worker/server.go
sergystepanov 980a97a526
Handle no config situation for workers (#253)
(experimental feature)

Before a worker can start, it needs a configuration file. If such a file is not found, the worker may request its configuration from the coordinator it is connected to.

Added example logic for blocking a worker until a successful packet exchange with the coordinator has been made, as sketched below.
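
A minimal sketch of that blocking idea (the function shape and channel signalling below are illustrative assumptions, not the actual worker/coordinator API):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // waitForConfig blocks until the coordinator delivers a configuration
    // (signalled by closing the returned channel) or the timeout expires.
    func waitForConfig(requestConfig func() <-chan struct{}, timeout time.Duration) error {
        select {
        case <-requestConfig():
            return nil // config received, the worker may start
        case <-time.After(timeout):
            return errors.New("no config received from the coordinator")
        }
    }

    func main() {
        // simulate a coordinator that answers after 100 ms
        request := func() <-chan struct{} {
            done := make(chan struct{})
            go func() { time.Sleep(100 * time.Millisecond); close(done) }()
            return done
        }
        fmt.Println(waitForConfig(request, time.Second)) // <nil>
    }

The idea is simply a select between the coordinator's reply and a deadline.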

* Add error return for config loader

* Add config loaded flag to worker

* Add zone flag

* Add a custom mutex lock with timeout (see the sketch after this list)

* Refactor worker runtime

* Refactor internal api

* Extract monitoring server config

* Extract worker HTTP(S) server

* Add generic sub-server interface

* Add internal coordinator API

* Add internal routes and handlers to worker

* Add internal worker API

* Refactor worker run

* Migrate serverId call to new API

* Add packet handler to cws

* Extract handlers for internal worker routes in coordinator

* Pass worker to the worker internal handlers

* Cleanup worker handlers in coordinator

* Add closeRoom packet handler to the API

* Add GetRoom packet handler to the API

* Add RegisterRoom packet handler to the API

* Add IceCandidate packet handler to the API (internal and browser)

* Add Heartbeat packet handler to the API (internal and browser)

* Rename worker routes init function

* Extract worker/coordinator internal ws handlers

* Update timed locker

* Allow sequential timed locks

* Add config request from workers

* Add nil check for the route registration functions
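
For reference, a rough sketch of what a mutex lock with a timeout could look like; the channel-based approach and the `TimedLock`/`Lock`/`Unlock` names are assumptions, not the exact code added in this change:

    package main

    import (
        "fmt"
        "time"
    )

    // TimedLock behaves like a mutex whose Lock call gives up after a timeout.
    // A buffered channel of size one holds the lock token.
    type TimedLock struct{ c chan struct{} }

    func NewTimedLock() *TimedLock { return &TimedLock{c: make(chan struct{}, 1)} }

    // Lock acquires the lock, or returns false when the timeout expires first.
    func (l *TimedLock) Lock(timeout time.Duration) bool {
        select {
        case l.c <- struct{}{}:
            return true
        case <-time.After(timeout):
            return false
        }
    }

    // Unlock releases the lock so the next timed Lock can succeed.
    func (l *TimedLock) Unlock() { <-l.c }

    func main() {
        l := NewTimedLock()
        fmt.Println(l.Lock(time.Second))           // true: acquired
        fmt.Println(l.Lock(50 * time.Millisecond)) // false: still held, times out
        l.Unlock()
        fmt.Println(l.Lock(time.Second))           // true: sequential locks work after Unlock
    }

A buffered channel keeps the lock reusable, which is what allows the same lock to be taken again after each Unlock (the "sequential timed locks" case).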
2021-01-03 21:23:55 +03:00


package worker

import (
	"crypto/tls"
	"fmt"
	"log"
	"net/http"
	"strconv"
	"time"

	"github.com/giongto35/cloud-game/v2/pkg/environment"

	"golang.org/x/crypto/acme"
	"golang.org/x/crypto/acme/autocert"
)

const stagingLEURL = "https://acme-staging-v02.api.letsencrypt.org/directory"
func makeServerFromMux(mux *http.ServeMux) *http.Server {
	// set timeouts so that a slow or malicious client doesn't
	// hold resources forever
	return &http.Server{
		ReadTimeout:  5 * time.Second,
		WriteTimeout: 5 * time.Second,
		IdleTimeout:  120 * time.Second,
		Handler:      mux,
	}
}
func makeHTTPServer() *http.Server {
	mux := &http.ServeMux{}
	// a simple endpoint that writes a fixed string back to the caller
	mux.HandleFunc("/echo", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Access-Control-Allow-Origin", "*")
		fmt.Fprintln(w, "echo")
	})
	return makeServerFromMux(mux)
}
func makeHTTPToHTTPSRedirectServer() *http.Server {
	handleRedirect := func(w http.ResponseWriter, r *http.Request) {
		newURI := "https://" + r.Host + r.URL.String()
		http.Redirect(w, r, newURI, http.StatusFound)
	}
	mux := &http.ServeMux{}
	mux.HandleFunc("/", handleRedirect)
	return makeServerFromMux(mux)
}
func (wrk *Worker) spawnServer(port int) {
	var certManager *autocert.Manager
	var httpsSrv *http.Server

	mode := wrk.conf.Environment.Get()
	if mode.AnyOf(environment.Production, environment.Staging) {
		serverConfig := wrk.conf.Worker.Server
		httpsSrv = makeHTTPServer()
		httpsSrv.Addr = fmt.Sprintf(":%d", serverConfig.HttpsPort)
		// when no certificate chain/key is configured, fall back to Let's Encrypt
		if serverConfig.HttpsChain == "" || serverConfig.HttpsKey == "" {
			serverConfig.HttpsChain = ""
			serverConfig.HttpsKey = ""
			var leurl string
			if mode == environment.Staging {
				leurl = stagingLEURL
			} else {
				leurl = acme.LetsEncryptURL
			}
			certManager = &autocert.Manager{
				Prompt: autocert.AcceptTOS,
				Cache:  autocert.DirCache("assets/cache"),
				Client: &acme.Client{DirectoryURL: leurl},
			}
			httpsSrv.TLSConfig = &tls.Config{GetCertificate: certManager.GetCertificate}
		}
		go func(chain string, key string) {
			log.Printf("Starting HTTPS server on %s\n", httpsSrv.Addr)
			err := httpsSrv.ListenAndServeTLS(chain, key)
			if err != nil {
				log.Printf("httpsSrv.ListenAndServeTLS() failed with %s", err)
			}
		}(serverConfig.HttpsChain, serverConfig.HttpsKey)
	}
	// in production/staging the plain HTTP server only redirects to HTTPS
	var httpSrv *http.Server
	if mode.AnyOf(environment.Production, environment.Staging) {
		httpSrv = makeHTTPToHTTPSRedirectServer()
	} else {
		httpSrv = makeHTTPServer()
	}
	if certManager != nil {
		// let autocert answer ACME HTTP-01 challenges on the plain HTTP port
		httpSrv.Handler = certManager.HTTPHandler(httpSrv.Handler)
	}
	startServer(httpSrv, port)
}
func startServer(serv *http.Server, startPort int) {
	// It's recommended to run one worker per instance.
	// This loop makes sure that several workers on the same host can still
	// start by probing the next ports when the first one is already taken.
	for port, n := startPort, startPort+100; port < n; port++ {
		serv.Addr = ":" + strconv.Itoa(port)
		err := serv.ListenAndServe()
		if err == http.ErrServerClosed {
			log.Printf("HTTP(S) server was closed")
			return
		}
		// any other error most likely means the port is busy, so try the next one
	}
	log.Printf("error: couldn't find an open port in range %v-%v\n", startPort, startPort+100)
}