Commit 9558054449 ("Rename", parent 10cb6d5092)
Source: https://github.com/giongto35/cloud-game.git
18 changed files with 160 additions and 160 deletions
Makefile (vendored, 16 changes)
@@ -24,9 +24,9 @@ dep:

 # NOTE: there is problem with go mod vendor when it delete github.com/gen2brain/x264-go/x264c causing unable to build. https://github.com/golang/go/issues/26366
 #build.cross: build
-# CGO_ENABLED=1 GOOS=darwin GOARC=amd64 go build --ldflags '-linkmode external -extldflags "-static"' -o bin/overlord-darwin ./cmd/overlord
+# CGO_ENABLED=1 GOOS=darwin GOARC=amd64 go build --ldflags '-linkmode external -extldflags "-static"' -o bin/coordinator-darwin ./cmd/coordinator
 # CGO_ENABLED=1 GOOS=darwin GOARC=amd64 go build --ldflags '-linkmode external -extldflags "-static"' -o bin/overworker-darwin ./cmd/overworker
-# CC=arm-linux-musleabihf-gcc GOOS=linux GOARC=amd64 CGO_ENABLED=1 go build --ldflags '-linkmode external -extldflags "-static"' -o bin/overlord-linu ./cmd/overlord
+# CC=arm-linux-musleabihf-gcc GOOS=linux GOARC=amd64 CGO_ENABLED=1 go build --ldflags '-linkmode external -extldflags "-static"' -o bin/coordinator-linu ./cmd/coordinator
 # CC=arm-linux-musleabihf-gcc GOOS=linux GOARC=amd64 CGO_ENABLED=1 go build --ldflags '-linkmode external -extldflags "-static"' -o bin/overworker-linux ./cmd/overworker

 # A user can invoke tests in different ways:
@@ -64,20 +64,20 @@ dev.tools:
     ./hack/scripts/install_tools.sh

 dev.build: compile
-    go build -a -tags netgo -ldflags '-w' -o bin/overlord ./cmd/overlord
+    go build -a -tags netgo -ldflags '-w' -o bin/coordinator ./cmd/coordinator
     go build -a -tags netgo -ldflags '-w' -o bin/overworker ./cmd/overworker

 dev.build-local:
-    go build -o bin/overlord ./cmd/overlord
+    go build -o bin/coordinator ./cmd/coordinator
     go build -o bin/overworker ./cmd/overworker

 dev.run: dev.build-local
-    ./bin/overlord --v=5 &
-    ./bin/overworker --overlordhost localhost:8000
+    ./bin/coordinator --v=5 &
+    ./bin/overworker --coordinatorhost localhost:8000

 dev.run-docker:
     docker build . -t cloud-game-local
     docker stop cloud-game-local || true
     docker rm cloud-game-local || true
-    # Overlord and worker should be run separately.
-    docker run --privileged -v $PWD/games:/cloud-game/games -d --name cloud-game-local -p 8000:8000 -p 9000:9000 cloud-game-local bash -c "overlord --v=5 & overworker --overlordhost localhost:8000"
+    # Coordinator and worker should be run separately.
+    docker run --privileged -v $PWD/games:/cloud-game/games -d --name cloud-game-local -p 8000:8000 -p 9000:9000 cloud-game-local bash -c "coordinator --v=5 & overworker --coordinatorhost localhost:8000"
@@ -7,7 +7,7 @@ import (
     "os/signal"
     "time"

-    "github.com/giongto35/cloud-game/pkg/overlord"
+    "github.com/giongto35/cloud-game/pkg/coordinator"
     "github.com/giongto35/cloud-game/pkg/util/logging"
     "github.com/golang/glog"
     "github.com/spf13/pflag"
@@ -16,7 +16,7 @@ import (
 func main() {
     rand.Seed(time.Now().UTC().UnixNano())

-    cfg := overlord.NewDefaultConfig()
+    cfg := coordinator.NewDefaultConfig()
     cfg.AddFlags(pflag.CommandLine)

     logging.Init()
@@ -24,11 +24,11 @@ func main() {

     ctx, cancelCtx := context.WithCancel(context.Background())

-    glog.Infof("Initializing overlord server")
-    glog.V(4).Infof("Overlord configs %v", cfg)
-    o := overlord.New(ctx, cfg)
+    glog.Infof("Initializing coordinator server")
+    glog.V(4).Infof("Coordinator configs %v", cfg)
+    o := coordinator.New(ctx, cfg)
     if err := o.Run(); err != nil {
-        glog.Errorf("Failed to run overlord server, reason %v", err)
+        glog.Errorf("Failed to run coordinator server, reason %v", err)
         os.Exit(1)
     }

@@ -36,7 +36,7 @@ func main() {
     signal.Notify(stop, os.Interrupt)
     select {
     case <-stop:
-        glog.Infoln("Received SIGTERM, Quiting Overlord")
+        glog.Infoln("Received SIGTERM, Quiting Coordinator")
         o.Shutdown()
         cancelCtx()
     }
docs/designdoc/README.md (vendored, 16 changes)
@@ -1,6 +1,6 @@
 # Web-based Cloud Gaming Service Design Document

-Web-based Cloud Gaming Service contains multiple workers for gaming stream and a coordinator (Overlord) for distributing traffic and pairing up connection.
+Web-based Cloud Gaming Service contains multiple workers for gaming stream and a coordinator (Coordinator) for distributing traffic and pairing up connection.

 ## Worker

@@ -12,14 +12,14 @@ Worker is responsible for streaming game to frontend
 - On the other hand, input from users is sent to workers over WebRTC DataChannel. Game logic on the emulator will be updated based on the input stream.
 - Game state is stored in cloud storage, so all workers can collaborate and keep the same understanding with each other. It allows user can continue from the saved state in the next time.

-## Overlord
+## Coordinator

-Overlord is loadbalancer and coordinator, which is in charge of picking the most suitable workers for a user. Every time a user connects to Overlord, it will collect all the metric from all workers, i.e free CPU resources and latency from worker to user. Overlord will decide the best candidate based on the metric and setup peer-to-peer connection between worker and user based on WebRTC protocol
+Coordinator is loadbalancer and coordinator, which is in charge of picking the most suitable workers for a user. Every time a user connects to Coordinator, it will collect all the metric from all workers, i.e free CPU resources and latency from worker to user. Coordinator will decide the best candidate based on the metric and setup peer-to-peer connection between worker and user based on WebRTC protocol

-![screenshot](../img/overlord.png)
+![screenshot](../img/coordinator.png)

-1. A user connected to Overlord .
-2. Overlord will find the most suitable worker to serve the user.
-3. Overlord collects all latencies from workers to users as well as CPU usage on each machine.
-4. Overlord setup peer-to-peer handshake between worker and user by exchanging Session Description Protocol.
+1. A user connected to Coordinator .
+2. Coordinator will find the most suitable worker to serve the user.
+3. Coordinator collects all latencies from workers to users as well as CPU usage on each machine.
+4. Coordinator setup peer-to-peer handshake between worker and user by exchanging Session Description Protocol.
 5. A game is hosted on worker and streamed to the user.
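Steps 2 and 3 above describe a metrics-based pick: the coordinator scores each worker by measured latency and free CPU. A minimal Go sketch of that selection follows; it is illustrative only, and the `WorkerMetric` type and `pickWorker` helper are hypothetical names, not code from this commit.

```go
package main

import "fmt"

// WorkerMetric is a hypothetical snapshot of what the coordinator collects
// per worker: free CPU and the measured latency from that worker to the user.
type WorkerMetric struct {
	ServerID  string
	FreeCPU   float64 // fraction of idle CPU, 0..1
	LatencyMS float64 // round-trip latency from worker to user
}

// pickWorker prefers the lowest-latency worker and breaks ties by free CPU.
func pickWorker(metrics []WorkerMetric) (best WorkerMetric, ok bool) {
	for i, m := range metrics {
		if i == 0 || m.LatencyMS < best.LatencyMS ||
			(m.LatencyMS == best.LatencyMS && m.FreeCPU > best.FreeCPU) {
			best = m
		}
	}
	return best, len(metrics) > 0
}

func main() {
	workers := []WorkerMetric{
		{ServerID: "w1", FreeCPU: 0.2, LatencyMS: 80},
		{ServerID: "w2", FreeCPU: 0.9, LatencyMS: 35},
		{ServerID: "w3", FreeCPU: 0.5, LatencyMS: 35},
	}
	if w, ok := pickWorker(workers); ok {
		fmt.Println("chosen worker:", w.ServerID) // chosen worker: w2
	}
}
```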
docs/designdoc/implementation/README.md (vendored, 16 changes)
@@ -4,14 +4,14 @@
 ```
 .
 ├── cmd: service entrypoint
-│   ├── main.go: Spawn overlord or worker based on flag
+│   ├── main.go: Spawn coordinator or worker based on flag
 │   └── main_test.go
 ├── static: static file for front end
 │   ├── js
 │   │   └── ws.js: client logic
 │   ├── game.html: frontend with gameboy ui
 │   └── index_ws.html: raw frontend without ui
-├── overlord: coordinator
+├── coordinator: coordinator
 │   ├── handlers.go: coordinator entrypoint
 │   ├── browser.go: router listening to browser
 │   └── worker.go: router listening to worker
@@ -21,7 +21,7 @@
 │   │   ├── room.go: room logic
 │   │   └── media.go: video + audio encoding
 │   ├── handlers.go: worker entrypoint
-│   └── overlord.go: router listening to overlord
+│   └── coordinator.go: router listening to coordinator
 ├── emulator: emulator internal
 │   ├── nes: NES device internal
 │   ├── director.go: coordinator of views
@@ -35,9 +35,9 @@
 Room is a fundamental part of the system. Each user session will spawn a room with a game running inside. There is a pipeline to encode images and audio and stream them out from emulator to user. The pipeline also listens to all input and streams to the emulator.

 ## Worker
-Worker is an instance that can be provisioned to scale up the traffic. There are multiple rooms inside a worker. Worker will listen to overlord events in `overlord.go`.
+Worker is an instance that can be provisioned to scale up the traffic. There are multiple rooms inside a worker. Worker will listen to coordinator events in `coordinator.go`.

-## Overlord
-Overlord is the coordinator, which handles all communication with workers and frontend.
-Overlord will pair up a worker and a user for peer streaming. In WebRTC handshaking, two peers need to exchange their signature (Session Description Protocol) to initiate a peerconnection.
-Events come from frontend will be handled in `overlord/browser.go`. Events come from worker will be handled in `overlord/worker.go`. Overlord stays in the middle and relays handshake packages between workers and user.
+## Coordinator
+Coordinator is the coordinator, which handles all communication with workers and frontend.
+Coordinator will pair up a worker and a user for peer streaming. In WebRTC handshaking, two peers need to exchange their signature (Session Description Protocol) to initiate a peerconnection.
+Events come from frontend will be handled in `coordinator/browser.go`. Events come from worker will be handled in `coordinator/worker.go`. Coordinator stays in the middle and relays handshake packages between workers and user.
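This relay is what `coordinator/browser.go` in this commit implements: each `browserClient.Receive` handler tags the packet with the session ID, forwards it to the worker found in the `workerClients` map, and hands the worker's answer back to the browser. A self-contained sketch of the same pattern, with `Packet` and `Worker` as simplified stand-ins for the real `cws.WSPacket` and `WorkerClient` types:

```go
package main

import "fmt"

// Packet is a simplified stand-in for cws.WSPacket.
type Packet struct {
	ID, Data, SessionID string
}

// Worker stands in for a connected WorkerClient; the real SyncSend blocks
// until the worker replies over its WebSocket.
type Worker struct{ ServerID string }

func (w *Worker) SyncSend(p Packet) Packet {
	// Pretend the worker answered the SDP offer for this session.
	return Packet{ID: p.ID, Data: "sdp-answer", SessionID: p.SessionID}
}

// relay forwards a browser packet to the worker that owns the session and
// returns the worker's reply, mirroring the initwebrtc route in browser.go.
func relay(workers map[string]*Worker, serverID, sessionID string, req Packet) (Packet, error) {
	wc, ok := workers[serverID]
	if !ok {
		return Packet{}, fmt.Errorf("no worker registered for server %s", serverID)
	}
	req.SessionID = sessionID
	return wc.SyncSend(req), nil
}

func main() {
	workers := map[string]*Worker{"srv-1": {ServerID: "srv-1"}}
	resp, err := relay(workers, "srv-1", "sess-42", Packet{ID: "initwebrtc", Data: "sdp-offer"})
	fmt.Println(resp, err) // {initwebrtc sdp-answer sess-42} <nil>
}
```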
docs/img/overlord.png (vendored, binary): file not shown (before: 53 KiB)
@@ -6,8 +6,8 @@ import (
 )

 type Config struct {
-    Port            int
-    OverlordAddress string
+    Port               int
+    CoordinatorAddress string

     // video
     Scale int
@@ -21,13 +21,13 @@ type Config struct {

 func NewDefaultConfig() Config {
     return Config{
-        Port:              8800,
-        OverlordAddress:   "localhost:8000",
-        Scale:             1,
-        EnableAspectRatio: false,
-        Width:             320,
-        Height:            240,
-        Zone:              "",
+        Port:               8800,
+        CoordinatorAddress: "localhost:8000",
+        Scale:              1,
+        EnableAspectRatio:  false,
+        Width:              320,
+        Height:             240,
+        Zone:               "",
         MonitoringConfig: monitoring.ServerMonitoringConfig{
             Port:      6601,
             URLPrefix: "/worker",
@@ -38,7 +38,7 @@ func NewDefaultConfig() Config {

 func (c *Config) AddFlags(fs *pflag.FlagSet) *Config {
     fs.IntVarP(&c.Port, "port", "", 8800, "OverWorker server port")
-    fs.StringVarP(&c.OverlordAddress, "overlordhost", "", c.OverlordAddress, "OverWorker URL to connect")
+    fs.StringVarP(&c.CoordinatorAddress, "coordinatorhost", "", c.CoordinatorAddress, "OverWorker URL to connect")
     fs.StringVarP(&c.Zone, "zone", "z", c.Zone, "Zone of the worker")

     fs.IntVarP(&c.Scale, "scale", "s", c.Scale, "Set output viewport scale factor")
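An entrypoint consumes this config the same way the renamed `main.go` earlier in this diff does: build the defaults, register the flags, then parse. A minimal sketch; the `pkg/worker` import path is an assumption based on the `worker.Config` references elsewhere in this commit.

```go
package main

import (
	"fmt"

	"github.com/giongto35/cloud-game/pkg/worker" // assumed path for the Config above
	"github.com/spf13/pflag"
)

func main() {
	cfg := worker.NewDefaultConfig() // Port 8800, CoordinatorAddress "localhost:8000"
	cfg.AddFlags(pflag.CommandLine)  // registers --port, --coordinatorhost, --zone, --scale
	pflag.Parse()

	fmt.Printf("worker on :%d, dialing coordinator at %s\n", cfg.Port, cfg.CoordinatorAddress)
}
```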
@@ -1,4 +1,4 @@
-package overlord
+package coordinator

 import (
     "log"
@@ -20,8 +20,8 @@ func (s *Session) RouteBrowser() {
     })

     browserClient.Receive("icecandidate", func(resp cws.WSPacket) cws.WSPacket {
-        log.Println("Overlord: Received icecandidate from a browser", resp.Data)
-        log.Println("Overlord: Relay icecandidate from a browser to worker")
+        log.Println("Coordinator: Received icecandidate from a browser", resp.Data)
+        log.Println("Coordinator: Relay icecandidate from a browser to worker")

         wc, ok := s.handler.workerClients[s.ServerID]
         if !ok {
@@ -33,12 +33,12 @@ func (s *Session) RouteBrowser() {
     })

     browserClient.Receive("initwebrtc", func(resp cws.WSPacket) cws.WSPacket {
-        log.Println("Overlord: Received sdp request from a browser")
-        log.Println("Overlord: Relay sdp request from a browser to worker")
+        log.Println("Coordinator: Received sdp request from a browser")
+        log.Println("Coordinator: Relay sdp request from a browser to worker")

         // relay SDP to target worker and get back SDP of the worker
         // TODO: Async
-        log.Println("Overlord: serverID: ", s.ServerID, resp.SessionID)
+        log.Println("Coordinator: serverID: ", s.ServerID, resp.SessionID)
         resp.SessionID = s.ID
         wc, ok := s.handler.workerClients[s.ServerID]
         if !ok {
@@ -48,15 +48,15 @@ func (s *Session) RouteBrowser() {
             resp,
         )

-        log.Println("Overlord: Received sdp request from a worker")
-        log.Println("Overlord: Sending back sdp to browser")
+        log.Println("Coordinator: Received sdp request from a worker")
+        log.Println("Coordinator: Sending back sdp to browser")

         return sdp
     })

     browserClient.Receive("quit", func(resp cws.WSPacket) (req cws.WSPacket) {
-        log.Println("Overlord: Received quit request from a browser")
-        log.Println("Overlord: Relay quit request from a browser to worker")
+        log.Println("Coordinator: Received quit request from a browser")
+        log.Println("Coordinator: Relay quit request from a browser to worker")

         // TODO: Async
         resp.SessionID = s.ID
@@ -72,8 +72,8 @@ func (s *Session) RouteBrowser() {
     })

     browserClient.Receive("start", func(resp cws.WSPacket) cws.WSPacket {
-        log.Println("Overlord: Received start request from a browser")
-        log.Println("Overlord: Relay start request from a browser to worker")
+        log.Println("Coordinator: Received start request from a browser")
+        log.Println("Coordinator: Relay start request from a browser to worker")
         // TODO: Async
         resp.SessionID = s.ID
         wc, ok := s.handler.workerClients[s.ServerID]
@@ -91,8 +91,8 @@ func (s *Session) RouteBrowser() {
     })

     browserClient.Receive("save", func(resp cws.WSPacket) (req cws.WSPacket) {
-        log.Println("Overlord: Received save request from a browser")
-        log.Println("Overlord: Relay save request from a browser to worker")
+        log.Println("Coordinator: Received save request from a browser")
+        log.Println("Coordinator: Relay save request from a browser to worker")
         // TODO: Async
         resp.SessionID = s.ID
         resp.RoomID = s.RoomID
@@ -108,8 +108,8 @@ func (s *Session) RouteBrowser() {
     })

     browserClient.Receive("load", func(resp cws.WSPacket) (req cws.WSPacket) {
-        log.Println("Overlord: Received load request from a browser")
-        log.Println("Overlord: Relay load request from a browser to worker")
+        log.Println("Coordinator: Received load request from a browser")
+        log.Println("Coordinator: Relay load request from a browser to worker")
         // TODO: Async
         resp.SessionID = s.ID
         resp.RoomID = s.RoomID
@@ -125,8 +125,8 @@ func (s *Session) RouteBrowser() {
     })

     browserClient.Receive("playerIdx", func(resp cws.WSPacket) (req cws.WSPacket) {
-        log.Println("Overlord: Received update player index request from a browser")
-        log.Println("Overlord: Relay update player index request from a browser to worker")
+        log.Println("Coordinator: Received update player index request from a browser")
+        log.Println("Coordinator: Relay update player index request from a browser to worker")
         // TODO: Async
         resp.SessionID = s.ID
         resp.RoomID = s.RoomID
@@ -142,7 +142,7 @@ func (s *Session) RouteBrowser() {
     })
 }

-// NewOverlordClient returns a client connecting to browser. This connection exchanges information between clients and server
+// NewCoordinatorClient returns a client connecting to browser. This connection exchanges information between clients and server
 func NewBrowserClient(c *websocket.Conn) *BrowserClient {
     return &BrowserClient{
         Client: cws.NewClient(c),
@@ -1,4 +1,4 @@
-package overlord
+package coordinator

 import (
     "github.com/giongto35/cloud-game/pkg/monitoring"
@@ -21,7 +21,7 @@ func NewDefaultConfig() Config {

         MonitoringConfig: monitoring.ServerMonitoringConfig{
             Port:             6601,
-            URLPrefix:        "/overlord",
+            URLPrefix:        "/coordinator",
             MetricEnabled:    false,
             ProfilingEnabled: false,
         },
@@ -29,7 +29,7 @@ func NewDefaultConfig() Config {
 }

 func (c *Config) AddFlags(fs *pflag.FlagSet) *Config {
-    fs.IntVarP(&c.Port, "port", "", 8800, "Overlord server port")
+    fs.IntVarP(&c.Port, "port", "", 8800, "Coordinator server port")

     fs.BoolVarP(&c.MonitoringConfig.MetricEnabled, "monitoring.metric", "m", c.MonitoringConfig.MetricEnabled, "Enable prometheus metric for server")
     fs.BoolVarP(&c.MonitoringConfig.ProfilingEnabled, "monitoring.pprof", "p", c.MonitoringConfig.ProfilingEnabled, "Enable golang pprof for server")
@@ -1,4 +1,4 @@
-package overlord
+package coordinator

 import (
     "context"
@@ -19,15 +19,15 @@ import (

 const stagingLEURL = "https://acme-staging-v02.api.letsencrypt.org/directory"

-type Overlord struct {
+type Coordinator struct {
     ctx context.Context
     cfg Config

     monitoringServer *monitoring.ServerMonitoring
 }

-func New(ctx context.Context, cfg Config) *Overlord {
-    return &Overlord{
+func New(ctx context.Context, cfg Config) *Coordinator {
+    return &Coordinator{
         ctx: ctx,
         cfg: cfg,

@@ -35,21 +35,21 @@ func New(ctx context.Context, cfg Config) *Overlord {
     }
 }

-func (o *Overlord) Run() error {
-    go o.initializeOverlord()
+func (o *Coordinator) Run() error {
+    go o.initializeCoordinator()
     go o.RunMonitoringServer()
     return nil
 }

-func (o *Overlord) RunMonitoringServer() {
-    glog.Infoln("Starting monitoring server for overlord")
+func (o *Coordinator) RunMonitoringServer() {
+    glog.Infoln("Starting monitoring server for coordinator")
     err := o.monitoringServer.Run()
     if err != nil {
         glog.Errorf("Failed to start monitoring server, reason %s", err)
     }
 }

-func (o *Overlord) Shutdown() {
+func (o *Coordinator) Shutdown() {
     if err := o.monitoringServer.Shutdown(o.ctx); err != nil {
         glog.Errorln("Failed to shutdown monitoring server")
     }
@@ -93,14 +93,14 @@ func makeHTTPToHTTPSRedirectServer(server *Server) *http.Server {
     return makeServerFromMux(svmux)
 }

-// initializeOverlord setup an overlord server
-func (o *Overlord) initializeOverlord() {
-    overlord := NewServer(o.cfg)
+// initializeCoordinator setup an coordinator server
+func (o *Coordinator) initializeCoordinator() {
+    coordinator := NewServer(o.cfg)

     var certManager *autocert.Manager
     var httpsSrv *http.Server

-    log.Println("Initializing Overlord Server")
+    log.Println("Initializing Coordinator Server")
     if *config.Mode == config.ProdEnv || *config.Mode == config.StagingEnv {
         var leurl string
         if *config.Mode == config.StagingEnv {
@@ -116,7 +116,7 @@ func (o *Overlord) initializeOverlord() {
             Client: &acme.Client{DirectoryURL: leurl},
         }

-        httpsSrv = makeHTTPServer(overlord)
+        httpsSrv = makeHTTPServer(coordinator)
         httpsSrv.Addr = ":443"
         httpsSrv.TLSConfig = &tls.Config{GetCertificate: certManager.GetCertificate}

@@ -131,9 +131,9 @@ func (o *Overlord) initializeOverlord() {

     var httpSrv *http.Server
     if *config.Mode == config.ProdEnv || *config.Mode == config.StagingEnv {
-        httpSrv = makeHTTPToHTTPSRedirectServer(overlord)
+        httpSrv = makeHTTPToHTTPSRedirectServer(coordinator)
     } else {
-        httpSrv = makeHTTPServer(overlord)
+        httpSrv = makeHTTPServer(coordinator)
     }

     if certManager != nil {
@@ -1,4 +1,4 @@
-package overlord
+package coordinator

 import (
     "encoding/json"
@@ -78,17 +78,17 @@ func (o *Server) getPingServer(zone string) string {
     return devPingServer
 }

-// WSO handles all connections from a new worker to overlord
+// WSO handles all connections from a new worker to coordinator
 func (o *Server) WSO(w http.ResponseWriter, r *http.Request) {
     fmt.Println("Connected")
     c, err := upgrader.Upgrade(w, r, nil)
     if err != nil {
-        log.Print("Overlord: [!] WS upgrade:", err)
+        log.Print("Coordinator: [!] WS upgrade:", err)
         return
     }
     // Register new server
     serverID := uuid.Must(uuid.NewV4()).String()
-    log.Println("Overlord: A new server connected to Overlord", serverID)
+    log.Println("Coordinator: A new server connected to Coordinator", serverID)

     // Register to workersClients map the client connection
     address := util.GetRemoteAddress(c)
@@ -99,10 +99,10 @@ func (o *Server) WSO(w http.ResponseWriter, r *http.Request) {

     fmt.Printf("Is public: %v zone: %v\n", util.IsPublicIP(address), zone)

-    // In case worker and overlord in the same host
+    // In case worker and coordinator in the same host
     if !util.IsPublicIP(address) && *config.Mode == config.ProdEnv {
         // Don't accept private IP for worker's address in prod mode
-        // However, if the worker in the same host with overlord, we can get public IP of worker
+        // However, if the worker in the same host with coordinator, we can get public IP of worker
         log.Printf("Error: address %s is invalid", address)
         address = util.GetHostPublicIP()
         log.Println("Find public address:", address)
@@ -128,9 +128,9 @@ func (o *Server) WSO(w http.ResponseWriter, r *http.Request) {
     client.Listen()
 }

-// WSO handles all connections from user/frontend to overlord
+// WSO handles all connections from user/frontend to coordinator
 func (o *Server) WS(w http.ResponseWriter, r *http.Request) {
-    log.Println("A user connected to overlord ", r.URL)
+    log.Println("A user connected to coordinator ", r.URL)
     defer func() {
         if r := recover(); r != nil {
             log.Println("Warn: Something wrong. Recovered in ", r)
@@ -332,7 +332,7 @@ func (o *Server) getLatencyMapFromBrowser(workerClients map[string]*WorkerClient
 // cleanConnection is called when a worker is disconnected
 // connection from worker (client) to server is also closed
 func (o *Server) cleanConnection(client *WorkerClient, serverID string) {
-    log.Println("Unregister server from overlord")
+    log.Println("Unregister server from coordinator")
     // Remove serverID from servers
     delete(o.workerClients, serverID)
     // Clean all rooms connecting to that server
@@ -1,14 +1,14 @@
-package overlord
+package coordinator

 // Session represents a session connected from the browser to the current server
-// It requires one connection to browser and one connection to the overlord
-// connection to browser is 1-1. connection to overlord is n - 1
+// It requires one connection to browser and one connection to the coordinator
+// connection to browser is 1-1. connection to coordinator is n - 1
 // Peerconnection can be from other server to ensure better latency
 type Session struct {
     ID            string
     BrowserClient *BrowserClient
     WorkerClient  *WorkerClient
-    // OverlordClient *OverlordClient
+    // CoordinatorClient *CoordinatorClient
     // peerconnection *webrtc.WebRTC

     // TODO: Decouple this
@@ -1,4 +1,4 @@
-package overlord
+package coordinator

 import (
     "log"
@@ -23,11 +23,11 @@ type WorkerClient struct {
 // RouteWorker are all routes server received from worker
 func (o *Server) RouteWorker(workerClient *WorkerClient) {
     // registerRoom event from a worker, when worker created a new room.
-    // RoomID is global so it is managed by overlord.
+    // RoomID is global so it is managed by coordinator.
     workerClient.Receive("registerRoom", func(resp cws.WSPacket) cws.WSPacket {
-        log.Printf("Overlord: Received registerRoom room %s from worker %s", resp.Data, workerClient.ServerID)
+        log.Printf("Coordinator: Received registerRoom room %s from worker %s", resp.Data, workerClient.ServerID)
         o.roomToWorker[resp.Data] = workerClient.ServerID
-        log.Printf("Overlord: Current room list is: %+v", o.roomToWorker)
+        log.Printf("Coordinator: Current room list is: %+v", o.roomToWorker)

         return cws.WSPacket{
             ID: "registerRoom",
@@ -36,9 +36,9 @@ func (o *Server) RouteWorker(workerClient *WorkerClient) {

     // closeRoom event from a worker, when worker close a room
     workerClient.Receive("closeRoom", func(resp cws.WSPacket) cws.WSPacket {
-        log.Printf("Overlord: Received closeRoom room %s from worker %s", resp.Data, workerClient.ServerID)
+        log.Printf("Coordinator: Received closeRoom room %s from worker %s", resp.Data, workerClient.ServerID)
         delete(o.roomToWorker, resp.Data)
-        log.Printf("Overlord: Current room list is: %+v", o.roomToWorker)
+        log.Printf("Coordinator: Current room list is: %+v", o.roomToWorker)

         return cws.WSPacket{
             ID: "closeRoom",
@@ -47,7 +47,7 @@ func (o *Server) RouteWorker(workerClient *WorkerClient) {

     // getRoom returns the server ID based on requested roomID.
     workerClient.Receive("getRoom", func(resp cws.WSPacket) cws.WSPacket {
-        log.Println("Overlord: Received a getroom request")
+        log.Println("Coordinator: Received a getroom request")
         log.Println("Result: ", o.roomToWorker[resp.Data])
         return cws.WSPacket{
             ID: "getRoom",
@@ -12,34 +12,34 @@ import (
     "github.com/gorilla/websocket"
 )

-// OverlordClient maintans connection to overlord
-// We expect only one OverlordClient for each server
-type OverlordClient struct {
+// CoordinatorClient maintans connection to coordinator
+// We expect only one CoordinatorClient for each server
+type CoordinatorClient struct {
     *cws.Client
 }

-// NewOverlordClient returns a client connecting to overlord for coordiation between different server
-func NewOverlordClient(oc *websocket.Conn) *OverlordClient {
+// NewCoordinatorClient returns a client connecting to coordinator for coordiation between different server
+func NewCoordinatorClient(oc *websocket.Conn) *CoordinatorClient {
     if oc == nil {
         return nil
     }

-    oClient := &OverlordClient{
+    oClient := &CoordinatorClient{
         Client: cws.NewClient(oc),
     }
     return oClient
 }

-// RouteOverlord are all routes server received from overlord
-func (h *Handler) RouteOverlord() {
+// RouteCoordinator are all routes server received from coordinator
+func (h *Handler) RouteCoordinator() {
     iceCandidates := map[string][]string{}
     oClient := h.oClient

-    // Received from overlord the serverID
+    // Received from coordinator the serverID
     oClient.Receive(
         "serverID",
         func(response cws.WSPacket) (request cws.WSPacket) {
-            // Stick session with serverID got from overlord
+            // Stick session with serverID got from coordinator
             log.Println("Received serverID ", response.Data)
             h.serverID = response.Data

@@ -50,7 +50,7 @@ func (h *Handler) RouteOverlord() {
     oClient.Receive(
         "initwebrtc",
         func(resp cws.WSPacket) (req cws.WSPacket) {
-            log.Println("Received relay SDP of a browser from overlord")
+            log.Println("Received relay SDP of a browser from coordinator")

             peerconnection := webrtc.NewWebRTC()
             var initPacket struct {
@@ -87,7 +87,7 @@ func (h *Handler) RouteOverlord() {
     oClient.Receive(
         "start",
         func(resp cws.WSPacket) (req cws.WSPacket) {
-            log.Println("Received a start request from overlord")
+            log.Println("Received a start request from coordinator")
             session, _ := h.sessions[resp.SessionID]

             peerconnection := session.peerconnection
@@ -117,7 +117,7 @@ func (h *Handler) RouteOverlord() {
     oClient.Receive(
         "quit",
         func(resp cws.WSPacket) (req cws.WSPacket) {
-            log.Println("Received a quit request from overlord")
+            log.Println("Received a quit request from coordinator")
             session, ok := h.sessions[resp.SessionID]
             log.Println("Find ", resp.SessionID, session, ok)

@@ -134,7 +134,7 @@ func (h *Handler) RouteOverlord() {
     oClient.Receive(
         "save",
         func(resp cws.WSPacket) (req cws.WSPacket) {
-            log.Println("Received a save game from overlord")
+            log.Println("Received a save game from coordinator")
             log.Println("RoomID:", resp.RoomID)
             req.ID = "save"
             req.Data = "ok"
@@ -158,7 +158,7 @@ func (h *Handler) RouteOverlord() {
     oClient.Receive(
         "load",
         func(resp cws.WSPacket) (req cws.WSPacket) {
-            log.Println("Received a load game from overlord")
+            log.Println("Received a load game from coordinator")
             log.Println("Loading game state")
             req.ID = "load"
             req.Data = "ok"
@@ -179,7 +179,7 @@ func (h *Handler) RouteOverlord() {
     oClient.Receive(
         "playerIdx",
         func(resp cws.WSPacket) (req cws.WSPacket) {
-            log.Println("Received an update player index event from overlord")
+            log.Println("Received an update player index event from coordinator")
             req.ID = "playerIdx"

             room := h.getRoom(resp.RoomID)
@@ -200,7 +200,7 @@ func (h *Handler) RouteOverlord() {
     oClient.Receive(
         "icecandidate",
         func(resp cws.WSPacket) (req cws.WSPacket) {
-            log.Println("Received a icecandidate from overlord: ", resp.Data)
+            log.Println("Received a icecandidate from coordinator: ", resp.Data)
             iceCandidates[resp.SessionID] = append(iceCandidates[resp.SessionID], resp.Data)

             return cws.EmptyPacket
@@ -224,15 +224,15 @@ func (h *Handler) RouteOverlord() {
     )
 }

-func getServerIDOfRoom(oc *OverlordClient, roomID string) string {
-    log.Println("Request overlord roomID ", roomID)
+func getServerIDOfRoom(oc *CoordinatorClient, roomID string) string {
+    log.Println("Request coordinator roomID ", roomID)
     packet := oc.SyncSend(
         cws.WSPacket{
             ID:   "getRoom",
             Data: roomID,
         },
     )
-    log.Println("Received roomID from overlord ", packet.Data)
+    log.Println("Received roomID from coordinator ", packet.Data)

     return packet.Data
 }
@@ -240,7 +240,7 @@ func getServerIDOfRoom(oc *OverlordClient, roomID string) string {
 // startGameHandler starts a game if roomID is given, if not create new room
 func (h *Handler) startGameHandler(gameName, existedRoomID string, playerIndex int, peerconnection *webrtc.WebRTC, videoEncoderType string) *room.Room {
     log.Println("Starting game", gameName)
-    // If we are connecting to overlord, request corresponding serverID based on roomID
+    // If we are connecting to coordinator, request corresponding serverID based on roomID
     // TODO: check if existedRoomID is in the current server
     room := h.getRoom(existedRoomID)
     // If room is not running
@@ -254,7 +254,7 @@ func (h *Handler) startGameHandler(gameName, existedRoomID string, playerIndex i
     go func() {
         <-room.Done
         h.detachRoom(room.ID)
-        // send signal to overlord that the room is closed, overlord will remove that room
+        // send signal to coordinator that the room is closed, coordinator will remove that room
         h.oClient.Send(cws.WSPacket{
             ID:   "closeRoom",
             Data: room.ID,
@@ -269,7 +269,7 @@ func (h *Handler) startGameHandler(gameName, existedRoomID string, playerIndex i
         room.AddConnectionToRoom(peerconnection)
     }

-    // Register room to overlord if we are connecting to overlord
+    // Register room to coordinator if we are connecting to coordinator
     if room != nil && h.oClient != nil {
         h.oClient.Send(cws.WSPacket{
             ID: "registerRoom",
@@ -23,15 +23,15 @@ const (
     debugIndex = "./static/game.html"
 )

-// Flag to determine if the server is overlord or not
+// Flag to determine if the server is coordinator or not
 var upgrader = websocket.Upgrader{}

 type Handler struct {
-    // Client that connects to overlord
-    oClient *OverlordClient
-    // Raw address of overlord
-    overlordHost string
-    cfg          worker.Config
+    // Client that connects to coordinator
+    oClient *CoordinatorClient
+    // Raw address of coordinator
+    coordinatorHost string
+    cfg             worker.Config
     // Rooms map : RoomID -> Room
     rooms map[string]*room.Room
     // ID of the current server globalwise
@@ -50,34 +50,34 @@ func NewHandler(cfg worker.Config) *Handler {
     // Init online storage
     onlineStorage := storage.NewInitClient()
     return &Handler{
-        rooms:         map[string]*room.Room{},
-        sessions:      map[string]*Session{},
-        overlordHost:  cfg.OverlordAddress,
-        cfg:           cfg,
-        onlineStorage: onlineStorage,
+        rooms:           map[string]*room.Room{},
+        sessions:        map[string]*Session{},
+        coordinatorHost: cfg.CoordinatorAddress,
+        cfg:             cfg,
+        onlineStorage:   onlineStorage,
     }
 }

 // Run starts a Handler running logic
 func (h *Handler) Run() {
     for {
-        oClient, err := setupOverlordConnection(h.overlordHost, h.cfg.Zone)
+        oClient, err := setupCoordinatorConnection(h.coordinatorHost, h.cfg.Zone)
         if err != nil {
-            log.Printf("Cannot connect to overlord. %v Retrying...", err)
+            log.Printf("Cannot connect to coordinator. %v Retrying...", err)
             time.Sleep(time.Second)
             continue
         }

         h.oClient = oClient
-        log.Println("Connected to overlord successfully.", oClient, err)
+        log.Println("Connected to coordinator successfully.", oClient, err)
         go h.oClient.Heartbeat()
-        h.RouteOverlord()
+        h.RouteCoordinator()
         h.oClient.Listen()
-        // If cannot listen, reconnect to overlord
+        // If cannot listen, reconnect to coordinator
     }
 }

-func setupOverlordConnection(ohost string, zone string) (*OverlordClient, error) {
+func setupCoordinatorConnection(ohost string, zone string) (*CoordinatorClient, error) {
     var scheme string

     if *config.Mode == config.ProdEnv || *config.Mode == config.StagingEnv {
@@ -86,22 +86,22 @@ func setupOverlordConnection(ohost string, zone string) (*OverlordClient, error)
         scheme = "ws"
     }

-    overlordURL := url.URL{
+    coordinatorURL := url.URL{
         Scheme:   scheme,
         Host:     ohost,
         Path:     "/wso",
         RawQuery: "zone=" + zone,
     }
-    log.Println("Worker connecting to overlord:", overlordURL.String())
+    log.Println("Worker connecting to coordinator:", coordinatorURL.String())

-    conn, err := createOverlordConnection(&overlordURL)
+    conn, err := createCoordinatorConnection(&coordinatorURL)
     if err != nil {
         return nil, err
     }
-    return NewOverlordClient(conn), nil
+    return NewCoordinatorClient(conn), nil
 }

-func createOverlordConnection(ourl *url.URL) (*websocket.Conn, error) {
+func createCoordinatorConnection(ourl *url.URL) (*websocket.Conn, error) {
     var d websocket.Dialer
     if ourl.Scheme == "wss" {
         d = websocket.Dialer{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
@@ -117,7 +117,7 @@ func createOverlordConnection(ourl *url.URL) (*websocket.Conn, error) {
     return ws, nil
 }

-func (h *Handler) GetOverlordClient() *OverlordClient {
+func (h *Handler) GetCoordinatorClient() *CoordinatorClient {
     return h.oClient
 }

@@ -3,8 +3,8 @@ package worker
 import "github.com/giongto35/cloud-game/pkg/webrtc"

 // Session represents a session connected from the browser to the current server
-// It requires one connection to browser and one connection to the overlord
-// connection to browser is 1-1. connection to overlord is n - 1
+// It requires one connection to browser and one connection to the coordinator
+// connection to browser is 1-1. connection to coordinator is n - 1
 // Peerconnection can be from other server to ensure better latency
 type Session struct {
     ID string
@@ -27,7 +27,7 @@ package e2e
 // URLs: []string{"stun:stun.l.google.com:19302"},
 // }}}
 //
-// func initOverlord() (*httptest.Server, *httptest.Server) {
+// func initCoordinator() (*httptest.Server, *httptest.Server) {
 // server := overlord.NewServer()
 // overlordWorker := httptest.NewServer(http.HandlerFunc(server.WSO))
 // overlordBrowser := httptest.NewServer(http.HandlerFunc(server.WS))
@@ -137,7 +137,7 @@ package e2e
 // // If receive roomID, the server is running correctly
 // }
 //
-// func TestSingleServerOneOverlord(t *testing.T) {
+// func TestSingleServerOneCoordinator(t *testing.T) {
 // /*
 // Case scenario:
 // - A server X are initilized
@@ -146,7 +146,7 @@ package e2e
 // - Room received not empty.
 // */
 //
-// oworker, obrowser := initOverlord()
+// oworker, obrowser := initCoordinator()
 // defer obrowser.Close()
 // defer oworker.Close()
 //
@@ -178,7 +178,7 @@ package e2e
 // fmt.Println("Done")
 // }
 //
-// func TestTwoServerOneOverlord(t *testing.T) {
+// func TestTwoServerOneCoordinator(t *testing.T) {
 // /*
 // Case scenario:
 // - Two server X, Y are initilized
@@ -190,7 +190,7 @@ package e2e
 // - Client B can join a room hosted on A
 // */
 //
-// oworker, obrowser := initOverlord()
+// oworker, obrowser := initCoordinator()
 // defer obrowser.Close()
 // defer oworker.Close()
 //
@@ -277,7 +277,7 @@ package e2e
 // TODO: Current test just make sure the game is running, not check if the game is the same
 // */
 //
-// oworker, obrowser := initOverlord()
+// oworker, obrowser := initCoordinator()
 // defer obrowser.Close()
 // defer oworker.Close()
 //
@@ -306,7 +306,7 @@ package e2e
 //
 // log.Println("Closing room and server")
 // client.Close()
-// worker.GetOverlordClient().Close()
+// worker.GetCoordinatorClient().Close()
 // worker.Close()
 //
 // // Close server and reconnect
@@ -356,7 +356,7 @@ package e2e
 // */
 // // This test only run if GCP storage is set
 //
-// oworker, obrowser := initOverlord()
+// oworker, obrowser := initCoordinator()
 // defer obrowser.Close()
 // defer oworker.Close()
 //
@@ -390,7 +390,7 @@ package e2e
 //
 // log.Println("Closing room and server")
 // client.Close()
-// worker.GetOverlordClient().Close()
+// worker.GetCoordinatorClient().Close()
 // worker.Close()
 // // Remove room on local
 // path := util.GetSavePath(saveRoomID)
@@ -429,7 +429,7 @@ package e2e
 // fmt.Println("Done")
 // }

-//func TestRejoinNoOverlordMultiple(t *testing.T) {
+//func TestRejoinNoCoordinatorMultiple(t *testing.T) {
 //[>
 //Case scenario:
 //- A server X without connecting to overlord
@@ -470,7 +470,7 @@ package e2e

 //}

-//func TestRejoinWithOverlordMultiple(t *testing.T) {
+//func TestRejoinWithCoordinatorMultiple(t *testing.T) {
 //[>
 //Case scenario:
 //- A server X is initialized connecting to overlord
@@ -480,10 +480,10 @@ package e2e
 //*/

 //// Init slave server
-//o := initOverlord()
+//o := initCoordinator()
 //defer o.Close()

-//oconn := connectTestOverlordServer(t, o.URL)
+//oconn := connectTestCoordinatorServer(t, o.URL)
 //// Init slave server
 //s := initServer(t, oconn)
 //defer s.Close()