From 03fb03a71a2eddd24ba7334ff40a596b2756ab58 Mon Sep 17 00:00:00 2001 From: Sergey Stepanov Date: Fri, 30 Jul 2021 13:30:15 +0300 Subject: [PATCH] Rename old app references --- cmd/coordinator/main.go | 6 +-- cmd/worker/main.go | 6 +-- pkg/coordinator/coordinator.go | 30 ++++++------ pkg/coordinator/handlers.go | 84 +++++++++++++++++----------------- pkg/coordinator/routes.go | 30 ++++++------ 5 files changed, 78 insertions(+), 78 deletions(-) diff --git a/cmd/coordinator/main.go b/cmd/coordinator/main.go index bdabb333..bbbaad7c 100644 --- a/cmd/coordinator/main.go +++ b/cmd/coordinator/main.go @@ -33,8 +33,8 @@ func main() { glog.Infof("[coordinator] version: %v", Version) glog.Infof("Initializing coordinator server") glog.V(4).Infof("Coordinator configs %v", conf) - o := coordinator.New(ctx, conf) - if err := o.Run(); err != nil { + app := coordinator.New(ctx, conf) + if err := app.Run(); err != nil { glog.Errorf("Failed to run coordinator server, reason %v", err) os.Exit(1) } @@ -51,6 +51,6 @@ func main() { }() <-done - o.Shutdown() + app.Shutdown() cancelCtx() } diff --git a/cmd/worker/main.go b/cmd/worker/main.go index 3126b8b1..8d4c4397 100644 --- a/cmd/worker/main.go +++ b/cmd/worker/main.go @@ -36,8 +36,8 @@ func run() { glog.Infof("[worker] version: %v", Version) glog.V(4).Info("[worker] Initialization") glog.V(4).Infof("[worker] Local configuration %+v", conf) - wrk := worker.New(ctx, conf) - wrk.Run() + app := worker.New(ctx, conf) + app.Run() signals := make(chan os.Signal, 1) done := make(chan struct{}, 1) @@ -51,7 +51,7 @@ func run() { }() <-done - wrk.Shutdown() + app.Shutdown() cancelCtx() } diff --git a/pkg/coordinator/coordinator.go b/pkg/coordinator/coordinator.go index 7741e8f4..92111c6d 100644 --- a/pkg/coordinator/coordinator.go +++ b/pkg/coordinator/coordinator.go @@ -37,22 +37,22 @@ func New(ctx context.Context, cfg coordinator.Config) *Coordinator { } } -func (o *Coordinator) Run() error { - go o.initializeCoordinator() - go o.RunMonitoringServer() +func (c *Coordinator) Run() error { + go c.initializeCoordinator() + go c.RunMonitoringServer() return nil } -func (o *Coordinator) RunMonitoringServer() { +func (c *Coordinator) RunMonitoringServer() { glog.Infoln("Starting monitoring server for coordinator") - err := o.monitoringServer.Run() + err := c.monitoringServer.Run() if err != nil { glog.Errorf("Failed to start monitoring server, reason %s", err) } } -func (o *Coordinator) Shutdown() { - if err := o.monitoringServer.Shutdown(o.ctx); err != nil { +func (c *Coordinator) Shutdown() { + if err := c.monitoringServer.Shutdown(c.ctx); err != nil { glog.Errorln("Failed to shutdown monitoring server") } } @@ -96,24 +96,24 @@ func makeHTTPToHTTPSRedirectServer(server *Server) *http.Server { } // initializeCoordinator setup an coordinator server -func (o *Coordinator) initializeCoordinator() { +func (c *Coordinator) initializeCoordinator() { // init games library - libraryConf := o.cfg.Coordinator.Library + libraryConf := c.cfg.Coordinator.Library if len(libraryConf.Supported) == 0 { - libraryConf.Supported = o.cfg.Emulator.GetSupportedExtensions() + libraryConf.Supported = c.cfg.Emulator.GetSupportedExtensions() } lib := games.NewLibrary(libraryConf) lib.Scan() - server := NewServer(o.cfg, lib) + server := NewServer(c.cfg, lib) var certManager *autocert.Manager var httpsSrv *http.Server log.Println("Initializing Coordinator Server") - mode := o.cfg.Environment.Get() + mode := c.cfg.Environment.Get() if mode.AnyOf(environment.Production, environment.Staging) { - 
serverConfig := o.cfg.Coordinator.Server + serverConfig := c.cfg.Coordinator.Server httpsSrv = makeHTTPServer(server) httpsSrv.Addr = fmt.Sprintf(":%d", serverConfig.HttpsPort) @@ -130,7 +130,7 @@ func (o *Coordinator) initializeCoordinator() { certManager = &autocert.Manager{ Prompt: autocert.AcceptTOS, - HostPolicy: autocert.HostWhitelist(o.cfg.Coordinator.PublicDomain), + HostPolicy: autocert.HostWhitelist(c.cfg.Coordinator.PublicDomain), Cache: autocert.DirCache("assets/cache"), Client: &acme.Client{DirectoryURL: leurl}, } @@ -158,7 +158,7 @@ func (o *Coordinator) initializeCoordinator() { httpSrv.Handler = certManager.HTTPHandler(httpSrv.Handler) } - httpSrv.Addr = ":" + strconv.Itoa(o.cfg.Coordinator.Server.Port) + httpSrv.Addr = ":" + strconv.Itoa(c.cfg.Coordinator.Server.Port) err := httpSrv.ListenAndServe() if err != nil { log.Fatalf("httpSrv.ListenAndServe() failed with %s", err) diff --git a/pkg/coordinator/handlers.go b/pkg/coordinator/handlers.go index 91b4b0b6..8e7fb1a3 100644 --- a/pkg/coordinator/handlers.go +++ b/pkg/coordinator/handlers.go @@ -54,7 +54,7 @@ func NewServer(cfg coordinator.Config, library games.GameLibrary) *Server { } // GetWeb returns web frontend -func (o *Server) GetWeb(conf coordinator.Config) http.Handler { +func (s *Server) GetWeb(conf coordinator.Config) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { tpl, err := template.ParseFiles(index) if err != nil { @@ -70,20 +70,20 @@ func (o *Server) GetWeb(conf coordinator.Config) http.Handler { // getPingServer returns the server for latency check of a zone. // In latency check to find best worker step, we use this server to find the closest worker. -func (o *Server) getPingServer(zone string) string { - if o.cfg.Coordinator.PingServer != "" { - return fmt.Sprintf("%s/echo", o.cfg.Coordinator.PingServer) +func (s *Server) getPingServer(zone string) string { + if s.cfg.Coordinator.PingServer != "" { + return fmt.Sprintf("%s/echo", s.cfg.Coordinator.PingServer) } - mode := o.cfg.Environment.Get() + mode := s.cfg.Environment.Get() if mode.AnyOf(environment.Production, environment.Staging) { - return fmt.Sprintf(pingServerTemp, zone, o.cfg.Coordinator.PublicDomain) + return fmt.Sprintf(pingServerTemp, zone, s.cfg.Coordinator.PublicDomain) } return devPingServer } // WSO handles all connections from a new worker to coordinator -func (o *Server) WSO(w http.ResponseWriter, r *http.Request) { +func (s *Server) WSO(w http.ResponseWriter, r *http.Request) { log.Println("Coordinator: A worker is connecting...") // be aware of ReadBufferSize, WriteBufferSize (default 4096) @@ -99,7 +99,7 @@ func (o *Server) WSO(w http.ResponseWriter, r *http.Request) { for { workerID = uuid.Must(uuid.NewV4()).String() // check duplicate - if _, ok := o.workerClients[workerID]; !ok { + if _, ok := s.workerClients[workerID]; !ok { break } } @@ -115,12 +115,12 @@ func (o *Server) WSO(w http.ResponseWriter, r *http.Request) { zone := r.URL.Query().Get("zone") wc.Printf("Is public: %v zone: %v", util.IsPublicIP(address), zone) - pingServer := o.getPingServer(zone) + pingServer := s.getPingServer(zone) wc.Printf("Set ping server address: %s", pingServer) // In case worker and coordinator in the same host - if !util.IsPublicIP(address) && o.cfg.Environment.Get() == environment.Production { + if !util.IsPublicIP(address) && s.cfg.Environment.Get() == environment.Production { // Don't accept private IP for worker's address in prod mode // However, if the worker in the same host with coordinator, we can get 
public IP of worker wc.Printf("[!] Address %s is invalid", address) @@ -137,22 +137,22 @@ func (o *Server) WSO(w http.ResponseWriter, r *http.Request) { // Create a workerClient instance wc.Address = address - wc.StunTurnServer = ice.ToJson(o.cfg.Webrtc.IceServers, ice.Replacement{From: "server-ip", To: address}) + wc.StunTurnServer = ice.ToJson(s.cfg.Webrtc.IceServers, ice.Replacement{From: "server-ip", To: address}) wc.Zone = zone wc.PingServer = pingServer // Attach to Server instance with workerID, add defer - o.workerClients[workerID] = wc - defer o.cleanWorker(wc, workerID) + s.workerClients[workerID] = wc + defer s.cleanWorker(wc, workerID) wc.Send(api.ServerIdPacket(workerID), nil) - o.workerRoutes(wc) + s.workerRoutes(wc) wc.Listen() } // WSO handles all connections from user/frontend to coordinator -func (o *Server) WS(w http.ResponseWriter, r *http.Request) { +func (s *Server) WS(w http.ResponseWriter, r *http.Request) { log.Println("Coordinator: A user is connecting...") defer func() { if r := recover(); r != nil { @@ -173,7 +173,7 @@ func (o *Server) WS(w http.ResponseWriter, r *http.Request) { for { sessionID = uuid.Must(uuid.NewV4()).String() // check duplicate - if _, ok := o.browserClients[sessionID]; !ok { + if _, ok := s.browserClients[sessionID]; !ok { break } } @@ -199,8 +199,8 @@ func (o *Server) WS(w http.ResponseWriter, r *http.Request) { if roomID != "" { bc.Printf("Detected roomID %v from URL", roomID) - if workerID, ok := o.roomToWorker[roomID]; ok { - wc = o.workerClients[workerID] + if workerID, ok := s.roomToWorker[roomID]; ok { + wc = s.workerClients[workerID] if userZone != "" && wc.Zone != userZone { // if there is zone param, we need to ensure ther worker in that zone // if not we consider the room is missing @@ -214,7 +214,7 @@ func (o *Server) WS(w http.ResponseWriter, r *http.Request) { // If there is no existing server to connect to, we find the best possible worker for the frontend if wc == nil { // Get best server for frontend to connect to - wc, err = o.getBestWorkerClient(bc, userZone) + wc, err = s.getBestWorkerClient(bc, userZone) if err != nil { return } @@ -228,15 +228,15 @@ func (o *Server) WS(w http.ResponseWriter, r *http.Request) { // Everything is cool // Attach to Server instance with sessionID - o.browserClients[sessionID] = bc - defer o.cleanBrowser(bc, sessionID) + s.browserClients[sessionID] = bc + defer s.cleanBrowser(bc, sessionID) // Routing browserClient message - o.useragentRoutes(bc) + s.useragentRoutes(bc) bc.Send(cws.WSPacket{ ID: "init", - Data: createInitPackage(wc.StunTurnServer, o.library.GetAll()), + Data: createInitPackage(wc.StunTurnServer, s.library.GetAll()), }, nil) // If peerconnection is done (client.Done is signalled), we close peerconnection @@ -246,11 +246,11 @@ func (o *Server) WS(w http.ResponseWriter, r *http.Request) { wc.Send(api.TerminateSessionPacket(sessionID), nil) } -func (o *Server) getBestWorkerClient(client *BrowserClient, zone string) (*WorkerClient, error) { - conf := o.cfg.Coordinator +func (s *Server) getBestWorkerClient(client *BrowserClient, zone string) (*WorkerClient, error) { + conf := s.cfg.Coordinator if conf.DebugHost != "" { client.Println("Connecting to debug host instead prod servers", conf.DebugHost) - wc := o.getWorkerFromAddress(conf.DebugHost) + wc := s.getWorkerFromAddress(conf.DebugHost) if wc != nil { return wc, nil } @@ -258,21 +258,21 @@ func (o *Server) getBestWorkerClient(client *BrowserClient, zone string) (*Worke client.Println("Not found, connecting to all available 
servers") } - workerClients := o.getAvailableWorkers() + workerClients := s.getAvailableWorkers() - serverID, err := o.findBestServerFromBrowser(workerClients, client, zone) + serverID, err := s.findBestServerFromBrowser(workerClients, client, zone) if err != nil { log.Println(err) return nil, err } - return o.workerClients[serverID], nil + return s.workerClients[serverID], nil } // getAvailableWorkers returns the list of available worker -func (o *Server) getAvailableWorkers() map[string]*WorkerClient { +func (s *Server) getAvailableWorkers() map[string]*WorkerClient { workerClients := map[string]*WorkerClient{} - for k, w := range o.workerClients { + for k, w := range s.workerClients { if w.HasGameSlot() { workerClients[k] = w } @@ -282,8 +282,8 @@ func (o *Server) getAvailableWorkers() map[string]*WorkerClient { } // getWorkerFromAddress returns the worker has given address -func (o *Server) getWorkerFromAddress(address string) *WorkerClient { - for _, w := range o.workerClients { +func (s *Server) getWorkerFromAddress(address string) *WorkerClient { + for _, w := range s.workerClients { if w.HasGameSlot() && w.Address == address { return w } @@ -294,13 +294,13 @@ func (o *Server) getWorkerFromAddress(address string) *WorkerClient { // findBestServerFromBrowser returns the best server for a session // All workers addresses are sent to user and user will ping to get latency -func (o *Server) findBestServerFromBrowser(workerClients map[string]*WorkerClient, client *BrowserClient, zone string) (string, error) { +func (s *Server) findBestServerFromBrowser(workerClients map[string]*WorkerClient, client *BrowserClient, zone string) (string, error) { // TODO: Find best Server by latency, currently return by ping if len(workerClients) == 0 { return "", errors.New("no server found") } - latencies := o.getLatencyMapFromBrowser(workerClients, client) + latencies := s.getLatencyMapFromBrowser(workerClients, client) client.Println("Latency map", latencies) if len(latencies) == 0 { @@ -327,7 +327,7 @@ func (o *Server) findBestServerFromBrowser(workerClients map[string]*WorkerClien } // getLatencyMapFromBrowser get all latencies from worker to user -func (o *Server) getLatencyMapFromBrowser(workerClients map[string]*WorkerClient, client *BrowserClient) map[*WorkerClient]int64 { +func (s *Server) getLatencyMapFromBrowser(workerClients map[string]*WorkerClient, client *BrowserClient) map[*WorkerClient]int64 { var workersList []*WorkerClient var addressList []string uniqueAddresses := map[string]bool{} @@ -365,23 +365,23 @@ func (o *Server) getLatencyMapFromBrowser(workerClients map[string]*WorkerClient } // cleanBrowser is called when a browser is disconnected -func (o *Server) cleanBrowser(bc *BrowserClient, sessionID string) { +func (s *Server) cleanBrowser(bc *BrowserClient, sessionID string) { bc.Println("Disconnect from coordinator") - delete(o.browserClients, sessionID) + delete(s.browserClients, sessionID) bc.Close() } // cleanWorker is called when a worker is disconnected // connection from worker to coordinator is also closed -func (o *Server) cleanWorker(wc *WorkerClient, workerID string) { +func (s *Server) cleanWorker(wc *WorkerClient, workerID string) { wc.Println("Unregister worker from coordinator") // Remove workerID from workerClients - delete(o.workerClients, workerID) + delete(s.workerClients, workerID) // Clean all rooms connecting to that server - for roomID, roomServer := range o.roomToWorker { + for roomID, roomServer := range s.roomToWorker { if roomServer == workerID { 
wc.Printf("Remove room %s", roomID) - delete(o.roomToWorker, roomID) + delete(s.roomToWorker, roomID) } } diff --git a/pkg/coordinator/routes.go b/pkg/coordinator/routes.go index 483d9e4e..aa054f85 100644 --- a/pkg/coordinator/routes.go +++ b/pkg/coordinator/routes.go @@ -3,31 +3,31 @@ package coordinator import "github.com/giongto35/cloud-game/v2/pkg/cws/api" // workerRoutes adds all worker request routes. -func (o *Server) workerRoutes(wc *WorkerClient) { +func (s *Server) workerRoutes(wc *WorkerClient) { if wc == nil { return } wc.Receive(api.ConfigRequest, wc.handleConfigRequest()) wc.Receive(api.Heartbeat, wc.handleHeartbeat()) - wc.Receive(api.RegisterRoom, wc.handleRegisterRoom(o)) - wc.Receive(api.GetRoom, wc.handleGetRoom(o)) - wc.Receive(api.CloseRoom, wc.handleCloseRoom(o)) - wc.Receive(api.IceCandidate, wc.handleIceCandidate(o)) + wc.Receive(api.RegisterRoom, wc.handleRegisterRoom(s)) + wc.Receive(api.GetRoom, wc.handleGetRoom(s)) + wc.Receive(api.CloseRoom, wc.handleCloseRoom(s)) + wc.Receive(api.IceCandidate, wc.handleIceCandidate(s)) } // useragentRoutes adds all useragent (browser) request routes. -func (o *Server) useragentRoutes(bc *BrowserClient) { +func (s *Server) useragentRoutes(bc *BrowserClient) { if bc == nil { return } bc.Receive(api.Heartbeat, bc.handleHeartbeat()) - bc.Receive(api.InitWebrtc, bc.handleInitWebrtc(o)) - bc.Receive(api.Answer, bc.handleAnswer(o)) - bc.Receive(api.IceCandidate, bc.handleIceCandidate(o)) - bc.Receive(api.GameStart, bc.handleGameStart(o)) - bc.Receive(api.GameQuit, bc.handleGameQuit(o)) - bc.Receive(api.GameSave, bc.handleGameSave(o)) - bc.Receive(api.GameLoad, bc.handleGameLoad(o)) - bc.Receive(api.GamePlayerSelect, bc.handleGamePlayerSelect(o)) - bc.Receive(api.GameMultitap, bc.handleGameMultitap(o)) + bc.Receive(api.InitWebrtc, bc.handleInitWebrtc(s)) + bc.Receive(api.Answer, bc.handleAnswer(s)) + bc.Receive(api.IceCandidate, bc.handleIceCandidate(s)) + bc.Receive(api.GameStart, bc.handleGameStart(s)) + bc.Receive(api.GameQuit, bc.handleGameQuit(s)) + bc.Receive(api.GameSave, bc.handleGameSave(s)) + bc.Receive(api.GameLoad, bc.handleGameLoad(s)) + bc.Receive(api.GamePlayerSelect, bc.handleGamePlayerSelect(s)) + bc.Receive(api.GameMultitap, bc.handleGameMultitap(s)) }