client/control: refactor code

fatedier 2018-11-06 18:35:05 +08:00
parent 1a8ac148ca
commit b33ea9274c
6 changed files with 323 additions and 313 deletions


@@ -85,7 +85,7 @@ func (svr *Service) apiReload(w http.ResponseWriter, r *http.Request) {
 		return
 	}
-	err = svr.ctl.reloadConf(pxyCfgs, visitorCfgs)
+	err = svr.ctl.ReloadConf(pxyCfgs, visitorCfgs)
 	if err != nil {
 		res.Code = 4
 		res.Msg = err.Error()
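
The handler above belongs to frpc's admin server, so the newly exported ReloadConf can be triggered over HTTP. A minimal sketch of such a call in Go, assuming the handler is mounted at GET /api/reload behind HTTP basic auth and that the admin server runs with the sample settings from frpc_full.ini further down (127.0.0.1:7400, user/password admin/admin):

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Hypothetical client for the admin endpoint served by apiReload;
	// address, port and credentials follow the sample config in this commit.
	req, err := http.NewRequest("GET", "http://127.0.0.1:7400/api/reload", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.SetBasicAuth("admin", "admin")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The body carries the Code/Msg pair that the handler fills in above.
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}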


@@ -17,8 +17,6 @@ package client
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
-	"runtime"
 	"runtime/debug"
 	"sync"
 	"time"
@@ -28,24 +26,15 @@ import (
 	"github.com/fatedier/frp/models/msg"
 	"github.com/fatedier/frp/utils/log"
 	frpNet "github.com/fatedier/frp/utils/net"
-	"github.com/fatedier/frp/utils/util"
-	"github.com/fatedier/frp/utils/version"
 	"github.com/fatedier/golib/control/shutdown"
 	"github.com/fatedier/golib/crypto"
 	fmux "github.com/hashicorp/yamux"
 )
 
-const (
-	connReadTimeout time.Duration = 10 * time.Second
-)
-
 type Control struct {
-	// frpc service
-	svr *Service
+	// uniq id got from frps, attach it in loginMsg
+	runId string
 
-	// login message to server, only used
-	loginMsg *msg.Login
-
 	// manage all proxies
 	pm *ProxyManager
@@ -65,14 +54,10 @@ type Control struct {
 	// read from this channel to get the next message sent by server
 	readCh chan (msg.Message)
 
-	// run id got from server
-	runId string
-
-	// if we call close() in control, do not reconnect to server
-	exit bool
-
 	// goroutines can block by reading from this channel, it will be closed only in reader() when control connection is closed
-	closedCh chan int
+	closedCh chan struct{}
+
+	closedDoneCh chan struct{}
 
 	// last time got the Pong message
 	lastPong time.Time
@@ -86,50 +71,28 @@ type Control struct {
 	log.Logger
 }
 
-func NewControl(svr *Service, pxyCfgs map[string]config.ProxyConf, visitorCfgs map[string]config.VisitorConf) *Control {
-	loginMsg := &msg.Login{
-		Arch:      runtime.GOARCH,
-		Os:        runtime.GOOS,
-		PoolCount: g.GlbClientCfg.PoolCount,
-		User:      g.GlbClientCfg.User,
-		Version:   version.Full(),
-	}
+func NewControl(runId string, conn frpNet.Conn, session *fmux.Session, pxyCfgs map[string]config.ProxyConf, visitorCfgs map[string]config.VisitorConf) *Control {
 	ctl := &Control{
-		svr:                svr,
-		loginMsg:           loginMsg,
+		runId:              runId,
+		conn:               conn,
+		session:            session,
 		sendCh:             make(chan msg.Message, 100),
 		readCh:             make(chan msg.Message, 100),
-		closedCh:           make(chan int),
+		closedCh:           make(chan struct{}),
+		closedDoneCh:       make(chan struct{}),
 		readerShutdown:     shutdown.New(),
 		writerShutdown:     shutdown.New(),
 		msgHandlerShutdown: shutdown.New(),
 		Logger:             log.NewPrefixLogger(""),
 	}
-	ctl.pm = NewProxyManager(ctl, ctl.sendCh, "")
+	ctl.pm = NewProxyManager(ctl.sendCh, "")
 	ctl.pm.Reload(pxyCfgs, false)
 	ctl.vm = NewVisitorManager(ctl)
 	ctl.vm.Reload(visitorCfgs)
 	return ctl
 }
 
-func (ctl *Control) Run() (err error) {
-	for {
-		err = ctl.login()
-		if err != nil {
-			ctl.Warn("login to server failed: %v", err)
-
-			// if login_fail_exit is true, just exit this program
-			// otherwise sleep a while and continues relogin to server
-			if g.GlbClientCfg.LoginFailExit {
-				return
-			} else {
-				time.Sleep(10 * time.Second)
-			}
-		} else {
-			break
-		}
-	}
-
+func (ctl *Control) Run() {
 	go ctl.worker()
 
 	// start all local visitors and send NewProxy message for all configured proxies
@@ -137,7 +100,7 @@ func (ctl *Control) Run() (err error) {
 	ctl.pm.CheckAndStartProxy([]string{ProxyStatusNew})
 
 	go ctl.vm.Run()
-	return nil
+	return
 }
 
 func (ctl *Control) HandleReqWorkConn(inMsg *msg.ReqWorkConn) {
@@ -179,80 +142,13 @@ func (ctl *Control) HandleNewProxyResp(inMsg *msg.NewProxyResp) {
 }
 
 func (ctl *Control) Close() error {
-	ctl.mu.Lock()
-	defer ctl.mu.Unlock()
-	ctl.exit = true
 	ctl.pm.CloseProxies()
 	return nil
 }
 
-// login send a login message to server and wait for a loginResp message.
-func (ctl *Control) login() (err error) {
-	if ctl.conn != nil {
-		ctl.conn.Close()
-	}
-	if ctl.session != nil {
-		ctl.session.Close()
-	}
-
-	conn, err := frpNet.ConnectServerByProxy(g.GlbClientCfg.HttpProxy, g.GlbClientCfg.Protocol,
-		fmt.Sprintf("%s:%d", g.GlbClientCfg.ServerAddr, g.GlbClientCfg.ServerPort))
-	if err != nil {
-		return err
-	}
-
-	defer func() {
-		if err != nil {
-			conn.Close()
-		}
-	}()
-
-	if g.GlbClientCfg.TcpMux {
-		fmuxCfg := fmux.DefaultConfig()
-		fmuxCfg.LogOutput = ioutil.Discard
-		session, errRet := fmux.Client(conn, fmuxCfg)
-		if errRet != nil {
-			return errRet
-		}
-		stream, errRet := session.OpenStream()
-		if errRet != nil {
-			session.Close()
-			return errRet
-		}
-		conn = frpNet.WrapConn(stream)
-		ctl.session = session
-	}
-
-	now := time.Now().Unix()
-	ctl.loginMsg.PrivilegeKey = util.GetAuthKey(g.GlbClientCfg.Token, now)
-	ctl.loginMsg.Timestamp = now
-	ctl.loginMsg.RunId = ctl.runId
-
-	if err = msg.WriteMsg(conn, ctl.loginMsg); err != nil {
-		return err
-	}
-
-	var loginRespMsg msg.LoginResp
-	conn.SetReadDeadline(time.Now().Add(connReadTimeout))
-	if err = msg.ReadMsgInto(conn, &loginRespMsg); err != nil {
-		return err
-	}
-	conn.SetReadDeadline(time.Time{})
-
-	if loginRespMsg.Error != "" {
-		err = fmt.Errorf("%s", loginRespMsg.Error)
-		ctl.Error("%s", loginRespMsg.Error)
-		return err
-	}
-
-	ctl.conn = conn
-	// update runId got from server
-	ctl.runId = loginRespMsg.RunId
-	g.GlbClientCfg.ServerUdpPort = loginRespMsg.ServerUdpPort
-	ctl.ClearLogPrefix()
-	ctl.AddLogPrefix(loginRespMsg.RunId)
-	ctl.Info("login to server success, get run id [%s], server udp port [%d]", loginRespMsg.RunId, loginRespMsg.ServerUdpPort)
-	return nil
+// ClosedDoneCh returns a channel which will be closed after all resources are released
+func (ctl *Control) ClosedDoneCh() <-chan struct{} {
+	return ctl.closedDoneCh
 }
 
 // connectServer return a new connection to frps
@@ -373,87 +269,38 @@ func (ctl *Control) msgHandler() {
 	}
 }
 
-// controler keep watching closedCh, start a new connection if previous control connection is closed.
-// If controler is notified by closedCh, reader and writer and handler will exit, then recall these functions.
+// If controler is notified by closedCh, reader and writer and handler will exit
func (ctl *Control) worker() {
 	go ctl.msgHandler()
 	go ctl.reader()
 	go ctl.writer()
 
-	var err error
-	maxDelayTime := 20 * time.Second
-	delayTime := time.Second
-
 	checkInterval := 60 * time.Second
 	checkProxyTicker := time.NewTicker(checkInterval)
 	for {
 		select {
 		case <-checkProxyTicker.C:
 			// check which proxy registered failed and reregister it to server
 			ctl.pm.CheckAndStartProxy([]string{ProxyStatusStartErr, ProxyStatusClosed})
-		case _, ok := <-ctl.closedCh:
-			// we won't get any variable from this channel
-			if !ok {
-				// close related channels and wait until other goroutines done
-				close(ctl.readCh)
-				ctl.readerShutdown.WaitDone()
-				ctl.msgHandlerShutdown.WaitDone()
-
-				close(ctl.sendCh)
-				ctl.writerShutdown.WaitDone()
-
-				ctl.pm.CloseProxies()
-
-				// if ctl.exit is true, just exit
-				ctl.mu.RLock()
-				exit := ctl.exit
-				ctl.mu.RUnlock()
-				if exit {
-					return
-				}
-
-				// loop util reconnecting to server success
-				for {
-					ctl.Info("try to reconnect to server...")
-					err = ctl.login()
-					if err != nil {
-						ctl.Warn("reconnect to server error: %v", err)
-						time.Sleep(delayTime)
-						delayTime = delayTime * 2
-						if delayTime > maxDelayTime {
-							delayTime = maxDelayTime
-						}
-						continue
-					}
-					// reconnect success, init delayTime
-					delayTime = time.Second
-					break
-				}
-
-				// init related channels and variables
-				ctl.sendCh = make(chan msg.Message, 100)
-				ctl.readCh = make(chan msg.Message, 100)
-				ctl.closedCh = make(chan int)
-				ctl.readerShutdown = shutdown.New()
-				ctl.writerShutdown = shutdown.New()
-				ctl.msgHandlerShutdown = shutdown.New()
-				ctl.pm.Reset(ctl.sendCh, ctl.runId)
-
-				// previous work goroutines should be closed and start them here
-				go ctl.msgHandler()
-				go ctl.writer()
-				go ctl.reader()
-
-				// start all configured proxies
-				ctl.pm.CheckAndStartProxy([]string{ProxyStatusNew, ProxyStatusClosed})
-
-				checkProxyTicker.Stop()
-				checkProxyTicker = time.NewTicker(checkInterval)
-			}
+		case <-ctl.closedCh:
+			// close related channels and wait until other goroutines done
+			close(ctl.readCh)
+			ctl.readerShutdown.WaitDone()
+			ctl.msgHandlerShutdown.WaitDone()
+
+			close(ctl.sendCh)
+			ctl.writerShutdown.WaitDone()
+
+			ctl.pm.CloseProxies()
+
+			close(ctl.closedDoneCh)
+			return
 		}
 	}
 }
 
-func (ctl *Control) reloadConf(pxyCfgs map[string]config.ProxyConf, visitorCfgs map[string]config.VisitorConf) error {
+func (ctl *Control) ReloadConf(pxyCfgs map[string]config.ProxyConf, visitorCfgs map[string]config.VisitorConf) error {
 	ctl.vm.Reload(visitorCfgs)
 	err := ctl.pm.Reload(pxyCfgs, true)
 	return err
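
After this change the worker no longer reconnects by itself: closedCh only tells it that the control connection is gone, and closedDoneCh, closed as the last step, tells whoever owns the Control that every goroutine and proxy has been torn down. A standalone sketch of that close/closed-done pattern (illustrative names, not frp's API):

package main

import (
	"fmt"
	"time"
)

// worker mirrors the shape of Control.worker() after this refactor:
// closedCh asks the worker to shut down, closedDoneCh tells the
// supervisor that every resource has been released.
type worker struct {
	closedCh     chan struct{}
	closedDoneCh chan struct{}
}

func newWorker() *worker {
	return &worker{
		closedCh:     make(chan struct{}),
		closedDoneCh: make(chan struct{}),
	}
}

func (w *worker) run() {
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			// periodic work, e.g. re-registering failed proxies
		case <-w.closedCh:
			// release resources here, then signal the supervisor and exit
			close(w.closedDoneCh)
			return
		}
	}
}

func (w *worker) Close()                        { close(w.closedCh) }
func (w *worker) ClosedDoneCh() <-chan struct{} { return w.closedDoneCh }

func main() {
	w := newWorker()
	go w.run()

	time.Sleep(100 * time.Millisecond)
	w.Close()
	<-w.ClosedDoneCh() // returns only after run() has finished cleaning up
	fmt.Println("worker fully stopped")
}

Waiting on ClosedDoneCh() rather than on Close() returning is what lets the owner build a fresh Control without racing the old one's teardown.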


@@ -35,7 +35,7 @@ import (
 	"github.com/fatedier/golib/pool"
 )
 
-// Proxy defines how to deal with work connections for different proxy type.
+// Proxy defines how to handle work connections for different proxy type.
 type Proxy interface {
 	Run() error


@@ -22,7 +22,6 @@ const (
 )
 
 type ProxyManager struct {
-	ctl     *Control
 	sendCh  chan (msg.Message)
 	proxies map[string]*ProxyWrapper
 	closed  bool
@@ -31,122 +30,8 @@ type ProxyManager struct {
 	log.Logger
 }
 
-type ProxyWrapper struct {
-	Name       string
-	Type       string
-	Status     string
-	Err        string
-	Cfg        config.ProxyConf
-	RemoteAddr string
-	pxy        Proxy
-	mu         sync.RWMutex
-}
-
-type ProxyStatus struct {
-	Name   string           `json:"name"`
-	Type   string           `json:"type"`
-	Status string           `json:"status"`
-	Err    string           `json:"err"`
-	Cfg    config.ProxyConf `json:"cfg"`
-	// Got from server.
-	RemoteAddr string `json:"remote_addr"`
-}
-
-func NewProxyWrapper(cfg config.ProxyConf) *ProxyWrapper {
-	return &ProxyWrapper{
-		Name:   cfg.GetBaseInfo().ProxyName,
-		Type:   cfg.GetBaseInfo().ProxyType,
-		Status: ProxyStatusNew,
-		Cfg:    cfg,
-		pxy:    nil,
-	}
-}
-
-func (pw *ProxyWrapper) GetStatusStr() string {
-	pw.mu.RLock()
-	defer pw.mu.RUnlock()
-	return pw.Status
-}
-
-func (pw *ProxyWrapper) GetStatus() *ProxyStatus {
-	pw.mu.RLock()
-	defer pw.mu.RUnlock()
-	ps := &ProxyStatus{
-		Name:       pw.Name,
-		Type:       pw.Type,
-		Status:     pw.Status,
-		Err:        pw.Err,
-		Cfg:        pw.Cfg,
-		RemoteAddr: pw.RemoteAddr,
-	}
-	return ps
-}
-
-func (pw *ProxyWrapper) WaitStart() {
-	pw.mu.Lock()
-	defer pw.mu.Unlock()
-	pw.Status = ProxyStatusWaitStart
-}
-
-func (pw *ProxyWrapper) Start(remoteAddr string, serverRespErr string) error {
-	if pw.pxy != nil {
-		pw.pxy.Close()
-		pw.pxy = nil
-	}
-
-	if serverRespErr != "" {
-		pw.mu.Lock()
-		pw.Status = ProxyStatusStartErr
-		pw.RemoteAddr = remoteAddr
-		pw.Err = serverRespErr
-		pw.mu.Unlock()
-		return fmt.Errorf(serverRespErr)
-	}
-
-	pxy := NewProxy(pw.Cfg)
-	pw.mu.Lock()
-	defer pw.mu.Unlock()
-	pw.RemoteAddr = remoteAddr
-	if err := pxy.Run(); err != nil {
-		pw.Status = ProxyStatusStartErr
-		pw.Err = err.Error()
-		return err
-	}
-	pw.Status = ProxyStatusRunning
-	pw.Err = ""
-	pw.pxy = pxy
-	return nil
-}
-
-func (pw *ProxyWrapper) InWorkConn(workConn frpNet.Conn) {
-	pw.mu.RLock()
-	pxy := pw.pxy
-	pw.mu.RUnlock()
-	if pxy != nil {
-		workConn.Debug("start a new work connection, localAddr: %s remoteAddr: %s", workConn.LocalAddr().String(), workConn.RemoteAddr().String())
-		go pxy.InWorkConn(workConn)
-	} else {
-		workConn.Close()
-	}
-}
-
-func (pw *ProxyWrapper) Close() {
-	pw.mu.Lock()
-	defer pw.mu.Unlock()
-	if pw.pxy != nil {
-		pw.pxy.Close()
-		pw.pxy = nil
-	}
-	pw.Status = ProxyStatusClosed
-}
-
-func NewProxyManager(ctl *Control, msgSendCh chan (msg.Message), logPrefix string) *ProxyManager {
+func NewProxyManager(msgSendCh chan (msg.Message), logPrefix string) *ProxyManager {
 	return &ProxyManager{
-		ctl:     ctl,
 		proxies: make(map[string]*ProxyWrapper),
 		sendCh:  msgSendCh,
 		closed:  false,
@@ -309,3 +194,118 @@ func (pm *ProxyManager) GetAllProxyStatus() []*ProxyStatus {
 	}
 	return ps
 }
+
+type ProxyStatus struct {
+	Name   string           `json:"name"`
+	Type   string           `json:"type"`
+	Status string           `json:"status"`
+	Err    string           `json:"err"`
+	Cfg    config.ProxyConf `json:"cfg"`
+	// Got from server.
+	RemoteAddr string `json:"remote_addr"`
+}
+
+// ProxyWrapper is a wrapper of Proxy interface only used in ProxyManager
+// Add additional proxy status info
+type ProxyWrapper struct {
+	Name       string
+	Type       string
+	Status     string
+	Err        string
+	Cfg        config.ProxyConf
+	RemoteAddr string
+	pxy        Proxy
+	mu         sync.RWMutex
+}
+
+func NewProxyWrapper(cfg config.ProxyConf) *ProxyWrapper {
+	return &ProxyWrapper{
+		Name:   cfg.GetBaseInfo().ProxyName,
+		Type:   cfg.GetBaseInfo().ProxyType,
+		Status: ProxyStatusNew,
+		Cfg:    cfg,
+		pxy:    nil,
+	}
+}
+
+func (pw *ProxyWrapper) GetStatusStr() string {
+	pw.mu.RLock()
+	defer pw.mu.RUnlock()
+	return pw.Status
+}
+
+func (pw *ProxyWrapper) GetStatus() *ProxyStatus {
+	pw.mu.RLock()
+	defer pw.mu.RUnlock()
+	ps := &ProxyStatus{
+		Name:       pw.Name,
+		Type:       pw.Type,
+		Status:     pw.Status,
+		Err:        pw.Err,
+		Cfg:        pw.Cfg,
+		RemoteAddr: pw.RemoteAddr,
+	}
+	return ps
+}
+
+func (pw *ProxyWrapper) WaitStart() {
+	pw.mu.Lock()
+	defer pw.mu.Unlock()
+	pw.Status = ProxyStatusWaitStart
+}
+
+func (pw *ProxyWrapper) Start(remoteAddr string, serverRespErr string) error {
+	if pw.pxy != nil {
+		pw.pxy.Close()
+		pw.pxy = nil
+	}
+
+	if serverRespErr != "" {
+		pw.mu.Lock()
+		pw.Status = ProxyStatusStartErr
+		pw.RemoteAddr = remoteAddr
+		pw.Err = serverRespErr
+		pw.mu.Unlock()
+		return fmt.Errorf(serverRespErr)
+	}
+
+	pxy := NewProxy(pw.Cfg)
+	pw.mu.Lock()
+	defer pw.mu.Unlock()
+	pw.RemoteAddr = remoteAddr
+	if err := pxy.Run(); err != nil {
+		pw.Status = ProxyStatusStartErr
+		pw.Err = err.Error()
+		return err
+	}
+	pw.Status = ProxyStatusRunning
+	pw.Err = ""
+	pw.pxy = pxy
+	return nil
+}
+
+func (pw *ProxyWrapper) InWorkConn(workConn frpNet.Conn) {
+	pw.mu.RLock()
+	pxy := pw.pxy
+	pw.mu.RUnlock()
+	if pxy != nil {
+		workConn.Debug("start a new work connection, localAddr: %s remoteAddr: %s", workConn.LocalAddr().String(), workConn.RemoteAddr().String())
+		go pxy.InWorkConn(workConn)
+	} else {
+		workConn.Close()
+	}
+}
+
+func (pw *ProxyWrapper) Close() {
+	pw.mu.Lock()
+	defer pw.mu.Unlock()
+	if pw.pxy != nil {
+		pw.pxy.Close()
+		pw.pxy = nil
+	}
+	pw.Status = ProxyStatusClosed
+}
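
With the ctl field gone, ProxyManager talks to its owner only through the message channel it is handed, which is why NewProxyManager now takes just msgSendCh. A tiny sketch of that decoupling, using made-up names rather than frp's types:

package main

import "fmt"

type message struct{ text string }

// manager holds only a send-only channel instead of a back-pointer to a
// controller, so it can be constructed and exercised without one.
type manager struct {
	sendCh chan<- message
}

func newManager(sendCh chan<- message) *manager {
	return &manager{sendCh: sendCh}
}

func (m *manager) announce(name string) {
	// instead of calling methods on a controller, just enqueue a message
	m.sendCh <- message{text: "new proxy: " + name}
}

func main() {
	ch := make(chan message, 10)
	m := newManager(ch)
	m.announce("ssh")
	fmt.Println((<-ch).text)
}

Anything able to drain the channel, whether a Control or a test, can now own a manager.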


@@ -15,35 +15,85 @@
 package client
 
 import (
+	"fmt"
+	"io/ioutil"
+	"runtime"
+	"sync"
+	"sync/atomic"
+	"time"
+
 	"github.com/fatedier/frp/g"
 	"github.com/fatedier/frp/models/config"
+	"github.com/fatedier/frp/models/msg"
 	"github.com/fatedier/frp/utils/log"
+	frpNet "github.com/fatedier/frp/utils/net"
+	"github.com/fatedier/frp/utils/util"
+	"github.com/fatedier/frp/utils/version"
+	fmux "github.com/hashicorp/yamux"
 )
 
 type Service struct {
-	// manager control connection with server
-	ctl *Control
+	// uniq id got from frps, attach it in loginMsg
+	runId string
+
+	// manager control connection with server
+	ctl   *Control
+	ctlMu sync.RWMutex
+
+	pxyCfgs     map[string]config.ProxyConf
+	visitorCfgs map[string]config.VisitorConf
+	cfgMu       sync.RWMutex
+
+	exit     uint32 // 0 means not exit
 	closedCh chan int
 }
 
 func NewService(pxyCfgs map[string]config.ProxyConf, visitorCfgs map[string]config.VisitorConf) (svr *Service) {
 	svr = &Service{
-		closedCh: make(chan int),
+		pxyCfgs:     pxyCfgs,
+		visitorCfgs: visitorCfgs,
+		exit:        0,
+		closedCh:    make(chan int),
 	}
-	ctl := NewControl(svr, pxyCfgs, visitorCfgs)
-	svr.ctl = ctl
 	return
 }
 
+func (svr *Service) GetController() *Control {
+	svr.ctlMu.RLock()
+	defer svr.ctlMu.RUnlock()
+	return svr.ctl
+}
+
 func (svr *Service) Run() error {
-	err := svr.ctl.Run()
-	if err != nil {
-		return err
+	// first login
+	for {
+		conn, session, err := svr.login()
+		if err != nil {
+			log.Warn("login to server failed: %v", err)
+
+			// if login_fail_exit is true, just exit this program
+			// otherwise sleep a while and try again to connect to server
+			if g.GlbClientCfg.LoginFailExit {
+				return err
+			} else {
+				time.Sleep(10 * time.Second)
+			}
+		} else {
+			// login success
+			ctl := NewControl(svr.runId, conn, session, svr.pxyCfgs, svr.visitorCfgs)
+			ctl.Run()
+			svr.ctlMu.Lock()
+			svr.ctl = ctl
+			svr.ctlMu.Unlock()
+			break
+		}
 	}
 
+	go svr.keepControllerWorking()
+
 	if g.GlbClientCfg.AdminPort != 0 {
-		err = svr.RunAdminServer(g.GlbClientCfg.AdminAddr, g.GlbClientCfg.AdminPort)
+		err := svr.RunAdminServer(g.GlbClientCfg.AdminAddr, g.GlbClientCfg.AdminPort)
 		if err != nil {
 			log.Warn("run admin server error: %v", err)
 		}
@@ -54,6 +104,119 @@ func (svr *Service) Run() error {
 	return nil
 }
 
-func (svr *Service) Close() {
-	svr.ctl.Close()
+func (svr *Service) keepControllerWorking() {
+	maxDelayTime := 20 * time.Second
+	delayTime := time.Second
+
+	for {
+		<-svr.ctl.ClosedDoneCh()
+		if atomic.LoadUint32(&svr.exit) != 0 {
+			return
+		}
+
+		for {
+			log.Info("try to reconnect to server...")
+			conn, session, err := svr.login()
+			if err != nil {
+				log.Warn("reconnect to server error: %v", err)
+				time.Sleep(delayTime)
+				delayTime = delayTime * 2
+				if delayTime > maxDelayTime {
+					delayTime = maxDelayTime
+				}
+				continue
+			}
+			// reconnect success, init delayTime
+			delayTime = time.Second
+
+			ctl := NewControl(svr.runId, conn, session, svr.pxyCfgs, svr.visitorCfgs)
+			ctl.Run()
+			svr.ctlMu.Lock()
+			svr.ctl = ctl
+			svr.ctlMu.Unlock()
+			break
+		}
+	}
+}
+
+// login creates a connection to frps and registers it self as a client
+// conn: control connection
+// session: if it's not nil, using tcp mux
+func (svr *Service) login() (conn frpNet.Conn, session *fmux.Session, err error) {
+	conn, err = frpNet.ConnectServerByProxy(g.GlbClientCfg.HttpProxy, g.GlbClientCfg.Protocol,
+		fmt.Sprintf("%s:%d", g.GlbClientCfg.ServerAddr, g.GlbClientCfg.ServerPort))
+	if err != nil {
+		return
+	}
+
+	defer func() {
+		if err != nil {
+			conn.Close()
+		}
+	}()
+
+	if g.GlbClientCfg.TcpMux {
+		fmuxCfg := fmux.DefaultConfig()
+		fmuxCfg.LogOutput = ioutil.Discard
+		session, err = fmux.Client(conn, fmuxCfg)
+		if err != nil {
+			return
+		}
+		stream, errRet := session.OpenStream()
+		if errRet != nil {
+			session.Close()
+			err = errRet
+			return
+		}
+		conn = frpNet.WrapConn(stream)
+	}
+
+	now := time.Now().Unix()
+	loginMsg := &msg.Login{
+		Arch:         runtime.GOARCH,
+		Os:           runtime.GOOS,
+		PoolCount:    g.GlbClientCfg.PoolCount,
+		User:         g.GlbClientCfg.User,
+		Version:      version.Full(),
+		PrivilegeKey: util.GetAuthKey(g.GlbClientCfg.Token, now),
+		Timestamp:    now,
+		RunId:        svr.runId,
+	}
+
+	if err = msg.WriteMsg(conn, loginMsg); err != nil {
+		return
+	}
+
+	var loginRespMsg msg.LoginResp
+	conn.SetReadDeadline(time.Now().Add(10 * time.Second))
+	if err = msg.ReadMsgInto(conn, &loginRespMsg); err != nil {
+		return
+	}
+	conn.SetReadDeadline(time.Time{})
+
+	if loginRespMsg.Error != "" {
+		err = fmt.Errorf("%s", loginRespMsg.Error)
+		log.Error("%s", loginRespMsg.Error)
+		return
+	}
+
+	svr.runId = loginRespMsg.RunId
+	g.GlbClientCfg.ServerUdpPort = loginRespMsg.ServerUdpPort
+	log.Info("login to server success, get run id [%s], server udp port [%d]", loginRespMsg.RunId, loginRespMsg.ServerUdpPort)
+	return
+}
+
+func (svr *Service) ReloadConf(pxyCfgs map[string]config.ProxyConf, visitorCfgs map[string]config.VisitorConf) error {
+	svr.cfgMu.Lock()
+	svr.pxyCfgs = pxyCfgs
+	svr.visitorCfgs = visitorCfgs
+	svr.cfgMu.Unlock()
+
+	return svr.ctl.ReloadConf(pxyCfgs, visitorCfgs)
+}
+
+func (svr *Service) Close() {
+	atomic.StoreUint32(&svr.exit, 1)
+	svr.ctl.Close()
+	close(svr.closedCh)
 }
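
keepControllerWorking above blocks on ClosedDoneCh() and then retries login with a delay that doubles after each failure and is capped at 20 seconds. The same retry shape in isolation (the connect function and the small durations are invented for the demo; frpc starts at 1s and caps at 20s):

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryConnect keeps calling connect until it succeeds, doubling the wait
// after every failure and capping it at max.
func retryConnect(connect func() error, initial, max time.Duration) {
	delay := initial
	for {
		if err := connect(); err != nil {
			fmt.Printf("connect failed: %v, retrying in %v\n", err, delay)
			time.Sleep(delay)
			delay *= 2
			if delay > max {
				delay = max
			}
			continue
		}
		return
	}
}

func main() {
	attempts := 0
	retryConnect(func() error {
		attempts++
		if attempts < 3 { // pretend the server comes back on the third try
			return errors.New("server unreachable")
		}
		return nil
	}, 10*time.Millisecond, 80*time.Millisecond)
	fmt.Println("connected after", attempts, "attempts")
}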


@@ -25,7 +25,7 @@ token = 12345678
 admin_addr = 127.0.0.1
 admin_port = 7400
 admin_user = admin
-admin_passwd = admin
+admin_pwd = admin
 
 # connections will be established in advance, default value is zero
 pool_count = 5