xtcp: when connection timeout occurs, support fallback to STCP (#3460)

This commit is contained in:
fatedier 2023-05-30 10:55:00 +08:00 committed by GitHub
parent 555db9d272
commit c7a0cfc66d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
16 changed files with 230 additions and 68 deletions

View File

@ -369,7 +369,8 @@ func (cm *ConnectionManager) OpenConnection() error {
} }
tlsConfig.NextProtos = []string{"frp"} tlsConfig.NextProtos = []string{"frp"}
conn, err := quic.DialAddr( conn, err := quic.DialAddrContext(
cm.ctx,
net.JoinHostPort(cm.cfg.ServerAddr, strconv.Itoa(cm.cfg.ServerPort)), net.JoinHostPort(cm.cfg.ServerAddr, strconv.Itoa(cm.cfg.ServerPort)),
tlsConfig, &quic.Config{ tlsConfig, &quic.Config{
MaxIdleTimeout: time.Duration(cm.cfg.QUICMaxIdleTimeout) * time.Second, MaxIdleTimeout: time.Duration(cm.cfg.QUICMaxIdleTimeout) * time.Second,
@ -467,7 +468,8 @@ func (cm *ConnectionManager) realConnect() (net.Conn, error) {
Hook: utilnet.DialHookCustomTLSHeadByte(tlsConfig != nil, cm.cfg.DisableCustomTLSFirstByte), Hook: utilnet.DialHookCustomTLSHeadByte(tlsConfig != nil, cm.cfg.DisableCustomTLSFirstByte),
}), }),
) )
conn, err := libdial.Dial( conn, err := libdial.DialContext(
cm.ctx,
net.JoinHostPort(cm.cfg.ServerAddr, strconv.Itoa(cm.cfg.ServerPort)), net.JoinHostPort(cm.cfg.ServerAddr, strconv.Itoa(cm.cfg.ServerPort)),
dialOptions..., dialOptions...,
) )

View File

@ -35,17 +35,20 @@ type STCPVisitor struct {
} }
func (sv *STCPVisitor) Run() (err error) { func (sv *STCPVisitor) Run() (err error) {
sv.l, err = net.Listen("tcp", net.JoinHostPort(sv.cfg.BindAddr, strconv.Itoa(sv.cfg.BindPort))) if sv.cfg.BindPort > 0 {
if err != nil { sv.l, err = net.Listen("tcp", net.JoinHostPort(sv.cfg.BindAddr, strconv.Itoa(sv.cfg.BindPort)))
return if err != nil {
return
}
go sv.worker()
} }
go sv.worker() go sv.internalConnWorker()
return return
} }
func (sv *STCPVisitor) Close() { func (sv *STCPVisitor) Close() {
sv.l.Close() sv.BaseVisitor.Close()
} }
func (sv *STCPVisitor) worker() { func (sv *STCPVisitor) worker() {
@ -56,7 +59,18 @@ func (sv *STCPVisitor) worker() {
xl.Warn("stcp local listener closed") xl.Warn("stcp local listener closed")
return return
} }
go sv.handleConn(conn)
}
}
func (sv *STCPVisitor) internalConnWorker() {
xl := xlog.FromContextSafe(sv.ctx)
for {
conn, err := sv.internalLn.Accept()
if err != nil {
xl.Warn("stcp internal listener closed")
return
}
go sv.handleConn(conn) go sv.handleConn(conn)
} }
} }

View File

@ -254,6 +254,7 @@ func (sv *SUDPVisitor) Close() {
default: default:
close(sv.checkCloseCh) close(sv.checkCloseCh)
} }
sv.BaseVisitor.Close()
if sv.udpConn != nil { if sv.udpConn != nil {
sv.udpConn.Close() sv.udpConn.Close()
} }

View File

@ -21,12 +21,14 @@ import (
"github.com/fatedier/frp/pkg/config" "github.com/fatedier/frp/pkg/config"
"github.com/fatedier/frp/pkg/transport" "github.com/fatedier/frp/pkg/transport"
utilnet "github.com/fatedier/frp/pkg/util/net"
"github.com/fatedier/frp/pkg/util/xlog" "github.com/fatedier/frp/pkg/util/xlog"
) )
// Visitor is used to forward traffic from a local port to a remote service. // Visitor is used to forward traffic from a local port to a remote service.
type Visitor interface { type Visitor interface {
Run() error Run() error
AcceptConn(conn net.Conn) error
Close() Close()
} }
@ -35,14 +37,17 @@ func NewVisitor(
cfg config.VisitorConf, cfg config.VisitorConf,
clientCfg config.ClientCommonConf, clientCfg config.ClientCommonConf,
connectServer func() (net.Conn, error), connectServer func() (net.Conn, error),
transferConn func(string, net.Conn) error,
msgTransporter transport.MessageTransporter, msgTransporter transport.MessageTransporter,
) (visitor Visitor) { ) (visitor Visitor) {
xl := xlog.FromContextSafe(ctx).Spawn().AppendPrefix(cfg.GetBaseInfo().ProxyName) xl := xlog.FromContextSafe(ctx).Spawn().AppendPrefix(cfg.GetBaseInfo().ProxyName)
baseVisitor := BaseVisitor{ baseVisitor := BaseVisitor{
clientCfg: clientCfg, clientCfg: clientCfg,
connectServer: connectServer, connectServer: connectServer,
transferConn: transferConn,
msgTransporter: msgTransporter, msgTransporter: msgTransporter,
ctx: xlog.NewContext(ctx, xl), ctx: xlog.NewContext(ctx, xl),
internalLn: utilnet.NewInternalListener(),
} }
switch cfg := cfg.(type) { switch cfg := cfg.(type) {
case *config.STCPVisitorConf: case *config.STCPVisitorConf:
@ -69,9 +74,24 @@ func NewVisitor(
type BaseVisitor struct { type BaseVisitor struct {
clientCfg config.ClientCommonConf clientCfg config.ClientCommonConf
connectServer func() (net.Conn, error) connectServer func() (net.Conn, error)
transferConn func(string, net.Conn) error
msgTransporter transport.MessageTransporter msgTransporter transport.MessageTransporter
l net.Listener l net.Listener
internalLn *utilnet.InternalListener
mu sync.RWMutex mu sync.RWMutex
ctx context.Context ctx context.Context
} }
func (v *BaseVisitor) AcceptConn(conn net.Conn) error {
return v.internalLn.PutConn(conn)
}
func (v *BaseVisitor) Close() {
if v.l != nil {
v.l.Close()
}
if v.internalLn != nil {
v.internalLn.Close()
}
}

View File

@ -16,6 +16,7 @@ package visitor
import ( import (
"context" "context"
"fmt"
"net" "net"
"sync" "sync"
"time" "time"
@ -34,7 +35,7 @@ type Manager struct {
checkInterval time.Duration checkInterval time.Duration
mu sync.Mutex mu sync.RWMutex
ctx context.Context ctx context.Context
stopCh chan struct{} stopCh chan struct{}
@ -83,11 +84,24 @@ func (vm *Manager) Run() {
} }
} }
func (vm *Manager) Close() {
vm.mu.Lock()
defer vm.mu.Unlock()
for _, v := range vm.visitors {
v.Close()
}
select {
case <-vm.stopCh:
default:
close(vm.stopCh)
}
}
// Hold lock before calling this function. // Hold lock before calling this function.
func (vm *Manager) startVisitor(cfg config.VisitorConf) (err error) { func (vm *Manager) startVisitor(cfg config.VisitorConf) (err error) {
xl := xlog.FromContextSafe(vm.ctx) xl := xlog.FromContextSafe(vm.ctx)
name := cfg.GetBaseInfo().ProxyName name := cfg.GetBaseInfo().ProxyName
visitor := NewVisitor(vm.ctx, cfg, vm.clientCfg, vm.connectServer, vm.msgTransporter) visitor := NewVisitor(vm.ctx, cfg, vm.clientCfg, vm.connectServer, vm.TransferConn, vm.msgTransporter)
err = visitor.Run() err = visitor.Run()
if err != nil { if err != nil {
xl.Warn("start error: %v", err) xl.Warn("start error: %v", err)
@ -139,15 +153,13 @@ func (vm *Manager) Reload(cfgs map[string]config.VisitorConf) {
} }
} }
func (vm *Manager) Close() { // TransferConn transfers a connection to a visitor.
vm.mu.Lock() func (vm *Manager) TransferConn(name string, conn net.Conn) error {
defer vm.mu.Unlock() vm.mu.RLock()
for _, v := range vm.visitors { defer vm.mu.RUnlock()
v.Close() v, ok := vm.visitors[name]
} if !ok {
select { return fmt.Errorf("visitor [%s] not found", name)
case <-vm.stopCh:
default:
close(vm.stopCh)
} }
return v.AcceptConn(conn)
} }

View File

@ -59,12 +59,15 @@ func (sv *XTCPVisitor) Run() (err error) {
sv.session = NewQUICTunnelSession(&sv.clientCfg) sv.session = NewQUICTunnelSession(&sv.clientCfg)
} }
sv.l, err = net.Listen("tcp", net.JoinHostPort(sv.cfg.BindAddr, strconv.Itoa(sv.cfg.BindPort))) if sv.cfg.BindPort > 0 {
if err != nil { sv.l, err = net.Listen("tcp", net.JoinHostPort(sv.cfg.BindAddr, strconv.Itoa(sv.cfg.BindPort)))
return if err != nil {
return
}
go sv.worker()
} }
go sv.worker() go sv.internalConnWorker()
go sv.processTunnelStartEvents() go sv.processTunnelStartEvents()
if sv.cfg.KeepTunnelOpen { if sv.cfg.KeepTunnelOpen {
sv.retryLimiter = rate.NewLimiter(rate.Every(time.Hour/time.Duration(sv.cfg.MaxRetriesAnHour)), sv.cfg.MaxRetriesAnHour) sv.retryLimiter = rate.NewLimiter(rate.Every(time.Hour/time.Duration(sv.cfg.MaxRetriesAnHour)), sv.cfg.MaxRetriesAnHour)
@ -74,8 +77,12 @@ func (sv *XTCPVisitor) Run() (err error) {
} }
func (sv *XTCPVisitor) Close() { func (sv *XTCPVisitor) Close() {
sv.l.Close() sv.mu.Lock()
sv.cancel() defer sv.mu.Unlock()
sv.BaseVisitor.Close()
if sv.cancel != nil {
sv.cancel()
}
if sv.session != nil { if sv.session != nil {
sv.session.Close() sv.session.Close()
} }
@ -89,7 +96,18 @@ func (sv *XTCPVisitor) worker() {
xl.Warn("xtcp local listener closed") xl.Warn("xtcp local listener closed")
return return
} }
go sv.handleConn(conn)
}
}
func (sv *XTCPVisitor) internalConnWorker() {
xl := xlog.FromContextSafe(sv.ctx)
for {
conn, err := sv.internalLn.Accept()
if err != nil {
xl.Warn("xtcp internal listener closed")
return
}
go sv.handleConn(conn) go sv.handleConn(conn)
} }
} }
@ -139,15 +157,37 @@ func (sv *XTCPVisitor) keepTunnelOpenWorker() {
func (sv *XTCPVisitor) handleConn(userConn net.Conn) { func (sv *XTCPVisitor) handleConn(userConn net.Conn) {
xl := xlog.FromContextSafe(sv.ctx) xl := xlog.FromContextSafe(sv.ctx)
defer userConn.Close() isConnTrasfered := false
defer func() {
if !isConnTrasfered {
userConn.Close()
}
}()
xl.Debug("get a new xtcp user connection") xl.Debug("get a new xtcp user connection")
// Open a tunnel connection to the server. If there is already a successful hole-punching connection, // Open a tunnel connection to the server. If there is already a successful hole-punching connection,
// it will be reused. Otherwise, it will block and wait for a successful hole-punching connection until timeout. // it will be reused. Otherwise, it will block and wait for a successful hole-punching connection until timeout.
tunnelConn, err := sv.openTunnel() ctx := context.Background()
if sv.cfg.FallbackTo != "" {
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(sv.cfg.FallbackTimeoutMs)*time.Millisecond)
defer cancel()
ctx = timeoutCtx
}
tunnelConn, err := sv.openTunnel(ctx)
if err != nil { if err != nil {
xl.Error("open tunnel error: %v", err) xl.Error("open tunnel error: %v", err)
// no fallback, just return
if sv.cfg.FallbackTo == "" {
return
}
xl.Debug("try to transfer connection to visitor: %s", sv.cfg.FallbackTo)
if err := sv.transferConn(sv.cfg.FallbackTo, userConn); err != nil {
xl.Error("transfer connection to visitor %s error: %v", sv.cfg.FallbackTo, err)
return
}
isConnTrasfered = true
return return
} }
@ -171,7 +211,7 @@ func (sv *XTCPVisitor) handleConn(userConn net.Conn) {
} }
// openTunnel will open a tunnel connection to the target server. // openTunnel will open a tunnel connection to the target server.
func (sv *XTCPVisitor) openTunnel() (conn net.Conn, err error) { func (sv *XTCPVisitor) openTunnel(ctx context.Context) (conn net.Conn, err error) {
xl := xlog.FromContextSafe(sv.ctx) xl := xlog.FromContextSafe(sv.ctx)
ticker := time.NewTicker(500 * time.Millisecond) ticker := time.NewTicker(500 * time.Millisecond)
defer ticker.Stop() defer ticker.Stop()
@ -185,6 +225,8 @@ func (sv *XTCPVisitor) openTunnel() (conn net.Conn, err error) {
select { select {
case <-sv.ctx.Done(): case <-sv.ctx.Done():
return nil, sv.ctx.Err() return nil, sv.ctx.Err()
case <-ctx.Done():
return nil, ctx.Err()
case <-immediateTrigger: case <-immediateTrigger:
conn, err = sv.getTunnelConn() conn, err = sv.getTunnelConn()
case <-ticker.C: case <-ticker.C:

View File

@ -117,7 +117,6 @@ var rootCmd = &cobra.Command{
// Do not show command usage here. // Do not show command usage here.
err := runClient(cfgFile) err := runClient(cfgFile)
if err != nil { if err != nil {
fmt.Println(err)
os.Exit(1) os.Exit(1)
} }
return nil return nil
@ -199,6 +198,7 @@ func parseClientCommonCfgFromCmd() (cfg config.ClientCommonConf, err error) {
func runClient(cfgFilePath string) error { func runClient(cfgFilePath string) error {
cfg, pxyCfgs, visitorCfgs, err := config.ParseClientConfig(cfgFilePath) cfg, pxyCfgs, visitorCfgs, err := config.ParseClientConfig(cfgFilePath)
if err != nil { if err != nil {
fmt.Println(err)
return err return err
} }
return startService(cfg, pxyCfgs, visitorCfgs, cfgFilePath) return startService(cfg, pxyCfgs, visitorCfgs, cfgFilePath)
@ -214,8 +214,8 @@ func startService(
cfg.LogMaxDays, cfg.DisableLogColor) cfg.LogMaxDays, cfg.DisableLogColor)
if cfgFile != "" { if cfgFile != "" {
log.Trace("start frpc service for config file [%s]", cfgFile) log.Info("start frpc service for config file [%s]", cfgFile)
defer log.Trace("frpc service for config file [%s] stopped", cfgFile) defer log.Info("frpc service for config file [%s] stopped", cfgFile)
} }
svr, errRet := client.NewService(cfg, pxyCfgs, visitorCfgs, cfgFile) svr, errRet := client.NewService(cfg, pxyCfgs, visitorCfgs, cfgFile)
if errRet != nil { if errRet != nil {

View File

@ -337,6 +337,8 @@ server_name = secret_tcp
sk = abcdefg sk = abcdefg
# connect this address to visitor stcp server # connect this address to visitor stcp server
bind_addr = 127.0.0.1 bind_addr = 127.0.0.1
# bind_port can be less than 0, which means don't bind to the port and only receive connections redirected from
# other visitors. (This is not supported for SUDP now)
bind_port = 9000 bind_port = 9000
use_encryption = false use_encryption = false
use_compression = false use_compression = false
@ -355,6 +357,8 @@ type = xtcp
server_name = p2p_tcp server_name = p2p_tcp
sk = abcdefg sk = abcdefg
bind_addr = 127.0.0.1 bind_addr = 127.0.0.1
# bind_port can be less than 0, which means don't bind to the port and only receive connections redirected from
# other visitors. (This is not supported for SUDP now)
bind_port = 9001 bind_port = 9001
use_encryption = false use_encryption = false
use_compression = false use_compression = false
@ -363,6 +367,8 @@ keep_tunnel_open = false
# effective when keep_tunnel_open is set to true, the number of attempts to punch through per hour # effective when keep_tunnel_open is set to true, the number of attempts to punch through per hour
max_retries_an_hour = 8 max_retries_an_hour = 8
min_retry_interval = 90 min_retry_interval = 90
# fallback_to = stcp_visitor
# fallback_timeout_ms = 500
[tcpmuxhttpconnect] [tcpmuxhttpconnect]
type = tcpmux type = tcpmux

View File

@ -661,9 +661,10 @@ func Test_LoadClientBasicConf(t *testing.T) {
BindAddr: "127.0.0.1", BindAddr: "127.0.0.1",
BindPort: 9001, BindPort: 9001,
}, },
Protocol: "quic", Protocol: "quic",
MaxRetriesAnHour: 8, MaxRetriesAnHour: 8,
MinRetryInterval: 90, MinRetryInterval: 90,
FallbackTimeoutMs: 1000,
}, },
} }

View File

@ -49,7 +49,10 @@ type BaseVisitorConf struct {
Sk string `ini:"sk" json:"sk"` Sk string `ini:"sk" json:"sk"`
ServerName string `ini:"server_name" json:"server_name"` ServerName string `ini:"server_name" json:"server_name"`
BindAddr string `ini:"bind_addr" json:"bind_addr"` BindAddr string `ini:"bind_addr" json:"bind_addr"`
BindPort int `ini:"bind_port" json:"bind_port"` // BindPort is the port that visitor listens on.
// It can be less than 0, which means don't bind to the port and only receive connections redirected from
// other visitors. (This is not supported for SUDP now)
BindPort int `ini:"bind_port" json:"bind_port"`
} }
type SUDPVisitorConf struct { type SUDPVisitorConf struct {
@ -63,10 +66,12 @@ type STCPVisitorConf struct {
type XTCPVisitorConf struct { type XTCPVisitorConf struct {
BaseVisitorConf `ini:",extends"` BaseVisitorConf `ini:",extends"`
Protocol string `ini:"protocol" json:"protocol,omitempty"` Protocol string `ini:"protocol" json:"protocol,omitempty"`
KeepTunnelOpen bool `ini:"keep_tunnel_open" json:"keep_tunnel_open,omitempty"` KeepTunnelOpen bool `ini:"keep_tunnel_open" json:"keep_tunnel_open,omitempty"`
MaxRetriesAnHour int `ini:"max_retries_an_hour" json:"max_retries_an_hour,omitempty"` MaxRetriesAnHour int `ini:"max_retries_an_hour" json:"max_retries_an_hour,omitempty"`
MinRetryInterval int `ini:"min_retry_interval" json:"min_retry_interval,omitempty"` MinRetryInterval int `ini:"min_retry_interval" json:"min_retry_interval,omitempty"`
FallbackTo string `ini:"fallback_to" json:"fallback_to,omitempty"`
FallbackTimeoutMs int `ini:"fallback_timeout_ms" json:"fallback_timeout_ms,omitempty"`
} }
// DefaultVisitorConf creates a empty VisitorConf object by visitorType. // DefaultVisitorConf creates a empty VisitorConf object by visitorType.
@ -134,7 +139,9 @@ func (cfg *BaseVisitorConf) check() (err error) {
err = fmt.Errorf("bind_addr shouldn't be empty") err = fmt.Errorf("bind_addr shouldn't be empty")
return return
} }
if cfg.BindPort <= 0 { // BindPort can be less than 0, which means don't bind to the port and only receive connections redirected from
// other visitors
if cfg.BindPort == 0 {
err = fmt.Errorf("bind_port is required") err = fmt.Errorf("bind_port is required")
return return
} }
@ -155,7 +162,6 @@ func (cfg *BaseVisitorConf) unmarshalFromIni(prefix string, name string, section
if cfg.BindAddr == "" { if cfg.BindAddr == "" {
cfg.BindAddr = "127.0.0.1" cfg.BindAddr = "127.0.0.1"
} }
return nil return nil
} }
@ -169,7 +175,6 @@ func preVisitorUnmarshalFromIni(cfg VisitorConf, prefix string, name string, sec
if err != nil { if err != nil {
return err return err
} }
return nil return nil
} }
@ -268,7 +273,9 @@ func (cfg *XTCPVisitorConf) Compare(cmp VisitorConf) bool {
if cfg.Protocol != cmpConf.Protocol || if cfg.Protocol != cmpConf.Protocol ||
cfg.KeepTunnelOpen != cmpConf.KeepTunnelOpen || cfg.KeepTunnelOpen != cmpConf.KeepTunnelOpen ||
cfg.MaxRetriesAnHour != cmpConf.MaxRetriesAnHour || cfg.MaxRetriesAnHour != cmpConf.MaxRetriesAnHour ||
cfg.MinRetryInterval != cmpConf.MinRetryInterval { cfg.MinRetryInterval != cmpConf.MinRetryInterval ||
cfg.FallbackTo != cmpConf.FallbackTo ||
cfg.FallbackTimeoutMs != cmpConf.FallbackTimeoutMs {
return false return false
} }
return true return true
@ -290,6 +297,9 @@ func (cfg *XTCPVisitorConf) UnmarshalFromIni(prefix string, name string, section
if cfg.MinRetryInterval <= 0 { if cfg.MinRetryInterval <= 0 {
cfg.MinRetryInterval = 90 cfg.MinRetryInterval = 90
} }
if cfg.FallbackTimeoutMs <= 0 {
cfg.FallbackTimeoutMs = 1000
}
return return
} }

View File

@ -87,9 +87,10 @@ func Test_Visitor_UnmarshalFromIni(t *testing.T) {
BindAddr: "127.0.0.1", BindAddr: "127.0.0.1",
BindPort: 9001, BindPort: 9001,
}, },
Protocol: "quic", Protocol: "quic",
MaxRetriesAnHour: 8, MaxRetriesAnHour: 8,
MinRetryInterval: 90, MinRetryInterval: 90,
FallbackTimeoutMs: 1000,
}, },
}, },
} }

View File

@ -63,20 +63,20 @@ var (
} }
// mode 2, HardNAT is receiver, EasyNAT is sender // mode 2, HardNAT is receiver, EasyNAT is sender
// sender, portsRandomNumber 1000, sendDelayMs 2000 | receiver, listen 256 ports, ttl 7 // sender, portsRandomNumber 1000, sendDelayMs 3000 | receiver, listen 256 ports, ttl 7
// sender, portsRandomNumber 1000, sendDelayMs 2000 | receiver, listen 256 ports, ttl 4 // sender, portsRandomNumber 1000, sendDelayMs 3000 | receiver, listen 256 ports, ttl 4
// sender, portsRandomNumber 1000, sendDelayMs 2000 | receiver, listen 256 ports // sender, portsRandomNumber 1000, sendDelayMs 3000 | receiver, listen 256 ports
mode2Behaviors = []lo.Tuple2[RecommandBehavior, RecommandBehavior]{ mode2Behaviors = []lo.Tuple2[RecommandBehavior, RecommandBehavior]{
lo.T2( lo.T2(
RecommandBehavior{Role: DetectRoleSender, PortsRandomNumber: 1000, SendDelayMs: 2000}, RecommandBehavior{Role: DetectRoleSender, PortsRandomNumber: 1000, SendDelayMs: 3000},
RecommandBehavior{Role: DetectRoleReceiver, ListenRandomPorts: 256, TTL: 7}, RecommandBehavior{Role: DetectRoleReceiver, ListenRandomPorts: 256, TTL: 7},
), ),
lo.T2( lo.T2(
RecommandBehavior{Role: DetectRoleSender, PortsRandomNumber: 1000, SendDelayMs: 2000}, RecommandBehavior{Role: DetectRoleSender, PortsRandomNumber: 1000, SendDelayMs: 3000},
RecommandBehavior{Role: DetectRoleReceiver, ListenRandomPorts: 256, TTL: 4}, RecommandBehavior{Role: DetectRoleReceiver, ListenRandomPorts: 256, TTL: 4},
), ),
lo.T2( lo.T2(
RecommandBehavior{Role: DetectRoleSender, PortsRandomNumber: 1000, SendDelayMs: 2000}, RecommandBehavior{Role: DetectRoleSender, PortsRandomNumber: 1000, SendDelayMs: 3000},
RecommandBehavior{Role: DetectRoleReceiver, ListenRandomPorts: 256}, RecommandBehavior{Role: DetectRoleReceiver, ListenRandomPorts: 256},
), ),
} }
@ -98,21 +98,21 @@ var (
} }
// mode 4, Regular ports changes are usually the sender. // mode 4, Regular ports changes are usually the sender.
// sender, portsRandomNumber 1000, sendDelayMs: 2000 | receiver, listen 256 ports, ttl 7, portsRangeNumber 10 // sender, portsRandomNumber 1000, sendDelayMs: 2000 | receiver, listen 256 ports, ttl 7, portsRangeNumber 2
// sender, portsRandomNumber 1000, sendDelayMs: 2000 | receiver, listen 256 ports, ttl 4, portsRangeNumber 10 // sender, portsRandomNumber 1000, sendDelayMs: 2000 | receiver, listen 256 ports, ttl 4, portsRangeNumber 2
// sender, portsRandomNumber 1000, SendDelayMs: 2000 | receiver, listen 256 ports, portsRangeNumber 10 // sender, portsRandomNumber 1000, SendDelayMs: 2000 | receiver, listen 256 ports, portsRangeNumber 2
mode4Behaviors = []lo.Tuple2[RecommandBehavior, RecommandBehavior]{ mode4Behaviors = []lo.Tuple2[RecommandBehavior, RecommandBehavior]{
lo.T2( lo.T2(
RecommandBehavior{Role: DetectRoleSender, PortsRandomNumber: 1000, SendDelayMs: 2000}, RecommandBehavior{Role: DetectRoleSender, PortsRandomNumber: 1000, SendDelayMs: 3000},
RecommandBehavior{Role: DetectRoleReceiver, ListenRandomPorts: 256, TTL: 7, PortsRangeNumber: 10}, RecommandBehavior{Role: DetectRoleReceiver, ListenRandomPorts: 256, TTL: 7, PortsRangeNumber: 2},
), ),
lo.T2( lo.T2(
RecommandBehavior{Role: DetectRoleSender, PortsRandomNumber: 1000, SendDelayMs: 2000}, RecommandBehavior{Role: DetectRoleSender, PortsRandomNumber: 1000, SendDelayMs: 3000},
RecommandBehavior{Role: DetectRoleReceiver, ListenRandomPorts: 256, TTL: 4, PortsRangeNumber: 10}, RecommandBehavior{Role: DetectRoleReceiver, ListenRandomPorts: 256, TTL: 4, PortsRangeNumber: 2},
), ),
lo.T2( lo.T2(
RecommandBehavior{Role: DetectRoleSender, PortsRandomNumber: 1000, SendDelayMs: 2000}, RecommandBehavior{Role: DetectRoleSender, PortsRandomNumber: 1000, SendDelayMs: 3000},
RecommandBehavior{Role: DetectRoleReceiver, ListenRandomPorts: 256, PortsRangeNumber: 10}, RecommandBehavior{Role: DetectRoleReceiver, ListenRandomPorts: 256, PortsRangeNumber: 2},
), ),
} }
) )

View File

@ -85,11 +85,6 @@ func ClassifyNATFeature(addresses []string, localIPs []string) (*NatFeature, err
} }
} }
natFeature.PortsDifference = portMax - portMin
if natFeature.PortsDifference <= 10 && natFeature.PortsDifference >= 1 {
natFeature.RegularPortsChange = true
}
switch { switch {
case ipChanged && portChanged: case ipChanged && portChanged:
natFeature.NatType = HardNAT natFeature.NatType = HardNAT
@ -104,6 +99,12 @@ func ClassifyNATFeature(addresses []string, localIPs []string) (*NatFeature, err
natFeature.NatType = EasyNAT natFeature.NatType = EasyNAT
natFeature.Behavior = BehaviorNoChange natFeature.Behavior = BehaviorNoChange
} }
if natFeature.Behavior == BehaviorPortChanged {
natFeature.PortsDifference = portMax - portMin
if natFeature.PortsDifference <= 5 && natFeature.PortsDifference >= 1 {
natFeature.RegularPortsChange = true
}
}
return natFeature, nil return natFeature, nil
} }

View File

@ -384,7 +384,7 @@ func sendSidMessageToRangePorts(
if err := sendFunc(conn, detectAddr); err != nil { if err := sendFunc(conn, detectAddr); err != nil {
xl.Trace("send sid message from %s to %s error: %v", conn.LocalAddr(), detectAddr, err) xl.Trace("send sid message from %s to %s error: %v", conn.LocalAddr(), detectAddr, err)
} }
time.Sleep(5 * time.Millisecond) time.Sleep(2 * time.Millisecond)
} }
} }
} }

View File

@ -376,7 +376,7 @@ var _ = ginkgo.Describe("[Feature: Basic]", func() {
for _, test := range tests { for _, test := range tests {
framework.NewRequestExpect(f). framework.NewRequestExpect(f).
RequestModify(func(r *request.Request) { RequestModify(func(r *request.Request) {
r.Timeout(5 * time.Second) r.Timeout(10 * time.Second)
}). }).
Protocol(protocol). Protocol(protocol).
PortName(test.bindPortName). PortName(test.bindPortName).

52
test/e2e/basic/xtcp.go Normal file
View File

@ -0,0 +1,52 @@
package basic
import (
"fmt"
"time"
"github.com/onsi/ginkgo/v2"
"github.com/fatedier/frp/test/e2e/framework"
"github.com/fatedier/frp/test/e2e/framework/consts"
"github.com/fatedier/frp/test/e2e/pkg/port"
"github.com/fatedier/frp/test/e2e/pkg/request"
)
var _ = ginkgo.Describe("[Feature: XTCP]", func() {
f := framework.NewDefaultFramework()
ginkgo.It("Fallback To STCP", func() {
serverConf := consts.DefaultServerConfig
clientConf := consts.DefaultClientConfig
bindPortName := port.GenName("XTCP")
clientConf += fmt.Sprintf(`
[foo]
type = stcp
local_port = {{ .%s }}
[foo-visitor]
type = stcp
role = visitor
server_name = foo
bind_port = -1
[bar-visitor]
type = xtcp
role = visitor
server_name = bar
bind_port = {{ .%s }}
keep_tunnel_open = true
fallback_to = foo-visitor
fallback_timeout_ms = 200
`, framework.TCPEchoServerPort, bindPortName)
f.RunProcesses([]string{serverConf}, []string{clientConf})
framework.NewRequestExpect(f).
RequestModify(func(r *request.Request) {
r.Timeout(time.Second)
}).
PortName(bindPortName).
Ensure()
})
})