mirror of
https://github.com/MetaCubeX/mihomo.git
synced 2026-03-02 02:39:53 +00:00
Compare commits
38 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5f1f296213 | ||
|
|
66fd5c9f0c | ||
|
|
c3a3009a8c | ||
|
|
01cd7e2c0e | ||
|
|
deec7aafe5 | ||
|
|
a9b7e705f0 | ||
|
|
fb043df1b6 | ||
|
|
748b5df902 | ||
|
|
8cbae59d55 | ||
|
|
a37440c81b | ||
|
|
dbb002a5ba | ||
|
|
1a84153213 | ||
|
|
dfe6e0509b | ||
|
|
b6dde7ded7 | ||
|
|
9f1da11792 | ||
|
|
63ad95e10f | ||
|
|
b06ec5bef8 | ||
|
|
d4fbffd8e8 | ||
|
|
305020175d | ||
|
|
79decdc253 | ||
|
|
407c13b8a4 | ||
|
|
d84b182be3 | ||
|
|
8f18d3f6db | ||
|
|
b9260e06b8 | ||
|
|
6337151207 | ||
|
|
aa555ced5f | ||
|
|
349b773b40 | ||
|
|
300eb8b12a | ||
|
|
2b84dd3618 | ||
|
|
6a620ba287 | ||
|
|
56c3462b76 | ||
|
|
6f4fe71e41 | ||
|
|
ba3e7187a6 | ||
|
|
0d92b6724b | ||
|
|
241ae92bce | ||
|
|
91985c1ef8 | ||
|
|
6a9d428991 | ||
|
|
765cbbcc01 |
62
.github/workflows/build.yml
vendored
62
.github/workflows/build.yml
vendored
@@ -29,14 +29,20 @@ jobs:
|
||||
strategy:
|
||||
matrix:
|
||||
jobs:
|
||||
- { goos: darwin, goarch: arm64, output: arm64 }
|
||||
- { goos: darwin, goarch: amd64, goamd64: v1, output: amd64-compatible }
|
||||
- { goos: darwin, goarch: amd64, goamd64: v1, output: amd64-compatible } # old style file name will be removed in next released
|
||||
- { goos: darwin, goarch: amd64, goamd64: v3, output: amd64 }
|
||||
- { goos: darwin, goarch: amd64, goamd64: v1, output: amd64-v1 }
|
||||
- { goos: darwin, goarch: amd64, goamd64: v2, output: amd64-v2 }
|
||||
- { goos: darwin, goarch: amd64, goamd64: v3, output: amd64-v3 }
|
||||
- { goos: darwin, goarch: arm64, output: arm64 }
|
||||
|
||||
- { goos: linux, goarch: '386', go386: sse2, output: '386', debian: i386, rpm: i386}
|
||||
- { goos: linux, goarch: '386', go386: softfloat, output: '386-softfloat' }
|
||||
- { goos: linux, goarch: amd64, goamd64: v1, output: amd64-compatible, test: test }
|
||||
- { goos: linux, goarch: amd64, goamd64: v1, output: amd64-compatible} # old style file name will be removed in next released
|
||||
- { goos: linux, goarch: amd64, goamd64: v3, output: amd64, debian: amd64, rpm: x86_64, pacman: x86_64}
|
||||
- { goos: linux, goarch: amd64, goamd64: v1, output: amd64-v1, debian: amd64, rpm: x86_64, pacman: x86_64, test: test }
|
||||
- { goos: linux, goarch: amd64, goamd64: v2, output: amd64-v2, debian: amd64, rpm: x86_64, pacman: x86_64}
|
||||
- { goos: linux, goarch: amd64, goamd64: v3, output: amd64-v3, debian: amd64, rpm: x86_64, pacman: x86_64}
|
||||
- { goos: linux, goarch: arm64, output: arm64, debian: arm64, rpm: aarch64, pacman: aarch64}
|
||||
- { goos: linux, goarch: arm, goarm: '5', output: armv5 }
|
||||
- { goos: linux, goarch: arm, goarm: '6', output: armv6, debian: armel, rpm: armv6hl}
|
||||
@@ -54,13 +60,19 @@ jobs:
|
||||
- { goos: linux, goarch: ppc64le, output: ppc64le, debian: ppc64el, rpm: ppc64le }
|
||||
|
||||
- { goos: windows, goarch: '386', output: '386' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v1, output: amd64-compatible }
|
||||
- { goos: windows, goarch: amd64, goamd64: v1, output: amd64-compatible } # old style file name will be removed in next released
|
||||
- { goos: windows, goarch: amd64, goamd64: v3, output: amd64 }
|
||||
- { goos: windows, goarch: amd64, goamd64: v1, output: amd64-v1 }
|
||||
- { goos: windows, goarch: amd64, goamd64: v2, output: amd64-v2 }
|
||||
- { goos: windows, goarch: amd64, goamd64: v3, output: amd64-v3 }
|
||||
- { goos: windows, goarch: arm64, output: arm64 }
|
||||
|
||||
- { goos: freebsd, goarch: '386', output: '386' }
|
||||
- { goos: freebsd, goarch: amd64, goamd64: v1, output: amd64-compatible }
|
||||
- { goos: freebsd, goarch: amd64, goamd64: v1, output: amd64-compatible } # old style file name will be removed in next released
|
||||
- { goos: freebsd, goarch: amd64, goamd64: v3, output: amd64 }
|
||||
- { goos: freebsd, goarch: amd64, goamd64: v1, output: amd64-v1 }
|
||||
- { goos: freebsd, goarch: amd64, goamd64: v2, output: amd64-v2 }
|
||||
- { goos: freebsd, goarch: amd64, goamd64: v3, output: amd64-v3 }
|
||||
- { goos: freebsd, goarch: arm64, output: arm64 }
|
||||
|
||||
- { goos: android, goarch: '386', ndk: i686-linux-android34, output: '386' }
|
||||
@@ -71,46 +83,54 @@ jobs:
|
||||
# Go 1.23 with special patch can work on Windows 7
|
||||
# https://github.com/MetaCubeX/go/commits/release-branch.go1.23/
|
||||
- { goos: windows, goarch: '386', output: '386-go123', goversion: '1.23' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v1, output: amd64-compatible-go123, goversion: '1.23' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v3, output: amd64-go123, goversion: '1.23' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v1, output: amd64-v1-go123, goversion: '1.23' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v2, output: amd64-v2-go123, goversion: '1.23' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v3, output: amd64-v3-go123, goversion: '1.23' }
|
||||
|
||||
# Go 1.22 with special patch can work on Windows 7
|
||||
# https://github.com/MetaCubeX/go/commits/release-branch.go1.22/
|
||||
- { goos: windows, goarch: '386', output: '386-go122', goversion: '1.22' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v1, output: amd64-compatible-go122, goversion: '1.22' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v3, output: amd64-go122, goversion: '1.22' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v1, output: amd64-v1-go122, goversion: '1.22' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v2, output: amd64-v2-go122, goversion: '1.22' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v3, output: amd64-v3-go122, goversion: '1.22' }
|
||||
|
||||
# Go 1.21 can revert commit `9e4385` to work on Windows 7
|
||||
# https://github.com/golang/go/issues/64622#issuecomment-1847475161
|
||||
# (OR we can just use golang1.21.4 which unneeded any patch)
|
||||
- { goos: windows, goarch: '386', output: '386-go121', goversion: '1.21' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v1, output: amd64-compatible-go121, goversion: '1.21' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v3, output: amd64-go121, goversion: '1.21' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v1, output: amd64-v1-go121, goversion: '1.21' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v2, output: amd64-v2-go121, goversion: '1.21' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v3, output: amd64-v3-go121, goversion: '1.21' }
|
||||
|
||||
# Go 1.20 is the last release that will run on any release of Windows 7, 8, Server 2008 and Server 2012. Go 1.21 will require at least Windows 10 or Server 2016.
|
||||
- { goos: windows, goarch: '386', output: '386-go120', goversion: '1.20' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v1, output: amd64-compatible-go120, goversion: '1.20' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v3, output: amd64-go120, goversion: '1.20' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v1, output: amd64-v1-go120, goversion: '1.20' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v2, output: amd64-v2-go120, goversion: '1.20' }
|
||||
- { goos: windows, goarch: amd64, goamd64: v3, output: amd64-v3-go120, goversion: '1.20' }
|
||||
|
||||
# Go 1.22 is the last release that will run on macOS 10.15 Catalina. Go 1.23 will require macOS 11 Big Sur or later.
|
||||
- { goos: darwin, goarch: arm64, output: arm64-go122, goversion: '1.22' }
|
||||
- { goos: darwin, goarch: amd64, goamd64: v1, output: amd64-compatible-go122, goversion: '1.22' }
|
||||
- { goos: darwin, goarch: amd64, goamd64: v3, output: amd64-go122, goversion: '1.22' }
|
||||
- { goos: darwin, goarch: amd64, goamd64: v1, output: amd64-v1-go122, goversion: '1.22' }
|
||||
- { goos: darwin, goarch: amd64, goamd64: v2, output: amd64-v2-go122, goversion: '1.22' }
|
||||
- { goos: darwin, goarch: amd64, goamd64: v3, output: amd64-v3-go122, goversion: '1.22' }
|
||||
|
||||
# Go 1.20 is the last release that will run on macOS 10.13 High Sierra or 10.14 Mojave. Go 1.21 will require macOS 10.15 Catalina or later.
|
||||
- { goos: darwin, goarch: arm64, output: arm64-go120, goversion: '1.20' }
|
||||
- { goos: darwin, goarch: amd64, goamd64: v1, output: amd64-compatible-go120, goversion: '1.20' }
|
||||
- { goos: darwin, goarch: amd64, goamd64: v3, output: amd64-go120, goversion: '1.20' }
|
||||
- { goos: darwin, goarch: amd64, goamd64: v1, output: amd64-v1-go120, goversion: '1.20' }
|
||||
- { goos: darwin, goarch: amd64, goamd64: v2, output: amd64-v2-go120, goversion: '1.20' }
|
||||
- { goos: darwin, goarch: amd64, goamd64: v3, output: amd64-v3-go120, goversion: '1.20' }
|
||||
|
||||
# Go 1.23 is the last release that requires Linux kernel version 2.6.32 or later. Go 1.24 will require Linux kernel version 3.2 or later.
|
||||
- { goos: linux, goarch: '386', output: '386-go123', goversion: '1.23' }
|
||||
- { goos: linux, goarch: amd64, goamd64: v1, output: amd64-compatible-go123, goversion: '1.23', test: test }
|
||||
- { goos: linux, goarch: amd64, goamd64: v3, output: amd64-go123, goversion: '1.23' }
|
||||
- { goos: linux, goarch: amd64, goamd64: v1, output: amd64-v1-go123, goversion: '1.23', test: test }
|
||||
- { goos: linux, goarch: amd64, goamd64: v2, output: amd64-v2-go123, goversion: '1.23' }
|
||||
- { goos: linux, goarch: amd64, goamd64: v3, output: amd64-v3-go123, goversion: '1.23' }
|
||||
|
||||
# only for test
|
||||
- { goos: linux, goarch: '386', output: '386-go120', goversion: '1.20' }
|
||||
- { goos: linux, goarch: amd64, goamd64: v1, output: amd64-compatible-go120, goversion: '1.20', test: test }
|
||||
- { goos: linux, goarch: amd64, goamd64: v3, output: amd64-go120, goversion: '1.20' }
|
||||
- { goos: linux, goarch: amd64, goamd64: v1, output: amd64-v1-go120, goversion: '1.20', test: test }
|
||||
- { goos: linux, goarch: amd64, goamd64: v2, output: amd64-v2-go120, goversion: '1.20' }
|
||||
- { goos: linux, goarch: amd64, goamd64: v3, output: amd64-v3-go120, goversion: '1.20' }
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
57
Makefile
57
Makefile
@@ -17,11 +17,19 @@ GOBUILD=CGO_ENABLED=0 go build -tags with_gvisor -trimpath -ldflags '-X "github.
|
||||
-w -s -buildid='
|
||||
|
||||
PLATFORM_LIST = \
|
||||
darwin-386 \
|
||||
darwin-amd64-compatible \
|
||||
darwin-amd64 \
|
||||
darwin-amd64-v1 \
|
||||
darwin-amd64-v2 \
|
||||
darwin-amd64-v3 \
|
||||
darwin-arm64 \
|
||||
linux-386 \
|
||||
linux-amd64-compatible \
|
||||
linux-amd64 \
|
||||
linux-amd64-v1 \
|
||||
linux-amd64-v2 \
|
||||
linux-amd64-v3 \
|
||||
linux-armv5 \
|
||||
linux-armv6 \
|
||||
linux-armv7 \
|
||||
@@ -43,37 +51,61 @@ WINDOWS_ARCH_LIST = \
|
||||
windows-386 \
|
||||
windows-amd64-compatible \
|
||||
windows-amd64 \
|
||||
windows-amd64-v1 \
|
||||
windows-amd64-v2 \
|
||||
windows-amd64-v3 \
|
||||
windows-arm64 \
|
||||
windows-arm32v7
|
||||
|
||||
all:linux-amd64 linux-arm64\
|
||||
darwin-amd64 darwin-arm64\
|
||||
windows-amd64 windows-arm64\
|
||||
all:linux-amd64-v3 linux-arm64\
|
||||
darwin-amd64-v3 darwin-arm64\
|
||||
windows-amd64-v3 windows-arm64\
|
||||
|
||||
|
||||
darwin-all: darwin-amd64 darwin-arm64
|
||||
darwin-all: darwin-amd64-v3 darwin-arm64
|
||||
|
||||
docker:
|
||||
GOAMD64=v1 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@
|
||||
|
||||
darwin-amd64:
|
||||
GOARCH=amd64 GOOS=darwin GOAMD64=v3 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@
|
||||
darwin-386:
|
||||
GOARCH=386 GOOS=darwin $(GOBUILD) -o $(BINDIR)/$(NAME)-$@
|
||||
|
||||
darwin-amd64-compatible:
|
||||
GOARCH=amd64 GOOS=darwin GOAMD64=v1 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@
|
||||
|
||||
darwin-amd64:
|
||||
GOARCH=amd64 GOOS=darwin GOAMD64=v3 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@
|
||||
|
||||
darwin-amd64-v1:
|
||||
GOARCH=amd64 GOOS=darwin GOAMD64=v1 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@
|
||||
|
||||
darwin-amd64-v2:
|
||||
GOARCH=amd64 GOOS=darwin GOAMD64=v2 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@
|
||||
|
||||
darwin-amd64-v3:
|
||||
GOARCH=amd64 GOOS=darwin GOAMD64=v3 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@
|
||||
|
||||
darwin-arm64:
|
||||
GOARCH=arm64 GOOS=darwin $(GOBUILD) -o $(BINDIR)/$(NAME)-$@
|
||||
|
||||
linux-386:
|
||||
GOARCH=386 GOOS=linux $(GOBUILD) -o $(BINDIR)/$(NAME)-$@
|
||||
|
||||
linux-amd64-compatible:
|
||||
GOARCH=amd64 GOOS=linux GOAMD64=v1 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@
|
||||
|
||||
linux-amd64:
|
||||
GOARCH=amd64 GOOS=linux GOAMD64=v3 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@
|
||||
|
||||
linux-amd64-compatible:
|
||||
linux-amd64-v1:
|
||||
GOARCH=amd64 GOOS=linux GOAMD64=v1 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@
|
||||
|
||||
linux-amd64-v2:
|
||||
GOARCH=amd64 GOOS=linux GOAMD64=v2 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@
|
||||
|
||||
linux-amd64-v3:
|
||||
GOARCH=amd64 GOOS=linux GOAMD64=v3 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@
|
||||
|
||||
linux-arm64:
|
||||
GOARCH=arm64 GOOS=linux $(GOBUILD) -o $(BINDIR)/$(NAME)-$@
|
||||
|
||||
@@ -125,12 +157,21 @@ freebsd-arm64:
|
||||
windows-386:
|
||||
GOARCH=386 GOOS=windows $(GOBUILD) -o $(BINDIR)/$(NAME)-$@.exe
|
||||
|
||||
windows-amd64-compatible:
|
||||
GOARCH=amd64 GOOS=windows GOAMD64=v1 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@.exe
|
||||
|
||||
windows-amd64:
|
||||
GOARCH=amd64 GOOS=windows GOAMD64=v3 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@.exe
|
||||
|
||||
windows-amd64-compatible:
|
||||
windows-amd64-v1:
|
||||
GOARCH=amd64 GOOS=windows GOAMD64=v1 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@.exe
|
||||
|
||||
windows-amd64-v2:
|
||||
GOARCH=amd64 GOOS=windows GOAMD64=v2 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@.exe
|
||||
|
||||
windows-amd64-v3:
|
||||
GOARCH=amd64 GOOS=windows GOAMD64=v3 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@.exe
|
||||
|
||||
windows-arm64:
|
||||
GOARCH=arm64 GOOS=windows $(GOBUILD) -o $(BINDIR)/$(NAME)-$@.exe
|
||||
|
||||
|
||||
@@ -14,10 +14,10 @@ import (
|
||||
"github.com/metacubex/mihomo/common/atomic"
|
||||
"github.com/metacubex/mihomo/common/queue"
|
||||
"github.com/metacubex/mihomo/common/utils"
|
||||
"github.com/metacubex/mihomo/common/xsync"
|
||||
"github.com/metacubex/mihomo/component/ca"
|
||||
C "github.com/metacubex/mihomo/constant"
|
||||
"github.com/metacubex/mihomo/log"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
)
|
||||
|
||||
var UnifiedDelay = atomic.NewBool(false)
|
||||
@@ -35,7 +35,7 @@ type Proxy struct {
|
||||
C.ProxyAdapter
|
||||
alive atomic.Bool
|
||||
history *queue.Queue[C.DelayHistory]
|
||||
extra *xsync.MapOf[string, *internalProxyState]
|
||||
extra xsync.Map[string, *internalProxyState]
|
||||
}
|
||||
|
||||
// Adapter implements C.Proxy
|
||||
@@ -293,7 +293,7 @@ func NewProxy(adapter C.ProxyAdapter) *Proxy {
|
||||
ProxyAdapter: adapter,
|
||||
history: queue.New[C.DelayHistory](defaultHistoriesNum),
|
||||
alive: atomic.NewBool(true),
|
||||
extra: xsync.NewMapOf[string, *internalProxyState]()}
|
||||
}
|
||||
}
|
||||
|
||||
func urlToMetadata(rawURL string) (addr C.Metadata, err error) {
|
||||
|
||||
@@ -108,6 +108,9 @@ func ParseProxyProvider(name string, mapping map[string]any) (types.ProxyProvide
|
||||
switch schema.Type {
|
||||
case "file":
|
||||
path := C.Path.Resolve(schema.Path)
|
||||
if !C.Path.IsSafePath(path) {
|
||||
return nil, C.Path.ErrNotSafePath(path)
|
||||
}
|
||||
vehicle = resource.NewFileVehicle(path)
|
||||
case "http":
|
||||
path := C.Path.GetPathByHash("proxies", schema.URL)
|
||||
|
||||
@@ -208,6 +208,9 @@ func ConvertsV2Ray(buf []byte) ([]map[string]any, error) {
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if decodedHost, err := tryDecodeBase64([]byte(urlVLess.Host)); err == nil {
|
||||
urlVLess.Host = string(decodedHost)
|
||||
}
|
||||
query := urlVLess.Query()
|
||||
vless := make(map[string]any, 20)
|
||||
err = handleVShareLink(names, urlVLess, scheme, vless)
|
||||
|
||||
19
common/maphash/common.go
Normal file
19
common/maphash/common.go
Normal file
@@ -0,0 +1,19 @@
|
||||
package maphash
|
||||
|
||||
import "hash/maphash"
|
||||
|
||||
type Seed = maphash.Seed
|
||||
|
||||
func MakeSeed() Seed {
|
||||
return maphash.MakeSeed()
|
||||
}
|
||||
|
||||
type Hash = maphash.Hash
|
||||
|
||||
func Bytes(seed Seed, b []byte) uint64 {
|
||||
return maphash.Bytes(seed, b)
|
||||
}
|
||||
|
||||
func String(seed Seed, s string) uint64 {
|
||||
return maphash.String(seed, s)
|
||||
}
|
||||
140
common/maphash/comparable_go120.go
Normal file
140
common/maphash/comparable_go120.go
Normal file
@@ -0,0 +1,140 @@
|
||||
//go:build !go1.24
|
||||
|
||||
package maphash
|
||||
|
||||
import "unsafe"
|
||||
|
||||
func Comparable[T comparable](s Seed, v T) uint64 {
|
||||
return comparableHash(*(*seedTyp)(unsafe.Pointer(&s)), v)
|
||||
}
|
||||
|
||||
func comparableHash[T comparable](seed seedTyp, v T) uint64 {
|
||||
s := seed.s
|
||||
var m map[T]struct{}
|
||||
mTyp := iTypeOf(m)
|
||||
var hasher func(unsafe.Pointer, uintptr) uintptr
|
||||
hasher = (*iMapType)(unsafe.Pointer(mTyp)).Hasher
|
||||
|
||||
p := escape(unsafe.Pointer(&v))
|
||||
|
||||
if ptrSize == 8 {
|
||||
return uint64(hasher(p, uintptr(s)))
|
||||
}
|
||||
lo := hasher(p, uintptr(s))
|
||||
hi := hasher(p, uintptr(s>>32))
|
||||
return uint64(hi)<<32 | uint64(lo)
|
||||
}
|
||||
|
||||
// WriteComparable adds x to the data hashed by h.
|
||||
func WriteComparable[T comparable](h *Hash, x T) {
|
||||
// writeComparable (not in purego mode) directly operates on h.state
|
||||
// without using h.buf. Mix in the buffer length so it won't
|
||||
// commute with a buffered write, which either changes h.n or changes
|
||||
// h.state.
|
||||
hash := (*hashTyp)(unsafe.Pointer(h))
|
||||
if hash.n != 0 {
|
||||
hash.state.s = comparableHash(hash.state, hash.n)
|
||||
}
|
||||
hash.state.s = comparableHash(hash.state, x)
|
||||
}
|
||||
|
||||
// go/src/hash/maphash/maphash.go
|
||||
type hashTyp struct {
|
||||
_ [0]func() // not comparable
|
||||
seed seedTyp // initial seed used for this hash
|
||||
state seedTyp // current hash of all flushed bytes
|
||||
buf [128]byte // unflushed byte buffer
|
||||
n int // number of unflushed bytes
|
||||
}
|
||||
|
||||
type seedTyp struct {
|
||||
s uint64
|
||||
}
|
||||
|
||||
type iTFlag uint8
|
||||
type iKind uint8
|
||||
type iNameOff int32
|
||||
|
||||
// TypeOff is the offset to a type from moduledata.types. See resolveTypeOff in runtime.
|
||||
type iTypeOff int32
|
||||
|
||||
type iType struct {
|
||||
Size_ uintptr
|
||||
PtrBytes uintptr // number of (prefix) bytes in the type that can contain pointers
|
||||
Hash uint32 // hash of type; avoids computation in hash tables
|
||||
TFlag iTFlag // extra type information flags
|
||||
Align_ uint8 // alignment of variable with this type
|
||||
FieldAlign_ uint8 // alignment of struct field with this type
|
||||
Kind_ iKind // enumeration for C
|
||||
// function for comparing objects of this type
|
||||
// (ptr to object A, ptr to object B) -> ==?
|
||||
Equal func(unsafe.Pointer, unsafe.Pointer) bool
|
||||
// GCData stores the GC type data for the garbage collector.
|
||||
// Normally, GCData points to a bitmask that describes the
|
||||
// ptr/nonptr fields of the type. The bitmask will have at
|
||||
// least PtrBytes/ptrSize bits.
|
||||
// If the TFlagGCMaskOnDemand bit is set, GCData is instead a
|
||||
// **byte and the pointer to the bitmask is one dereference away.
|
||||
// The runtime will build the bitmask if needed.
|
||||
// (See runtime/type.go:getGCMask.)
|
||||
// Note: multiple types may have the same value of GCData,
|
||||
// including when TFlagGCMaskOnDemand is set. The types will, of course,
|
||||
// have the same pointer layout (but not necessarily the same size).
|
||||
GCData *byte
|
||||
Str iNameOff // string form
|
||||
PtrToThis iTypeOff // type for pointer to this type, may be zero
|
||||
}
|
||||
|
||||
type iMapType struct {
|
||||
iType
|
||||
Key *iType
|
||||
Elem *iType
|
||||
Group *iType // internal type representing a slot group
|
||||
// function for hashing keys (ptr to key, seed) -> hash
|
||||
Hasher func(unsafe.Pointer, uintptr) uintptr
|
||||
}
|
||||
|
||||
func iTypeOf(a any) *iType {
|
||||
eface := *(*iEmptyInterface)(unsafe.Pointer(&a))
|
||||
// Types are either static (for compiler-created types) or
|
||||
// heap-allocated but always reachable (for reflection-created
|
||||
// types, held in the central map). So there is no need to
|
||||
// escape types. noescape here help avoid unnecessary escape
|
||||
// of v.
|
||||
return (*iType)(noescape(unsafe.Pointer(eface.Type)))
|
||||
}
|
||||
|
||||
type iEmptyInterface struct {
|
||||
Type *iType
|
||||
Data unsafe.Pointer
|
||||
}
|
||||
|
||||
// noescape hides a pointer from escape analysis. noescape is
|
||||
// the identity function but escape analysis doesn't think the
|
||||
// output depends on the input. noescape is inlined and currently
|
||||
// compiles down to zero instructions.
|
||||
// USE CAREFULLY!
|
||||
//
|
||||
// nolint:all
|
||||
//
|
||||
//go:nosplit
|
||||
//goland:noinspection ALL
|
||||
func noescape(p unsafe.Pointer) unsafe.Pointer {
|
||||
x := uintptr(p)
|
||||
return unsafe.Pointer(x ^ 0)
|
||||
}
|
||||
|
||||
var alwaysFalse bool
|
||||
var escapeSink any
|
||||
|
||||
// escape forces any pointers in x to escape to the heap.
|
||||
func escape[T any](x T) T {
|
||||
if alwaysFalse {
|
||||
escapeSink = x
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// ptrSize is the size of a pointer in bytes - unsafe.Sizeof(uintptr(0)) but as an ideal constant.
|
||||
// It is also the size of the machine's native word size (that is, 4 on 32-bit systems, 8 on 64-bit).
|
||||
const ptrSize = 4 << (^uintptr(0) >> 63)
|
||||
13
common/maphash/comparable_go124.go
Normal file
13
common/maphash/comparable_go124.go
Normal file
@@ -0,0 +1,13 @@
|
||||
//go:build go1.24
|
||||
|
||||
package maphash
|
||||
|
||||
import "hash/maphash"
|
||||
|
||||
func Comparable[T comparable](seed Seed, v T) uint64 {
|
||||
return maphash.Comparable(seed, v)
|
||||
}
|
||||
|
||||
func WriteComparable[T comparable](h *Hash, x T) {
|
||||
maphash.WriteComparable(h, x)
|
||||
}
|
||||
532
common/maphash/maphash_test.go
Normal file
532
common/maphash/maphash_test.go
Normal file
@@ -0,0 +1,532 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package maphash
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"hash"
|
||||
"math"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
rand "github.com/metacubex/randv2"
|
||||
)
|
||||
|
||||
func TestUnseededHash(t *testing.T) {
|
||||
m := map[uint64]struct{}{}
|
||||
for i := 0; i < 1000; i++ {
|
||||
h := new(Hash)
|
||||
m[h.Sum64()] = struct{}{}
|
||||
}
|
||||
if len(m) < 900 {
|
||||
t.Errorf("empty hash not sufficiently random: got %d, want 1000", len(m))
|
||||
}
|
||||
}
|
||||
|
||||
func TestSeededHash(t *testing.T) {
|
||||
s := MakeSeed()
|
||||
m := map[uint64]struct{}{}
|
||||
for i := 0; i < 1000; i++ {
|
||||
h := new(Hash)
|
||||
h.SetSeed(s)
|
||||
m[h.Sum64()] = struct{}{}
|
||||
}
|
||||
if len(m) != 1 {
|
||||
t.Errorf("seeded hash is random: got %d, want 1", len(m))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashGrouping(t *testing.T) {
|
||||
b := bytes.Repeat([]byte("foo"), 100)
|
||||
hh := make([]*Hash, 7)
|
||||
for i := range hh {
|
||||
hh[i] = new(Hash)
|
||||
}
|
||||
for _, h := range hh[1:] {
|
||||
h.SetSeed(hh[0].Seed())
|
||||
}
|
||||
hh[0].Write(b)
|
||||
hh[1].WriteString(string(b))
|
||||
|
||||
writeByte := func(h *Hash, b byte) {
|
||||
err := h.WriteByte(b)
|
||||
if err != nil {
|
||||
t.Fatalf("WriteByte: %v", err)
|
||||
}
|
||||
}
|
||||
writeSingleByte := func(h *Hash, b byte) {
|
||||
_, err := h.Write([]byte{b})
|
||||
if err != nil {
|
||||
t.Fatalf("Write single byte: %v", err)
|
||||
}
|
||||
}
|
||||
writeStringSingleByte := func(h *Hash, b byte) {
|
||||
_, err := h.WriteString(string([]byte{b}))
|
||||
if err != nil {
|
||||
t.Fatalf("WriteString single byte: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
for i, x := range b {
|
||||
writeByte(hh[2], x)
|
||||
writeSingleByte(hh[3], x)
|
||||
if i == 0 {
|
||||
writeByte(hh[4], x)
|
||||
} else {
|
||||
writeSingleByte(hh[4], x)
|
||||
}
|
||||
writeStringSingleByte(hh[5], x)
|
||||
if i == 0 {
|
||||
writeByte(hh[6], x)
|
||||
} else {
|
||||
writeStringSingleByte(hh[6], x)
|
||||
}
|
||||
}
|
||||
|
||||
sum := hh[0].Sum64()
|
||||
for i, h := range hh {
|
||||
if sum != h.Sum64() {
|
||||
t.Errorf("hash %d not identical to a single Write", i)
|
||||
}
|
||||
}
|
||||
|
||||
if sum1 := Bytes(hh[0].Seed(), b); sum1 != hh[0].Sum64() {
|
||||
t.Errorf("hash using Bytes not identical to a single Write")
|
||||
}
|
||||
|
||||
if sum1 := String(hh[0].Seed(), string(b)); sum1 != hh[0].Sum64() {
|
||||
t.Errorf("hash using String not identical to a single Write")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashBytesVsString(t *testing.T) {
|
||||
s := "foo"
|
||||
b := []byte(s)
|
||||
h1 := new(Hash)
|
||||
h2 := new(Hash)
|
||||
h2.SetSeed(h1.Seed())
|
||||
n1, err1 := h1.WriteString(s)
|
||||
if n1 != len(s) || err1 != nil {
|
||||
t.Fatalf("WriteString(s) = %d, %v, want %d, nil", n1, err1, len(s))
|
||||
}
|
||||
n2, err2 := h2.Write(b)
|
||||
if n2 != len(b) || err2 != nil {
|
||||
t.Fatalf("Write(b) = %d, %v, want %d, nil", n2, err2, len(b))
|
||||
}
|
||||
if h1.Sum64() != h2.Sum64() {
|
||||
t.Errorf("hash of string and bytes not identical")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashHighBytes(t *testing.T) {
|
||||
// See issue 34925.
|
||||
const N = 10
|
||||
m := map[uint64]struct{}{}
|
||||
for i := 0; i < N; i++ {
|
||||
h := new(Hash)
|
||||
h.WriteString("foo")
|
||||
m[h.Sum64()>>32] = struct{}{}
|
||||
}
|
||||
if len(m) < N/2 {
|
||||
t.Errorf("from %d seeds, wanted at least %d different hashes; got %d", N, N/2, len(m))
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepeat(t *testing.T) {
|
||||
h1 := new(Hash)
|
||||
h1.WriteString("testing")
|
||||
sum1 := h1.Sum64()
|
||||
|
||||
h1.Reset()
|
||||
h1.WriteString("testing")
|
||||
sum2 := h1.Sum64()
|
||||
|
||||
if sum1 != sum2 {
|
||||
t.Errorf("different sum after resetting: %#x != %#x", sum1, sum2)
|
||||
}
|
||||
|
||||
h2 := new(Hash)
|
||||
h2.SetSeed(h1.Seed())
|
||||
h2.WriteString("testing")
|
||||
sum3 := h2.Sum64()
|
||||
|
||||
if sum1 != sum3 {
|
||||
t.Errorf("different sum on the same seed: %#x != %#x", sum1, sum3)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSeedFromSum64(t *testing.T) {
|
||||
h1 := new(Hash)
|
||||
h1.WriteString("foo")
|
||||
x := h1.Sum64() // seed generated here
|
||||
h2 := new(Hash)
|
||||
h2.SetSeed(h1.Seed())
|
||||
h2.WriteString("foo")
|
||||
y := h2.Sum64()
|
||||
if x != y {
|
||||
t.Errorf("hashes don't match: want %x, got %x", x, y)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSeedFromSeed(t *testing.T) {
|
||||
h1 := new(Hash)
|
||||
h1.WriteString("foo")
|
||||
_ = h1.Seed() // seed generated here
|
||||
x := h1.Sum64()
|
||||
h2 := new(Hash)
|
||||
h2.SetSeed(h1.Seed())
|
||||
h2.WriteString("foo")
|
||||
y := h2.Sum64()
|
||||
if x != y {
|
||||
t.Errorf("hashes don't match: want %x, got %x", x, y)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSeedFromFlush(t *testing.T) {
|
||||
b := make([]byte, 65)
|
||||
h1 := new(Hash)
|
||||
h1.Write(b) // seed generated here
|
||||
x := h1.Sum64()
|
||||
h2 := new(Hash)
|
||||
h2.SetSeed(h1.Seed())
|
||||
h2.Write(b)
|
||||
y := h2.Sum64()
|
||||
if x != y {
|
||||
t.Errorf("hashes don't match: want %x, got %x", x, y)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSeedFromReset(t *testing.T) {
|
||||
h1 := new(Hash)
|
||||
h1.WriteString("foo")
|
||||
h1.Reset() // seed generated here
|
||||
h1.WriteString("foo")
|
||||
x := h1.Sum64()
|
||||
h2 := new(Hash)
|
||||
h2.SetSeed(h1.Seed())
|
||||
h2.WriteString("foo")
|
||||
y := h2.Sum64()
|
||||
if x != y {
|
||||
t.Errorf("hashes don't match: want %x, got %x", x, y)
|
||||
}
|
||||
}
|
||||
|
||||
func negativeZero[T float32 | float64]() T {
|
||||
var f T
|
||||
f = -f
|
||||
return f
|
||||
}
|
||||
|
||||
func TestComparable(t *testing.T) {
|
||||
testComparable(t, int64(2))
|
||||
testComparable(t, uint64(8))
|
||||
testComparable(t, uintptr(12))
|
||||
testComparable(t, any("s"))
|
||||
testComparable(t, "s")
|
||||
testComparable(t, true)
|
||||
testComparable(t, new(float64))
|
||||
testComparable(t, float64(9))
|
||||
testComparable(t, complex128(9i+1))
|
||||
testComparable(t, struct{}{})
|
||||
testComparable(t, struct {
|
||||
i int
|
||||
u uint
|
||||
b bool
|
||||
f float64
|
||||
p *int
|
||||
a any
|
||||
}{i: 9, u: 1, b: true, f: 9.9, p: new(int), a: 1})
|
||||
type S struct {
|
||||
s string
|
||||
}
|
||||
s1 := S{s: heapStr(t)}
|
||||
s2 := S{s: heapStr(t)}
|
||||
if unsafe.StringData(s1.s) == unsafe.StringData(s2.s) {
|
||||
t.Fatalf("unexpected two heapStr ptr equal")
|
||||
}
|
||||
if s1.s != s2.s {
|
||||
t.Fatalf("unexpected two heapStr value not equal")
|
||||
}
|
||||
testComparable(t, s1, s2)
|
||||
testComparable(t, s1.s, s2.s)
|
||||
testComparable(t, float32(0), negativeZero[float32]())
|
||||
testComparable(t, float64(0), negativeZero[float64]())
|
||||
testComparableNoEqual(t, math.NaN(), math.NaN())
|
||||
testComparableNoEqual(t, [2]string{"a", ""}, [2]string{"", "a"})
|
||||
testComparableNoEqual(t, struct{ a, b string }{"foo", ""}, struct{ a, b string }{"", "foo"})
|
||||
testComparableNoEqual(t, struct{ a, b any }{int(0), struct{}{}}, struct{ a, b any }{struct{}{}, int(0)})
|
||||
}
|
||||
|
||||
func testComparableNoEqual[T comparable](t *testing.T, v1, v2 T) {
|
||||
seed := MakeSeed()
|
||||
if Comparable(seed, v1) == Comparable(seed, v2) {
|
||||
t.Fatalf("Comparable(seed, %v) == Comparable(seed, %v)", v1, v2)
|
||||
}
|
||||
}
|
||||
|
||||
var heapStrValue = []byte("aTestString")
|
||||
|
||||
func heapStr(t *testing.T) string {
|
||||
return string(heapStrValue)
|
||||
}
|
||||
|
||||
func testComparable[T comparable](t *testing.T, v T, v2 ...T) {
|
||||
t.Run(TypeFor[T]().String(), func(t *testing.T) {
|
||||
var a, b T = v, v
|
||||
if len(v2) != 0 {
|
||||
b = v2[0]
|
||||
}
|
||||
var pa *T = &a
|
||||
seed := MakeSeed()
|
||||
if Comparable(seed, a) != Comparable(seed, b) {
|
||||
t.Fatalf("Comparable(seed, %v) != Comparable(seed, %v)", a, b)
|
||||
}
|
||||
old := Comparable(seed, pa)
|
||||
stackGrow(8192)
|
||||
new := Comparable(seed, pa)
|
||||
if old != new {
|
||||
t.Fatal("Comparable(seed, ptr) != Comparable(seed, ptr)")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var use byte
|
||||
|
||||
//go:noinline
|
||||
func stackGrow(dep int) {
|
||||
if dep == 0 {
|
||||
return
|
||||
}
|
||||
var local [1024]byte
|
||||
// make sure local is allocated on the stack.
|
||||
local[rand.Uint64()%1024] = byte(rand.Uint64())
|
||||
use = local[rand.Uint64()%1024]
|
||||
stackGrow(dep - 1)
|
||||
}
|
||||
|
||||
func TestWriteComparable(t *testing.T) {
|
||||
testWriteComparable(t, int64(2))
|
||||
testWriteComparable(t, uint64(8))
|
||||
testWriteComparable(t, uintptr(12))
|
||||
testWriteComparable(t, any("s"))
|
||||
testWriteComparable(t, "s")
|
||||
testComparable(t, true)
|
||||
testWriteComparable(t, new(float64))
|
||||
testWriteComparable(t, float64(9))
|
||||
testWriteComparable(t, complex128(9i+1))
|
||||
testWriteComparable(t, struct{}{})
|
||||
testWriteComparable(t, struct {
|
||||
i int
|
||||
u uint
|
||||
b bool
|
||||
f float64
|
||||
p *int
|
||||
a any
|
||||
}{i: 9, u: 1, b: true, f: 9.9, p: new(int), a: 1})
|
||||
type S struct {
|
||||
s string
|
||||
}
|
||||
s1 := S{s: heapStr(t)}
|
||||
s2 := S{s: heapStr(t)}
|
||||
if unsafe.StringData(s1.s) == unsafe.StringData(s2.s) {
|
||||
t.Fatalf("unexpected two heapStr ptr equal")
|
||||
}
|
||||
if s1.s != s2.s {
|
||||
t.Fatalf("unexpected two heapStr value not equal")
|
||||
}
|
||||
testWriteComparable(t, s1, s2)
|
||||
testWriteComparable(t, s1.s, s2.s)
|
||||
testWriteComparable(t, float32(0), negativeZero[float32]())
|
||||
testWriteComparable(t, float64(0), negativeZero[float64]())
|
||||
testWriteComparableNoEqual(t, math.NaN(), math.NaN())
|
||||
testWriteComparableNoEqual(t, [2]string{"a", ""}, [2]string{"", "a"})
|
||||
testWriteComparableNoEqual(t, struct{ a, b string }{"foo", ""}, struct{ a, b string }{"", "foo"})
|
||||
testWriteComparableNoEqual(t, struct{ a, b any }{int(0), struct{}{}}, struct{ a, b any }{struct{}{}, int(0)})
|
||||
}
|
||||
|
||||
// testWriteComparableNoEqual asserts that WriteComparable over v1 and
// v2 with the same seed yields different sums — used for values that
// are not equal (or, like NaN, not equal to themselves) and so should
// not collide in these chosen cases.
func testWriteComparableNoEqual[T comparable](t *testing.T, v1, v2 T) {
	seed := MakeSeed()
	h1 := Hash{}
	h2 := Hash{}
	// Write the same seed directly into both Hash values via unsafe
	// (NOTE(review): presumably to seed without any side effects of
	// SetSeed — confirm against the Hash implementation).
	*(*Seed)(unsafe.Pointer(&h1)), *(*Seed)(unsafe.Pointer(&h2)) = seed, seed
	WriteComparable(&h1, v1)
	WriteComparable(&h2, v2)
	if h1.Sum64() == h2.Sum64() {
		t.Fatalf("WriteComparable(seed, %v) == WriteComparable(seed, %v)", v1, v2)
	}

}
|
||||
|
||||
// testWriteComparable checks that WriteComparable is deterministic for
// a fixed seed: hashing v against itself (or against v2[0], expected
// to be equal to v) must give equal sums. It then hashes a pointer,
// grows the stack, and hashes the pointer again, expecting the sum to
// be unchanged — intended to catch hashes that depend on a raw stack
// address rather than stable pointer identity.
func testWriteComparable[T comparable](t *testing.T, v T, v2 ...T) {
	t.Run(TypeFor[T]().String(), func(t *testing.T) {
		var a, b T = v, v
		if len(v2) != 0 {
			b = v2[0]
		}
		var pa *T = &a
		h1 := Hash{}
		h2 := Hash{}
		// Seed h1 directly through unsafe, then copy its full state to
		// h2 so both hashers start identical.
		*(*Seed)(unsafe.Pointer(&h1)) = MakeSeed()
		h2 = h1
		WriteComparable(&h1, a)
		WriteComparable(&h2, b)
		if h1.Sum64() != h2.Sum64() {
			t.Fatalf("WriteComparable(h, %v) != WriteComparable(h, %v)", a, b)
		}
		WriteComparable(&h1, pa)
		old := h1.Sum64()
		// Force the goroutine stack to grow (and likely move).
		stackGrow(8192)
		WriteComparable(&h2, pa)
		new := h2.Sum64()
		if old != new {
			t.Fatal("WriteComparable(seed, ptr) != WriteComparable(seed, ptr)")
		}
	})
}
|
||||
|
||||
func TestComparableShouldPanic(t *testing.T) {
|
||||
s := []byte("s")
|
||||
a := any(s)
|
||||
defer func() {
|
||||
e := recover()
|
||||
err, ok := e.(error)
|
||||
if !ok {
|
||||
t.Fatalf("Comaparable(any([]byte)) should panic")
|
||||
}
|
||||
want := "hash of unhashable type []uint8"
|
||||
if s := err.Error(); !strings.Contains(s, want) {
|
||||
t.Fatalf("want %s, got %s", want, s)
|
||||
}
|
||||
}()
|
||||
Comparable(MakeSeed(), a)
|
||||
}
|
||||
|
||||
func TestWriteComparableNoncommute(t *testing.T) {
|
||||
seed := MakeSeed()
|
||||
var h1, h2 Hash
|
||||
h1.SetSeed(seed)
|
||||
h2.SetSeed(seed)
|
||||
|
||||
h1.WriteString("abc")
|
||||
WriteComparable(&h1, 123)
|
||||
WriteComparable(&h2, 123)
|
||||
h2.WriteString("abc")
|
||||
|
||||
if h1.Sum64() == h2.Sum64() {
|
||||
t.Errorf("WriteComparable and WriteString unexpectedly commute")
|
||||
}
|
||||
}
|
||||
|
||||
// TestComparableAllocations checks that Comparable hashes a string and
// a string-carrying struct with zero heap allocations. The inputs are
// built from a heap-backed string (heapStr) so the compiler cannot
// constant-fold them away.
func TestComparableAllocations(t *testing.T) {
	// Skipped under this fork's minimum toolchain; the zero-alloc
	// guarantee holds only on newer Go versions.
	t.Skip("test broken in old golang version")
	seed := MakeSeed()
	x := heapStr(t)
	allocs := testing.AllocsPerRun(10, func() {
		s := "s" + x
		Comparable(seed, s)
	})
	if allocs > 0 {
		t.Errorf("got %v allocs, want 0", allocs)
	}

	type S struct {
		a int
		b string
	}
	allocs = testing.AllocsPerRun(10, func() {
		s := S{123, "s" + x}
		Comparable(seed, s)
	})
	if allocs > 0 {
		t.Errorf("got %v allocs, want 0", allocs)
	}
}
|
||||
|
||||
// Make sure a Hash implements the hash.Hash and hash.Hash64 interfaces.
// These are compile-time assertions only; they generate no code.
var _ hash.Hash = &Hash{}

var _ hash.Hash64 = &Hash{}
|
||||
|
||||
// benchmarkSize benchmarks the three hashing entry points — streaming
// Write, one-shot Bytes, and one-shot String — over an all-zero input
// of the given size, reporting throughput via SetBytes.
func benchmarkSize(b *testing.B, size int) {
	h := &Hash{}
	buf := make([]byte, size)
	s := string(buf)

	b.Run("Write", func(b *testing.B) {
		b.SetBytes(int64(size))
		for i := 0; i < b.N; i++ {
			h.Reset()
			h.Write(buf)
			h.Sum64()
		}
	})

	b.Run("Bytes", func(b *testing.B) {
		b.SetBytes(int64(size))
		seed := h.Seed()
		for i := 0; i < b.N; i++ {
			Bytes(seed, buf)
		}
	})

	b.Run("String", func(b *testing.B) {
		b.SetBytes(int64(size))
		seed := h.Seed()
		for i := 0; i < b.N; i++ {
			String(seed, s)
		}
	})
}
|
||||
|
||||
func BenchmarkHash(b *testing.B) {
|
||||
sizes := []int{4, 8, 16, 32, 64, 256, 320, 1024, 4096, 16384}
|
||||
for _, size := range sizes {
|
||||
b.Run(fmt.Sprint("n=", size), func(b *testing.B) {
|
||||
benchmarkSize(b, size)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// benchmarkComparable benchmarks Comparable over a single value of
// type T, naming the sub-benchmark after the concrete type.
func benchmarkComparable[T comparable](b *testing.B, v T) {
	b.Run(TypeFor[T]().String(), func(b *testing.B) {
		seed := MakeSeed()
		for i := 0; i < b.N; i++ {
			Comparable(seed, v)
		}
	})
}
|
||||
|
||||
// BenchmarkComparable benchmarks Comparable across the same value
// kinds TestWriteComparable covers: scalars, strings, interfaces,
// pointers, complex numbers, and a mixed struct.
func BenchmarkComparable(b *testing.B) {
	type testStruct struct {
		i int
		u uint
		b bool
		f float64
		p *int
		a any
	}
	benchmarkComparable(b, int64(2))
	benchmarkComparable(b, uint64(8))
	benchmarkComparable(b, uintptr(12))
	benchmarkComparable(b, any("s"))
	benchmarkComparable(b, "s")
	benchmarkComparable(b, true)
	benchmarkComparable(b, new(float64))
	benchmarkComparable(b, float64(9))
	benchmarkComparable(b, complex128(9i+1))
	benchmarkComparable(b, struct{}{})
	benchmarkComparable(b, testStruct{i: 9, u: 1, b: true, f: 9.9, p: new(int), a: 1})
}
|
||||
|
||||
// TypeFor returns the [Type] that represents the type argument T.
|
||||
func TypeFor[T any]() reflect.Type {
|
||||
var v T
|
||||
if t := reflect.TypeOf(v); t != nil {
|
||||
return t // optimize for T being a non-interface kind
|
||||
}
|
||||
return reflect.TypeOf((*T)(nil)).Elem() // only for an interface kind
|
||||
}
|
||||
926
common/xsync/map.go
Normal file
926
common/xsync/map.go
Normal file
@@ -0,0 +1,926 @@
|
||||
package xsync
|
||||
|
||||
// copy and modified from https://github.com/puzpuzpuz/xsync/blob/v4.1.0/map.go
|
||||
// which is licensed under Apache v2.
|
||||
//
|
||||
// mihomo modified:
|
||||
// 1. parallel Map resize has been removed to decrease the memory using.
|
||||
// 2. the zero Map is ready for use.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"math/bits"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
|
||||
"github.com/metacubex/mihomo/common/maphash"
|
||||
)
|
||||
|
||||
const (
	// number of Map entries per bucket; 5 entries lead to size of 64B
	// (one cache line) on 64-bit machines
	entriesPerMapBucket = 5
	// threshold fraction of table occupation to start a table shrinking
	// when deleting the last entry in a bucket chain
	mapShrinkFraction = 128
	// map load factor to trigger a table resize during insertion;
	// a map holds up to mapLoadFactor*entriesPerMapBucket*mapTableLen
	// key-value pairs (this is a soft limit)
	mapLoadFactor = 0.75
	// minimal table size, i.e. number of buckets; thus, minimal map
	// capacity can be calculated as entriesPerMapBucket*defaultMinMapTableLen
	defaultMinMapTableLen = 32
	// minimum counter stripes to use
	minMapCounterLen = 8
	// maximum counter stripes to use; stands for around 4KB of memory
	maxMapCounterLen = 32
	// defaultMeta is the meta word of an all-empty bucket: every meta
	// byte set to emptyMetaSlot (0x80).
	defaultMeta uint64 = 0x8080808080808080
	// metaMask selects the low five meta bytes — one per bucket entry.
	metaMask uint64 = 0xffffffffff
	// defaultMetaMasked is defaultMeta restricted to the five used bytes.
	defaultMetaMasked uint64 = defaultMeta & metaMask
	// emptyMetaSlot marks an unoccupied entry slot; its high bit
	// distinguishes it from any h2 value (h2 is only 7 bits).
	emptyMetaSlot uint8 = 0x80
)
|
||||
|
||||
// mapResizeHint tells resize which kind of table transition is requested.
type mapResizeHint int

const (
	// mapGrowHint requests doubling the table (triggered on insert
	// when the load factor is exceeded).
	mapGrowHint mapResizeHint = 0
	// mapShrinkHint requests halving the table (triggered when a
	// delete empties a bucket chain).
	mapShrinkHint mapResizeHint = 1
	// mapClearHint requests resetting to the minimal table (Clear).
	mapClearHint mapResizeHint = 2
)
|
||||
|
||||
// ComputeOp tells Compute (and the other doCompute-based methods) what
// to do with an entry after the user-supplied lambda has run.
type ComputeOp int

const (
	// CancelOp signals to Compute to not do anything as a result
	// of executing the lambda. If the entry was not present in
	// the map, nothing happens, and if it was present, the
	// returned value is ignored.
	CancelOp ComputeOp = iota
	// UpdateOp signals to Compute to update the entry to the
	// value returned by the lambda, creating it if necessary.
	UpdateOp
	// DeleteOp signals to Compute to always delete the entry
	// from the map.
	DeleteOp
)
|
||||
|
||||
// loadOp selects doCompute's lock-free pre-read behavior before it
// takes the bucket lock.
type loadOp int

const (
	// noLoadOp: no pre-read; go straight to the locked path.
	noLoadOp loadOp = iota
	// loadOrComputeOp: return an existing value without locking.
	loadOrComputeOp
	// loadAndDeleteOp: report a miss (zero value) without locking.
	loadAndDeleteOp
)
|
||||
|
||||
// Map is like a Go map[K]V but is safe for concurrent
// use by multiple goroutines without additional locking or
// coordination. It follows the interface of sync.Map with
// a number of valuable extensions like Compute or Size.
//
// A Map must not be copied after first use.
//
// Map uses a modified version of Cache-Line Hash Table (CLHT)
// data structure: https://github.com/LPD-EPFL/CLHT
//
// CLHT is built around idea to organize the hash table in
// cache-line-sized buckets, so that on all modern CPUs update
// operations complete with at most one cache-line transfer.
// Also, Get operations involve no write to memory, as well as no
// mutexes or any other sort of locks. Due to this design, in all
// considered scenarios Map outperforms sync.Map.
//
// Map also borrows ideas from Java's j.u.c.ConcurrentHashMap
// (immutable K/V pair structs instead of atomic snapshots)
// and C++'s absl::flat_hash_map (meta memory and SWAR-based
// lookups).
//
// mihomo modification: the zero Map is ready for use (lazy init via
// initOnce).
type Map[K comparable, V any] struct {
	initOnce     sync.Once    // guards lazy init so the zero Map is usable
	totalGrowths atomic.Int64 // number of grow resizes (diagnostics)
	totalShrinks atomic.Int64 // number of shrink resizes (diagnostics)
	resizing     atomic.Bool  // resize in progress flag
	resizeMu     sync.Mutex   // only used along with resizeCond
	resizeCond   sync.Cond    // used to wake up resize waiters (concurrent modifications)
	table        atomic.Pointer[mapTable[K, V]]
	minTableLen  int  // bucket count the table never shrinks below
	growOnly     bool // if set, the table never shrinks (except via Clear)
}
|
||||
|
||||
// mapTable is one fixed-size generation of the hash table; a resize
// allocates a fresh mapTable and copies entries into it.
type mapTable[K comparable, V any] struct {
	buckets []bucketPadded[K, V]
	// striped counter for number of table entries;
	// used to determine if a table shrinking is needed
	// occupies min(buckets_memory/1024, 64KB) of memory
	size []counterStripe
	// per-table hash seed; a new one is drawn for every table so the
	// bucket distribution is re-randomized on resize
	seed maphash.Seed
}

// counterStripe is one shard of the striped entry counter.
type counterStripe struct {
	c int64
	// Padding to prevent false sharing.
	_ [cacheLineSize - 8]byte
}

// bucketPadded is a CL-sized map bucket holding up to
// entriesPerMapBucket entries.
type bucketPadded[K comparable, V any] struct {
	//lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs
	pad [cacheLineSize - unsafe.Sizeof(bucket[K, V]{})]byte
	bucket[K, V]
}

// bucket is the unpadded bucket payload: a SWAR meta word (one byte
// per entry slot), the entry pointers, an overflow chain link, and a
// mutex (only the root bucket's mutex of a chain is locked).
type bucket[K comparable, V any] struct {
	meta    atomic.Uint64
	entries [entriesPerMapBucket]atomic.Pointer[entry[K, V]] // *entry
	next    atomic.Pointer[bucketPadded[K, V]]               // *bucketPadded
	mu      sync.Mutex
}

// entry is an immutable map entry.
type entry[K comparable, V any] struct {
	key   K
	value V
}
|
||||
|
||||
// MapConfig defines configurable Map options.
type MapConfig struct {
	sizeHint int  // expected number of entries (see WithPresize)
	growOnly bool // disable table shrinking (see WithGrowOnly)
}
|
||||
|
||||
// WithPresize configures new Map instance with capacity enough
|
||||
// to hold sizeHint entries. The capacity is treated as the minimal
|
||||
// capacity meaning that the underlying hash table will never shrink
|
||||
// to a smaller capacity. If sizeHint is zero or negative, the value
|
||||
// is ignored.
|
||||
func WithPresize(sizeHint int) func(*MapConfig) {
|
||||
return func(c *MapConfig) {
|
||||
c.sizeHint = sizeHint
|
||||
}
|
||||
}
|
||||
|
||||
// WithGrowOnly configures new Map instance to be grow-only.
|
||||
// This means that the underlying hash table grows in capacity when
|
||||
// new keys are added, but does not shrink when keys are deleted.
|
||||
// The only exception to this rule is the Clear method which
|
||||
// shrinks the hash table back to the initial capacity.
|
||||
func WithGrowOnly() func(*MapConfig) {
|
||||
return func(c *MapConfig) {
|
||||
c.growOnly = true
|
||||
}
|
||||
}
|
||||
|
||||
// NewMap creates a new Map instance configured with the given
|
||||
// options.
|
||||
func NewMap[K comparable, V any](options ...func(*MapConfig)) *Map[K, V] {
|
||||
c := &MapConfig{}
|
||||
for _, o := range options {
|
||||
o(c)
|
||||
}
|
||||
|
||||
m := &Map[K, V]{}
|
||||
if c.sizeHint > defaultMinMapTableLen*entriesPerMapBucket {
|
||||
tableLen := nextPowOf2(uint32((float64(c.sizeHint) / entriesPerMapBucket) / mapLoadFactor))
|
||||
m.minTableLen = int(tableLen)
|
||||
}
|
||||
m.growOnly = c.growOnly
|
||||
return m
|
||||
}
|
||||
|
||||
// init lazily initializes the Map on first use (mihomo modification so
// the zero Map is usable without NewMap). It is invoked exactly once
// via initOnce.Do from every public method.
func (m *Map[K, V]) init() {
	if m.minTableLen == 0 {
		m.minTableLen = defaultMinMapTableLen
	}
	m.resizeCond = *sync.NewCond(&m.resizeMu)
	table := newMapTable[K, V](m.minTableLen)
	// Record the actual bucket count of the initial table as the
	// minimum the shrink logic compares against.
	m.minTableLen = len(table.buckets)
	m.table.Store(table)
}
|
||||
|
||||
func newMapTable[K comparable, V any](minTableLen int) *mapTable[K, V] {
|
||||
buckets := make([]bucketPadded[K, V], minTableLen)
|
||||
for i := range buckets {
|
||||
buckets[i].meta.Store(defaultMeta)
|
||||
}
|
||||
counterLen := minTableLen >> 10
|
||||
if counterLen < minMapCounterLen {
|
||||
counterLen = minMapCounterLen
|
||||
} else if counterLen > maxMapCounterLen {
|
||||
counterLen = maxMapCounterLen
|
||||
}
|
||||
counter := make([]counterStripe, counterLen)
|
||||
t := &mapTable[K, V]{
|
||||
buckets: buckets,
|
||||
size: counter,
|
||||
seed: maphash.MakeSeed(),
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// ToPlainMap returns a native map with a copy of xsync Map's
|
||||
// contents. The copied xsync Map should not be modified while
|
||||
// this call is made. If the copied Map is modified, the copying
|
||||
// behavior is the same as in the Range method.
|
||||
func ToPlainMap[K comparable, V any](m *Map[K, V]) map[K]V {
|
||||
pm := make(map[K]V)
|
||||
if m != nil {
|
||||
m.Range(func(key K, value V) bool {
|
||||
pm[key] = value
|
||||
return true
|
||||
})
|
||||
}
|
||||
return pm
|
||||
}
|
||||
|
||||
// Load returns the value stored in the map for a key, or zero value
// of type V if no value is present.
// The ok result indicates whether value was found in the map.
//
// The lookup is lock-free: it walks the bucket chain, using the SWAR
// meta word to find slots whose 7-bit h2 matches, then confirms each
// candidate with a full key comparison.
func (m *Map[K, V]) Load(key K) (value V, ok bool) {
	m.initOnce.Do(m.init)
	table := m.table.Load()
	hash := maphash.Comparable(table.seed, key)
	h1 := h1(hash)
	h2w := broadcast(h2(hash))
	// Root bucket index: low bits of h1 (table length is a power of two).
	bidx := uint64(len(table.buckets)-1) & h1
	b := &table.buckets[bidx]
	for {
		metaw := b.meta.Load()
		// markedw has a marker bit set in every byte whose meta byte
		// equals h2 — the candidate slots for this key.
		markedw := markZeroBytes(metaw^h2w) & metaMask
		for markedw != 0 {
			idx := firstMarkedByteIndex(markedw)
			e := b.entries[idx].Load()
			if e != nil {
				if e.key == key {
					return e.value, true
				}
			}
			// Clear the lowest marked byte and keep scanning.
			markedw &= markedw - 1
		}
		b = b.next.Load()
		if b == nil {
			// End of chain: miss.
			return
		}
	}
}
|
||||
|
||||
// Store sets the value for a key.
|
||||
func (m *Map[K, V]) Store(key K, value V) {
|
||||
m.doCompute(
|
||||
key,
|
||||
func(V, bool) (V, ComputeOp) {
|
||||
return value, UpdateOp
|
||||
},
|
||||
noLoadOp,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// LoadOrStore returns the existing value for the key if present.
|
||||
// Otherwise, it stores and returns the given value.
|
||||
// The loaded result is true if the value was loaded, false if stored.
|
||||
func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
|
||||
return m.doCompute(
|
||||
key,
|
||||
func(oldValue V, loaded bool) (V, ComputeOp) {
|
||||
if loaded {
|
||||
return oldValue, CancelOp
|
||||
}
|
||||
return value, UpdateOp
|
||||
},
|
||||
loadOrComputeOp,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// LoadAndStore returns the existing value for the key if present,
|
||||
// while setting the new value for the key.
|
||||
// It stores the new value and returns the existing one, if present.
|
||||
// The loaded result is true if the existing value was loaded,
|
||||
// false otherwise.
|
||||
func (m *Map[K, V]) LoadAndStore(key K, value V) (actual V, loaded bool) {
|
||||
return m.doCompute(
|
||||
key,
|
||||
func(V, bool) (V, ComputeOp) {
|
||||
return value, UpdateOp
|
||||
},
|
||||
noLoadOp,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// LoadOrCompute returns the existing value for the key if
|
||||
// present. Otherwise, it tries to compute the value using the
|
||||
// provided function and, if successful, stores and returns
|
||||
// the computed value. The loaded result is true if the value was
|
||||
// loaded, or false if computed. If valueFn returns true as the
|
||||
// cancel value, the computation is cancelled and the zero value
|
||||
// for type V is returned.
|
||||
//
|
||||
// This call locks a hash table bucket while the compute function
|
||||
// is executed. It means that modifications on other entries in
|
||||
// the bucket will be blocked until the valueFn executes. Consider
|
||||
// this when the function includes long-running operations.
|
||||
func (m *Map[K, V]) LoadOrCompute(
|
||||
key K,
|
||||
valueFn func() (newValue V, cancel bool),
|
||||
) (value V, loaded bool) {
|
||||
return m.doCompute(
|
||||
key,
|
||||
func(oldValue V, loaded bool) (V, ComputeOp) {
|
||||
if loaded {
|
||||
return oldValue, CancelOp
|
||||
}
|
||||
newValue, c := valueFn()
|
||||
if !c {
|
||||
return newValue, UpdateOp
|
||||
}
|
||||
return oldValue, CancelOp
|
||||
},
|
||||
loadOrComputeOp,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// Compute either sets the computed new value for the key,
// deletes the value for the key, or does nothing, based on
// the returned [ComputeOp]. When the op returned by valueFn
// is [UpdateOp], the value is updated to the new value. If
// it is [DeleteOp], the entry is removed from the map
// altogether. And finally, if the op is [CancelOp] then the
// entry is left as-is. In other words, if it did not already
// exist, it is not created, and if it did exist, it is not
// updated. This is useful to synchronously execute some
// operation on the value without incurring the cost of
// updating the map every time. The ok result indicates
// whether the entry is present in the map after the compute
// operation. The actual result contains the value of the map
// if a corresponding entry is present, or the zero value
// otherwise. See the example for a few use cases.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
// the bucket will be blocked until the valueFn executes. Consider
// this when the function includes long-running operations.
func (m *Map[K, V]) Compute(
	key K,
	valueFn func(oldValue V, loaded bool) (newValue V, op ComputeOp),
) (actual V, ok bool) {
	// computeOnly=true makes doCompute return the new value and a
	// presence flag instead of the old value.
	return m.doCompute(key, valueFn, noLoadOp, true)
}
|
||||
|
||||
// LoadAndDelete deletes the value for a key, returning the previous
|
||||
// value if any. The loaded result reports whether the key was
|
||||
// present.
|
||||
func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
|
||||
return m.doCompute(
|
||||
key,
|
||||
func(value V, loaded bool) (V, ComputeOp) {
|
||||
return value, DeleteOp
|
||||
},
|
||||
loadAndDeleteOp,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// Delete deletes the value for a key.
// It is LoadAndDelete with the returned value discarded.
func (m *Map[K, V]) Delete(key K) {
	m.LoadAndDelete(key)
}
|
||||
|
||||
// doCompute is the single write path behind Store, LoadOrStore,
// LoadAndStore, LoadOrCompute, Compute, LoadAndDelete and Delete.
// It hashes the key, optionally performs a lock-free pre-read
// (selected by loadOp), then locks the root bucket of the key's chain
// and applies valueFn's decision: update, delete, or cancel.
// computeOnly selects whether the new value (Compute) or the old value
// (LoadAndStore-style callers) is returned. The whole attempt restarts
// whenever a resize is running or has replaced the table.
func (m *Map[K, V]) doCompute(
	key K,
	valueFn func(oldValue V, loaded bool) (V, ComputeOp),
	loadOp loadOp,
	computeOnly bool,
) (V, bool) {
	m.initOnce.Do(m.init)
	for {
	compute_attempt:
		var (
			emptyb   *bucketPadded[K, V] // first bucket in the chain with a free slot
			emptyidx int                 // index of that free slot
		)
		table := m.table.Load()
		tableLen := len(table.buckets)
		hash := maphash.Comparable(table.seed, key)
		h1 := h1(hash)
		h2 := h2(hash)
		h2w := broadcast(h2)
		bidx := uint64(len(table.buckets)-1) & h1
		rootb := &table.buckets[bidx]

		// Optional lock-free pre-read: LoadOrStore/LoadOrCompute can
		// return an existing value without locking, and LoadAndDelete
		// can report a miss without locking.
		if loadOp != noLoadOp {
			b := rootb
		load:
			for {
				metaw := b.meta.Load()
				markedw := markZeroBytes(metaw^h2w) & metaMask
				for markedw != 0 {
					idx := firstMarkedByteIndex(markedw)
					e := b.entries[idx].Load()
					if e != nil {
						if e.key == key {
							if loadOp == loadOrComputeOp {
								return e.value, true
							}
							break load
						}
					}
					markedw &= markedw - 1
				}
				b = b.next.Load()
				if b == nil {
					if loadOp == loadAndDeleteOp {
						return *new(V), false
					}
					break load
				}
			}
		}

		rootb.mu.Lock()
		// The following two checks must go in reverse to what's
		// in the resize method.
		if m.resizeInProgress() {
			// Resize is in progress. Wait, then go for another attempt.
			rootb.mu.Unlock()
			m.waitForResize()
			goto compute_attempt
		}
		if m.newerTableExists(table) {
			// Someone resized the table. Go for another attempt.
			rootb.mu.Unlock()
			goto compute_attempt
		}
		b := rootb
		for {
			metaw := b.meta.Load()
			markedw := markZeroBytes(metaw^h2w) & metaMask
			for markedw != 0 {
				idx := firstMarkedByteIndex(markedw)
				e := b.entries[idx].Load()
				if e != nil {
					if e.key == key {
						// In-place update/delete.
						// We get a copy of the value via an interface{} on each call,
						// thus the live value pointers are unique. Otherwise atomic
						// snapshot won't be correct in case of multiple Store calls
						// using the same value.
						oldv := e.value
						newv, op := valueFn(oldv, true)
						switch op {
						case DeleteOp:
							// Deletion.
							// First we update the hash, then the entry.
							newmetaw := setByte(metaw, emptyMetaSlot, idx)
							b.meta.Store(newmetaw)
							b.entries[idx].Store(nil)
							rootb.mu.Unlock()
							table.addSize(bidx, -1)
							// Might need to shrink the table if we left bucket empty.
							if newmetaw == defaultMeta {
								m.resize(table, mapShrinkHint)
							}
							return oldv, !computeOnly
						case UpdateOp:
							// Entries are immutable: publish a fresh one.
							newe := new(entry[K, V])
							newe.key = key
							newe.value = newv
							b.entries[idx].Store(newe)
						case CancelOp:
							newv = oldv
						}
						rootb.mu.Unlock()
						if computeOnly {
							// Compute expects the new value to be returned.
							return newv, true
						}
						// LoadAndStore expects the old value to be returned.
						return oldv, true
					}
				}
				markedw &= markedw - 1
			}
			if emptyb == nil {
				// Search for empty entries (up to 5 per bucket).
				emptyw := metaw & defaultMetaMasked
				if emptyw != 0 {
					idx := firstMarkedByteIndex(emptyw)
					emptyb = b
					emptyidx = idx
				}
			}
			if b.next.Load() == nil {
				// End of chain: the key is absent; insert if requested.
				if emptyb != nil {
					// Insertion into an existing bucket.
					var zeroV V
					newValue, op := valueFn(zeroV, false)
					switch op {
					case DeleteOp, CancelOp:
						rootb.mu.Unlock()
						return zeroV, false
					default:
						newe := new(entry[K, V])
						newe.key = key
						newe.value = newValue
						// First we update meta, then the entry.
						emptyb.meta.Store(setByte(emptyb.meta.Load(), h2, emptyidx))
						emptyb.entries[emptyidx].Store(newe)
						rootb.mu.Unlock()
						table.addSize(bidx, 1)
						return newValue, computeOnly
					}
				}
				growThreshold := float64(tableLen) * entriesPerMapBucket * mapLoadFactor
				if table.sumSize() > int64(growThreshold) {
					// Need to grow the table. Then go for another attempt.
					rootb.mu.Unlock()
					m.resize(table, mapGrowHint)
					goto compute_attempt
				}
				// Insertion into a new bucket.
				var zeroV V
				newValue, op := valueFn(zeroV, false)
				switch op {
				case DeleteOp, CancelOp:
					rootb.mu.Unlock()
					return newValue, false
				default:
					// Create and append a bucket.
					newb := new(bucketPadded[K, V])
					newb.meta.Store(setByte(defaultMeta, h2, 0))
					newe := new(entry[K, V])
					newe.key = key
					newe.value = newValue
					newb.entries[0].Store(newe)
					b.next.Store(newb)
					rootb.mu.Unlock()
					table.addSize(bidx, 1)
					return newValue, computeOnly
				}
			}
			b = b.next.Load()
		}
	}
}
|
||||
|
||||
// newerTableExists reports whether the map has published a table other
// than the one the caller captured, i.e. a resize completed in between.
func (m *Map[K, V]) newerTableExists(table *mapTable[K, V]) bool {
	return table != m.table.Load()
}
|
||||
|
||||
// resizeInProgress reports whether a resize currently holds the
// resizing flag.
func (m *Map[K, V]) resizeInProgress() bool {
	return m.resizing.Load()
}
|
||||
|
||||
// waitForResize blocks until the in-progress resize (if any) finishes.
// Standard condition-variable loop: the predicate is re-checked after
// every wakeup under resizeMu.
func (m *Map[K, V]) waitForResize() {
	m.resizeMu.Lock()
	for m.resizeInProgress() {
		m.resizeCond.Wait()
	}
	m.resizeMu.Unlock()
}
|
||||
|
||||
// resize replaces the current table with a grown, shrunk, or minimal
// fresh table according to hint, copying entries over unless clearing.
// Only one resize runs at a time (guarded by the resizing CAS);
// concurrent writers wait on resizeCond. mihomo modification: the copy
// is single-threaded (upstream parallelizes it) to reduce memory use.
func (m *Map[K, V]) resize(knownTable *mapTable[K, V], hint mapResizeHint) {
	knownTableLen := len(knownTable.buckets)
	// Fast path for shrink attempts.
	if hint == mapShrinkHint {
		if m.growOnly ||
			m.minTableLen == knownTableLen ||
			knownTable.sumSize() > int64((knownTableLen*entriesPerMapBucket)/mapShrinkFraction) {
			return
		}
	}
	// Slow path.
	if !m.resizing.CompareAndSwap(false, true) {
		// Someone else started resize. Wait for it to finish.
		m.waitForResize()
		return
	}
	var newTable *mapTable[K, V]
	table := m.table.Load()
	tableLen := len(table.buckets)
	switch hint {
	case mapGrowHint:
		// Grow the table with factor of 2.
		m.totalGrowths.Add(1)
		newTable = newMapTable[K, V](tableLen << 1)
	case mapShrinkHint:
		// Re-check the shrink condition against the current table now
		// that we hold the resizing flag.
		shrinkThreshold := int64((tableLen * entriesPerMapBucket) / mapShrinkFraction)
		if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold {
			// Shrink the table with factor of 2.
			m.totalShrinks.Add(1)
			newTable = newMapTable[K, V](tableLen >> 1)
		} else {
			// No need to shrink. Wake up all waiters and give up.
			m.resizeMu.Lock()
			m.resizing.Store(false)
			m.resizeCond.Broadcast()
			m.resizeMu.Unlock()
			return
		}
	case mapClearHint:
		newTable = newMapTable[K, V](m.minTableLen)
	default:
		panic(fmt.Sprintf("unexpected resize hint: %d", hint))
	}
	// Copy the data only if we're not clearing the map.
	if hint != mapClearHint {
		for i := 0; i < tableLen; i++ {
			copied := copyBucket(&table.buckets[i], newTable)
			newTable.addSizePlain(uint64(i), copied)
		}
	}
	// Publish the new table and wake up all waiters.
	m.table.Store(newTable)
	m.resizeMu.Lock()
	m.resizing.Store(false)
	m.resizeCond.Broadcast()
	m.resizeMu.Unlock()
}
|
||||
|
||||
func copyBucket[K comparable, V any](
|
||||
b *bucketPadded[K, V],
|
||||
destTable *mapTable[K, V],
|
||||
) (copied int) {
|
||||
rootb := b
|
||||
rootb.mu.Lock()
|
||||
for {
|
||||
for i := 0; i < entriesPerMapBucket; i++ {
|
||||
if e := b.entries[i].Load(); e != nil {
|
||||
hash := maphash.Comparable(destTable.seed, e.key)
|
||||
bidx := uint64(len(destTable.buckets)-1) & h1(hash)
|
||||
destb := &destTable.buckets[bidx]
|
||||
appendToBucket(h2(hash), b.entries[i].Load(), destb)
|
||||
copied++
|
||||
}
|
||||
}
|
||||
if next := b.next.Load(); next == nil {
|
||||
rootb.mu.Unlock()
|
||||
return
|
||||
} else {
|
||||
b = next
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Range calls f sequentially for each key and value present in the
// map. If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot
// of the Map's contents: no key will be visited more than once, but
// if the value for any key is stored or deleted concurrently, Range
// may reflect any mapping for that key from any point during the
// Range call.
//
// It is safe to modify the map while iterating it, including entry
// creation, modification and deletion. However, the concurrent
// modification rule apply, i.e. the changes may be not reflected
// in the subsequently iterated entries.
func (m *Map[K, V]) Range(f func(key K, value V) bool) {
	m.initOnce.Do(m.init)
	// Pre-allocate array big enough to fit entries for most hash tables.
	bentries := make([]*entry[K, V], 0, 16*entriesPerMapBucket)
	table := m.table.Load()
	for i := range table.buckets {
		rootb := &table.buckets[i]
		b := rootb
		// Prevent concurrent modifications and copy all entries into
		// the intermediate slice.
		rootb.mu.Lock()
		for {
			for i := 0; i < entriesPerMapBucket; i++ {
				if entry := b.entries[i].Load(); entry != nil {
					bentries = append(bentries, entry)
				}
			}
			if next := b.next.Load(); next == nil {
				rootb.mu.Unlock()
				break
			} else {
				b = next
			}
		}
		// Call the function for all copied entries.
		// f runs with the bucket lock released, so user callbacks
		// cannot deadlock against map operations on the same bucket.
		for j, e := range bentries {
			if !f(e.key, e.value) {
				return
			}
			// Remove the reference to avoid preventing the copied
			// entries from being GCed until this method finishes.
			bentries[j] = nil
		}
		bentries = bentries[:0]
	}
}
|
||||
|
||||
// Clear deletes all keys and values currently stored in the map.
// Implemented as a resize to a fresh minimal table.
func (m *Map[K, V]) Clear() {
	m.initOnce.Do(m.init)
	m.resize(m.table.Load(), mapClearHint)
}
|
||||
|
||||
// Size returns current size of the map. The value is the sum of the
// striped counters and may be slightly stale under concurrent
// modifications.
func (m *Map[K, V]) Size() int {
	m.initOnce.Do(m.init)
	return int(m.table.Load().sumSize())
}
|
||||
|
||||
// appendToBucket places entry e (with meta byte h2) into the first
// free slot of the chain rooted at b, appending a new overflow bucket
// when the chain is full. It is only called from copyBucket on a
// destination table that has not been published yet, so no locking on
// the destination is needed.
func appendToBucket[K comparable, V any](h2 uint8, e *entry[K, V], b *bucketPadded[K, V]) {
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			if b.entries[i].Load() == nil {
				b.meta.Store(setByte(b.meta.Load(), h2, i))
				b.entries[i].Store(e)
				return
			}
		}
		if next := b.next.Load(); next == nil {
			newb := new(bucketPadded[K, V])
			newb.meta.Store(setByte(defaultMeta, h2, 0))
			newb.entries[0].Store(e)
			b.next.Store(newb)
			return
		} else {
			b = next
		}
	}
}
|
||||
|
||||
// addSize atomically adds delta to the counter stripe associated with
// bucketIdx; striping spreads contended updates across cache lines.
func (table *mapTable[K, V]) addSize(bucketIdx uint64, delta int) {
	cidx := uint64(len(table.size)-1) & bucketIdx
	atomic.AddInt64(&table.size[cidx].c, int64(delta))
}
|
||||
|
||||
// addSizePlain is the non-atomic variant of addSize; it is only safe
// while the table is still private to the resizer (see resize).
func (table *mapTable[K, V]) addSizePlain(bucketIdx uint64, delta int) {
	cidx := uint64(len(table.size)-1) & bucketIdx
	table.size[cidx].c += int64(delta)
}
|
||||
|
||||
// sumSize returns the entry count by summing all counter stripes.
// Each stripe load is atomic but the total is not a consistent
// snapshot under concurrent updates.
func (table *mapTable[K, V]) sumSize() int64 {
	sum := int64(0)
	for i := range table.size {
		sum += atomic.LoadInt64(&table.size[i].c)
	}
	return sum
}
|
||||
|
||||
// h1 extracts the bucket-selection portion of a 64-bit hash:
// everything above the 7 bits consumed by h2.
func h1(h uint64) uint64 {
	return h >> 7
}
|
||||
|
||||
// h2 extracts the low 7 bits of the hash, used as the per-slot meta
// byte; the high bit stays clear so it never collides with
// emptyMetaSlot (0x80).
func h2(h uint64) uint8 {
	return uint8(h & 0x7f)
}
|
||||
|
||||
// MapStats is Map statistics.
//
// Warning: map statistics are intended to be used for diagnostic
// purposes, not for production code. This means that breaking changes
// may be introduced into this struct even between minor releases.
type MapStats struct {
	// RootBuckets is the number of root buckets in the hash table.
	// Each bucket holds a few entries.
	RootBuckets int
	// TotalBuckets is the total number of buckets in the hash table,
	// including root and their chained buckets. Each bucket holds
	// a few entries.
	TotalBuckets int
	// EmptyBuckets is the number of buckets that hold no entries.
	EmptyBuckets int
	// Capacity is the Map capacity, i.e. the total number of
	// entries that all buckets can physically hold. This number
	// does not consider the load factor.
	Capacity int
	// Size is the exact number of entries stored in the map.
	Size int
	// Counter is the number of entries stored in the map according
	// to the internal atomic counter. In case of concurrent map
	// modifications this number may be different from Size.
	Counter int
	// CounterLen is the number of internal atomic counter stripes.
	// This number may grow with the map capacity to improve
	// multithreaded scalability.
	CounterLen int
	// MinEntries is the minimum number of entries per a chain of
	// buckets, i.e. a root bucket and its chained buckets.
	MinEntries int
	// MaxEntries is the maximum number of entries per a chain of
	// buckets, i.e. a root bucket and its chained buckets.
	// (Comment fixed: it previously read "MinEntries".)
	MaxEntries int
	// TotalGrowths is the number of times the hash table grew.
	TotalGrowths int64
	// TotalShrinks is the number of times the hash table shrank.
	// (Comment fixed: it previously read "TotalGrowths ... shrinked".)
	TotalShrinks int64
}
|
||||
|
||||
// ToString returns string representation of map stats.
|
||||
func (s *MapStats) ToString() string {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("MapStats{\n")
|
||||
sb.WriteString(fmt.Sprintf("RootBuckets: %d\n", s.RootBuckets))
|
||||
sb.WriteString(fmt.Sprintf("TotalBuckets: %d\n", s.TotalBuckets))
|
||||
sb.WriteString(fmt.Sprintf("EmptyBuckets: %d\n", s.EmptyBuckets))
|
||||
sb.WriteString(fmt.Sprintf("Capacity: %d\n", s.Capacity))
|
||||
sb.WriteString(fmt.Sprintf("Size: %d\n", s.Size))
|
||||
sb.WriteString(fmt.Sprintf("Counter: %d\n", s.Counter))
|
||||
sb.WriteString(fmt.Sprintf("CounterLen: %d\n", s.CounterLen))
|
||||
sb.WriteString(fmt.Sprintf("MinEntries: %d\n", s.MinEntries))
|
||||
sb.WriteString(fmt.Sprintf("MaxEntries: %d\n", s.MaxEntries))
|
||||
sb.WriteString(fmt.Sprintf("TotalGrowths: %d\n", s.TotalGrowths))
|
||||
sb.WriteString(fmt.Sprintf("TotalShrinks: %d\n", s.TotalShrinks))
|
||||
sb.WriteString("}\n")
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// Stats returns statistics for the Map. Just like other map
|
||||
// methods, this one is thread-safe. Yet it's an O(N) operation,
|
||||
// so it should be used only for diagnostics or debugging purposes.
|
||||
func (m *Map[K, V]) Stats() MapStats {
|
||||
m.initOnce.Do(m.init)
|
||||
stats := MapStats{
|
||||
TotalGrowths: m.totalGrowths.Load(),
|
||||
TotalShrinks: m.totalShrinks.Load(),
|
||||
MinEntries: math.MaxInt32,
|
||||
}
|
||||
table := m.table.Load()
|
||||
stats.RootBuckets = len(table.buckets)
|
||||
stats.Counter = int(table.sumSize())
|
||||
stats.CounterLen = len(table.size)
|
||||
for i := range table.buckets {
|
||||
nentries := 0
|
||||
b := &table.buckets[i]
|
||||
stats.TotalBuckets++
|
||||
for {
|
||||
nentriesLocal := 0
|
||||
stats.Capacity += entriesPerMapBucket
|
||||
for i := 0; i < entriesPerMapBucket; i++ {
|
||||
if b.entries[i].Load() != nil {
|
||||
stats.Size++
|
||||
nentriesLocal++
|
||||
}
|
||||
}
|
||||
nentries += nentriesLocal
|
||||
if nentriesLocal == 0 {
|
||||
stats.EmptyBuckets++
|
||||
}
|
||||
if next := b.next.Load(); next == nil {
|
||||
break
|
||||
} else {
|
||||
b = next
|
||||
}
|
||||
stats.TotalBuckets++
|
||||
}
|
||||
if nentries < stats.MinEntries {
|
||||
stats.MinEntries = nentries
|
||||
}
|
||||
if nentries > stats.MaxEntries {
|
||||
stats.MaxEntries = nentries
|
||||
}
|
||||
}
|
||||
return stats
|
||||
}
|
||||
|
||||
const (
|
||||
// cacheLineSize is used in paddings to prevent false sharing;
|
||||
// 64B are used instead of 128B as a compromise between
|
||||
// memory footprint and performance; 128B usage may give ~30%
|
||||
// improvement on NUMA machines.
|
||||
cacheLineSize = 64
|
||||
)
|
||||
|
||||
// nextPowOf2 computes the next highest power of 2 of 32-bit v.
|
||||
// Source: https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
|
||||
func nextPowOf2(v uint32) uint32 {
|
||||
if v == 0 {
|
||||
return 1
|
||||
}
|
||||
v--
|
||||
v |= v >> 1
|
||||
v |= v >> 2
|
||||
v |= v >> 4
|
||||
v |= v >> 8
|
||||
v |= v >> 16
|
||||
v++
|
||||
return v
|
||||
}
|
||||
|
||||
func broadcast(b uint8) uint64 {
|
||||
return 0x101010101010101 * uint64(b)
|
||||
}
|
||||
|
||||
func firstMarkedByteIndex(w uint64) int {
|
||||
return bits.TrailingZeros64(w) >> 3
|
||||
}
|
||||
|
||||
// SWAR byte search: may produce false positives, e.g. for 0x0100,
|
||||
// so make sure to double-check bytes found by this function.
|
||||
func markZeroBytes(w uint64) uint64 {
|
||||
return ((w - 0x0101010101010101) & (^w) & 0x8080808080808080)
|
||||
}
|
||||
|
||||
func setByte(w uint64, b uint8, idx int) uint64 {
|
||||
shift := idx << 3
|
||||
return (w &^ (0xff << shift)) | (uint64(b) << shift)
|
||||
}
|
||||
28
common/xsync/map_extra.go
Normal file
28
common/xsync/map_extra.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package xsync
|
||||
|
||||
// LoadOrStoreFn returns the existing value for the key if
|
||||
// present. Otherwise, it tries to compute the value using the
|
||||
// provided function and, if successful, stores and returns
|
||||
// the computed value. The loaded result is true if the value was
|
||||
// loaded, or false if computed.
|
||||
//
|
||||
// This call locks a hash table bucket while the compute function
|
||||
// is executed. It means that modifications on other entries in
|
||||
// the bucket will be blocked until the valueFn executes. Consider
|
||||
// this when the function includes long-running operations.
|
||||
//
|
||||
// Recovery this API and renamed from xsync/v3's LoadOrCompute.
|
||||
// We unneeded support no-op (cancel) compute operation, it will only add complexity to existing code.
|
||||
func (m *Map[K, V]) LoadOrStoreFn(key K, valueFn func() V) (actual V, loaded bool) {
|
||||
return m.doCompute(
|
||||
key,
|
||||
func(oldValue V, loaded bool) (V, ComputeOp) {
|
||||
if loaded {
|
||||
return oldValue, CancelOp
|
||||
}
|
||||
return valueFn(), UpdateOp
|
||||
},
|
||||
loadOrComputeOp,
|
||||
false,
|
||||
)
|
||||
}
|
||||
49
common/xsync/map_extra_test.go
Normal file
49
common/xsync/map_extra_test.go
Normal file
@@ -0,0 +1,49 @@
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMapOfLoadOrStoreFn(t *testing.T) {
|
||||
const numEntries = 1000
|
||||
m := NewMap[string, int]()
|
||||
for i := 0; i < numEntries; i++ {
|
||||
v, loaded := m.LoadOrStoreFn(strconv.Itoa(i), func() int {
|
||||
return i
|
||||
})
|
||||
if loaded {
|
||||
t.Fatalf("value not computed for %d", i)
|
||||
}
|
||||
if v != i {
|
||||
t.Fatalf("values do not match for %d: %v", i, v)
|
||||
}
|
||||
}
|
||||
for i := 0; i < numEntries; i++ {
|
||||
v, loaded := m.LoadOrStoreFn(strconv.Itoa(i), func() int {
|
||||
return i
|
||||
})
|
||||
if !loaded {
|
||||
t.Fatalf("value not loaded for %d", i)
|
||||
}
|
||||
if v != i {
|
||||
t.Fatalf("values do not match for %d: %v", i, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMapOfLoadOrStoreFn_FunctionCalledOnce(t *testing.T) {
|
||||
m := NewMap[int, int]()
|
||||
for i := 0; i < 100; {
|
||||
m.LoadOrStoreFn(i, func() (v int) {
|
||||
v, i = i, i+1
|
||||
return v
|
||||
})
|
||||
}
|
||||
m.Range(func(k, v int) bool {
|
||||
if k != v {
|
||||
t.Fatalf("%dth key is not equal to value %d", k, v)
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
1732
common/xsync/map_test.go
Normal file
1732
common/xsync/map_test.go
Normal file
File diff suppressed because it is too large
Load Diff
@@ -8,11 +8,10 @@ import (
|
||||
"strconv"
|
||||
|
||||
"github.com/metacubex/mihomo/common/callback"
|
||||
"github.com/metacubex/mihomo/common/xsync"
|
||||
"github.com/metacubex/mihomo/component/iface"
|
||||
C "github.com/metacubex/mihomo/constant"
|
||||
"github.com/metacubex/mihomo/constant/features"
|
||||
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
)
|
||||
|
||||
var disableLoopBackDetector, _ = strconv.ParseBool(os.Getenv("DISABLE_LOOPBACK_DETECTOR"))
|
||||
@@ -26,22 +25,19 @@ func init() {
|
||||
var ErrReject = errors.New("reject loopback connection")
|
||||
|
||||
type Detector struct {
|
||||
connMap *xsync.MapOf[netip.AddrPort, struct{}]
|
||||
packetConnMap *xsync.MapOf[uint16, struct{}]
|
||||
connMap xsync.Map[netip.AddrPort, struct{}]
|
||||
packetConnMap xsync.Map[uint16, struct{}]
|
||||
}
|
||||
|
||||
func NewDetector() *Detector {
|
||||
if disableLoopBackDetector {
|
||||
return nil
|
||||
}
|
||||
return &Detector{
|
||||
connMap: xsync.NewMapOf[netip.AddrPort, struct{}](),
|
||||
packetConnMap: xsync.NewMapOf[uint16, struct{}](),
|
||||
}
|
||||
return &Detector{}
|
||||
}
|
||||
|
||||
func (l *Detector) NewConn(conn C.Conn) C.Conn {
|
||||
if l == nil || l.connMap == nil {
|
||||
if l == nil {
|
||||
return conn
|
||||
}
|
||||
metadata := C.Metadata{}
|
||||
@@ -59,7 +55,7 @@ func (l *Detector) NewConn(conn C.Conn) C.Conn {
|
||||
}
|
||||
|
||||
func (l *Detector) NewPacketConn(conn C.PacketConn) C.PacketConn {
|
||||
if l == nil || l.packetConnMap == nil {
|
||||
if l == nil {
|
||||
return conn
|
||||
}
|
||||
metadata := C.Metadata{}
|
||||
@@ -78,7 +74,7 @@ func (l *Detector) NewPacketConn(conn C.PacketConn) C.PacketConn {
|
||||
}
|
||||
|
||||
func (l *Detector) CheckConn(metadata *C.Metadata) error {
|
||||
if l == nil || l.connMap == nil {
|
||||
if l == nil {
|
||||
return nil
|
||||
}
|
||||
connAddr := metadata.SourceAddrPort()
|
||||
@@ -92,7 +88,7 @@ func (l *Detector) CheckConn(metadata *C.Metadata) error {
|
||||
}
|
||||
|
||||
func (l *Detector) CheckPacketConn(metadata *C.Metadata) error {
|
||||
if l == nil || l.packetConnMap == nil {
|
||||
if l == nil {
|
||||
return nil
|
||||
}
|
||||
connAddr := metadata.SourceAddrPort()
|
||||
|
||||
@@ -4,27 +4,24 @@ import (
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/metacubex/mihomo/common/xsync"
|
||||
C "github.com/metacubex/mihomo/constant"
|
||||
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
)
|
||||
|
||||
type Table struct {
|
||||
mapping *xsync.MapOf[string, *entry]
|
||||
mapping xsync.Map[string, *entry]
|
||||
}
|
||||
|
||||
type entry struct {
|
||||
PacketSender C.PacketSender
|
||||
LocalUDPConnMap *xsync.MapOf[string, *net.UDPConn]
|
||||
LocalLockMap *xsync.MapOf[string, *sync.Cond]
|
||||
LocalUDPConnMap xsync.Map[string, *net.UDPConn]
|
||||
LocalLockMap xsync.Map[string, *sync.Cond]
|
||||
}
|
||||
|
||||
func (t *Table) GetOrCreate(key string, maker func() C.PacketSender) (C.PacketSender, bool) {
|
||||
item, loaded := t.mapping.LoadOrCompute(key, func() *entry {
|
||||
item, loaded := t.mapping.LoadOrStoreFn(key, func() *entry {
|
||||
return &entry{
|
||||
PacketSender: maker(),
|
||||
LocalUDPConnMap: xsync.NewMapOf[string, *net.UDPConn](),
|
||||
LocalLockMap: xsync.NewMapOf[string, *sync.Cond](),
|
||||
PacketSender: maker(),
|
||||
}
|
||||
})
|
||||
return item.PacketSender, loaded
|
||||
@@ -68,7 +65,7 @@ func (t *Table) GetOrCreateLockForLocalConn(lAddr, key string) (*sync.Cond, bool
|
||||
if !loaded {
|
||||
return nil, false
|
||||
}
|
||||
item, loaded := entry.LocalLockMap.LoadOrCompute(key, makeLock)
|
||||
item, loaded := entry.LocalLockMap.LoadOrStoreFn(key, makeLock)
|
||||
return item, loaded
|
||||
}
|
||||
|
||||
@@ -98,7 +95,5 @@ func makeLock() *sync.Cond {
|
||||
|
||||
// New return *Cache
|
||||
func New() *Table {
|
||||
return &Table{
|
||||
mapping: xsync.NewMapOf[string, *entry](),
|
||||
}
|
||||
return &Table{}
|
||||
}
|
||||
|
||||
@@ -113,7 +113,7 @@ func NewHostValueByDomain(domain string) (HostValue, error) {
|
||||
domain = strings.Trim(domain, ".")
|
||||
item := strings.Split(domain, ".")
|
||||
if len(item) < 2 {
|
||||
return HostValue{}, errors.New("invaild domain")
|
||||
return HostValue{}, errors.New("invalid domain")
|
||||
}
|
||||
return HostValue{
|
||||
IsDomain: true,
|
||||
|
||||
@@ -230,10 +230,18 @@ func ResolveECH(ctx context.Context, host string) ([]byte, error) {
|
||||
return ResolveECHWithResolver(ctx, host, DefaultResolver)
|
||||
}
|
||||
|
||||
func ClearCache() {
|
||||
if DefaultResolver != nil {
|
||||
go DefaultResolver.ClearCache()
|
||||
}
|
||||
go SystemResolver.ClearCache() // SystemResolver unneeded check nil
|
||||
}
|
||||
|
||||
func ResetConnection() {
|
||||
if DefaultResolver != nil {
|
||||
go DefaultResolver.ResetConnection()
|
||||
}
|
||||
go SystemResolver.ResetConnection() // SystemResolver unneeded check nil
|
||||
}
|
||||
|
||||
func SortationAddr(ips []netip.Addr) (ipv4s, ipv6s []netip.Addr) {
|
||||
|
||||
@@ -85,6 +85,9 @@ func GetRealityConn(ctx context.Context, conn net.Conn, fingerprint UClientHello
|
||||
continue // retry
|
||||
}
|
||||
ecdheKey := keyShareKeys.Ecdhe
|
||||
if ecdheKey == nil {
|
||||
ecdheKey = keyShareKeys.MlkemEcdhe
|
||||
}
|
||||
if ecdheKey == nil {
|
||||
// WTF???
|
||||
if retry > 2 {
|
||||
@@ -167,6 +170,7 @@ type realityVerifier struct {
|
||||
//var pOffset = utils.MustOK(reflect.TypeOf((*utls.Conn)(nil)).Elem().FieldByName("peerCertificates")).Offset
|
||||
|
||||
func (c *realityVerifier) VerifyPeerCertificate(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
|
||||
//log.Debugln("REALITY localAddr: %v\t is using X25519MLKEM768 for TLS' communication: %v", c.RemoteAddr(), c.HandshakeState.ServerHello.SelectedGroup == utls.X25519MLKEM768)
|
||||
//p, _ := reflect.TypeOf(c.Conn).Elem().FieldByName("peerCertificates")
|
||||
//certs := *(*[]*x509.Certificate)(unsafe.Add(unsafe.Pointer(c.Conn), pOffset))
|
||||
certs := c.Conn.PeerCertificates()
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
@@ -17,74 +16,86 @@ import (
|
||||
|
||||
mihomoHttp "github.com/metacubex/mihomo/component/http"
|
||||
C "github.com/metacubex/mihomo/constant"
|
||||
"github.com/metacubex/mihomo/constant/features"
|
||||
"github.com/metacubex/mihomo/log"
|
||||
|
||||
"github.com/klauspost/cpuid/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
baseReleaseURL = "https://github.com/MetaCubeX/mihomo/releases/latest/download/"
|
||||
versionReleaseURL = "https://github.com/MetaCubeX/mihomo/releases/latest/download/version.txt"
|
||||
|
||||
baseAlphaURL = "https://github.com/MetaCubeX/mihomo/releases/download/Prerelease-Alpha/"
|
||||
versionAlphaURL = "https://github.com/MetaCubeX/mihomo/releases/download/Prerelease-Alpha/version.txt"
|
||||
|
||||
// MaxPackageFileSize is a maximum package file length in bytes. The largest
|
||||
// package whose size is limited by this constant currently has the size of
|
||||
// approximately 32 MiB.
|
||||
MaxPackageFileSize = 32 * 1024 * 1024
|
||||
)
|
||||
|
||||
// CoreUpdater is the mihomo updater.
|
||||
// modify from https://github.com/AdguardTeam/AdGuardHome/blob/595484e0b3fb4c457f9bb727a6b94faa78a66c5f/internal/updater/updater.go
|
||||
// Updater is the mihomo updater.
|
||||
var (
|
||||
goarm string
|
||||
gomips string
|
||||
amd64Compatible string
|
||||
|
||||
workDir string
|
||||
|
||||
// mu protects all fields below.
|
||||
type CoreUpdater struct {
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
currentExeName string // 当前可执行文件
|
||||
updateDir string // 更新目录
|
||||
packageName string // 更新压缩文件
|
||||
backupDir string // 备份目录
|
||||
backupExeName string // 备份文件名
|
||||
updateExeName string // 更新后的可执行文件
|
||||
var DefaultCoreUpdater = CoreUpdater{}
|
||||
|
||||
baseURL string = "https://github.com/MetaCubeX/mihomo/releases/download/Prerelease-Alpha/mihomo"
|
||||
versionURL string = "https://github.com/MetaCubeX/mihomo/releases/download/Prerelease-Alpha/version.txt"
|
||||
packageURL string
|
||||
latestVersion string
|
||||
)
|
||||
|
||||
func init() {
|
||||
if runtime.GOARCH == "amd64" && cpuid.CPU.X64Level() < 3 {
|
||||
amd64Compatible = "-compatible"
|
||||
}
|
||||
if !strings.HasPrefix(C.Version, "alpha") {
|
||||
baseURL = "https://github.com/MetaCubeX/mihomo/releases/latest/download/mihomo"
|
||||
versionURL = "https://github.com/MetaCubeX/mihomo/releases/latest/download/version.txt"
|
||||
func (u *CoreUpdater) CoreBaseName() string {
|
||||
switch runtime.GOARCH {
|
||||
case "arm":
|
||||
// mihomo-linux-armv5
|
||||
return fmt.Sprintf("mihomo-%s-%sv%s", runtime.GOOS, runtime.GOARCH, features.GOARM)
|
||||
case "arm64":
|
||||
if runtime.GOOS == "android" {
|
||||
// mihomo-android-arm64-v8
|
||||
return fmt.Sprintf("mihomo-%s-%s-v8", runtime.GOOS, runtime.GOARCH)
|
||||
} else {
|
||||
// mihomo-linux-arm64
|
||||
return fmt.Sprintf("mihomo-%s-%s", runtime.GOOS, runtime.GOARCH)
|
||||
}
|
||||
case "mips", "mipsle":
|
||||
// mihomo-linux-mips-hardfloat
|
||||
return fmt.Sprintf("mihomo-%s-%s-%s", runtime.GOOS, runtime.GOARCH, features.GOMIPS)
|
||||
case "amd64":
|
||||
// mihomo-linux-amd64-v1
|
||||
return fmt.Sprintf("mihomo-%s-%s-%s", runtime.GOOS, runtime.GOARCH, features.GOAMD64)
|
||||
default:
|
||||
// mihomo-linux-386
|
||||
// mihomo-linux-mips64
|
||||
// mihomo-linux-riscv64
|
||||
// mihomo-linux-s390x
|
||||
return fmt.Sprintf("mihomo-%s-%s", runtime.GOOS, runtime.GOARCH)
|
||||
}
|
||||
}
|
||||
|
||||
type updateError struct {
|
||||
Message string
|
||||
}
|
||||
func (u *CoreUpdater) Update(currentExePath string) (err error) {
|
||||
u.mu.Lock()
|
||||
defer u.mu.Unlock()
|
||||
|
||||
func (e *updateError) Error() string {
|
||||
return fmt.Sprintf("update error: %s", e.Message)
|
||||
}
|
||||
|
||||
// Update performs the auto-updater. It returns an error if the updater failed.
|
||||
// If firstRun is true, it assumes the configuration file doesn't exist.
|
||||
func UpdateCore(execPath string) (err error) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
latestVersion, err = getLatestVersion()
|
||||
_, err = os.Stat(currentExePath)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("check currentExePath %q: %w", currentExePath, err)
|
||||
}
|
||||
|
||||
baseURL := baseAlphaURL
|
||||
versionURL := versionAlphaURL
|
||||
if !strings.HasPrefix(C.Version, "alpha") {
|
||||
baseURL = baseReleaseURL
|
||||
versionURL = versionReleaseURL
|
||||
}
|
||||
|
||||
latestVersion, err := u.getLatestVersion(versionURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get latest version: %w", err)
|
||||
}
|
||||
log.Infoln("current version %s, latest version %s", C.Version, latestVersion)
|
||||
|
||||
if latestVersion == C.Version {
|
||||
err := &updateError{Message: "already using latest version"}
|
||||
return err
|
||||
// don't change this output, some downstream dependencies on the upgrader's output fields
|
||||
return fmt.Errorf("update error: already using latest version %s", C.Version)
|
||||
}
|
||||
|
||||
updateDownloadURL()
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
log.Errorln("updater: failed: %v", err)
|
||||
@@ -93,31 +104,49 @@ func UpdateCore(execPath string) (err error) {
|
||||
}
|
||||
}()
|
||||
|
||||
workDir = filepath.Dir(execPath)
|
||||
// ---- prepare ----
|
||||
mihomoBaseName := u.CoreBaseName()
|
||||
packageName := mihomoBaseName + "-" + latestVersion
|
||||
if runtime.GOOS == "windows" {
|
||||
packageName = packageName + ".zip"
|
||||
} else {
|
||||
packageName = packageName + ".gz"
|
||||
}
|
||||
packageURL := baseURL + packageName
|
||||
log.Infoln("updater: updating using url: %s", packageURL)
|
||||
|
||||
err = prepare(execPath)
|
||||
workDir := filepath.Dir(currentExePath)
|
||||
backupDir := filepath.Join(workDir, "meta-backup")
|
||||
updateDir := filepath.Join(workDir, "meta-update")
|
||||
packagePath := filepath.Join(updateDir, packageName)
|
||||
//log.Infoln(packagePath)
|
||||
|
||||
updateExeName := mihomoBaseName
|
||||
if runtime.GOOS == "windows" {
|
||||
updateExeName = updateExeName + ".exe"
|
||||
}
|
||||
log.Infoln("updateExeName: %s", updateExeName)
|
||||
updateExePath := filepath.Join(updateDir, updateExeName)
|
||||
backupExePath := filepath.Join(backupDir, filepath.Base(currentExePath))
|
||||
|
||||
defer u.clean(updateDir)
|
||||
|
||||
err = u.download(updateDir, packagePath, packageURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("preparing: %w", err)
|
||||
return fmt.Errorf("downloading: %w", err)
|
||||
}
|
||||
|
||||
defer clean()
|
||||
|
||||
err = downloadPackageFile()
|
||||
if err != nil {
|
||||
return fmt.Errorf("downloading package file: %w", err)
|
||||
}
|
||||
|
||||
err = unpack()
|
||||
err = u.unpack(updateDir, packagePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unpacking: %w", err)
|
||||
}
|
||||
|
||||
err = backup()
|
||||
err = u.backup(currentExePath, backupExePath, backupDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("backuping: %w", err)
|
||||
}
|
||||
|
||||
err = replace()
|
||||
err = u.replace(updateExePath, currentExePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("replacing: %w", err)
|
||||
}
|
||||
@@ -125,116 +154,30 @@ func UpdateCore(execPath string) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// prepare fills all necessary fields in Updater object.
|
||||
func prepare(exePath string) (err error) {
|
||||
updateDir = filepath.Join(workDir, "meta-update")
|
||||
currentExeName = exePath
|
||||
_, pkgNameOnly := filepath.Split(packageURL)
|
||||
if pkgNameOnly == "" {
|
||||
return fmt.Errorf("invalid PackageURL: %q", packageURL)
|
||||
}
|
||||
|
||||
packageName = filepath.Join(updateDir, pkgNameOnly)
|
||||
//log.Infoln(packageName)
|
||||
backupDir = filepath.Join(workDir, "meta-backup")
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
updateExeName = "mihomo" + "-" + runtime.GOOS + "-" + runtime.GOARCH + amd64Compatible + ".exe"
|
||||
} else if runtime.GOOS == "android" && runtime.GOARCH == "arm64" {
|
||||
updateExeName = "mihomo-android-arm64-v8"
|
||||
} else {
|
||||
updateExeName = "mihomo" + "-" + runtime.GOOS + "-" + runtime.GOARCH + amd64Compatible
|
||||
}
|
||||
|
||||
log.Infoln("updateExeName: %s ", updateExeName)
|
||||
|
||||
backupExeName = filepath.Join(backupDir, filepath.Base(exePath))
|
||||
updateExeName = filepath.Join(updateDir, updateExeName)
|
||||
|
||||
log.Infoln(
|
||||
"updater: updating using url: %s",
|
||||
packageURL,
|
||||
)
|
||||
|
||||
currentExeName = exePath
|
||||
_, err = os.Stat(currentExeName)
|
||||
func (u *CoreUpdater) getLatestVersion(versionURL string) (version string, err error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
|
||||
defer cancel()
|
||||
resp, err := mihomoHttp.HttpRequest(ctx, versionURL, http.MethodGet, nil, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking %q: %w", currentExeName, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// unpack extracts the files from the downloaded archive.
|
||||
func unpack() error {
|
||||
var err error
|
||||
_, pkgNameOnly := filepath.Split(packageURL)
|
||||
|
||||
log.Infoln("updater: unpacking package")
|
||||
if strings.HasSuffix(pkgNameOnly, ".zip") {
|
||||
_, err = zipFileUnpack(packageName, updateDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf(".zip unpack failed: %w", err)
|
||||
defer func() {
|
||||
closeErr := resp.Body.Close()
|
||||
if closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
} else if strings.HasSuffix(pkgNameOnly, ".gz") {
|
||||
_, err = gzFileUnpack(packageName, updateDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf(".gz unpack failed: %w", err)
|
||||
}
|
||||
|
||||
} else {
|
||||
return fmt.Errorf("unknown package extension")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// backup makes a backup of the current executable file
|
||||
func backup() (err error) {
|
||||
log.Infoln("updater: backing up current ExecFile:%s to %s", currentExeName, backupExeName)
|
||||
_ = os.Mkdir(backupDir, 0o755)
|
||||
|
||||
err = os.Rename(currentExeName, backupExeName)
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
return "", err
|
||||
}
|
||||
|
||||
return nil
|
||||
content := strings.TrimRight(string(body), "\n")
|
||||
return content, nil
|
||||
}
|
||||
|
||||
// replace moves the current executable with the updated one
|
||||
func replace() error {
|
||||
var err error
|
||||
|
||||
log.Infoln("replacing: %s to %s", updateExeName, currentExeName)
|
||||
if runtime.GOOS == "windows" {
|
||||
// rename fails with "File in use" error
|
||||
err = copyFile(updateExeName, currentExeName)
|
||||
} else {
|
||||
err = os.Rename(updateExeName, currentExeName)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infoln("updater: renamed: %s to %s", updateExeName, currentExeName)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// clean removes the temporary directory itself and all it's contents.
|
||||
func clean() {
|
||||
_ = os.RemoveAll(updateDir)
|
||||
}
|
||||
|
||||
// MaxPackageFileSize is a maximum package file length in bytes. The largest
|
||||
// package whose size is limited by this constant currently has the size of
|
||||
// approximately 32 MiB.
|
||||
const MaxPackageFileSize = 32 * 1024 * 1024
|
||||
|
||||
// Download package file and save it to disk
|
||||
func downloadPackageFile() (err error) {
|
||||
// download package file and save it to disk
|
||||
func (u *CoreUpdater) download(updateDir, packagePath, packageURL string) (err error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*90)
|
||||
defer cancel()
|
||||
resp, err := mihomoHttp.HttpRequest(ctx, packageURL, http.MethodGet, nil, nil)
|
||||
@@ -249,38 +192,110 @@ func downloadPackageFile() (err error) {
|
||||
}
|
||||
}()
|
||||
|
||||
var r io.Reader
|
||||
r, err = LimitReader(resp.Body, MaxPackageFileSize)
|
||||
if err != nil {
|
||||
return fmt.Errorf("http request failed: %w", err)
|
||||
}
|
||||
|
||||
log.Debugln("updater: reading http body")
|
||||
// This use of ReadAll is now safe, because we limited body's Reader.
|
||||
body, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("io.ReadAll() failed: %w", err)
|
||||
}
|
||||
|
||||
log.Debugln("updateDir %s", updateDir)
|
||||
err = os.Mkdir(updateDir, 0o755)
|
||||
if err != nil {
|
||||
return fmt.Errorf("mkdir error: %w", err)
|
||||
}
|
||||
|
||||
log.Debugln("updater: saving package to file %s", packageName)
|
||||
err = os.WriteFile(packageName, body, 0o644)
|
||||
log.Debugln("updater: saving package to file %s", packagePath)
|
||||
// Create the output file
|
||||
wc, err := os.OpenFile(packagePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
|
||||
if err != nil {
|
||||
return fmt.Errorf("os.WriteFile() failed: %w", err)
|
||||
return fmt.Errorf("os.OpenFile(%s): %w", packagePath, err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
closeErr := wc.Close()
|
||||
if closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
log.Debugln("updater: reading http body")
|
||||
// This use of io.Copy is now safe, because we limited body's Reader.
|
||||
n, err := io.Copy(wc, io.LimitReader(resp.Body, MaxPackageFileSize))
|
||||
if err != nil {
|
||||
return fmt.Errorf("io.Copy(): %w", err)
|
||||
}
|
||||
if n == MaxPackageFileSize {
|
||||
// Use whether n is equal to MaxPackageFileSize to determine whether the limit has been reached.
|
||||
// It is also possible that the size of the downloaded file is exactly the same as the maximum limit,
|
||||
// but we should not consider this too rare situation.
|
||||
return fmt.Errorf("attempted to read more than %d bytes", MaxPackageFileSize)
|
||||
}
|
||||
log.Debugln("updater: downloaded package to file %s", packagePath)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// unpack extracts the files from the downloaded archive.
|
||||
func (u *CoreUpdater) unpack(updateDir, packagePath string) error {
|
||||
log.Infoln("updater: unpacking package")
|
||||
if strings.HasSuffix(packagePath, ".zip") {
|
||||
_, err := u.zipFileUnpack(packagePath, updateDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf(".zip unpack failed: %w", err)
|
||||
}
|
||||
|
||||
} else if strings.HasSuffix(packagePath, ".gz") {
|
||||
_, err := u.gzFileUnpack(packagePath, updateDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf(".gz unpack failed: %w", err)
|
||||
}
|
||||
|
||||
} else {
|
||||
return fmt.Errorf("unknown package extension")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// backup creates a backup of the current executable file.
|
||||
func (u *CoreUpdater) backup(currentExePath, backupExePath, backupDir string) (err error) {
|
||||
log.Infoln("updater: backing up current ExecFile:%s to %s", currentExePath, backupExePath)
|
||||
_ = os.Mkdir(backupDir, 0o755)
|
||||
|
||||
// On Windows, since the running executable cannot be overwritten or deleted, it uses os.Rename to move the file to the backup path.
|
||||
// On other platforms, it copies the file to the backup path, preserving the original file and its permissions.
|
||||
// The backup directory is created if it does not exist.
|
||||
if runtime.GOOS == "windows" {
|
||||
err = os.Rename(currentExePath, backupExePath)
|
||||
} else {
|
||||
err = u.copyFile(currentExePath, backupExePath)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// replace moves the current executable with the updated one
|
||||
func (u *CoreUpdater) replace(updateExePath, currentExePath string) error {
|
||||
log.Infoln("replacing: %s to %s", updateExePath, currentExePath)
|
||||
|
||||
// Use copyFile to retain the original file attributes
|
||||
err := u.copyFile(updateExePath, currentExePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infoln("updater: copy: %s to %s", updateExePath, currentExePath)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// clean removes the temporary directory itself and all it's contents.
|
||||
func (u *CoreUpdater) clean(updateDir string) {
|
||||
_ = os.RemoveAll(updateDir)
|
||||
}
|
||||
|
||||
// Unpack a single .gz file to the specified directory
|
||||
// Existing files are overwritten
|
||||
// All files are created inside outDir, subdirectories are not created
|
||||
// Return the output file name
|
||||
func gzFileUnpack(gzfile, outDir string) (string, error) {
|
||||
func (u *CoreUpdater) gzFileUnpack(gzfile, outDir string) (outputName string, err error) {
|
||||
f, err := os.Open(gzfile)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("os.Open(): %w", err)
|
||||
@@ -312,14 +327,10 @@ func gzFileUnpack(gzfile, outDir string) (string, error) {
|
||||
originalName = strings.TrimSuffix(originalName, ".gz")
|
||||
}
|
||||
|
||||
outputName := filepath.Join(outDir, originalName)
|
||||
outputName = filepath.Join(outDir, originalName)
|
||||
|
||||
// Create the output file
|
||||
wc, err := os.OpenFile(
|
||||
outputName,
|
||||
os.O_WRONLY|os.O_CREATE|os.O_TRUNC,
|
||||
0o755,
|
||||
)
|
||||
wc, err := os.OpenFile(outputName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("os.OpenFile(%s): %w", outputName, err)
|
||||
}
|
||||
@@ -344,7 +355,7 @@ func gzFileUnpack(gzfile, outDir string) (string, error) {
|
||||
// Existing files are overwritten
|
||||
// All files are created inside 'outDir', subdirectories are not created
|
||||
// Return the output file name
|
||||
func zipFileUnpack(zipfile, outDir string) (string, error) {
|
||||
func (u *CoreUpdater) zipFileUnpack(zipfile, outDir string) (outputName string, err error) {
|
||||
zrc, err := zip.OpenReader(zipfile)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("zip.OpenReader(): %w", err)
|
||||
@@ -376,7 +387,7 @@ func zipFileUnpack(zipfile, outDir string) (string, error) {
|
||||
}()
|
||||
fi := zf.FileInfo()
|
||||
name := fi.Name()
|
||||
outputName := filepath.Join(outDir, name)
|
||||
outputName = filepath.Join(outDir, name)
|
||||
|
||||
if fi.IsDir() {
|
||||
return "", fmt.Errorf("the target file is a directory")
|
||||
@@ -403,97 +414,43 @@ func zipFileUnpack(zipfile, outDir string) (string, error) {
|
||||
}
|
||||
|
||||
// Copy file on disk
|
||||
func copyFile(src, dst string) error {
|
||||
d, e := os.ReadFile(src)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = os.WriteFile(dst, d, 0o644)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getLatestVersion() (version string, err error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
|
||||
defer cancel()
|
||||
resp, err := mihomoHttp.HttpRequest(ctx, versionURL, http.MethodGet, nil, nil)
|
||||
func (u *CoreUpdater) copyFile(src, dst string) (err error) {
|
||||
rc, err := os.Open(src)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("get Latest Version fail: %w", err)
|
||||
return fmt.Errorf("os.Open(%s): %w", src, err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
closeErr := resp.Body.Close()
|
||||
closeErr := rc.Close()
|
||||
if closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
info, err := rc.Stat()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("get Latest Version fail: %w", err)
|
||||
return fmt.Errorf("rc.Stat(): %w", err)
|
||||
}
|
||||
content := strings.TrimRight(string(body), "\n")
|
||||
return content, nil
|
||||
}
|
||||
|
||||
func updateDownloadURL() {
|
||||
var middle string
|
||||
// Create the output file
|
||||
// If the file does not exist, creates it with permissions perm (before umask);
|
||||
// otherwise truncates it before writing, without changing permissions.
|
||||
wc, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, info.Mode())
|
||||
if err != nil {
|
||||
return fmt.Errorf("os.OpenFile(%s): %w", dst, err)
|
||||
}
|
||||
|
||||
if runtime.GOARCH == "arm" && probeGoARM() {
|
||||
//-linux-armv7-alpha-e552b54.gz
|
||||
middle = fmt.Sprintf("-%s-%s%s-%s", runtime.GOOS, runtime.GOARCH, goarm, latestVersion)
|
||||
} else if runtime.GOARCH == "arm64" {
|
||||
//-linux-arm64-alpha-e552b54.gz
|
||||
if runtime.GOOS == "android" {
|
||||
middle = fmt.Sprintf("-%s-%s-v8-%s", runtime.GOOS, runtime.GOARCH, latestVersion)
|
||||
} else {
|
||||
middle = fmt.Sprintf("-%s-%s-%s", runtime.GOOS, runtime.GOARCH, latestVersion)
|
||||
defer func() {
|
||||
closeErr := wc.Close()
|
||||
if closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
} else if isMIPS(runtime.GOARCH) && gomips != "" {
|
||||
middle = fmt.Sprintf("-%s-%s-%s-%s", runtime.GOOS, runtime.GOARCH, gomips, latestVersion)
|
||||
} else {
|
||||
middle = fmt.Sprintf("-%s-%s%s-%s", runtime.GOOS, runtime.GOARCH, amd64Compatible, latestVersion)
|
||||
}
|
||||
}()
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
middle += ".zip"
|
||||
} else {
|
||||
middle += ".gz"
|
||||
}
|
||||
packageURL = baseURL + middle
|
||||
//log.Infoln(packageURL)
|
||||
}
|
||||
|
||||
// isMIPS returns true if arch is any MIPS architecture.
|
||||
func isMIPS(arch string) (ok bool) {
|
||||
switch arch {
|
||||
case
|
||||
"mips",
|
||||
"mips64",
|
||||
"mips64le",
|
||||
"mipsle":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// linux only
|
||||
func probeGoARM() (ok bool) {
|
||||
cmd := exec.Command("cat", "/proc/cpuinfo")
|
||||
output, err := cmd.Output()
|
||||
_, err = io.Copy(wc, rc)
|
||||
if err != nil {
|
||||
log.Errorln("probe goarm error:%s", err)
|
||||
return false
|
||||
return fmt.Errorf("io.Copy(): %w", err)
|
||||
}
|
||||
cpuInfo := string(output)
|
||||
if strings.Contains(cpuInfo, "vfpv3") || strings.Contains(cpuInfo, "vfpv4") {
|
||||
goarm = "v7"
|
||||
} else if strings.Contains(cpuInfo, "vfp") {
|
||||
goarm = "v6"
|
||||
} else {
|
||||
goarm = "v5"
|
||||
}
|
||||
return true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
10
component/updater/update_core_test.go
Normal file
10
component/updater/update_core_test.go
Normal file
@@ -0,0 +1,10 @@
|
||||
package updater
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCoreBaseName(t *testing.T) {
|
||||
fmt.Println("Core base name =", DefaultCoreUpdater.CoreBaseName())
|
||||
}
|
||||
@@ -212,7 +212,7 @@ func UpdateGeoDatabases() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func getUpdateTime() (err error, time time.Time) {
|
||||
func getUpdateTime() (time time.Time, err error) {
|
||||
filesToCheck := []string{
|
||||
C.Path.GeoIP(),
|
||||
C.Path.MMDB(),
|
||||
@@ -224,7 +224,7 @@ func getUpdateTime() (err error, time time.Time) {
|
||||
var fileInfo os.FileInfo
|
||||
fileInfo, err = os.Stat(file)
|
||||
if err == nil {
|
||||
return nil, fileInfo.ModTime()
|
||||
return fileInfo.ModTime(), nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -241,7 +241,7 @@ func RegisterGeoUpdater() {
|
||||
ticker := time.NewTicker(time.Duration(updateInterval) * time.Hour)
|
||||
defer ticker.Stop()
|
||||
|
||||
err, lastUpdate := getUpdateTime()
|
||||
lastUpdate, err := getUpdateTime()
|
||||
if err != nil {
|
||||
log.Errorln("[GEO] Get GEO database update time error: %s", err.Error())
|
||||
return
|
||||
|
||||
@@ -182,7 +182,7 @@ func unzip(data []byte, dest string) error {
|
||||
if err = os.MkdirAll(filepath.Dir(fpath), os.ModePerm); err != nil {
|
||||
return err
|
||||
}
|
||||
outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
|
||||
outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode().Perm())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -209,9 +209,6 @@ func untgz(data []byte, dest string) error {
|
||||
|
||||
tr := tar.NewReader(gzr)
|
||||
|
||||
_ = gzr.Reset(bytes.NewReader(data))
|
||||
tr = tar.NewReader(gzr)
|
||||
|
||||
for {
|
||||
header, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
@@ -236,7 +233,7 @@ func untgz(data []byte, dest string) error {
|
||||
if err = os.MkdirAll(filepath.Dir(fpath), os.ModePerm); err != nil {
|
||||
return err
|
||||
}
|
||||
outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(header.Mode))
|
||||
outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(header.Mode).Perm())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -2,15 +2,12 @@ package updater
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
mihomoHttp "github.com/metacubex/mihomo/component/http"
|
||||
|
||||
"golang.org/x/exp/constraints"
|
||||
)
|
||||
|
||||
const defaultHttpTimeout = time.Second * 90
|
||||
@@ -30,62 +27,3 @@ func downloadForBytes(url string) ([]byte, error) {
|
||||
func saveFile(bytes []byte, path string) error {
|
||||
return os.WriteFile(path, bytes, 0o644)
|
||||
}
|
||||
|
||||
// LimitReachedError records the limit and the operation that caused it.
|
||||
type LimitReachedError struct {
|
||||
Limit int64
|
||||
}
|
||||
|
||||
// Error implements the [error] interface for *LimitReachedError.
|
||||
//
|
||||
// TODO(a.garipov): Think about error string format.
|
||||
func (lre *LimitReachedError) Error() string {
|
||||
return fmt.Sprintf("attempted to read more than %d bytes", lre.Limit)
|
||||
}
|
||||
|
||||
// limitedReader is a wrapper for [io.Reader] limiting the input and dealing
|
||||
// with errors package.
|
||||
type limitedReader struct {
|
||||
r io.Reader
|
||||
limit int64
|
||||
n int64
|
||||
}
|
||||
|
||||
// Read implements the [io.Reader] interface.
|
||||
func (lr *limitedReader) Read(p []byte) (n int, err error) {
|
||||
if lr.n == 0 {
|
||||
return 0, &LimitReachedError{
|
||||
Limit: lr.limit,
|
||||
}
|
||||
}
|
||||
|
||||
p = p[:Min(lr.n, int64(len(p)))]
|
||||
|
||||
n, err = lr.r.Read(p)
|
||||
lr.n -= int64(n)
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
// LimitReader wraps Reader to make it's Reader stop with ErrLimitReached after
|
||||
// n bytes read.
|
||||
func LimitReader(r io.Reader, n int64) (limited io.Reader, err error) {
|
||||
if n < 0 {
|
||||
return nil, &updateError{Message: "limit must be non-negative"}
|
||||
}
|
||||
|
||||
return &limitedReader{
|
||||
r: r,
|
||||
limit: n,
|
||||
n: n,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Min returns the smaller of x or y.
|
||||
func Min[T constraints.Integer | ~string](x, y T) (res T) {
|
||||
if x < y {
|
||||
return x
|
||||
}
|
||||
|
||||
return y
|
||||
}
|
||||
|
||||
101
component/wildcard/wildcard.go
Normal file
101
component/wildcard/wildcard.go
Normal file
@@ -0,0 +1,101 @@
|
||||
package wildcard
|
||||
|
||||
// copy and modified from https://github.com/IGLOU-EU/go-wildcard/tree/ce22b7af48e487517a492d3727d9386492043e21
|
||||
// which is licensed under OpenBSD's ISC-style license.
|
||||
// Copyright (c) 2023 Iglou.eu contact@iglou.eu Copyright (c) 2023 Adrien Kara adrien@iglou.eu
|
||||
|
||||
func Match(pattern, s string) bool {
|
||||
if pattern == "" {
|
||||
return s == pattern
|
||||
}
|
||||
if pattern == "*" || s == pattern {
|
||||
return true
|
||||
}
|
||||
|
||||
return matchByString(pattern, s)
|
||||
}
|
||||
|
||||
func matchByString(pattern, s string) bool {
|
||||
var lastErotemeCluster byte
|
||||
var patternIndex, sIndex, lastStar, lastEroteme int
|
||||
patternLen := len(pattern)
|
||||
sLen := len(s)
|
||||
star := -1
|
||||
eroteme := -1
|
||||
|
||||
Loop:
|
||||
if sIndex >= sLen {
|
||||
goto checkPattern
|
||||
}
|
||||
|
||||
if patternIndex >= patternLen {
|
||||
if star != -1 {
|
||||
patternIndex = star + 1
|
||||
lastStar++
|
||||
sIndex = lastStar
|
||||
goto Loop
|
||||
}
|
||||
return false
|
||||
}
|
||||
switch pattern[patternIndex] {
|
||||
// Removed dot matching as it conflicts with dot in domains.
|
||||
// case '.':
|
||||
// It matches any single character. So, we don't need to check anything.
|
||||
case '?':
|
||||
// '?' matches one character. Store its position and match exactly one character in the string.
|
||||
eroteme = patternIndex
|
||||
lastEroteme = sIndex
|
||||
lastErotemeCluster = byte(s[sIndex])
|
||||
case '*':
|
||||
// '*' matches zero or more characters. Store its position and increment the pattern index.
|
||||
star = patternIndex
|
||||
lastStar = sIndex
|
||||
patternIndex++
|
||||
goto Loop
|
||||
default:
|
||||
// If the characters don't match, check if there was a previous '?' or '*' to backtrack.
|
||||
if pattern[patternIndex] != s[sIndex] {
|
||||
if eroteme != -1 {
|
||||
patternIndex = eroteme + 1
|
||||
sIndex = lastEroteme
|
||||
eroteme = -1
|
||||
goto Loop
|
||||
}
|
||||
|
||||
if star != -1 {
|
||||
patternIndex = star + 1
|
||||
lastStar++
|
||||
sIndex = lastStar
|
||||
goto Loop
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// If the characters match, check if it was not the same to validate the eroteme.
|
||||
if eroteme != -1 && lastErotemeCluster != byte(s[sIndex]) {
|
||||
eroteme = -1
|
||||
}
|
||||
}
|
||||
|
||||
patternIndex++
|
||||
sIndex++
|
||||
goto Loop
|
||||
|
||||
// Check if the remaining pattern characters are '*' or '?', which can match the end of the string.
|
||||
checkPattern:
|
||||
if patternIndex < patternLen {
|
||||
if pattern[patternIndex] == '*' {
|
||||
patternIndex++
|
||||
goto checkPattern
|
||||
} else if pattern[patternIndex] == '?' {
|
||||
if sIndex >= sLen {
|
||||
sIndex--
|
||||
}
|
||||
patternIndex++
|
||||
goto checkPattern
|
||||
}
|
||||
}
|
||||
|
||||
return patternIndex == patternLen
|
||||
}
|
||||
105
component/wildcard/wildcard_test.go
Normal file
105
component/wildcard/wildcard_test.go
Normal file
@@ -0,0 +1,105 @@
|
||||
package wildcard
|
||||
|
||||
/*
|
||||
* copy and modified from https://github.com/IGLOU-EU/go-wildcard/tree/ce22b7af48e487517a492d3727d9386492043e21
|
||||
*
|
||||
* Copyright (c) 2023 Iglou.eu <contact@iglou.eu>
|
||||
* Copyright (c) 2023 Adrien Kara <adrien@iglou.eu>
|
||||
*
|
||||
* Licensed under the BSD 3-Clause License,
|
||||
*/
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestMatch validates the logic of wild card matching,
|
||||
// it need to support '*', '?' and only validate for byte comparison
|
||||
// over string, not rune or grapheme cluster
|
||||
func TestMatch(t *testing.T) {
|
||||
cases := []struct {
|
||||
s string
|
||||
pattern string
|
||||
result bool
|
||||
}{
|
||||
{"", "", true},
|
||||
{"", "*", true},
|
||||
{"", "**", true},
|
||||
{"", "?", true},
|
||||
{"", "??", true},
|
||||
{"", "?*", true},
|
||||
{"", "*?", true},
|
||||
{"", ".", false},
|
||||
{"", ".?", false},
|
||||
{"", "?.", false},
|
||||
{"", ".*", false},
|
||||
{"", "*.", false},
|
||||
{"", "*.?", false},
|
||||
{"", "?.*", false},
|
||||
|
||||
{"a", "", false},
|
||||
{"a", "a", true},
|
||||
{"a", "*", true},
|
||||
{"a", "**", true},
|
||||
{"a", "?", true},
|
||||
{"a", "??", true},
|
||||
{"a", ".", false},
|
||||
{"a", ".?", false},
|
||||
{"a", "?.", false},
|
||||
{"a", ".*", false},
|
||||
{"a", "*.", false},
|
||||
{"a", "*.?", false},
|
||||
{"a", "?.*", false},
|
||||
|
||||
{"match the exact string", "match the exact string", true},
|
||||
{"do not match a different string", "this is a different string", false},
|
||||
{"Match The Exact String WITH DIFFERENT CASE", "Match The Exact String WITH DIFFERENT CASE", true},
|
||||
{"do not match a different string WITH DIFFERENT CASE", "this is a different string WITH DIFFERENT CASE", false},
|
||||
{"Do Not Match The Exact String With Different Case", "do not match the exact string with different case", false},
|
||||
{"match an emoji 😃", "match an emoji 😃", true},
|
||||
{"do not match because of different emoji 😃", "do not match because of different emoji 😄", false},
|
||||
{"🌅☕️📰👨💼👩💼🏢🖥️💼💻📊📈📉👨👩👧👦🍝🕰️💪🏋️♂️🏋️♀️🏋️♂️💼🚴♂️🚴♀️🚴♂️🛀💤🌃", "🌅☕️📰👨💼👩💼🏢🖥️💼💻📊📈📉👨👩👧👦🍝🕰️💪🏋️♂️🏋️♀️🏋️♂️💼🚴♂️🚴♀️🚴♂️🛀💤🌃", true},
|
||||
{"🌅☕️📰👨💼👩💼🏢🖥️💼💻📊📈📉👨👩👧👦🍝🕰️💪🏋️♂️🏋️♀️🏋️♂️💼🚴♂️🚴♀️🚴♂️🛀💤🌃", "🦌🐇🦡🐿️🌲🌳🏰🌳🌲🌞🌧️❄️🌬️⛈️🔥🎄🎅🎁🎉🎊🥳👨👩👧👦💏👪💖👩💼🛀", false},
|
||||
|
||||
{"match a string with a *", "match a string *", true},
|
||||
{"match a string with a * at the beginning", "* at the beginning", true},
|
||||
{"match a string with two *", "match * with *", true},
|
||||
{"do not match a string with extra and a *", "do not match a string * with more", false},
|
||||
|
||||
{"match a string with a ?", "match ? string with a ?", true},
|
||||
{"match a string with a ? at the beginning", "?atch a string with a ? at the beginning", true},
|
||||
{"match a string with two ?", "match a string with two ??", true},
|
||||
{"match a optional char with a ?", "match a optional? char with a ?", true},
|
||||
{"match a optional char with a ?", "match a optional? char with a ?", true},
|
||||
{"do not match a string with extra and a ?", "do not match ? string with extra and a ? like this", false},
|
||||
|
||||
{"do not match a string with a .", "do not match . string with a .", false},
|
||||
{"do not match a string with a . at the beginning", "do not .atch a string with a . at the beginning", false},
|
||||
{"do not match a string with two .", "do not match a ..ring with two .", false},
|
||||
{"do not match a string with extra .", "do not match a string with extra ..", false},
|
||||
|
||||
{"A big brown fox jumps over the lazy dog, with all there wildcards friends", ". big?brown fox jumps over * wildcard. friend??", false},
|
||||
{"A big brown fox fails to jump over the lazy dog, with all there wildcards friends", ". big?brown fox jumps over * wildcard. friend??", false},
|
||||
|
||||
{"domain a.b.c", "domain a.b.c", true},
|
||||
{"domain adb.c", "domain a.b.c", false},
|
||||
{"aaaa", "a*a", true},
|
||||
}
|
||||
|
||||
for i, c := range cases {
|
||||
t.Run(c.s, func(t *testing.T) {
|
||||
result := Match(c.pattern, c.s)
|
||||
if c.result != result {
|
||||
t.Errorf("Test %d: Expected `%v`, found `%v`; With Pattern: `%s` and String: `%s`", i+1, c.result, result, c.pattern, c.s)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func FuzzMatch(f *testing.F) {
|
||||
f.Fuzz(func(t *testing.T, s string) {
|
||||
if !Match(string(s), string(s)) {
|
||||
t.Fatalf("%s does not match %s", s, s)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -1,7 +1,6 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
@@ -157,6 +156,7 @@ type DNS struct {
|
||||
EnhancedMode C.DNSMode
|
||||
DefaultNameserver []dns.NameServer
|
||||
CacheAlgorithm string
|
||||
CacheMaxSize int
|
||||
FakeIPRange *fakeip.Pool
|
||||
Hosts *trie.DomainTrie[resolver.HostValue]
|
||||
NameServerPolicy []dns.Policy
|
||||
@@ -224,6 +224,7 @@ type RawDNS struct {
|
||||
FakeIPFilterMode C.FilterMode `yaml:"fake-ip-filter-mode" json:"fake-ip-filter-mode"`
|
||||
DefaultNameserver []string `yaml:"default-nameserver" json:"default-nameserver"`
|
||||
CacheAlgorithm string `yaml:"cache-algorithm" json:"cache-algorithm"`
|
||||
CacheMaxSize int `yaml:"cache-max-size" json:"cache-max-size"`
|
||||
NameServerPolicy *orderedmap.OrderedMap[string, any] `yaml:"nameserver-policy" json:"nameserver-policy"`
|
||||
ProxyServerNameserver []string `yaml:"proxy-server-nameserver" json:"proxy-server-nameserver"`
|
||||
DirectNameServer []string `yaml:"direct-nameserver" json:"direct-nameserver"`
|
||||
@@ -297,6 +298,10 @@ type RawTun struct {
|
||||
Inet6RouteAddress []netip.Prefix `yaml:"inet6-route-address" json:"inet6-route-address,omitempty"`
|
||||
Inet4RouteExcludeAddress []netip.Prefix `yaml:"inet4-route-exclude-address" json:"inet4-route-exclude-address,omitempty"`
|
||||
Inet6RouteExcludeAddress []netip.Prefix `yaml:"inet6-route-exclude-address" json:"inet6-route-exclude-address,omitempty"`
|
||||
|
||||
// darwin special config
|
||||
RecvMsgX bool `yaml:"recvmsgx" json:"recvmsgx,omitempty"`
|
||||
SendMsgX bool `yaml:"sendmsgx" json:"sendmsgx,omitempty"`
|
||||
}
|
||||
|
||||
type RawTuicServer struct {
|
||||
@@ -514,6 +519,8 @@ func DefaultRawConfig() *RawConfig {
|
||||
AutoRoute: true,
|
||||
AutoDetectInterface: true,
|
||||
Inet6Address: []netip.Prefix{netip.MustParsePrefix("fdfe:dcba:9876::1/126")},
|
||||
RecvMsgX: true,
|
||||
SendMsgX: false, // In the current implementation, if enabled, the kernel may freeze during multi-thread downloads, so it is disabled by default.
|
||||
},
|
||||
TuicServer: RawTuicServer{
|
||||
Enable: false,
|
||||
@@ -838,8 +845,6 @@ func parseProxies(cfg *RawConfig) (proxies map[string]C.Proxy, providersMap map[
|
||||
AllProxies []string
|
||||
hasGlobal bool
|
||||
)
|
||||
proxiesList := list.New()
|
||||
groupsList := list.New()
|
||||
|
||||
proxies["DIRECT"] = adapter.NewProxy(outbound.NewDirect())
|
||||
proxies["REJECT"] = adapter.NewProxy(outbound.NewReject())
|
||||
@@ -861,7 +866,6 @@ func parseProxies(cfg *RawConfig) (proxies map[string]C.Proxy, providersMap map[
|
||||
proxies[proxy.Name()] = proxy
|
||||
proxyList = append(proxyList, proxy.Name())
|
||||
AllProxies = append(AllProxies, proxy.Name())
|
||||
proxiesList.PushBack(mapping)
|
||||
}
|
||||
|
||||
// keep the original order of ProxyGroups in config file
|
||||
@@ -874,7 +878,6 @@ func parseProxies(cfg *RawConfig) (proxies map[string]C.Proxy, providersMap map[
|
||||
hasGlobal = true
|
||||
}
|
||||
proxyList = append(proxyList, groupName)
|
||||
groupsList.PushBack(mapping)
|
||||
}
|
||||
|
||||
// check if any loop exists and sort the ProxyGroups
|
||||
@@ -1040,46 +1043,20 @@ func parseRules(rulesConfig []string, proxies map[string]C.Proxy, ruleProviders
|
||||
|
||||
// parse rules
|
||||
for idx, line := range rulesConfig {
|
||||
rule := trimArr(strings.Split(line, ","))
|
||||
var (
|
||||
payload string
|
||||
target string
|
||||
params []string
|
||||
ruleName = strings.ToUpper(rule[0])
|
||||
)
|
||||
|
||||
l := len(rule)
|
||||
|
||||
if ruleName == "NOT" || ruleName == "OR" || ruleName == "AND" || ruleName == "SUB-RULE" || ruleName == "DOMAIN-REGEX" || ruleName == "PROCESS-NAME-REGEX" || ruleName == "PROCESS-PATH-REGEX" {
|
||||
target = rule[l-1]
|
||||
payload = strings.Join(rule[1:l-1], ",")
|
||||
} else {
|
||||
if l < 2 {
|
||||
return nil, fmt.Errorf("%s[%d] [%s] error: format invalid", format, idx, line)
|
||||
}
|
||||
if l < 4 {
|
||||
rule = append(rule, make([]string, 4-l)...)
|
||||
}
|
||||
if ruleName == "MATCH" {
|
||||
l = 2
|
||||
}
|
||||
if l >= 3 {
|
||||
l = 3
|
||||
payload = rule[1]
|
||||
}
|
||||
target = rule[l-1]
|
||||
params = rule[l:]
|
||||
tp, payload, target, params := RC.ParseRulePayload(line, true)
|
||||
if target == "" {
|
||||
return nil, fmt.Errorf("%s[%d] [%s] error: format invalid", format, idx, line)
|
||||
}
|
||||
|
||||
if _, ok := proxies[target]; !ok {
|
||||
if ruleName != "SUB-RULE" {
|
||||
if tp != "SUB-RULE" {
|
||||
return nil, fmt.Errorf("%s[%d] [%s] error: proxy [%s] not found", format, idx, line, target)
|
||||
} else if _, ok = subRules[target]; !ok {
|
||||
return nil, fmt.Errorf("%s[%d] [%s] error: sub-rule [%s] not found", format, idx, line, target)
|
||||
}
|
||||
}
|
||||
|
||||
params = trimArr(params)
|
||||
parsed, parseErr := R.ParseRule(ruleName, payload, target, params, subRules)
|
||||
parsed, parseErr := R.ParseRule(tp, payload, target, params, subRules)
|
||||
if parseErr != nil {
|
||||
return nil, fmt.Errorf("%s[%d] [%s] error: %s", format, idx, line, parseErr.Error())
|
||||
}
|
||||
@@ -1377,6 +1354,8 @@ func parseDNS(rawCfg *RawConfig, hosts *trie.DomainTrie[resolver.HostValue], rul
|
||||
IPv6: cfg.IPv6,
|
||||
UseSystemHosts: cfg.UseSystemHosts,
|
||||
EnhancedMode: cfg.EnhancedMode,
|
||||
CacheAlgorithm: cfg.CacheAlgorithm,
|
||||
CacheMaxSize: cfg.CacheMaxSize,
|
||||
}
|
||||
var err error
|
||||
if dnsCfg.NameServer, err = parseNameServer(cfg.NameServer, cfg.RespectRules, cfg.PreferH3); err != nil {
|
||||
@@ -1510,12 +1489,6 @@ func parseDNS(rawCfg *RawConfig, hosts *trie.DomainTrie[resolver.HostValue], rul
|
||||
dnsCfg.Hosts = hosts
|
||||
}
|
||||
|
||||
if cfg.CacheAlgorithm == "" || cfg.CacheAlgorithm == "lru" {
|
||||
dnsCfg.CacheAlgorithm = "lru"
|
||||
} else {
|
||||
dnsCfg.CacheAlgorithm = "arc"
|
||||
}
|
||||
|
||||
return dnsCfg, nil
|
||||
}
|
||||
|
||||
@@ -1585,6 +1558,9 @@ func parseTun(rawTun RawTun, general *General) error {
|
||||
Inet6RouteAddress: rawTun.Inet6RouteAddress,
|
||||
Inet4RouteExcludeAddress: rawTun.Inet4RouteExcludeAddress,
|
||||
Inet6RouteExcludeAddress: rawTun.Inet6RouteExcludeAddress,
|
||||
|
||||
RecvMsgX: rawTun.RecvMsgX,
|
||||
SendMsgX: rawTun.SendMsgX,
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -6,19 +6,11 @@ import (
|
||||
"net/netip"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/metacubex/mihomo/adapter/outboundgroup"
|
||||
"github.com/metacubex/mihomo/common/structure"
|
||||
)
|
||||
|
||||
func trimArr(arr []string) (r []string) {
|
||||
for _, e := range arr {
|
||||
r = append(r, strings.Trim(e, " "))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Check if ProxyGroups form DAG(Directed Acyclic Graph), and sort all ProxyGroups by dependency order.
|
||||
// Meanwhile, record the original index in the config file.
|
||||
// If loop is detected, return an error with location of loop.
|
||||
|
||||
24
constant/features/goflags.go
Normal file
24
constant/features/goflags.go
Normal file
@@ -0,0 +1,24 @@
|
||||
package features
|
||||
|
||||
import "runtime/debug"
|
||||
|
||||
var (
|
||||
GOARM string
|
||||
GOMIPS string
|
||||
GOAMD64 string
|
||||
)
|
||||
|
||||
func init() {
|
||||
if info, ok := debug.ReadBuildInfo(); ok {
|
||||
for _, bs := range info.Settings {
|
||||
switch bs.Key {
|
||||
case "GOARM":
|
||||
GOARM = bs.Value
|
||||
case "GOMIPS":
|
||||
GOMIPS = bs.Value
|
||||
case "GOAMD64":
|
||||
GOAMD64 = bs.Value
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -6,6 +6,7 @@ const (
|
||||
DomainSuffix
|
||||
DomainKeyword
|
||||
DomainRegex
|
||||
DomainWildcard
|
||||
GEOSITE
|
||||
GEOIP
|
||||
SrcGEOIP
|
||||
@@ -48,6 +49,8 @@ func (rt RuleType) String() string {
|
||||
return "DomainKeyword"
|
||||
case DomainRegex:
|
||||
return "DomainRegex"
|
||||
case DomainWildcard:
|
||||
return "DomainWildcard"
|
||||
case GEOSITE:
|
||||
return "GeoSite"
|
||||
case GEOIP:
|
||||
|
||||
12
dns/doh.go
12
dns/doh.go
@@ -447,12 +447,12 @@ func (doh *dnsOverHTTPS) createTransport(ctx context.Context) (t http.RoundTripp
|
||||
return transport, nil
|
||||
}
|
||||
|
||||
// http3Transport is a wrapper over *http3.RoundTripper that tries to optimize
|
||||
// http3Transport is a wrapper over *http3.Transport that tries to optimize
|
||||
// its behavior. The main thing that it does is trying to force use a single
|
||||
// connection to a host instead of creating a new one all the time. It also
|
||||
// helps mitigate race issues with quic-go.
|
||||
type http3Transport struct {
|
||||
baseTransport *http3.RoundTripper
|
||||
baseTransport *http3.Transport
|
||||
|
||||
closed bool
|
||||
mu sync.RWMutex
|
||||
@@ -505,7 +505,7 @@ func (h *http3Transport) CloseIdleConnections() {
|
||||
// We should be able to fall back to H1/H2 in case if HTTP/3 is unavailable or
|
||||
// if it is too slow. In order to do that, this method will run two probes
|
||||
// in parallel (one for TLS, the other one for QUIC) and if QUIC is faster it
|
||||
// will create the *http3.RoundTripper instance.
|
||||
// will create the *http3.Transport instance.
|
||||
func (doh *dnsOverHTTPS) createTransportH3(
|
||||
ctx context.Context,
|
||||
tlsConfig *tls.Config,
|
||||
@@ -519,7 +519,7 @@ func (doh *dnsOverHTTPS) createTransportH3(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rt := &http3.RoundTripper{
|
||||
rt := &http3.Transport{
|
||||
Dial: func(
|
||||
ctx context.Context,
|
||||
|
||||
@@ -528,7 +528,7 @@ func (doh *dnsOverHTTPS) createTransportH3(
|
||||
_ string,
|
||||
tlsCfg *tlsC.Config,
|
||||
cfg *quic.Config,
|
||||
) (c quic.EarlyConnection, err error) {
|
||||
) (c *quic.Conn, err error) {
|
||||
return doh.dialQuic(ctx, addr, tlsCfg, cfg)
|
||||
},
|
||||
DisableCompression: true,
|
||||
@@ -539,7 +539,7 @@ func (doh *dnsOverHTTPS) createTransportH3(
|
||||
return &http3Transport{baseTransport: rt}, nil
|
||||
}
|
||||
|
||||
func (doh *dnsOverHTTPS) dialQuic(ctx context.Context, addr string, tlsCfg *tlsC.Config, cfg *quic.Config) (quic.EarlyConnection, error) {
|
||||
func (doh *dnsOverHTTPS) dialQuic(ctx context.Context, addr string, tlsCfg *tlsC.Config, cfg *quic.Config) (*quic.Conn, error) {
|
||||
ip, port, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
18
dns/doq.go
18
dns/doq.go
@@ -53,7 +53,7 @@ type dnsOverQUIC struct {
|
||||
|
||||
// conn is the current active QUIC connection. It can be closed and
|
||||
// re-opened when needed.
|
||||
conn quic.Connection
|
||||
conn *quic.Conn
|
||||
connMu sync.RWMutex
|
||||
|
||||
// bytesPool is a *sync.Pool we use to store byte buffers in. These byte
|
||||
@@ -157,7 +157,7 @@ func (doq *dnsOverQUIC) ResetConnection() {
|
||||
// exchangeQUIC attempts to open a QUIC connection, send the DNS message
|
||||
// through it and return the response it got from the server.
|
||||
func (doq *dnsOverQUIC) exchangeQUIC(ctx context.Context, msg *D.Msg) (resp *D.Msg, err error) {
|
||||
var conn quic.Connection
|
||||
var conn *quic.Conn
|
||||
conn, err = doq.getConnection(ctx, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -169,7 +169,7 @@ func (doq *dnsOverQUIC) exchangeQUIC(ctx context.Context, msg *D.Msg) (resp *D.M
|
||||
return nil, fmt.Errorf("failed to pack DNS message for DoQ: %w", err)
|
||||
}
|
||||
|
||||
var stream quic.Stream
|
||||
var stream *quic.Stream
|
||||
stream, err = doq.openStream(ctx, conn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -222,12 +222,12 @@ func (doq *dnsOverQUIC) getBytesPool() (pool *sync.Pool) {
|
||||
return doq.bytesPool
|
||||
}
|
||||
|
||||
// getConnection opens or returns an existing quic.Connection. useCached
|
||||
// getConnection opens or returns an existing *quic.Conn. useCached
|
||||
// argument controls whether we should try to use the existing cached
|
||||
// connection. If it is false, we will forcibly create a new connection and
|
||||
// close the existing one if needed.
|
||||
func (doq *dnsOverQUIC) getConnection(ctx context.Context, useCached bool) (quic.Connection, error) {
|
||||
var conn quic.Connection
|
||||
func (doq *dnsOverQUIC) getConnection(ctx context.Context, useCached bool) (*quic.Conn, error) {
|
||||
var conn *quic.Conn
|
||||
doq.connMu.RLock()
|
||||
conn = doq.conn
|
||||
if conn != nil && useCached {
|
||||
@@ -282,7 +282,7 @@ func (doq *dnsOverQUIC) resetQUICConfig() {
|
||||
}
|
||||
|
||||
// openStream opens a new QUIC stream for the specified connection.
|
||||
func (doq *dnsOverQUIC) openStream(ctx context.Context, conn quic.Connection) (quic.Stream, error) {
|
||||
func (doq *dnsOverQUIC) openStream(ctx context.Context, conn *quic.Conn) (*quic.Stream, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
@@ -302,7 +302,7 @@ func (doq *dnsOverQUIC) openStream(ctx context.Context, conn quic.Connection) (q
|
||||
}
|
||||
|
||||
// openConnection opens a new QUIC connection.
|
||||
func (doq *dnsOverQUIC) openConnection(ctx context.Context) (conn quic.Connection, err error) {
|
||||
func (doq *dnsOverQUIC) openConnection(ctx context.Context) (conn *quic.Conn, err error) {
|
||||
// we're using bootstrapped address instead of what's passed to the function
|
||||
// it does not create an actual connection, but it helps us determine
|
||||
// what IP is actually reachable (when there're v4/v6 addresses).
|
||||
@@ -382,7 +382,7 @@ func (doq *dnsOverQUIC) closeConnWithError(err error) {
|
||||
}
|
||||
|
||||
// readMsg reads the incoming DNS message from the QUIC stream.
|
||||
func (doq *dnsOverQUIC) readMsg(stream quic.Stream) (m *D.Msg, err error) {
|
||||
func (doq *dnsOverQUIC) readMsg(stream *quic.Stream) (m *D.Msg, err error) {
|
||||
pool := doq.getBytesPool()
|
||||
bufPtr := pool.Get().(*[]byte)
|
||||
|
||||
|
||||
@@ -9,12 +9,7 @@ import (
|
||||
var systemResolver []dnsClient
|
||||
|
||||
func FlushCacheWithDefaultResolver() {
|
||||
if r := resolver.DefaultResolver; r != nil {
|
||||
r.ClearCache()
|
||||
}
|
||||
if r := resolver.SystemResolver; r != nil {
|
||||
r.ClearCache()
|
||||
}
|
||||
resolver.ClearCache()
|
||||
resolver.ResetConnection()
|
||||
}
|
||||
|
||||
|
||||
@@ -459,13 +459,18 @@ type Config struct {
|
||||
Hosts *trie.DomainTrie[resolver.HostValue]
|
||||
Policy []Policy
|
||||
CacheAlgorithm string
|
||||
CacheMaxSize int
|
||||
}
|
||||
|
||||
func (config Config) newCache() dnsCache {
|
||||
if config.CacheAlgorithm == "" || config.CacheAlgorithm == "lru" {
|
||||
return lru.New(lru.WithSize[string, *D.Msg](4096), lru.WithStale[string, *D.Msg](true))
|
||||
} else {
|
||||
return arc.New(arc.WithSize[string, *D.Msg](4096))
|
||||
if config.CacheMaxSize == 0 {
|
||||
config.CacheMaxSize = 4096
|
||||
}
|
||||
switch config.CacheAlgorithm {
|
||||
case "arc":
|
||||
return arc.New(arc.WithSize[string, *D.Msg](config.CacheMaxSize))
|
||||
default:
|
||||
return lru.New(lru.WithSize[string, *D.Msg](config.CacheMaxSize), lru.WithStale[string, *D.Msg](true))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
18
dns/util.go
18
dns/util.go
@@ -231,8 +231,7 @@ func batchExchange(ctx context.Context, clients []dnsClient, m *D.Msg) (msg *D.M
|
||||
fast, ctx := picker.WithTimeout[*D.Msg](ctx, resolver.DefaultDNSTimeout)
|
||||
defer fast.Close()
|
||||
domain := msgToDomain(m)
|
||||
qType, qTypeStr := msgToQtype(m)
|
||||
var noIpMsg *D.Msg
|
||||
_, qTypeStr := msgToQtype(m)
|
||||
for _, client := range clients {
|
||||
if _, isRCodeClient := client.(rcodeClient); isRCodeClient {
|
||||
msg, err = client.ExchangeContext(ctx, m)
|
||||
@@ -251,27 +250,12 @@ func batchExchange(ctx context.Context, clients []dnsClient, m *D.Msg) (msg *D.M
|
||||
}
|
||||
ips := msgToIP(m)
|
||||
log.Debugln("[DNS] %s --> %s %s from %s", domain, ips, qTypeStr, client.Address())
|
||||
switch qType {
|
||||
case D.TypeAAAA:
|
||||
if len(ips) == 0 {
|
||||
noIpMsg = m
|
||||
return nil, resolver.ErrIPNotFound
|
||||
}
|
||||
case D.TypeA:
|
||||
if len(ips) == 0 {
|
||||
noIpMsg = m
|
||||
return nil, resolver.ErrIPNotFound
|
||||
}
|
||||
}
|
||||
return m, nil
|
||||
})
|
||||
}
|
||||
|
||||
msg = fast.Wait()
|
||||
if msg == nil {
|
||||
if noIpMsg != nil {
|
||||
return noIpMsg, false, nil
|
||||
}
|
||||
err = errors.New("all DNS requests failed")
|
||||
if fErr := fast.Error(); fErr != nil {
|
||||
err = fmt.Errorf("%w, first error: %w", err, fErr)
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
os="mihomo-linux-"
|
||||
case $TARGETPLATFORM in
|
||||
"linux/amd64")
|
||||
arch="amd64-compatible"
|
||||
arch="amd64-v1"
|
||||
;;
|
||||
"linux/386")
|
||||
arch="386"
|
||||
|
||||
@@ -1119,6 +1119,7 @@ rules:
|
||||
- DOMAIN-REGEX,^abc,DIRECT
|
||||
- DOMAIN-SUFFIX,baidu.com,DIRECT
|
||||
- DOMAIN-KEYWORD,google,ss1
|
||||
- DOMAIN-WILDCARD,test.*.mihomo.com,ss1
|
||||
- IP-CIDR,1.1.1.1/32,ss1
|
||||
- IP-CIDR6,2409::/64,DIRECT
|
||||
# 当满足条件是 TCP 或 UDP 流量时,使用名为 sub-rule-name1 的规则集
|
||||
|
||||
29
go.mod
29
go.mod
@@ -7,45 +7,43 @@ require (
|
||||
github.com/bahlo/generic-list-go v0.2.0
|
||||
github.com/coreos/go-iptables v0.8.0
|
||||
github.com/dlclark/regexp2 v1.11.5
|
||||
github.com/enfein/mieru/v3 v3.13.0
|
||||
github.com/go-chi/chi/v5 v5.2.1
|
||||
github.com/enfein/mieru/v3 v3.16.1
|
||||
github.com/go-chi/chi/v5 v5.2.2
|
||||
github.com/go-chi/render v1.0.3
|
||||
github.com/gobwas/ws v1.4.0
|
||||
github.com/gofrs/uuid/v5 v5.3.2
|
||||
github.com/insomniacslk/dhcp v0.0.0-20250109001534-8abf58130905
|
||||
github.com/klauspost/compress v1.17.9 // lastest version compatible with golang1.20
|
||||
github.com/klauspost/cpuid/v2 v2.2.9 // lastest version compatible with golang1.20
|
||||
github.com/lunixbochs/struc v0.0.0-20200707160740-784aaebc1d40
|
||||
github.com/mdlayher/netlink v1.7.2
|
||||
github.com/metacubex/amneziawg-go v0.0.0-20240922133038-fdf3a4d5a4ab
|
||||
github.com/metacubex/bart v0.20.5
|
||||
github.com/metacubex/bbolt v0.0.0-20240822011022-aed6d4850399
|
||||
github.com/metacubex/bbolt v0.0.0-20250725135710-010dbbbb7a5b
|
||||
github.com/metacubex/chacha v0.1.5
|
||||
github.com/metacubex/fswatch v0.1.1
|
||||
github.com/metacubex/gopacket v1.1.20-0.20230608035415-7e2f98a3e759
|
||||
github.com/metacubex/quic-go v0.52.1-0.20250522021943-aef454b9e639
|
||||
github.com/metacubex/quic-go v0.53.1-0.20250628094454-fda5262d1d9c
|
||||
github.com/metacubex/randv2 v0.2.0
|
||||
github.com/metacubex/sing v0.5.4-0.20250605054047-54dc6097da29
|
||||
github.com/metacubex/sing v0.5.4
|
||||
github.com/metacubex/sing-mux v0.3.2
|
||||
github.com/metacubex/sing-quic v0.0.0-20250523120938-f1a248e5ec7f
|
||||
github.com/metacubex/sing-shadowsocks v0.2.11-0.20250621023810-0e9ef9dd0c92
|
||||
github.com/metacubex/sing-shadowsocks2 v0.2.5-0.20250621023950-93d605a2143d
|
||||
github.com/metacubex/sing-quic v0.0.0-20250718154553-1b193bec4cbb
|
||||
github.com/metacubex/sing-shadowsocks v0.2.11
|
||||
github.com/metacubex/sing-shadowsocks2 v0.2.5
|
||||
github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2
|
||||
github.com/metacubex/sing-tun v0.4.7-0.20250611091011-60774779fdd8
|
||||
github.com/metacubex/sing-vmess v0.2.2
|
||||
github.com/metacubex/sing-tun v0.4.7-0.20250721020617-8e7c37ed3d97
|
||||
github.com/metacubex/sing-vmess v0.2.3
|
||||
github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f
|
||||
github.com/metacubex/smux v0.0.0-20250503055512-501391591dee
|
||||
github.com/metacubex/tfo-go v0.0.0-20250516165257-e29c16ae41d4
|
||||
github.com/metacubex/utls v1.7.4-0.20250610022031-808d767c8c73
|
||||
github.com/metacubex/utls v1.8.0
|
||||
github.com/metacubex/wireguard-go v0.0.0-20240922131502-c182e7471181
|
||||
github.com/miekg/dns v1.1.63 // lastest version compatible with golang1.20
|
||||
github.com/mroth/weightedrand/v2 v2.1.0
|
||||
github.com/openacid/low v0.1.21
|
||||
github.com/oschwald/maxminddb-golang v1.12.0 // lastest version compatible with golang1.20
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1
|
||||
github.com/sagernet/cors v1.2.1
|
||||
github.com/sagernet/netlink v0.0.0-20240612041022-b9a21c07ac6a
|
||||
github.com/samber/lo v1.50.0
|
||||
github.com/samber/lo v1.51.0
|
||||
github.com/shirou/gopsutil/v4 v4.25.1 // lastest version compatible with golang1.20
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/stretchr/testify v1.10.0
|
||||
@@ -70,7 +68,6 @@ require (
|
||||
github.com/ajg/form v1.5.1 // indirect
|
||||
github.com/andybalholm/brotli v1.0.6 // indirect
|
||||
github.com/buger/jsonparser v1.1.1 // indirect
|
||||
github.com/cloudflare/circl v1.3.7 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/ebitengine/purego v0.8.3 // indirect
|
||||
github.com/ericlagergren/aegis v0.0.0-20250325060835-cd0defd64358 // indirect
|
||||
@@ -88,6 +85,8 @@ require (
|
||||
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect
|
||||
github.com/hashicorp/yamux v0.1.2 // indirect
|
||||
github.com/josharian/native v1.1.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.9 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mdlayher/socket v0.4.1 // indirect
|
||||
|
||||
62
go.sum
62
go.sum
@@ -17,10 +17,9 @@ github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx2
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
|
||||
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
|
||||
github.com/coreos/go-iptables v0.8.0 h1:MPc2P89IhuVpLI7ETL/2tx3XZ61VeICZjYqDEgNsPRc=
|
||||
github.com/coreos/go-iptables v0.8.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -28,8 +27,8 @@ github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZ
|
||||
github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
|
||||
github.com/ebitengine/purego v0.8.3 h1:K+0AjQp63JEZTEMZiwsI9g0+hAMNohwUOtY0RPGexmc=
|
||||
github.com/ebitengine/purego v0.8.3/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/enfein/mieru/v3 v3.13.0 h1:eGyxLGkb+lut9ebmx+BGwLJ5UMbEc/wGIYO0AXEKy98=
|
||||
github.com/enfein/mieru/v3 v3.13.0/go.mod h1:zJBUCsi5rxyvHM8fjFf+GLaEl4OEjjBXr1s5F6Qd3hM=
|
||||
github.com/enfein/mieru/v3 v3.16.1 h1:CfIt1pQCCQbohkw+HBD2o8V9tnhZvB5yuXGGQIXTLOs=
|
||||
github.com/enfein/mieru/v3 v3.16.1/go.mod h1:zJBUCsi5rxyvHM8fjFf+GLaEl4OEjjBXr1s5F6Qd3hM=
|
||||
github.com/ericlagergren/aegis v0.0.0-20250325060835-cd0defd64358 h1:kXYqH/sL8dS/FdoFjr12ePjnLPorPo2FsnrHNuXSDyo=
|
||||
github.com/ericlagergren/aegis v0.0.0-20250325060835-cd0defd64358/go.mod h1:hkIFzoiIPZYxdFOOLyDho59b7SrDfo+w3h+yWdlg45I=
|
||||
github.com/ericlagergren/polyval v0.0.0-20220411101811-e25bc10ba391 h1:8j2RH289RJplhA6WfdaPqzg1MjH2K8wX5e0uhAxrw2g=
|
||||
@@ -43,8 +42,8 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/gaukas/godicttls v0.0.4 h1:NlRaXb3J6hAnTmWdsEKb9bcSBD6BvcIjdGdeb0zfXbk=
|
||||
github.com/gaukas/godicttls v0.0.4/go.mod h1:l6EenT4TLWgTdwslVb4sEMOCf7Bv0JAK67deKr9/NCI=
|
||||
github.com/go-chi/chi/v5 v5.2.1 h1:KOIHODQj58PmL80G2Eak4WdvUzjSJSm0vG72crDCqb8=
|
||||
github.com/go-chi/chi/v5 v5.2.1/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
|
||||
github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618=
|
||||
github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
|
||||
github.com/go-chi/render v1.0.3 h1:AsXqd2a1/INaIfUSKq3G5uA8weYx20FOsM7uSoCyyt4=
|
||||
github.com/go-chi/render v1.0.3/go.mod h1:/gr3hVkmYR0YlEy3LxCuVRFzEu9Ruok+gFqbIofjao0=
|
||||
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
|
||||
@@ -84,7 +83,9 @@ github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=
|
||||
github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
||||
github.com/lunixbochs/struc v0.0.0-20200707160740-784aaebc1d40 h1:EnfXoSqDfSNJv0VBNqY/88RNnhSGYkrHaO0mmFGbVsc=
|
||||
@@ -99,8 +100,8 @@ github.com/metacubex/amneziawg-go v0.0.0-20240922133038-fdf3a4d5a4ab h1:Chbw+/31
|
||||
github.com/metacubex/amneziawg-go v0.0.0-20240922133038-fdf3a4d5a4ab/go.mod h1:xVKK8jC5Sd3hfh7WjmCq+HorehIbrBijaUWmcuKjPcI=
|
||||
github.com/metacubex/bart v0.20.5 h1:XkgLZ17QxfxkqKdGsojoM2Zu01mmHyyQSFzt2/calTM=
|
||||
github.com/metacubex/bart v0.20.5/go.mod h1:DCcyfP4MC+Zy7sLK7XeGuMw+P5K9mIRsYOBgiE8icsI=
|
||||
github.com/metacubex/bbolt v0.0.0-20240822011022-aed6d4850399 h1:oBowHVKZycNtAFbZ6avaCSZJYeme2Nrj+4RpV2cNJig=
|
||||
github.com/metacubex/bbolt v0.0.0-20240822011022-aed6d4850399/go.mod h1:4xcieuIK+M4bGQmQYZVqEaIYqjS1ahO4kXG7EmDgEro=
|
||||
github.com/metacubex/bbolt v0.0.0-20250725135710-010dbbbb7a5b h1:j7dadXD8I2KTmMt8jg1JcaP1ANL3JEObJPdANKcSYPY=
|
||||
github.com/metacubex/bbolt v0.0.0-20250725135710-010dbbbb7a5b/go.mod h1:+WmP0VJZDkDszvpa83HzfUp6QzARl/IKkMorH4+nODw=
|
||||
github.com/metacubex/chacha v0.1.5 h1:fKWMb/5c7ZrY8Uoqi79PPFxl+qwR7X/q0OrsAubyX2M=
|
||||
github.com/metacubex/chacha v0.1.5/go.mod h1:Djn9bPZxLTXbJFSeyo0/qzEzQI+gUSSzttuzZM75GH8=
|
||||
github.com/metacubex/fswatch v0.1.1 h1:jqU7C/v+g0qc2RUFgmAOPoVvfl2BXXUXEumn6oQuxhU=
|
||||
@@ -111,46 +112,41 @@ github.com/metacubex/gvisor v0.0.0-20250324165734-5857f47bd43b h1:RUh4OdVPz/jDrM
|
||||
github.com/metacubex/gvisor v0.0.0-20250324165734-5857f47bd43b/go.mod h1:8LpS0IJW1VmWzUm3ylb0e2SK5QDm5lO/2qwWLZgRpBU=
|
||||
github.com/metacubex/nftables v0.0.0-20250503052935-30a69ab87793 h1:1Qpuy+sU3DmyX9HwI+CrBT/oLNJngvBorR2RbajJcqo=
|
||||
github.com/metacubex/nftables v0.0.0-20250503052935-30a69ab87793/go.mod h1:RjRNb4G52yAgfR+Oe/kp9G4PJJ97Fnj89eY1BFO3YyA=
|
||||
github.com/metacubex/quic-go v0.52.1-0.20250522021943-aef454b9e639 h1:L+1brQNzBhCCxWlicwfK1TlceemCRmrDE4HmcVHc29w=
|
||||
github.com/metacubex/quic-go v0.52.1-0.20250522021943-aef454b9e639/go.mod h1:Kc6h++Q/zf3AxcUCevJhJwgrskJumv+pZdR8g/E/10k=
|
||||
github.com/metacubex/quic-go v0.53.1-0.20250628094454-fda5262d1d9c h1:ABQzmOaZddM3q0OYeoZEc0XF+KW+dUdPNvY/c5rsunI=
|
||||
github.com/metacubex/quic-go v0.53.1-0.20250628094454-fda5262d1d9c/go.mod h1:eWlAK3zsKI0P8UhYpXlIsl3mtW4D6MpMNuYLIu8CKWI=
|
||||
github.com/metacubex/randv2 v0.2.0 h1:uP38uBvV2SxYfLj53kuvAjbND4RUDfFJjwr4UigMiLs=
|
||||
github.com/metacubex/randv2 v0.2.0/go.mod h1:kFi2SzrQ5WuneuoLLCMkABtiBu6VRrMrWFqSPyj2cxY=
|
||||
github.com/metacubex/sing v0.5.2/go.mod h1:ypf0mjwlZm0sKdQSY+yQvmsbWa0hNPtkeqyRMGgoN+w=
|
||||
github.com/metacubex/sing v0.5.4-0.20250605054047-54dc6097da29 h1:SD9q025FNTaepuFXFOKDhnGLVu6PQYChBvw2ZYPXeLo=
|
||||
github.com/metacubex/sing v0.5.4-0.20250605054047-54dc6097da29/go.mod h1:ypf0mjwlZm0sKdQSY+yQvmsbWa0hNPtkeqyRMGgoN+w=
|
||||
github.com/metacubex/sing v0.5.4 h1:a4kAOZmF+OXosbzPEcrSc5QD35/ex+MNuZsrcuWskHk=
|
||||
github.com/metacubex/sing v0.5.4/go.mod h1:ypf0mjwlZm0sKdQSY+yQvmsbWa0hNPtkeqyRMGgoN+w=
|
||||
github.com/metacubex/sing-mux v0.3.2 h1:nJv52pyRivHcaZJKk2JgxpaVvj1GAXG81scSa9N7ncw=
|
||||
github.com/metacubex/sing-mux v0.3.2/go.mod h1:3rt1soewn0O6j89GCLmwAQFsq257u0jf2zQSPhTL3Bw=
|
||||
github.com/metacubex/sing-quic v0.0.0-20250523120938-f1a248e5ec7f h1:mP3vIm+9hRFI0C0Vl3pE0NESF/L85FDbuB0tGgUii6I=
|
||||
github.com/metacubex/sing-quic v0.0.0-20250523120938-f1a248e5ec7f/go.mod h1:JPTpf7fpnojsSuwRJExhSZSy63pVbp3VM39+zj+sAJM=
|
||||
github.com/metacubex/sing-shadowsocks v0.2.11-0.20250621021503-4f85ef9bf4b3 h1:dtiRj7WaCAXp4UhCkmaIiFF6v886qXiuqeIDN4Z//9E=
|
||||
github.com/metacubex/sing-shadowsocks v0.2.11-0.20250621021503-4f85ef9bf4b3/go.mod h1:/squZ38pXrYjqtg8qn+joVvwbpGNYQNp8yxKsMVbCto=
|
||||
github.com/metacubex/sing-shadowsocks v0.2.11-0.20250621023810-0e9ef9dd0c92 h1:Y9ebcKya6ow7VHoESCN5+l4zZvg5eaL2IhI5LLCQxQA=
|
||||
github.com/metacubex/sing-shadowsocks v0.2.11-0.20250621023810-0e9ef9dd0c92/go.mod h1:/squZ38pXrYjqtg8qn+joVvwbpGNYQNp8yxKsMVbCto=
|
||||
github.com/metacubex/sing-shadowsocks2 v0.2.5-0.20250621021638-dcd503063651 h1:vwLj0DDjPYy4AHEZvfRVf8ih52o6wpBnJxXxqa+ztmE=
|
||||
github.com/metacubex/sing-shadowsocks2 v0.2.5-0.20250621021638-dcd503063651/go.mod h1:+ukTd0OPFglT3bnKAYTJWYPbuox6HYNXE235r5tHdUk=
|
||||
github.com/metacubex/sing-shadowsocks2 v0.2.5-0.20250621023950-93d605a2143d h1:Ey3A1tA8lVkRbK1FDmwuWj/57Nr8JMdpoVqe45mFzJg=
|
||||
github.com/metacubex/sing-shadowsocks2 v0.2.5-0.20250621023950-93d605a2143d/go.mod h1:+ukTd0OPFglT3bnKAYTJWYPbuox6HYNXE235r5tHdUk=
|
||||
github.com/metacubex/sing-quic v0.0.0-20250718154553-1b193bec4cbb h1:U/m3h8lp/j7i8zFgfvScLdZa1/Y8dd74oO7iZaQq80s=
|
||||
github.com/metacubex/sing-quic v0.0.0-20250718154553-1b193bec4cbb/go.mod h1:B60FxaPHjR1SeQB0IiLrgwgvKsaoASfOWdiqhLjmMGA=
|
||||
github.com/metacubex/sing-shadowsocks v0.2.11 h1:p2NGNOdF95e6XvdDKipLj1FRRqR8dnbfC/7pw2CCTlw=
|
||||
github.com/metacubex/sing-shadowsocks v0.2.11/go.mod h1:bT1PCTV316zFnlToRMk5zt9HmIQYRBveiT71mplYPfc=
|
||||
github.com/metacubex/sing-shadowsocks2 v0.2.5 h1:MnPn0hbcDkSJt6TlpI15XImHKK6IqaOwBUGPKyMnJnE=
|
||||
github.com/metacubex/sing-shadowsocks2 v0.2.5/go.mod h1:Zyh+rAQRyevYfG/COCvDs1c/YMhGqCuknn7QrGmoQIw=
|
||||
github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2 h1:gXU+MYPm7Wme3/OAY2FFzVq9d9GxPHOqu5AQfg/ddhI=
|
||||
github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2/go.mod h1:mbfboaXauKJNIHJYxQRa+NJs4JU9NZfkA+I33dS2+9E=
|
||||
github.com/metacubex/sing-tun v0.4.7-0.20250611091011-60774779fdd8 h1:4zWKqxTx75TbfW2EmlQ3hxM6RTRg2PYOAVMCnU4I61I=
|
||||
github.com/metacubex/sing-tun v0.4.7-0.20250611091011-60774779fdd8/go.mod h1:2YywXPWW8Z97kTH7RffOeykKzU+l0aiKlglWV1PAS64=
|
||||
github.com/metacubex/sing-vmess v0.2.2 h1:nG6GIKF1UOGmlzs+BIetdGHkFZ20YqFVIYp5Htqzp+4=
|
||||
github.com/metacubex/sing-vmess v0.2.2/go.mod h1:CVDNcdSLVYFgTHQlubr88d8CdqupAUDqLjROos+H9xk=
|
||||
github.com/metacubex/sing-tun v0.4.7-0.20250721020617-8e7c37ed3d97 h1:YYpc60UZE2G0pUeHbRw9erDrUDZrPQy8QzWFqA3kHsk=
|
||||
github.com/metacubex/sing-tun v0.4.7-0.20250721020617-8e7c37ed3d97/go.mod h1:2YywXPWW8Z97kTH7RffOeykKzU+l0aiKlglWV1PAS64=
|
||||
github.com/metacubex/sing-vmess v0.2.3 h1:QKLdIk5A2FcR3Y7m2/JO1XhfzgDA8tF4W9/ffsH9opo=
|
||||
github.com/metacubex/sing-vmess v0.2.3/go.mod h1:21R5R1u90uUvBQF0owoooEu96/SAYYD56nDrwm6nFaM=
|
||||
github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f h1:Sr/DYKYofKHKc4GF3qkRGNuj6XA6c0eqPgEDN+VAsYU=
|
||||
github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f/go.mod h1:jpAkVLPnCpGSfNyVmj6Cq4YbuZsFepm/Dc+9BAOcR80=
|
||||
github.com/metacubex/smux v0.0.0-20250503055512-501391591dee h1:lp6hJ+4wCLZu113awp7P6odM2okB5s60HUyF0FMqKmo=
|
||||
github.com/metacubex/smux v0.0.0-20250503055512-501391591dee/go.mod h1:4bPD8HWx9jPJ9aE4uadgyN7D1/Wz3KmPy+vale8sKLE=
|
||||
github.com/metacubex/tfo-go v0.0.0-20250516165257-e29c16ae41d4 h1:j1VRTiC9JLR4nUbSikx9OGdu/3AgFDqgcLj4GoqyQkc=
|
||||
github.com/metacubex/tfo-go v0.0.0-20250516165257-e29c16ae41d4/go.mod h1:l9oLnLoEXyGZ5RVLsh7QCC5XsouTUyKk4F2nLm2DHLw=
|
||||
github.com/metacubex/utls v1.7.4-0.20250610022031-808d767c8c73 h1:HWKsf92BqLYqugATFIJ3hYiEBZ7JF6AoqyvqF39afuI=
|
||||
github.com/metacubex/utls v1.7.4-0.20250610022031-808d767c8c73/go.mod h1:oknYT0qTOwE4hjPmZOEpzVdefnW7bAdGLvZcqmk4TLU=
|
||||
github.com/metacubex/utls v1.8.0 h1:mSYi6FMnmc5riARl5UZDmWVy710z+P5b7xuGW0lV9ac=
|
||||
github.com/metacubex/utls v1.8.0/go.mod h1:FdjYzVfCtgtna19hX0ER1Xsa5uJInwdQ4IcaaI98lEQ=
|
||||
github.com/metacubex/wireguard-go v0.0.0-20240922131502-c182e7471181 h1:hJLQviGySBuaynlCwf/oYgIxbVbGRUIKZCxdya9YrbQ=
|
||||
github.com/metacubex/wireguard-go v0.0.0-20240922131502-c182e7471181/go.mod h1:phewKljNYiTVT31Gcif8RiCKnTUOgVWFJjccqYM8s+Y=
|
||||
github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
|
||||
github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
|
||||
github.com/mroth/weightedrand/v2 v2.1.0 h1:o1ascnB1CIVzsqlfArQQjeMy1U0NcIbBO5rfd5E/OeU=
|
||||
github.com/mroth/weightedrand/v2 v2.1.0/go.mod h1:f2faGsfOGOwc1p94wzHKKZyTpcJUW7OJ/9U4yfiNAOU=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
|
||||
github.com/oasisprotocol/deoxysii v0.0.0-20220228165953-2091330c22b7 h1:1102pQc2SEPp5+xrS26wEaeb26sZy6k9/ZXlZN+eXE4=
|
||||
github.com/oasisprotocol/deoxysii v0.0.0-20220228165953-2091330c22b7/go.mod h1:UqoUn6cHESlliMhOnKLWr+CBH+e3bazUPvFj1XZwAjs=
|
||||
github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
|
||||
@@ -171,16 +167,14 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
|
||||
github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
|
||||
github.com/sagernet/cors v1.2.1 h1:Cv5Z8y9YSD6Gm+qSpNrL3LO4lD3eQVvbFYJSG7JCMHQ=
|
||||
github.com/sagernet/cors v1.2.1/go.mod h1:O64VyOjjhrkLmQIjF4KGRrJO/5dVXFdpEmCW/eISRAI=
|
||||
github.com/sagernet/netlink v0.0.0-20240612041022-b9a21c07ac6a h1:ObwtHN2VpqE0ZNjr6sGeT00J8uU7JF4cNUdb44/Duis=
|
||||
github.com/sagernet/netlink v0.0.0-20240612041022-b9a21c07ac6a/go.mod h1:xLnfdiJbSp8rNqYEdIW/6eDO4mVoogml14Bh2hSiFpM=
|
||||
github.com/samber/lo v1.50.0 h1:XrG0xOeHs+4FQ8gJR97zDz5uOFMW7OwFWiFVzqopKgY=
|
||||
github.com/samber/lo v1.50.0/go.mod h1:RjZyNk6WSnUFRKK6EyOhsRJMqft3G+pg7dCWHQCWvsc=
|
||||
github.com/samber/lo v1.51.0 h1:kysRYLbHy/MB7kQZf5DSN50JHmMsNEdeY24VzJFu7wI=
|
||||
github.com/samber/lo v1.51.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
|
||||
github.com/shirou/gopsutil/v4 v4.25.1 h1:QSWkTc+fu9LTAWfkZwZ6j8MSUk4A2LV7rbH0ZqmLjXs=
|
||||
github.com/shirou/gopsutil/v4 v4.25.1/go.mod h1:RoUCUpndaJFtT+2zsZzzmhvbfGoDCJ7nFXKJf8GqJbI=
|
||||
github.com/sina-ghaderi/poly1305 v0.0.0-20220724002748-c5926b03988b h1:rXHg9GrUEtWZhEkrykicdND3VPjlVbYiLdX9J7gimS8=
|
||||
@@ -281,7 +275,7 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
|
||||
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
|
||||
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
@@ -260,6 +260,7 @@ func updateDNS(c *config.DNS, generalIPv6 bool) {
|
||||
DirectServer: c.DirectNameServer,
|
||||
DirectFollowPolicy: c.DirectFollowPolicy,
|
||||
CacheAlgorithm: c.CacheAlgorithm,
|
||||
CacheMaxSize: c.CacheMaxSize,
|
||||
}
|
||||
|
||||
r := dns.NewResolver(cfg)
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
func cacheRouter() http.Handler {
|
||||
r := chi.NewRouter()
|
||||
r.Post("/fakeip/flush", flushFakeIPPool)
|
||||
r.Post("/dns/flush", flushDnsCache)
|
||||
return r
|
||||
}
|
||||
|
||||
@@ -24,3 +25,8 @@ func flushFakeIPPool(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
render.NoContent(w, r)
|
||||
}
|
||||
|
||||
func flushDnsCache(w http.ResponseWriter, r *http.Request) {
|
||||
resolver.ClearCache()
|
||||
render.NoContent(w, r)
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/metacubex/mihomo/adapter/inbound"
|
||||
"github.com/metacubex/mihomo/component/dialer"
|
||||
"github.com/metacubex/mihomo/component/process"
|
||||
"github.com/metacubex/mihomo/component/resolver"
|
||||
"github.com/metacubex/mihomo/component/updater"
|
||||
"github.com/metacubex/mihomo/config"
|
||||
@@ -33,28 +34,29 @@ func configRouter() http.Handler {
|
||||
}
|
||||
|
||||
type configSchema struct {
|
||||
Port *int `json:"port"`
|
||||
SocksPort *int `json:"socks-port"`
|
||||
RedirPort *int `json:"redir-port"`
|
||||
TProxyPort *int `json:"tproxy-port"`
|
||||
MixedPort *int `json:"mixed-port"`
|
||||
Tun *tunSchema `json:"tun"`
|
||||
TuicServer *tuicServerSchema `json:"tuic-server"`
|
||||
ShadowSocksConfig *string `json:"ss-config"`
|
||||
VmessConfig *string `json:"vmess-config"`
|
||||
TcptunConfig *string `json:"tcptun-config"`
|
||||
UdptunConfig *string `json:"udptun-config"`
|
||||
AllowLan *bool `json:"allow-lan"`
|
||||
SkipAuthPrefixes *[]netip.Prefix `json:"skip-auth-prefixes"`
|
||||
LanAllowedIPs *[]netip.Prefix `json:"lan-allowed-ips"`
|
||||
LanDisAllowedIPs *[]netip.Prefix `json:"lan-disallowed-ips"`
|
||||
BindAddress *string `json:"bind-address"`
|
||||
Mode *tunnel.TunnelMode `json:"mode"`
|
||||
LogLevel *log.LogLevel `json:"log-level"`
|
||||
IPv6 *bool `json:"ipv6"`
|
||||
Sniffing *bool `json:"sniffing"`
|
||||
TcpConcurrent *bool `json:"tcp-concurrent"`
|
||||
InterfaceName *string `json:"interface-name"`
|
||||
Port *int `json:"port"`
|
||||
SocksPort *int `json:"socks-port"`
|
||||
RedirPort *int `json:"redir-port"`
|
||||
TProxyPort *int `json:"tproxy-port"`
|
||||
MixedPort *int `json:"mixed-port"`
|
||||
Tun *tunSchema `json:"tun"`
|
||||
TuicServer *tuicServerSchema `json:"tuic-server"`
|
||||
ShadowSocksConfig *string `json:"ss-config"`
|
||||
VmessConfig *string `json:"vmess-config"`
|
||||
TcptunConfig *string `json:"tcptun-config"`
|
||||
UdptunConfig *string `json:"udptun-config"`
|
||||
AllowLan *bool `json:"allow-lan"`
|
||||
SkipAuthPrefixes *[]netip.Prefix `json:"skip-auth-prefixes"`
|
||||
LanAllowedIPs *[]netip.Prefix `json:"lan-allowed-ips"`
|
||||
LanDisAllowedIPs *[]netip.Prefix `json:"lan-disallowed-ips"`
|
||||
BindAddress *string `json:"bind-address"`
|
||||
Mode *tunnel.TunnelMode `json:"mode"`
|
||||
LogLevel *log.LogLevel `json:"log-level"`
|
||||
IPv6 *bool `json:"ipv6"`
|
||||
Sniffing *bool `json:"sniffing"`
|
||||
TcpConcurrent *bool `json:"tcp-concurrent"`
|
||||
FindProcessMode *process.FindProcessMode `json:"find-process-mode"`
|
||||
InterfaceName *string `json:"interface-name"`
|
||||
}
|
||||
|
||||
type tunSchema struct {
|
||||
@@ -98,6 +100,10 @@ type tunSchema struct {
|
||||
Inet6RouteAddress *[]netip.Prefix `yaml:"inet6-route-address" json:"inet6-route-address,omitempty"`
|
||||
Inet4RouteExcludeAddress *[]netip.Prefix `yaml:"inet4-route-exclude-address" json:"inet4-route-exclude-address,omitempty"`
|
||||
Inet6RouteExcludeAddress *[]netip.Prefix `yaml:"inet6-route-exclude-address" json:"inet6-route-exclude-address,omitempty"`
|
||||
|
||||
// darwin special config
|
||||
RecvMsgX *bool `yaml:"recvmsgx" json:"recvmsgx,omitempty"`
|
||||
SendMsgX *bool `yaml:"sendmsgx" json:"sendmsgx,omitempty"`
|
||||
}
|
||||
|
||||
type tuicServerSchema struct {
|
||||
@@ -241,6 +247,12 @@ func pointerOrDefaultTun(p *tunSchema, def LC.Tun) LC.Tun {
|
||||
if p.FileDescriptor != nil {
|
||||
def.FileDescriptor = *p.FileDescriptor
|
||||
}
|
||||
if p.RecvMsgX != nil {
|
||||
def.RecvMsgX = *p.RecvMsgX
|
||||
}
|
||||
if p.SendMsgX != nil {
|
||||
def.SendMsgX = *p.SendMsgX
|
||||
}
|
||||
}
|
||||
return def
|
||||
}
|
||||
@@ -341,6 +353,10 @@ func patchConfigs(w http.ResponseWriter, r *http.Request) {
|
||||
tunnel.SetMode(*general.Mode)
|
||||
}
|
||||
|
||||
if general.FindProcessMode != nil {
|
||||
tunnel.SetFindProcessMode(*general.FindProcessMode)
|
||||
}
|
||||
|
||||
if general.LogLevel != nil {
|
||||
log.SetLevel(*general.LogLevel)
|
||||
}
|
||||
|
||||
@@ -299,7 +299,7 @@ func startPipe(cfg *Config) {
|
||||
}
|
||||
}
|
||||
|
||||
func safeEuqal(a, b string) bool {
|
||||
func safeEqual(a, b string) bool {
|
||||
aBuf := utils.ImmutableBytesFromString(a)
|
||||
bBuf := utils.ImmutableBytesFromString(b)
|
||||
return subtle.ConstantTimeCompare(aBuf, bBuf) == 1
|
||||
@@ -311,7 +311,7 @@ func authentication(secret string) func(http.Handler) http.Handler {
|
||||
// Browser websocket not support custom header
|
||||
if r.Header.Get("Upgrade") == "websocket" && r.URL.Query().Get("token") != "" {
|
||||
token := r.URL.Query().Get("token")
|
||||
if !safeEuqal(token, secret) {
|
||||
if !safeEqual(token, secret) {
|
||||
render.Status(r, http.StatusUnauthorized)
|
||||
render.JSON(w, r, ErrUnauthorized)
|
||||
return
|
||||
@@ -324,7 +324,7 @@ func authentication(secret string) func(http.Handler) http.Handler {
|
||||
bearer, token, found := strings.Cut(header, " ")
|
||||
|
||||
hasInvalidHeader := bearer != "Bearer"
|
||||
hasInvalidSecret := !found || !safeEuqal(token, secret)
|
||||
hasInvalidSecret := !found || !safeEqual(token, secret)
|
||||
if hasInvalidHeader || hasInvalidSecret {
|
||||
render.Status(r, http.StatusUnauthorized)
|
||||
render.JSON(w, r, ErrUnauthorized)
|
||||
|
||||
@@ -32,7 +32,7 @@ func upgradeCore(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
err = updater.UpdateCore(execPath)
|
||||
err = updater.DefaultCoreUpdater.Update(execPath)
|
||||
if err != nil {
|
||||
log.Warnln("%s", err)
|
||||
render.Status(r, http.StatusInternalServerError)
|
||||
|
||||
@@ -54,6 +54,10 @@ type Tun struct {
|
||||
Inet6RouteAddress []netip.Prefix `yaml:"inet6-route-address" json:"inet6-route-address,omitempty"`
|
||||
Inet4RouteExcludeAddress []netip.Prefix `yaml:"inet4-route-exclude-address" json:"inet4-route-exclude-address,omitempty"`
|
||||
Inet6RouteExcludeAddress []netip.Prefix `yaml:"inet6-route-exclude-address" json:"inet6-route-exclude-address,omitempty"`
|
||||
|
||||
// darwin special config
|
||||
RecvMsgX bool `yaml:"recvmsgx" json:"recvmsgx,omitempty"`
|
||||
SendMsgX bool `yaml:"sendmsgx" json:"sendmsgx,omitempty"`
|
||||
}
|
||||
|
||||
func (t *Tun) Sort() {
|
||||
@@ -199,5 +203,12 @@ func (t *Tun) Equal(other Tun) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
if t.RecvMsgX != other.RecvMsgX {
|
||||
return false
|
||||
}
|
||||
if t.SendMsgX != other.SendMsgX {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -55,6 +55,10 @@ type TunOption struct {
|
||||
Inet6RouteAddress []netip.Prefix `inbound:"inet6-route-address,omitempty"`
|
||||
Inet4RouteExcludeAddress []netip.Prefix `inbound:"inet4-route-exclude-address,omitempty"`
|
||||
Inet6RouteExcludeAddress []netip.Prefix `inbound:"inet6-route-exclude-address,omitempty"`
|
||||
|
||||
// darwin special config
|
||||
RecvMsgX bool `inbound:"recvmsgx,omitempty"`
|
||||
SendMsgX bool `inbound:"sendmsgx,omitempty"`
|
||||
}
|
||||
|
||||
var _ encoding.TextUnmarshaler = (*netip.Addr)(nil) // ensure netip.Addr can decode direct by structure package
|
||||
@@ -124,6 +128,9 @@ func NewTun(options *TunOption) (*Tun, error) {
|
||||
Inet6RouteAddress: options.Inet6RouteAddress,
|
||||
Inet4RouteExcludeAddress: options.Inet4RouteExcludeAddress,
|
||||
Inet6RouteExcludeAddress: options.Inet6RouteExcludeAddress,
|
||||
|
||||
RecvMsgX: options.RecvMsgX,
|
||||
SendMsgX: options.SendMsgX,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -365,6 +365,8 @@ func New(options LC.Tun, tunnel C.Tunnel, additions ...inbound.Addition) (l *Lis
|
||||
ExcludePackage: options.ExcludePackage,
|
||||
FileDescriptor: options.FileDescriptor,
|
||||
InterfaceMonitor: defaultInterfaceMonitor,
|
||||
EXP_RecvMsgX: options.RecvMsgX,
|
||||
EXP_SendMsgX: options.SendMsgX,
|
||||
}
|
||||
|
||||
if options.AutoRedirect {
|
||||
|
||||
@@ -2,6 +2,7 @@ package common
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
C "github.com/metacubex/mihomo/constant"
|
||||
|
||||
@@ -33,4 +34,48 @@ func ParseParams(params []string) (isSrc bool, noResolve bool) {
|
||||
return
|
||||
}
|
||||
|
||||
func trimArr(arr []string) (r []string) {
|
||||
for _, e := range arr {
|
||||
r = append(r, strings.Trim(e, " "))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ParseRulePayload parse rule format like:
|
||||
// `tp,payload,target(,params...)` or `tp,payload(,params...)`
|
||||
// needTarget control the format contains `target` in string
|
||||
func ParseRulePayload(ruleRaw string, needTarget bool) (tp, payload, target string, params []string) {
|
||||
item := trimArr(strings.Split(ruleRaw, ","))
|
||||
tp = strings.ToUpper(item[0])
|
||||
if len(item) > 1 {
|
||||
switch tp {
|
||||
case "MATCH":
|
||||
// MATCH doesn't contain payload and params
|
||||
target = item[1]
|
||||
case "NOT", "OR", "AND", "SUB-RULE", "DOMAIN-REGEX", "PROCESS-NAME-REGEX", "PROCESS-PATH-REGEX":
|
||||
// some type of rules that has comma in payload and don't need params
|
||||
if needTarget {
|
||||
l := len(item)
|
||||
target = item[l-1] // don't have params so target must at the end of slices
|
||||
item = item[:l-1] // remove the target from slices
|
||||
}
|
||||
payload = strings.Join(item[1:], ",")
|
||||
default:
|
||||
payload = item[1]
|
||||
if len(item) > 2 {
|
||||
if needTarget {
|
||||
target = item[2]
|
||||
if len(item) > 3 {
|
||||
params = item[3:]
|
||||
}
|
||||
} else {
|
||||
params = item[2:]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
type ParseRuleFunc func(tp, payload, target string, params []string, subRules map[string][]C.Rule) (C.Rule, error)
|
||||
|
||||
41
rules/common/domain_wildcard.go
Normal file
41
rules/common/domain_wildcard.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/metacubex/mihomo/component/wildcard"
|
||||
C "github.com/metacubex/mihomo/constant"
|
||||
)
|
||||
|
||||
type DomainWildcard struct {
|
||||
*Base
|
||||
pattern string
|
||||
adapter string
|
||||
}
|
||||
|
||||
func (dw *DomainWildcard) RuleType() C.RuleType {
|
||||
return C.DomainWildcard
|
||||
}
|
||||
|
||||
func (dw *DomainWildcard) Match(metadata *C.Metadata, _ C.RuleMatchHelper) (bool, string) {
|
||||
return wildcard.Match(dw.pattern, metadata.Host), dw.adapter
|
||||
}
|
||||
|
||||
func (dw *DomainWildcard) Adapter() string {
|
||||
return dw.adapter
|
||||
}
|
||||
|
||||
func (dw *DomainWildcard) Payload() string {
|
||||
return dw.pattern
|
||||
}
|
||||
|
||||
var _ C.Rule = (*DomainWildcard)(nil)
|
||||
|
||||
func NewDomainWildcard(pattern string, adapter string) (*DomainWildcard, error) {
|
||||
pattern = strings.ToLower(pattern)
|
||||
return &DomainWildcard{
|
||||
Base: &Base{},
|
||||
pattern: pattern,
|
||||
adapter: adapter,
|
||||
}, nil
|
||||
}
|
||||
@@ -78,21 +78,14 @@ func (r Range) containRange(preStart, preEnd int) bool {
|
||||
}
|
||||
|
||||
func (logic *Logic) payloadToRule(subPayload string, parseRule common.ParseRuleFunc) (C.Rule, error) {
|
||||
splitStr := strings.SplitN(subPayload, ",", 2)
|
||||
if len(splitStr) < 2 {
|
||||
return nil, fmt.Errorf("[%s] format is error", subPayload)
|
||||
}
|
||||
|
||||
tp := splitStr[0]
|
||||
payload := splitStr[1]
|
||||
tp, payload, target, param := common.ParseRulePayload(subPayload, false)
|
||||
switch tp {
|
||||
case "MATCH", "SUB-RULE":
|
||||
return nil, fmt.Errorf("unsupported rule type [%s] on logic rule", tp)
|
||||
case "NOT", "OR", "AND":
|
||||
return parseRule(tp, payload, "", nil, nil)
|
||||
case "":
|
||||
return nil, fmt.Errorf("[%s] format is error", subPayload)
|
||||
}
|
||||
param := strings.Split(payload, ",")
|
||||
return parseRule(tp, param[0], "", param[1:], nil)
|
||||
return parseRule(tp, payload, target, param, nil)
|
||||
}
|
||||
|
||||
func (logic *Logic) format(payload string) ([]Range, error) {
|
||||
|
||||
@@ -10,6 +10,10 @@ import (
|
||||
)
|
||||
|
||||
func ParseRule(tp, payload, target string, params []string, subRules map[string][]C.Rule) (parsed C.Rule, parseErr error) {
|
||||
if tp != "MATCH" && payload == "" { // only MATCH allowed doesn't contain payload
|
||||
return nil, fmt.Errorf("missing subsequent parameters: %s", tp)
|
||||
}
|
||||
|
||||
switch tp {
|
||||
case "DOMAIN":
|
||||
parsed = RC.NewDomain(payload, target)
|
||||
@@ -19,6 +23,8 @@ func ParseRule(tp, payload, target string, params []string, subRules map[string]
|
||||
parsed = RC.NewDomainKeyword(payload, target)
|
||||
case "DOMAIN-REGEX":
|
||||
parsed, parseErr = RC.NewDomainRegex(payload, target)
|
||||
case "DOMAIN-WILDCARD":
|
||||
parsed, parseErr = RC.NewDomainWildcard(payload, target)
|
||||
case "GEOSITE":
|
||||
parsed, parseErr = RC.NewGEOSITE(payload, target)
|
||||
case "GEOIP":
|
||||
@@ -81,8 +87,6 @@ func ParseRule(tp, payload, target string, params []string, subRules map[string]
|
||||
case "MATCH":
|
||||
parsed = RC.NewMatch(target)
|
||||
parseErr = nil
|
||||
case "":
|
||||
parseErr = fmt.Errorf("missing subsequent parameters: %s", payload)
|
||||
default:
|
||||
parseErr = fmt.Errorf("unsupported rule type: %s", tp)
|
||||
}
|
||||
|
||||
@@ -2,17 +2,17 @@ package provider
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
C "github.com/metacubex/mihomo/constant"
|
||||
P "github.com/metacubex/mihomo/constant/provider"
|
||||
"github.com/metacubex/mihomo/log"
|
||||
"github.com/metacubex/mihomo/rules/common"
|
||||
)
|
||||
|
||||
type classicalStrategy struct {
|
||||
rules []C.Rule
|
||||
count int
|
||||
parse func(tp, payload, target string, params []string) (parsed C.Rule, parseErr error)
|
||||
parse common.ParseRuleFunc
|
||||
}
|
||||
|
||||
func (c *classicalStrategy) Behavior() P.RuleBehavior {
|
||||
@@ -39,42 +39,26 @@ func (c *classicalStrategy) Reset() {
|
||||
}
|
||||
|
||||
func (c *classicalStrategy) Insert(rule string) {
|
||||
ruleType, rule, params := ruleParse(rule)
|
||||
r, err := c.parse(ruleType, rule, "", params)
|
||||
r, err := c.payloadToRule(rule)
|
||||
if err != nil {
|
||||
log.Warnln("parse classical rule error: %s", err.Error())
|
||||
log.Warnln("parse classical rule [%s] error: %s", rule, err.Error())
|
||||
} else {
|
||||
c.rules = append(c.rules, r)
|
||||
c.count++
|
||||
}
|
||||
}
|
||||
|
||||
func (c *classicalStrategy) payloadToRule(rule string) (C.Rule, error) {
|
||||
tp, payload, target, params := common.ParseRulePayload(rule, false)
|
||||
switch tp {
|
||||
case "MATCH", "RULE-SET", "SUB-RULE":
|
||||
return nil, fmt.Errorf("unsupported rule type on classical rule-set: %s", tp)
|
||||
}
|
||||
return c.parse(tp, payload, target, params, nil)
|
||||
}
|
||||
|
||||
func (c *classicalStrategy) FinishInsert() {}
|
||||
|
||||
func ruleParse(ruleRaw string) (string, string, []string) {
|
||||
item := strings.Split(ruleRaw, ",")
|
||||
if len(item) == 1 {
|
||||
return "", item[0], nil
|
||||
} else if len(item) == 2 {
|
||||
return item[0], item[1], nil
|
||||
} else if len(item) > 2 {
|
||||
if item[0] == "NOT" || item[0] == "OR" || item[0] == "AND" || item[0] == "SUB-RULE" || item[0] == "DOMAIN-REGEX" || item[0] == "PROCESS-NAME-REGEX" || item[0] == "PROCESS-PATH-REGEX" {
|
||||
return item[0], strings.Join(item[1:], ","), nil
|
||||
} else {
|
||||
return item[0], item[1], item[2:]
|
||||
}
|
||||
}
|
||||
|
||||
return "", "", nil
|
||||
}
|
||||
|
||||
func NewClassicalStrategy(parse func(tp, payload, target string, params []string, subRules map[string][]C.Rule) (parsed C.Rule, parseErr error)) *classicalStrategy {
|
||||
return &classicalStrategy{rules: []C.Rule{}, parse: func(tp, payload, target string, params []string) (parsed C.Rule, parseErr error) {
|
||||
switch tp {
|
||||
case "MATCH", "RULE-SET", "SUB-RULE":
|
||||
return nil, fmt.Errorf("unsupported rule type on classical rule-set: %s", tp)
|
||||
default:
|
||||
return parse(tp, payload, target, params, nil)
|
||||
}
|
||||
}}
|
||||
func NewClassicalStrategy(parse common.ParseRuleFunc) *classicalStrategy {
|
||||
return &classicalStrategy{rules: []C.Rule{}, parse: parse}
|
||||
}
|
||||
|
||||
@@ -42,6 +42,9 @@ func ParseRuleProvider(name string, mapping map[string]any, parse common.ParseRu
|
||||
switch schema.Type {
|
||||
case "file":
|
||||
path := C.Path.Resolve(schema.Path)
|
||||
if !C.Path.IsSafePath(path) {
|
||||
return nil, C.Path.ErrNotSafePath(path)
|
||||
}
|
||||
vehicle = resource.NewFileVehicle(path)
|
||||
case "http":
|
||||
path := C.Path.GetPathByHash("rules", schema.URL)
|
||||
|
||||
@@ -41,7 +41,7 @@ type Client struct {
|
||||
tlsConfig *tlsC.Config
|
||||
quicConfig *quic.Config
|
||||
|
||||
quicSession quic.Connection
|
||||
quicSession *quic.Conn
|
||||
reconnectMutex sync.Mutex
|
||||
closed bool
|
||||
|
||||
@@ -103,7 +103,7 @@ func (c *Client) connectToServer(dialer utils.PacketDialer) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) handleControlStream(qs quic.Connection, stream quic.Stream) (bool, string, error) {
|
||||
func (c *Client) handleControlStream(qs *quic.Conn, stream *quic.Stream) (bool, string, error) {
|
||||
// Send protocol version
|
||||
_, err := stream.Write([]byte{protocolVersion})
|
||||
if err != nil {
|
||||
@@ -133,7 +133,7 @@ func (c *Client) handleControlStream(qs quic.Connection, stream quic.Stream) (bo
|
||||
return sh.OK, sh.Message, nil
|
||||
}
|
||||
|
||||
func (c *Client) handleMessage(qs quic.Connection) {
|
||||
func (c *Client) handleMessage(qs *quic.Conn) {
|
||||
for {
|
||||
msg, err := qs.ReceiveDatagram(context.Background())
|
||||
if err != nil {
|
||||
@@ -162,7 +162,7 @@ func (c *Client) handleMessage(qs quic.Connection) {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) openStreamWithReconnect(dialer utils.PacketDialer) (quic.Connection, quic.Stream, error) {
|
||||
func (c *Client) openStreamWithReconnect(dialer utils.PacketDialer) (*quic.Conn, *wrappedQUICStream, error) {
|
||||
c.reconnectMutex.Lock()
|
||||
defer c.reconnectMutex.Unlock()
|
||||
if c.closed {
|
||||
@@ -298,7 +298,7 @@ func (c *Client) Close() error {
|
||||
}
|
||||
|
||||
type quicConn struct {
|
||||
Orig quic.Stream
|
||||
Orig *wrappedQUICStream
|
||||
PseudoLocalAddr net.Addr
|
||||
PseudoRemoteAddr net.Addr
|
||||
Established bool
|
||||
@@ -360,8 +360,8 @@ type UDPConn interface {
|
||||
}
|
||||
|
||||
type quicPktConn struct {
|
||||
Session quic.Connection
|
||||
Stream quic.Stream
|
||||
Session *quic.Conn
|
||||
Stream *wrappedQUICStream
|
||||
CloseFunc func()
|
||||
UDPSessionID uint32
|
||||
MsgCh <-chan *udpMessage
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
// Handle stream close properly
|
||||
// Ref: https://github.com/libp2p/go-libp2p-quic-transport/blob/master/stream.go
|
||||
type wrappedQUICStream struct {
|
||||
Stream quic.Stream
|
||||
Stream *quic.Stream
|
||||
}
|
||||
|
||||
func (s *wrappedQUICStream) StreamID() quic.StreamID {
|
||||
|
||||
@@ -62,7 +62,7 @@ func (ct *ClientTransport) quicPacketConn(proto string, rAddr net.Addr, serverPo
|
||||
}
|
||||
}
|
||||
|
||||
func (ct *ClientTransport) QUICDial(proto string, server string, serverPorts string, tlsConfig *tlsC.Config, quicConfig *quic.Config, obfs obfsPkg.Obfuscator, hopInterval time.Duration, dialer utils.PacketDialer) (quic.Connection, error) {
|
||||
func (ct *ClientTransport) QUICDial(proto string, server string, serverPorts string, tlsConfig *tlsC.Config, quicConfig *quic.Config, obfs obfsPkg.Obfuscator, hopInterval time.Duration, dialer utils.PacketDialer) (*quic.Conn, error) {
|
||||
serverUDPAddr, err := dialer.RemoteAddr(server)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -13,7 +13,7 @@ const (
|
||||
DefaultConnectionReceiveWindow = 67108864 // 64 MB/s
|
||||
)
|
||||
|
||||
func SetCongestionController(quicConn quic.Connection, cc string, cwnd int) {
|
||||
func SetCongestionController(quicConn *quic.Conn, cc string, cwnd int) {
|
||||
if cwnd == 0 {
|
||||
cwnd = 32
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
type quicStreamConn struct {
|
||||
quic.Stream
|
||||
*quic.Stream
|
||||
lock sync.Mutex
|
||||
lAddr net.Addr
|
||||
rAddr net.Addr
|
||||
@@ -62,6 +62,6 @@ func (q *quicStreamConn) RemoteAddr() net.Addr {
|
||||
|
||||
var _ net.Conn = (*quicStreamConn)(nil)
|
||||
|
||||
func NewQuicStreamConn(stream quic.Stream, lAddr, rAddr net.Addr, closeDeferFn func()) net.Conn {
|
||||
func NewQuicStreamConn(stream *quic.Stream, lAddr, rAddr net.Addr, closeDeferFn func()) net.Conn {
|
||||
return &quicStreamConn{Stream: stream, lAddr: lAddr, rAddr: rAddr, closeDeferFn: closeDeferFn}
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ const (
|
||||
DefaultBBRMaxCongestionWindow = 10000
|
||||
)
|
||||
|
||||
func GetInitialPacketSize(quicConn quic.Connection) congestion.ByteCount {
|
||||
func GetInitialPacketSize(quicConn *quic.Conn) congestion.ByteCount {
|
||||
return congestion.ByteCount(quicConn.Config().InitialPacketSize)
|
||||
}
|
||||
|
||||
|
||||
@@ -930,6 +930,6 @@ func bdpFromRttAndBandwidth(rtt time.Duration, bandwidth Bandwidth) congestion.B
|
||||
return congestion.ByteCount(rtt) * congestion.ByteCount(bandwidth) / congestion.ByteCount(BytesPerSecond) / congestion.ByteCount(time.Second)
|
||||
}
|
||||
|
||||
func GetInitialPacketSize(quicConn quic.Connection) congestion.ByteCount {
|
||||
func GetInitialPacketSize(quicConn *quic.Conn) congestion.ByteCount {
|
||||
return congestion.ByteCount(quicConn.Config().InitialPacketSize)
|
||||
}
|
||||
|
||||
@@ -69,7 +69,7 @@ func (s *Server) Close() error {
|
||||
|
||||
type serverHandler struct {
|
||||
*Server
|
||||
quicConn quic.EarlyConnection
|
||||
quicConn *quic.Conn
|
||||
uuid uuid.UUID
|
||||
|
||||
v4Handler common.ServerHandler
|
||||
@@ -138,7 +138,7 @@ func (s *serverHandler) handleMessage() (err error) {
|
||||
|
||||
func (s *serverHandler) handleStream() (err error) {
|
||||
for {
|
||||
var quicStream quic.Stream
|
||||
var quicStream *quic.Stream
|
||||
quicStream, err = s.quicConn.AcceptStream(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -175,7 +175,7 @@ func (s *serverHandler) handleStream() (err error) {
|
||||
|
||||
func (s *serverHandler) handleUniStream() (err error) {
|
||||
for {
|
||||
var stream quic.ReceiveStream
|
||||
var stream *quic.ReceiveStream
|
||||
stream, err = s.quicConn.AcceptUniStream(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
atomic2 "github.com/metacubex/mihomo/common/atomic"
|
||||
N "github.com/metacubex/mihomo/common/net"
|
||||
"github.com/metacubex/mihomo/common/pool"
|
||||
"github.com/metacubex/mihomo/common/xsync"
|
||||
tlsC "github.com/metacubex/mihomo/component/tls"
|
||||
C "github.com/metacubex/mihomo/constant"
|
||||
"github.com/metacubex/mihomo/log"
|
||||
@@ -21,7 +22,6 @@ import (
|
||||
|
||||
"github.com/metacubex/quic-go"
|
||||
"github.com/metacubex/randv2"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
)
|
||||
|
||||
type ClientOption struct {
|
||||
@@ -42,13 +42,13 @@ type clientImpl struct {
|
||||
*ClientOption
|
||||
udp bool
|
||||
|
||||
quicConn quic.Connection
|
||||
quicConn *quic.Conn
|
||||
connMutex sync.Mutex
|
||||
|
||||
openStreams atomic.Int64
|
||||
closed atomic.Bool
|
||||
|
||||
udpInputMap *xsync.MapOf[uint32, net.Conn]
|
||||
udpInputMap xsync.Map[uint32, net.Conn]
|
||||
|
||||
// only ready for PoolClient
|
||||
dialerRef C.Dialer
|
||||
@@ -71,7 +71,7 @@ func (t *clientImpl) SetLastVisited(last time.Time) {
|
||||
t.lastVisited.Store(last)
|
||||
}
|
||||
|
||||
func (t *clientImpl) getQuicConn(ctx context.Context, dialer C.Dialer, dialFn common.DialFunc) (quic.Connection, error) {
|
||||
func (t *clientImpl) getQuicConn(ctx context.Context, dialer C.Dialer, dialFn common.DialFunc) (*quic.Conn, error) {
|
||||
t.connMutex.Lock()
|
||||
defer t.connMutex.Unlock()
|
||||
if t.quicConn != nil {
|
||||
@@ -81,7 +81,7 @@ func (t *clientImpl) getQuicConn(ctx context.Context, dialer C.Dialer, dialFn co
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var quicConn quic.Connection
|
||||
var quicConn *quic.Conn
|
||||
if t.ReduceRtt {
|
||||
quicConn, err = transport.DialEarly(ctx, addr, t.TlsConfig, t.QuicConfig)
|
||||
} else {
|
||||
@@ -113,7 +113,7 @@ func (t *clientImpl) getQuicConn(ctx context.Context, dialer C.Dialer, dialFn co
|
||||
return quicConn, nil
|
||||
}
|
||||
|
||||
func (t *clientImpl) sendAuthentication(quicConn quic.Connection) (err error) {
|
||||
func (t *clientImpl) sendAuthentication(quicConn *quic.Conn) (err error) {
|
||||
defer func() {
|
||||
t.deferQuicConn(quicConn, err)
|
||||
}()
|
||||
@@ -138,12 +138,12 @@ func (t *clientImpl) sendAuthentication(quicConn quic.Connection) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *clientImpl) handleUniStream(quicConn quic.Connection) (err error) {
|
||||
func (t *clientImpl) handleUniStream(quicConn *quic.Conn) (err error) {
|
||||
defer func() {
|
||||
t.deferQuicConn(quicConn, err)
|
||||
}()
|
||||
for {
|
||||
var stream quic.ReceiveStream
|
||||
var stream *quic.ReceiveStream
|
||||
stream, err = quicConn.AcceptUniStream(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -189,7 +189,7 @@ func (t *clientImpl) handleUniStream(quicConn quic.Connection) (err error) {
|
||||
}
|
||||
}
|
||||
|
||||
func (t *clientImpl) handleMessage(quicConn quic.Connection) (err error) {
|
||||
func (t *clientImpl) handleMessage(quicConn *quic.Conn) (err error) {
|
||||
defer func() {
|
||||
t.deferQuicConn(quicConn, err)
|
||||
}()
|
||||
@@ -237,14 +237,14 @@ func (t *clientImpl) handleMessage(quicConn quic.Connection) (err error) {
|
||||
}
|
||||
}
|
||||
|
||||
func (t *clientImpl) deferQuicConn(quicConn quic.Connection, err error) {
|
||||
func (t *clientImpl) deferQuicConn(quicConn *quic.Conn, err error) {
|
||||
var netError net.Error
|
||||
if err != nil && errors.As(err, &netError) {
|
||||
t.forceClose(quicConn, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *clientImpl) forceClose(quicConn quic.Connection, err error) {
|
||||
func (t *clientImpl) forceClose(quicConn *quic.Conn, err error) {
|
||||
t.connMutex.Lock()
|
||||
defer t.connMutex.Unlock()
|
||||
if quicConn == nil {
|
||||
@@ -422,7 +422,6 @@ func NewClient(clientOption *ClientOption, udp bool, dialerRef C.Dialer) *Client
|
||||
ClientOption: clientOption,
|
||||
udp: udp,
|
||||
dialerRef: dialerRef,
|
||||
udpInputMap: xsync.NewMapOf[uint32, net.Conn](),
|
||||
}
|
||||
c := &Client{ci}
|
||||
runtime.SetFinalizer(c, closeClient)
|
||||
|
||||
@@ -15,13 +15,13 @@ import (
|
||||
|
||||
type quicStreamPacketConn struct {
|
||||
connId uint32
|
||||
quicConn quic.Connection
|
||||
quicConn *quic.Conn
|
||||
inputConn *N.BufferedConn
|
||||
|
||||
udpRelayMode common.UdpRelayMode
|
||||
maxUdpRelayPacketSize int
|
||||
|
||||
deferQuicConnFn func(quicConn quic.Connection, err error)
|
||||
deferQuicConnFn func(quicConn *quic.Conn, err error)
|
||||
closeDeferFn func()
|
||||
writeClosed *atomic.Bool
|
||||
|
||||
@@ -57,7 +57,7 @@ func (q *quicStreamPacketConn) close() (err error) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var stream quic.SendStream
|
||||
var stream *quic.SendStream
|
||||
stream, err = q.quicConn.OpenUniStream()
|
||||
if err != nil {
|
||||
return
|
||||
@@ -149,7 +149,7 @@ func (q *quicStreamPacketConn) WriteTo(p []byte, addr net.Addr) (n int, err erro
|
||||
}
|
||||
switch q.udpRelayMode {
|
||||
case common.QUIC:
|
||||
var stream quic.SendStream
|
||||
var stream *quic.SendStream
|
||||
stream, err = q.quicConn.OpenUniStream()
|
||||
if err != nil {
|
||||
return
|
||||
|
||||
@@ -11,13 +11,13 @@ import (
|
||||
"github.com/metacubex/mihomo/common/atomic"
|
||||
N "github.com/metacubex/mihomo/common/net"
|
||||
"github.com/metacubex/mihomo/common/pool"
|
||||
"github.com/metacubex/mihomo/common/xsync"
|
||||
C "github.com/metacubex/mihomo/constant"
|
||||
"github.com/metacubex/mihomo/transport/socks5"
|
||||
"github.com/metacubex/mihomo/transport/tuic/common"
|
||||
|
||||
"github.com/gofrs/uuid/v5"
|
||||
"github.com/metacubex/quic-go"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
)
|
||||
|
||||
type ServerOption struct {
|
||||
@@ -28,26 +28,25 @@ type ServerOption struct {
|
||||
MaxUdpRelayPacketSize int
|
||||
}
|
||||
|
||||
func NewServerHandler(option *ServerOption, quicConn quic.EarlyConnection, uuid uuid.UUID) common.ServerHandler {
|
||||
func NewServerHandler(option *ServerOption, quicConn *quic.Conn, uuid uuid.UUID) common.ServerHandler {
|
||||
return &serverHandler{
|
||||
ServerOption: option,
|
||||
quicConn: quicConn,
|
||||
uuid: uuid,
|
||||
authCh: make(chan struct{}),
|
||||
udpInputMap: xsync.NewMapOf[uint32, *atomic.Bool](),
|
||||
}
|
||||
}
|
||||
|
||||
type serverHandler struct {
|
||||
*ServerOption
|
||||
quicConn quic.EarlyConnection
|
||||
quicConn *quic.Conn
|
||||
uuid uuid.UUID
|
||||
|
||||
authCh chan struct{}
|
||||
authOk atomic.Bool
|
||||
authOnce sync.Once
|
||||
|
||||
udpInputMap *xsync.MapOf[uint32, *atomic.Bool]
|
||||
udpInputMap xsync.Map[uint32, *atomic.Bool]
|
||||
}
|
||||
|
||||
func (s *serverHandler) AuthOk() bool {
|
||||
@@ -80,7 +79,7 @@ func (s *serverHandler) parsePacket(packet *Packet, udpRelayMode common.UdpRelay
|
||||
|
||||
assocId = packet.ASSOC_ID
|
||||
|
||||
writeClosed, _ := s.udpInputMap.LoadOrCompute(assocId, func() *atomic.Bool { return &atomic.Bool{} })
|
||||
writeClosed, _ := s.udpInputMap.LoadOrStoreFn(assocId, func() *atomic.Bool { return &atomic.Bool{} })
|
||||
if writeClosed.Load() {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
atomic2 "github.com/metacubex/mihomo/common/atomic"
|
||||
N "github.com/metacubex/mihomo/common/net"
|
||||
"github.com/metacubex/mihomo/common/pool"
|
||||
"github.com/metacubex/mihomo/common/xsync"
|
||||
tlsC "github.com/metacubex/mihomo/component/tls"
|
||||
C "github.com/metacubex/mihomo/constant"
|
||||
"github.com/metacubex/mihomo/log"
|
||||
@@ -21,7 +22,6 @@ import (
|
||||
|
||||
"github.com/metacubex/quic-go"
|
||||
"github.com/metacubex/randv2"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
)
|
||||
|
||||
type ClientOption struct {
|
||||
@@ -41,13 +41,13 @@ type clientImpl struct {
|
||||
*ClientOption
|
||||
udp bool
|
||||
|
||||
quicConn quic.Connection
|
||||
quicConn *quic.Conn
|
||||
connMutex sync.Mutex
|
||||
|
||||
openStreams atomic.Int64
|
||||
closed atomic.Bool
|
||||
|
||||
udpInputMap *xsync.MapOf[uint16, net.Conn]
|
||||
udpInputMap xsync.Map[uint16, net.Conn]
|
||||
|
||||
// only ready for PoolClient
|
||||
dialerRef C.Dialer
|
||||
@@ -70,7 +70,7 @@ func (t *clientImpl) SetLastVisited(last time.Time) {
|
||||
t.lastVisited.Store(last)
|
||||
}
|
||||
|
||||
func (t *clientImpl) getQuicConn(ctx context.Context, dialer C.Dialer, dialFn common.DialFunc) (quic.Connection, error) {
|
||||
func (t *clientImpl) getQuicConn(ctx context.Context, dialer C.Dialer, dialFn common.DialFunc) (*quic.Conn, error) {
|
||||
t.connMutex.Lock()
|
||||
defer t.connMutex.Unlock()
|
||||
if t.quicConn != nil {
|
||||
@@ -80,7 +80,7 @@ func (t *clientImpl) getQuicConn(ctx context.Context, dialer C.Dialer, dialFn co
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var quicConn quic.Connection
|
||||
var quicConn *quic.Conn
|
||||
if t.ReduceRtt {
|
||||
quicConn, err = transport.DialEarly(ctx, addr, t.TlsConfig, t.QuicConfig)
|
||||
} else {
|
||||
@@ -110,7 +110,7 @@ func (t *clientImpl) getQuicConn(ctx context.Context, dialer C.Dialer, dialFn co
|
||||
return quicConn, nil
|
||||
}
|
||||
|
||||
func (t *clientImpl) sendAuthentication(quicConn quic.Connection) (err error) {
|
||||
func (t *clientImpl) sendAuthentication(quicConn *quic.Conn) (err error) {
|
||||
defer func() {
|
||||
t.deferQuicConn(quicConn, err)
|
||||
}()
|
||||
@@ -139,12 +139,12 @@ func (t *clientImpl) sendAuthentication(quicConn quic.Connection) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *clientImpl) handleUniStream(quicConn quic.Connection) (err error) {
|
||||
func (t *clientImpl) handleUniStream(quicConn *quic.Conn) (err error) {
|
||||
defer func() {
|
||||
t.deferQuicConn(quicConn, err)
|
||||
}()
|
||||
for {
|
||||
var stream quic.ReceiveStream
|
||||
var stream *quic.ReceiveStream
|
||||
stream, err = quicConn.AcceptUniStream(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -190,7 +190,7 @@ func (t *clientImpl) handleUniStream(quicConn quic.Connection) (err error) {
|
||||
}
|
||||
}
|
||||
|
||||
func (t *clientImpl) handleMessage(quicConn quic.Connection) (err error) {
|
||||
func (t *clientImpl) handleMessage(quicConn *quic.Conn) (err error) {
|
||||
defer func() {
|
||||
t.deferQuicConn(quicConn, err)
|
||||
}()
|
||||
@@ -245,14 +245,14 @@ func (t *clientImpl) handleMessage(quicConn quic.Connection) (err error) {
|
||||
}
|
||||
}
|
||||
|
||||
func (t *clientImpl) deferQuicConn(quicConn quic.Connection, err error) {
|
||||
func (t *clientImpl) deferQuicConn(quicConn *quic.Conn, err error) {
|
||||
var netError net.Error
|
||||
if err != nil && errors.As(err, &netError) {
|
||||
t.forceClose(quicConn, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *clientImpl) forceClose(quicConn quic.Connection, err error) {
|
||||
func (t *clientImpl) forceClose(quicConn *quic.Conn, err error) {
|
||||
t.connMutex.Lock()
|
||||
defer t.connMutex.Unlock()
|
||||
if quicConn == nil {
|
||||
@@ -406,7 +406,6 @@ func NewClient(clientOption *ClientOption, udp bool, dialerRef C.Dialer) *Client
|
||||
ClientOption: clientOption,
|
||||
udp: udp,
|
||||
dialerRef: dialerRef,
|
||||
udpInputMap: xsync.NewMapOf[uint16, net.Conn](),
|
||||
}
|
||||
c := &Client{ci}
|
||||
runtime.SetFinalizer(c, closeClient)
|
||||
|
||||
@@ -16,7 +16,7 @@ import (
|
||||
// "-3" from quic-go's DatagramFrame.MaxDataLen
|
||||
var MaxFragSize = 1200 - PacketOverHead - 3
|
||||
|
||||
func fragWriteNative(quicConn quic.Connection, packet Packet, buf *bytes.Buffer, fragSize int) (err error) {
|
||||
func fragWriteNative(quicConn *quic.Conn, packet Packet, buf *bytes.Buffer, fragSize int) (err error) {
|
||||
fullPayload := packet.DATA
|
||||
off := 0
|
||||
fragID := uint8(0)
|
||||
|
||||
@@ -17,13 +17,13 @@ import (
|
||||
|
||||
type quicStreamPacketConn struct {
|
||||
connId uint16
|
||||
quicConn quic.Connection
|
||||
quicConn *quic.Conn
|
||||
inputConn *N.BufferedConn
|
||||
|
||||
udpRelayMode common.UdpRelayMode
|
||||
maxUdpRelayPacketSize int
|
||||
|
||||
deferQuicConnFn func(quicConn quic.Connection, err error)
|
||||
deferQuicConnFn func(quicConn *quic.Conn, err error)
|
||||
closeDeferFn func()
|
||||
writeClosed *atomic.Bool
|
||||
|
||||
@@ -61,7 +61,7 @@ func (q *quicStreamPacketConn) close() (err error) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var stream quic.SendStream
|
||||
var stream *quic.SendStream
|
||||
stream, err = q.quicConn.OpenUniStream()
|
||||
if err != nil {
|
||||
return
|
||||
@@ -165,7 +165,7 @@ func (q *quicStreamPacketConn) WriteTo(p []byte, addr net.Addr) (n int, err erro
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var stream quic.SendStream
|
||||
var stream *quic.SendStream
|
||||
stream, err = q.quicConn.OpenUniStream()
|
||||
if err != nil {
|
||||
return
|
||||
|
||||
@@ -10,13 +10,13 @@ import (
|
||||
"github.com/metacubex/mihomo/adapter/inbound"
|
||||
"github.com/metacubex/mihomo/common/atomic"
|
||||
N "github.com/metacubex/mihomo/common/net"
|
||||
"github.com/metacubex/mihomo/common/xsync"
|
||||
C "github.com/metacubex/mihomo/constant"
|
||||
"github.com/metacubex/mihomo/transport/socks5"
|
||||
"github.com/metacubex/mihomo/transport/tuic/common"
|
||||
|
||||
"github.com/gofrs/uuid/v5"
|
||||
"github.com/metacubex/quic-go"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
)
|
||||
|
||||
type ServerOption struct {
|
||||
@@ -27,19 +27,18 @@ type ServerOption struct {
|
||||
MaxUdpRelayPacketSize int
|
||||
}
|
||||
|
||||
func NewServerHandler(option *ServerOption, quicConn quic.EarlyConnection, uuid uuid.UUID) common.ServerHandler {
|
||||
func NewServerHandler(option *ServerOption, quicConn *quic.Conn, uuid uuid.UUID) common.ServerHandler {
|
||||
return &serverHandler{
|
||||
ServerOption: option,
|
||||
quicConn: quicConn,
|
||||
uuid: uuid,
|
||||
authCh: make(chan struct{}),
|
||||
udpInputMap: xsync.NewMapOf[uint16, *serverUDPInput](),
|
||||
}
|
||||
}
|
||||
|
||||
type serverHandler struct {
|
||||
*ServerOption
|
||||
quicConn quic.EarlyConnection
|
||||
quicConn *quic.Conn
|
||||
uuid uuid.UUID
|
||||
|
||||
authCh chan struct{}
|
||||
@@ -47,7 +46,7 @@ type serverHandler struct {
|
||||
authUUID atomic.TypedValue[string]
|
||||
authOnce sync.Once
|
||||
|
||||
udpInputMap *xsync.MapOf[uint16, *serverUDPInput]
|
||||
udpInputMap xsync.Map[uint16, *serverUDPInput]
|
||||
}
|
||||
|
||||
func (s *serverHandler) AuthOk() bool {
|
||||
@@ -96,7 +95,7 @@ func (s *serverHandler) parsePacket(packet *Packet, udpRelayMode common.UdpRelay
|
||||
|
||||
assocId = packet.ASSOC_ID
|
||||
|
||||
input, _ := s.udpInputMap.LoadOrCompute(assocId, func() *serverUDPInput { return &serverUDPInput{} })
|
||||
input, _ := s.udpInputMap.LoadOrStoreFn(assocId, func() *serverUDPInput { return &serverUDPInput{} })
|
||||
if input.writeClosed.Load() {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -5,8 +5,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/metacubex/mihomo/common/atomic"
|
||||
"github.com/metacubex/mihomo/common/xsync"
|
||||
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
"github.com/shirou/gopsutil/v4/process"
|
||||
)
|
||||
|
||||
@@ -14,7 +14,6 @@ var DefaultManager *Manager
|
||||
|
||||
func init() {
|
||||
DefaultManager = &Manager{
|
||||
connections: xsync.NewMapOf[string, Tracker](),
|
||||
uploadTemp: atomic.NewInt64(0),
|
||||
downloadTemp: atomic.NewInt64(0),
|
||||
uploadBlip: atomic.NewInt64(0),
|
||||
@@ -28,7 +27,7 @@ func init() {
|
||||
}
|
||||
|
||||
type Manager struct {
|
||||
connections *xsync.MapOf[string, Tracker]
|
||||
connections xsync.Map[string, Tracker]
|
||||
uploadTemp atomic.Int64
|
||||
downloadTemp atomic.Int64
|
||||
uploadBlip atomic.Int64
|
||||
|
||||
Reference in New Issue
Block a user