Compare commits

..

21 Commits

Author SHA1 Message Date
enfein
a001b1b110 chore: update mieru version (#2403) 2025-12-01 08:42:28 +08:00
wwqgtxx
d1f89fa05e chore: update tfo-go ready for go1.26 2025-12-01 01:12:31 +08:00
wwqgtxx
e2796e2d5c chore: apply ping destination filter for windows 2025-11-30 17:00:04 +08:00
futai
93de49d20c chore: sync sudoku with mihomo log (#2402) 2025-11-29 15:21:29 +08:00
wwqgtxx
4d3167ff2f chore: completely remove relay group type using dialer-proxy instead
which was marked as deprecated in v1.18.6
2025-11-29 09:39:28 +08:00
enfein
5998956a72 fix: a nil pointer error when closing mieru underlay (#2401) 2025-11-29 08:39:45 +08:00
futai
6cf1743961 feat: add Sudoku protocol inbound & outbound support (#2397) 2025-11-28 23:40:00 +08:00
Sinspired
8b6ba22b90 fix: replace wrong SetString() with SetBool() for uint weak-typed input (#2394)
The uint branch in decodeBool() incorrectly used SetString(). Use SetBool(dataVal.Uint() != 0) to match expected behavior.
2025-11-26 10:35:26 +08:00
wwqgtxx
7571c87afb chore: add fake-ip-ttl to dns section 2025-11-23 21:34:30 +08:00
wwqgtxx
d4d2c062a3 test: skip inbound test on darwin 2025-11-23 21:34:30 +08:00
TargetLocked
438d4138d6 fix: compare authentication scheme case-insensitively (#2386) 2025-11-23 19:34:02 +08:00
wwqgtxx
140d892ccf chore: better log 2025-11-22 20:59:53 +08:00
enfein
5aa140c493 feat: support mieru UDP outbound (#2384) 2025-11-22 08:54:14 +08:00
enfein
c107c6a824 fix: crash due to nil net.Conn from mieru inbound (#2361) 2025-11-15 07:42:29 +08:00
wwqgtxx
f6e494e73f chore: upgrade the embedded xsync.Map to v4.2.0 2025-11-14 00:23:01 +08:00
hi
0b3159bf9b chore: remove redundant code (#2355) 2025-11-11 17:06:49 +08:00
wwqgtxx
45fd628788 fix: bugs in kcp-go and smux 2025-11-11 09:33:41 +08:00
wwqgtxx
2f545ef634 fix: hosts not working
https://github.com/MetaCubeX/mihomo/issues/2351
2025-11-10 10:55:02 +08:00
wwqgtxx
054e63cb3f chore: remove depend of purego 2025-11-10 00:46:09 +08:00
wwqgtxx
d48bcf1e1e fix: fakeip6 logic not work correctly 2025-11-09 19:19:20 +08:00
wwqgtxx
0df2f79ece fix: missing metadata in mieru inbound 2025-11-09 11:41:37 +08:00
33 changed files with 1506 additions and 243 deletions

View File

@@ -54,6 +54,11 @@ jobs:
cd $(go env GOROOT)
patch --verbose -p 1 < $GITHUB_WORKSPACE/.github/patch/go${{matrix.go-version}}.patch
- name: Remove inbound test for macOS
if: ${{ runner.os == 'macOS' }}
run: |
rm -rf listener/inbound/*_test.go
- name: Test
run: go test ./... -v -count=1

View File

@@ -4,12 +4,14 @@ import (
"context"
"fmt"
"net"
"net/netip"
"strconv"
"sync"
CN "github.com/metacubex/mihomo/common/net"
"github.com/metacubex/mihomo/component/dialer"
"github.com/metacubex/mihomo/component/proxydialer"
"github.com/metacubex/mihomo/component/resolver"
C "github.com/metacubex/mihomo/constant"
mieruclient "github.com/enfein/mieru/v3/apis/client"
@@ -40,6 +42,45 @@ type MieruOption struct {
HandshakeMode string `proxy:"handshake-mode,omitempty"`
}
type mieruPacketDialer struct {
C.Dialer
}
var _ mierucommon.PacketDialer = (*mieruPacketDialer)(nil)
func (pd mieruPacketDialer) ListenPacket(ctx context.Context, network, laddr, raddr string) (net.PacketConn, error) {
rAddrPort, err := netip.ParseAddrPort(raddr)
if err != nil {
return nil, fmt.Errorf("invalid address %s: %w", raddr, err)
}
return pd.Dialer.ListenPacket(ctx, network, laddr, rAddrPort)
}
type mieruDNSResolver struct {
prefer C.DNSPrefer
}
var _ mierucommon.DNSResolver = (*mieruDNSResolver)(nil)
func (dr mieruDNSResolver) LookupIP(ctx context.Context, network, host string) (_ []net.IP, err error) {
var ip netip.Addr
switch dr.prefer {
case C.IPv4Only:
ip, err = resolver.ResolveIPv4WithResolver(ctx, host, resolver.ProxyServerHostResolver)
case C.IPv6Only:
ip, err = resolver.ResolveIPv6WithResolver(ctx, host, resolver.ProxyServerHostResolver)
case C.IPv6Prefer:
ip, err = resolver.ResolveIPPrefer6WithResolver(ctx, host, resolver.ProxyServerHostResolver)
default:
ip, err = resolver.ResolveIPWithResolver(ctx, host, resolver.ProxyServerHostResolver)
}
if err != nil {
return nil, fmt.Errorf("can't resolve ip: %w", err)
}
// TODO: handle IP4P (due to interface limitations, it's currently impossible to modify the port here)
return []net.IP{ip.AsSlice()}, nil
}
// DialContext implements C.ProxyAdapter
func (m *Mieru) DialContext(ctx context.Context, metadata *C.Metadata) (C.Conn, error) {
if err := m.ensureClientIsRunning(); err != nil {
@@ -102,6 +143,8 @@ func (m *Mieru) ensureClientIsRunning() error {
return err
}
config.Dialer = dialer
config.PacketDialer = mieruPacketDialer{Dialer: dialer}
config.Resolver = mieruDNSResolver{prefer: m.prefer}
if err := m.client.Store(config); err != nil {
return err
}
@@ -158,23 +201,21 @@ func (m *Mieru) Close() error {
}
func metadataToMieruNetAddrSpec(metadata *C.Metadata) mierumodel.NetAddrSpec {
spec := mierumodel.NetAddrSpec{
Net: metadata.NetWork.String(),
}
if metadata.Host != "" {
return mierumodel.NetAddrSpec{
AddrSpec: mierumodel.AddrSpec{
FQDN: metadata.Host,
Port: int(metadata.DstPort),
},
Net: "tcp",
spec.AddrSpec = mierumodel.AddrSpec{
FQDN: metadata.Host,
Port: int(metadata.DstPort),
}
} else {
return mierumodel.NetAddrSpec{
AddrSpec: mierumodel.AddrSpec{
IP: metadata.DstIP.AsSlice(),
Port: int(metadata.DstPort),
},
Net: "tcp",
spec.AddrSpec = mierumodel.AddrSpec{
IP: metadata.DstIP.AsSlice(),
Port: int(metadata.DstPort),
}
}
return spec
}
func buildMieruClientConfig(option MieruOption) (*mieruclient.ClientConfig, error) {
@@ -182,7 +223,13 @@ func buildMieruClientConfig(option MieruOption) (*mieruclient.ClientConfig, erro
return nil, fmt.Errorf("failed to validate mieru option: %w", err)
}
transportProtocol := mierupb.TransportProtocol_TCP.Enum()
var transportProtocol = mierupb.TransportProtocol_UNKNOWN_TRANSPORT_PROTOCOL.Enum()
switch option.Transport {
case "TCP":
transportProtocol = mierupb.TransportProtocol_TCP.Enum()
case "UDP":
transportProtocol = mierupb.TransportProtocol_UDP.Enum()
}
var server *mierupb.ServerEndpoint
if net.ParseIP(option.Server) != nil {
// server is an IP address
@@ -240,6 +287,9 @@ func buildMieruClientConfig(option MieruOption) (*mieruclient.ClientConfig, erro
},
Servers: []*mierupb.ServerEndpoint{server},
},
DNSConfig: &mierucommon.ClientDNSConfig{
BypassDialerDNS: true,
},
}
if multiplexing, ok := mierupb.MultiplexingLevel_value[option.Multiplexing]; ok {
config.Profile.Multiplexing = &mierupb.MultiplexingConfig{
@@ -284,8 +334,8 @@ func validateMieruOption(option MieruOption) error {
}
}
if option.Transport != "TCP" {
return fmt.Errorf("transport must be TCP")
if option.Transport != "TCP" && option.Transport != "UDP" {
return fmt.Errorf("transport must be TCP or UDP")
}
if option.UserName == "" {
return fmt.Errorf("username is empty")

View File

@@ -34,7 +34,7 @@ func TestNewMieru(t *testing.T) {
Name: "test",
Server: "example.com",
Port: 10003,
Transport: "TCP",
Transport: "UDP",
UserName: "test",
Password: "test",
},

270
adapter/outbound/sudoku.go Normal file
View File

@@ -0,0 +1,270 @@
package outbound
import (
"context"
"crypto/sha256"
"encoding/binary"
"fmt"
"io"
"net"
"strconv"
"strings"
"time"
"github.com/metacubex/mihomo/log"
"github.com/saba-futai/sudoku/apis"
"github.com/saba-futai/sudoku/pkg/crypto"
"github.com/saba-futai/sudoku/pkg/obfs/httpmask"
"github.com/saba-futai/sudoku/pkg/obfs/sudoku"
N "github.com/metacubex/mihomo/common/net"
"github.com/metacubex/mihomo/component/dialer"
"github.com/metacubex/mihomo/component/proxydialer"
C "github.com/metacubex/mihomo/constant"
)
type Sudoku struct {
*Base
option *SudokuOption
table *sudoku.Table
baseConf apis.ProtocolConfig
}
type SudokuOption struct {
BasicOption
Name string `proxy:"name"`
Server string `proxy:"server"`
Port int `proxy:"port"`
Key string `proxy:"key"`
AEADMethod string `proxy:"aead-method,omitempty"`
PaddingMin *int `proxy:"padding-min,omitempty"`
PaddingMax *int `proxy:"padding-max,omitempty"`
TableType string `proxy:"table-type,omitempty"` // "prefer_ascii" or "prefer_entropy"
HTTPMask bool `proxy:"http-mask,omitempty"`
}
// DialContext implements C.ProxyAdapter
func (s *Sudoku) DialContext(ctx context.Context, metadata *C.Metadata) (C.Conn, error) {
return s.DialContextWithDialer(ctx, dialer.NewDialer(s.DialOptions()...), metadata)
}
// DialContextWithDialer implements C.ProxyAdapter
func (s *Sudoku) DialContextWithDialer(ctx context.Context, d C.Dialer, metadata *C.Metadata) (_ C.Conn, err error) {
if len(s.option.DialerProxy) > 0 {
d, err = proxydialer.NewByName(s.option.DialerProxy, d)
if err != nil {
return nil, err
}
}
cfg, err := s.buildConfig(metadata)
if err != nil {
return nil, err
}
c, err := d.DialContext(ctx, "tcp", s.addr)
if err != nil {
return nil, fmt.Errorf("%s connect error: %w", s.addr, err)
}
defer func() {
safeConnClose(c, err)
}()
if ctx.Done() != nil {
done := N.SetupContextForConn(ctx, c)
defer done(&err)
}
c, err = s.streamConn(c, cfg)
if err != nil {
return nil, err
}
return NewConn(c, s), nil
}
// ListenPacketContext implements C.ProxyAdapter
func (s *Sudoku) ListenPacketContext(ctx context.Context, metadata *C.Metadata) (C.PacketConn, error) {
return nil, C.ErrNotSupport
}
// SupportUOT implements C.ProxyAdapter
func (s *Sudoku) SupportUOT() bool {
return false // Sudoku protocol only supports TCP
}
// SupportWithDialer implements C.ProxyAdapter
func (s *Sudoku) SupportWithDialer() C.NetWork {
return C.TCP
}
// ProxyInfo implements C.ProxyAdapter
func (s *Sudoku) ProxyInfo() C.ProxyInfo {
info := s.Base.ProxyInfo()
info.DialerProxy = s.option.DialerProxy
return info
}
func (s *Sudoku) buildConfig(metadata *C.Metadata) (*apis.ProtocolConfig, error) {
if metadata == nil || metadata.DstPort == 0 || !metadata.Valid() {
return nil, fmt.Errorf("invalid metadata for sudoku outbound")
}
cfg := s.baseConf
cfg.TargetAddress = metadata.RemoteAddress()
if err := cfg.ValidateClient(); err != nil {
return nil, err
}
return &cfg, nil
}
func (s *Sudoku) streamConn(rawConn net.Conn, cfg *apis.ProtocolConfig) (_ net.Conn, err error) {
if !cfg.DisableHTTPMask {
if err = httpmask.WriteRandomRequestHeader(rawConn, cfg.ServerAddress); err != nil {
return nil, fmt.Errorf("write http mask failed: %w", err)
}
}
obfsConn := sudoku.NewConn(rawConn, cfg.Table, cfg.PaddingMin, cfg.PaddingMax, false)
cConn, err := crypto.NewAEADConn(obfsConn, cfg.Key, cfg.AEADMethod)
if err != nil {
return nil, fmt.Errorf("setup crypto failed: %w", err)
}
handshake := buildSudokuHandshakePayload(cfg.Key)
if _, err = cConn.Write(handshake[:]); err != nil {
cConn.Close()
return nil, fmt.Errorf("send handshake failed: %w", err)
}
if err = writeTargetAddress(cConn, cfg.TargetAddress); err != nil {
cConn.Close()
return nil, fmt.Errorf("send target address failed: %w", err)
}
return cConn, nil
}
func NewSudoku(option SudokuOption) (*Sudoku, error) {
if option.Server == "" {
return nil, fmt.Errorf("server is required")
}
if option.Port <= 0 || option.Port > 65535 {
return nil, fmt.Errorf("invalid port: %d", option.Port)
}
if option.Key == "" {
return nil, fmt.Errorf("key is required")
}
tableType := strings.ToLower(option.TableType)
if tableType == "" {
tableType = "prefer_ascii"
}
if tableType != "prefer_ascii" && tableType != "prefer_entropy" {
return nil, fmt.Errorf("table-type must be prefer_ascii or prefer_entropy")
}
seed := option.Key
if recoveredFromKey, err := crypto.RecoverPublicKey(option.Key); err == nil {
seed = crypto.EncodePoint(recoveredFromKey)
}
start := time.Now()
table := sudoku.NewTable(seed, tableType)
log.Infoln("[Sudoku] Tables initialized (%s) in %v", tableType, time.Since(start))
defaultConf := apis.DefaultConfig()
paddingMin := defaultConf.PaddingMin
paddingMax := defaultConf.PaddingMax
if option.PaddingMin != nil {
paddingMin = *option.PaddingMin
}
if option.PaddingMax != nil {
paddingMax = *option.PaddingMax
}
if option.PaddingMin == nil && option.PaddingMax != nil && paddingMax < paddingMin {
paddingMin = paddingMax
}
if option.PaddingMax == nil && option.PaddingMin != nil && paddingMax < paddingMin {
paddingMax = paddingMin
}
baseConf := apis.ProtocolConfig{
ServerAddress: net.JoinHostPort(option.Server, strconv.Itoa(option.Port)),
Key: option.Key,
AEADMethod: defaultConf.AEADMethod,
Table: table,
PaddingMin: paddingMin,
PaddingMax: paddingMax,
HandshakeTimeoutSeconds: defaultConf.HandshakeTimeoutSeconds,
DisableHTTPMask: !option.HTTPMask,
}
if option.AEADMethod != "" {
baseConf.AEADMethod = option.AEADMethod
}
return &Sudoku{
Base: &Base{
name: option.Name,
addr: baseConf.ServerAddress,
tp: C.Sudoku,
udp: false,
tfo: option.TFO,
mpTcp: option.MPTCP,
iface: option.Interface,
rmark: option.RoutingMark,
prefer: C.NewDNSPrefer(option.IPVersion),
},
option: &option,
table: table,
baseConf: baseConf,
}, nil
}
func buildSudokuHandshakePayload(key string) [16]byte {
var payload [16]byte
binary.BigEndian.PutUint64(payload[:8], uint64(time.Now().Unix()))
hash := sha256.Sum256([]byte(key))
copy(payload[8:], hash[:8])
return payload
}
func writeTargetAddress(w io.Writer, rawAddr string) error {
host, portStr, err := net.SplitHostPort(rawAddr)
if err != nil {
return err
}
portInt, err := net.LookupPort("tcp", portStr)
if err != nil {
return err
}
var buf []byte
if ip := net.ParseIP(host); ip != nil {
if ip4 := ip.To4(); ip4 != nil {
buf = append(buf, 0x01) // IPv4
buf = append(buf, ip4...)
} else {
buf = append(buf, 0x04) // IPv6
buf = append(buf, ip...)
}
} else {
if len(host) > 255 {
return fmt.Errorf("domain too long")
}
buf = append(buf, 0x03) // domain
buf = append(buf, byte(len(host)))
buf = append(buf, host...)
}
var portBytes [2]byte
binary.BigEndian.PutUint16(portBytes[:], uint16(portInt))
buf = append(buf, portBytes[:]...)
_, err = w.Write(buf)
return err
}

View File

@@ -186,7 +186,7 @@ func ParseProxyGroup(config map[string]any, proxyMap map[string]C.Proxy, provide
strategy := parseStrategy(config)
return NewLoadBalance(groupOption, providers, strategy)
case "relay":
group = NewRelay(groupOption, providers)
return nil, fmt.Errorf("%w: The group [%s] with relay type was removed, please using dialer-proxy instead", errType, groupName)
default:
return nil, fmt.Errorf("%w: %s", errType, groupOption.Type)
}

View File

@@ -146,6 +146,13 @@ func ParseProxy(mapping map[string]any) (C.Proxy, error) {
break
}
proxy, err = outbound.NewAnyTLS(*anytlsOption)
case "sudoku":
sudokuOption := &outbound.SudokuOption{}
err = decoder.Decode(mapping, sudokuOption)
if err != nil {
break
}
proxy, err = outbound.NewSudoku(*sudokuOption)
default:
return nil, fmt.Errorf("unsupport proxy type: %s", proxyType)
}

View File

@@ -289,7 +289,7 @@ func (d *Decoder) decodeBool(name string, data any, val reflect.Value) (err erro
case isInt(kind) && d.option.WeaklyTypedInput:
val.SetBool(dataVal.Int() != 0)
case isUint(kind) && d.option.WeaklyTypedInput:
val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
val.SetBool(dataVal.Uint() != 0)
default:
err = fmt.Errorf(
"'%s' expected type '%s', got unconvertible type '%s'",

View File

@@ -1,16 +1,17 @@
package xsync
// copy and modified from https://github.com/puzpuzpuz/xsync/blob/v4.1.0/map.go
// copy and modified from https://github.com/puzpuzpuz/xsync/blob/v4.2.0/map.go
// which is licensed under Apache v2.
//
// mihomo modified:
// 1. parallel Map resize has been removed to decrease the memory using.
// 1. restore xsync/v3's LoadOrCompute api and rename to LoadOrStoreFn.
// 2. the zero Map is ready for use.
import (
"fmt"
"math"
"math/bits"
"runtime"
"strings"
"sync"
"sync/atomic"
@@ -41,8 +42,28 @@ const (
metaMask uint64 = 0xffffffffff
defaultMetaMasked uint64 = defaultMeta & metaMask
emptyMetaSlot uint8 = 0x80
// minimal number of buckets to transfer when participating in cooperative
// resize; should be at least defaultMinMapTableLen
minResizeTransferStride = 64
// upper limit for max number of additional goroutines that participate
// in cooperative resize; must be changed simultaneously with resizeCtl
// and the related code
maxResizeHelpersLimit = (1 << 5) - 1
)
// max number of additional goroutines that participate in cooperative resize;
// "resize owner" goroutine isn't counted
var maxResizeHelpers = func() int32 {
v := int32(parallelism() - 1)
if v < 1 {
v = 1
}
if v > maxResizeHelpersLimit {
v = maxResizeHelpersLimit
}
return v
}()
type mapResizeHint int
const (
@@ -100,16 +121,25 @@ type Map[K comparable, V any] struct {
initOnce sync.Once
totalGrowths atomic.Int64
totalShrinks atomic.Int64
resizing atomic.Bool // resize in progress flag
resizeMu sync.Mutex // only used along with resizeCond
resizeCond sync.Cond // used to wake up resize waiters (concurrent modifications)
table atomic.Pointer[mapTable[K, V]]
minTableLen int
growOnly bool
// table being transferred to
nextTable atomic.Pointer[mapTable[K, V]]
// resize control state: combines resize sequence number (upper 59 bits) and
// the current number of resize helpers (lower 5 bits);
// odd values of resize sequence mean in-progress resize
resizeCtl atomic.Uint64
// only used along with resizeCond
resizeMu sync.Mutex
// used to wake up resize waiters (concurrent writes)
resizeCond sync.Cond
// transfer progress index for resize
resizeIdx atomic.Int64
minTableLen int
growOnly bool
}
type mapTable[K comparable, V any] struct {
buckets []bucketPadded[K, V]
buckets []bucketPadded
// striped counter for number of table entries;
// used to determine if a table shrinking is needed
// occupies min(buckets_memory/1024, 64KB) of memory
@@ -125,16 +155,16 @@ type counterStripe struct {
// bucketPadded is a CL-sized map bucket holding up to
// entriesPerMapBucket entries.
type bucketPadded[K comparable, V any] struct {
type bucketPadded struct {
//lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs
pad [cacheLineSize - unsafe.Sizeof(bucket[K, V]{})]byte
bucket[K, V]
pad [cacheLineSize - unsafe.Sizeof(bucket{})]byte
bucket
}
type bucket[K comparable, V any] struct {
meta atomic.Uint64
entries [entriesPerMapBucket]atomic.Pointer[entry[K, V]] // *entry
next atomic.Pointer[bucketPadded[K, V]] // *bucketPadded
type bucket struct {
meta uint64
entries [entriesPerMapBucket]unsafe.Pointer // *entry
next unsafe.Pointer // *bucketPadded
mu sync.Mutex
}
@@ -194,15 +224,15 @@ func (m *Map[K, V]) init() {
m.minTableLen = defaultMinMapTableLen
}
m.resizeCond = *sync.NewCond(&m.resizeMu)
table := newMapTable[K, V](m.minTableLen)
table := newMapTable[K, V](m.minTableLen, maphash.MakeSeed())
m.minTableLen = len(table.buckets)
m.table.Store(table)
}
func newMapTable[K comparable, V any](minTableLen int) *mapTable[K, V] {
buckets := make([]bucketPadded[K, V], minTableLen)
func newMapTable[K comparable, V any](minTableLen int, seed maphash.Seed) *mapTable[K, V] {
buckets := make([]bucketPadded, minTableLen)
for i := range buckets {
buckets[i].meta.Store(defaultMeta)
buckets[i].meta = defaultMeta
}
counterLen := minTableLen >> 10
if counterLen < minMapCounterLen {
@@ -214,7 +244,7 @@ func newMapTable[K comparable, V any](minTableLen int) *mapTable[K, V] {
t := &mapTable[K, V]{
buckets: buckets,
size: counter,
seed: maphash.MakeSeed(),
seed: seed,
}
return t
}
@@ -246,22 +276,24 @@ func (m *Map[K, V]) Load(key K) (value V, ok bool) {
bidx := uint64(len(table.buckets)-1) & h1
b := &table.buckets[bidx]
for {
metaw := b.meta.Load()
metaw := atomic.LoadUint64(&b.meta)
markedw := markZeroBytes(metaw^h2w) & metaMask
for markedw != 0 {
idx := firstMarkedByteIndex(markedw)
e := b.entries[idx].Load()
if e != nil {
eptr := atomic.LoadPointer(&b.entries[idx])
if eptr != nil {
e := (*entry[K, V])(eptr)
if e.key == key {
return e.value, true
}
}
markedw &= markedw - 1
}
b = b.next.Load()
if b == nil {
bptr := atomic.LoadPointer(&b.next)
if bptr == nil {
return
}
b = (*bucketPadded)(bptr)
}
}
@@ -399,7 +431,7 @@ func (m *Map[K, V]) doCompute(
for {
compute_attempt:
var (
emptyb *bucketPadded[K, V]
emptyb *bucketPadded
emptyidx int
)
table := m.table.Load()
@@ -415,12 +447,13 @@ func (m *Map[K, V]) doCompute(
b := rootb
load:
for {
metaw := b.meta.Load()
metaw := atomic.LoadUint64(&b.meta)
markedw := markZeroBytes(metaw^h2w) & metaMask
for markedw != 0 {
idx := firstMarkedByteIndex(markedw)
e := b.entries[idx].Load()
if e != nil {
eptr := atomic.LoadPointer(&b.entries[idx])
if eptr != nil {
e := (*entry[K, V])(eptr)
if e.key == key {
if loadOp == loadOrComputeOp {
return e.value, true
@@ -430,23 +463,24 @@ func (m *Map[K, V]) doCompute(
}
markedw &= markedw - 1
}
b = b.next.Load()
if b == nil {
bptr := atomic.LoadPointer(&b.next)
if bptr == nil {
if loadOp == loadAndDeleteOp {
return *new(V), false
}
break load
}
b = (*bucketPadded)(bptr)
}
}
rootb.mu.Lock()
// The following two checks must go in reverse to what's
// in the resize method.
if m.resizeInProgress() {
// Resize is in progress. Wait, then go for another attempt.
if seq := resizeSeq(m.resizeCtl.Load()); seq&1 == 1 {
// Resize is in progress. Help with the transfer, then go for another attempt.
rootb.mu.Unlock()
m.waitForResize()
m.helpResize(seq)
goto compute_attempt
}
if m.newerTableExists(table) {
@@ -456,12 +490,13 @@ func (m *Map[K, V]) doCompute(
}
b := rootb
for {
metaw := b.meta.Load()
metaw := b.meta
markedw := markZeroBytes(metaw^h2w) & metaMask
for markedw != 0 {
idx := firstMarkedByteIndex(markedw)
e := b.entries[idx].Load()
if e != nil {
eptr := b.entries[idx]
if eptr != nil {
e := (*entry[K, V])(eptr)
if e.key == key {
// In-place update/delete.
// We get a copy of the value via an interface{} on each call,
@@ -475,8 +510,8 @@ func (m *Map[K, V]) doCompute(
// Deletion.
// First we update the hash, then the entry.
newmetaw := setByte(metaw, emptyMetaSlot, idx)
b.meta.Store(newmetaw)
b.entries[idx].Store(nil)
atomic.StoreUint64(&b.meta, newmetaw)
atomic.StorePointer(&b.entries[idx], nil)
rootb.mu.Unlock()
table.addSize(bidx, -1)
// Might need to shrink the table if we left bucket empty.
@@ -488,7 +523,7 @@ func (m *Map[K, V]) doCompute(
newe := new(entry[K, V])
newe.key = key
newe.value = newv
b.entries[idx].Store(newe)
atomic.StorePointer(&b.entries[idx], unsafe.Pointer(newe))
case CancelOp:
newv = oldv
}
@@ -512,7 +547,7 @@ func (m *Map[K, V]) doCompute(
emptyidx = idx
}
}
if b.next.Load() == nil {
if b.next == nil {
if emptyb != nil {
// Insertion into an existing bucket.
var zeroV V
@@ -526,8 +561,8 @@ func (m *Map[K, V]) doCompute(
newe.key = key
newe.value = newValue
// First we update meta, then the entry.
emptyb.meta.Store(setByte(emptyb.meta.Load(), h2, emptyidx))
emptyb.entries[emptyidx].Store(newe)
atomic.StoreUint64(&emptyb.meta, setByte(emptyb.meta, h2, emptyidx))
atomic.StorePointer(&emptyb.entries[emptyidx], unsafe.Pointer(newe))
rootb.mu.Unlock()
table.addSize(bidx, 1)
return newValue, computeOnly
@@ -549,19 +584,19 @@ func (m *Map[K, V]) doCompute(
return newValue, false
default:
// Create and append a bucket.
newb := new(bucketPadded[K, V])
newb.meta.Store(setByte(defaultMeta, h2, 0))
newb := new(bucketPadded)
newb.meta = setByte(defaultMeta, h2, 0)
newe := new(entry[K, V])
newe.key = key
newe.value = newValue
newb.entries[0].Store(newe)
b.next.Store(newb)
newb.entries[0] = unsafe.Pointer(newe)
atomic.StorePointer(&b.next, unsafe.Pointer(newb))
rootb.mu.Unlock()
table.addSize(bidx, 1)
return newValue, computeOnly
}
}
b = b.next.Load()
b = (*bucketPadded)(b.next)
}
}
}
@@ -570,13 +605,21 @@ func (m *Map[K, V]) newerTableExists(table *mapTable[K, V]) bool {
return table != m.table.Load()
}
func (m *Map[K, V]) resizeInProgress() bool {
return m.resizing.Load()
func resizeSeq(ctl uint64) uint64 {
return ctl >> 5
}
func resizeHelpers(ctl uint64) uint64 {
return ctl & maxResizeHelpersLimit
}
func resizeCtl(seq uint64, helpers uint64) uint64 {
return (seq << 5) | (helpers & maxResizeHelpersLimit)
}
func (m *Map[K, V]) waitForResize() {
m.resizeMu.Lock()
for m.resizeInProgress() {
for resizeSeq(m.resizeCtl.Load())&1 == 1 {
m.resizeCond.Wait()
}
m.resizeMu.Unlock()
@@ -593,9 +636,9 @@ func (m *Map[K, V]) resize(knownTable *mapTable[K, V], hint mapResizeHint) {
}
}
// Slow path.
if !m.resizing.CompareAndSwap(false, true) {
// Someone else started resize. Wait for it to finish.
m.waitForResize()
seq := resizeSeq(m.resizeCtl.Load())
if seq&1 == 1 || !m.resizeCtl.CompareAndSwap(resizeCtl(seq, 0), resizeCtl(seq+1, 0)) {
m.helpResize(seq)
return
}
var newTable *mapTable[K, V]
@@ -604,64 +647,189 @@ func (m *Map[K, V]) resize(knownTable *mapTable[K, V], hint mapResizeHint) {
switch hint {
case mapGrowHint:
// Grow the table with factor of 2.
// We must keep the same table seed here to keep the same hash codes
// allowing us to avoid locking destination buckets when resizing.
m.totalGrowths.Add(1)
newTable = newMapTable[K, V](tableLen << 1)
newTable = newMapTable[K, V](tableLen<<1, table.seed)
case mapShrinkHint:
shrinkThreshold := int64((tableLen * entriesPerMapBucket) / mapShrinkFraction)
if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold {
// Shrink the table with factor of 2.
// It's fine to generate a new seed since full locking
// is required anyway.
m.totalShrinks.Add(1)
newTable = newMapTable[K, V](tableLen >> 1)
newTable = newMapTable[K, V](tableLen>>1, maphash.MakeSeed())
} else {
// No need to shrink. Wake up all waiters and give up.
m.resizeMu.Lock()
m.resizing.Store(false)
m.resizeCtl.Store(resizeCtl(seq+2, 0))
m.resizeCond.Broadcast()
m.resizeMu.Unlock()
return
}
case mapClearHint:
newTable = newMapTable[K, V](m.minTableLen)
newTable = newMapTable[K, V](m.minTableLen, maphash.MakeSeed())
default:
panic(fmt.Sprintf("unexpected resize hint: %d", hint))
}
// Copy the data only if we're not clearing the map.
if hint != mapClearHint {
for i := 0; i < tableLen; i++ {
copied := copyBucket(&table.buckets[i], newTable)
newTable.addSizePlain(uint64(i), copied)
}
// Set up cooperative transfer state.
// Next table must be published as the last step.
m.resizeIdx.Store(0)
m.nextTable.Store(newTable)
// Copy the buckets.
m.transfer(table, newTable)
}
// We're about to publish the new table, but before that
// we must wait for all helpers to finish.
for resizeHelpers(m.resizeCtl.Load()) != 0 {
runtime.Gosched()
}
// Publish the new table and wake up all waiters.
m.table.Store(newTable)
m.nextTable.Store(nil)
ctl := resizeCtl(seq+1, 0)
newCtl := resizeCtl(seq+2, 0)
// Increment the sequence number and wake up all waiters.
m.resizeMu.Lock()
m.resizing.Store(false)
// There may be slowpoke helpers who have just incremented
// the helper counter. This CAS loop makes sure to wait
// for them to back off.
for !m.resizeCtl.CompareAndSwap(ctl, newCtl) {
runtime.Gosched()
}
m.resizeCond.Broadcast()
m.resizeMu.Unlock()
}
func copyBucket[K comparable, V any](
b *bucketPadded[K, V],
func (m *Map[K, V]) helpResize(seq uint64) {
for {
table := m.table.Load()
nextTable := m.nextTable.Load()
if resizeSeq(m.resizeCtl.Load()) == seq {
if nextTable == nil || nextTable == table {
// Carry on until the next table is set by the main
// resize goroutine or until the resize finishes.
runtime.Gosched()
continue
}
// The resize is still in-progress, so let's try registering
// as a helper.
for {
ctl := m.resizeCtl.Load()
if resizeSeq(ctl) != seq || resizeHelpers(ctl) >= uint64(maxResizeHelpers) {
// The resize has ended or there are too many helpers.
break
}
if m.resizeCtl.CompareAndSwap(ctl, ctl+1) {
// Yay, we're a resize helper!
m.transfer(table, nextTable)
// Don't forget to unregister as a helper.
m.resizeCtl.Add(^uint64(0))
break
}
}
m.waitForResize()
}
break
}
}
func (m *Map[K, V]) transfer(table, newTable *mapTable[K, V]) {
tableLen := len(table.buckets)
newTableLen := len(newTable.buckets)
stride := (tableLen >> 3) / int(maxResizeHelpers)
if stride < minResizeTransferStride {
stride = minResizeTransferStride
}
for {
// Claim work by incrementing resizeIdx.
nextIdx := m.resizeIdx.Add(int64(stride))
start := int(nextIdx) - stride
if start < 0 {
start = 0
}
if start > tableLen {
break
}
end := int(nextIdx)
if end > tableLen {
end = tableLen
}
// Transfer buckets in this range.
total := 0
if newTableLen > tableLen {
// We're growing the table with 2x multiplier, so entries from a N bucket can
// only be transferred to N and 2*N buckets in the new table. Thus, destination
// buckets written by the resize helpers don't intersect, so we don't need to
// acquire locks in the destination buckets.
for i := start; i < end; i++ {
total += transferBucketUnsafe(&table.buckets[i], newTable)
}
} else {
// We're shrinking the table, so all locks must be acquired.
for i := start; i < end; i++ {
total += transferBucket(&table.buckets[i], newTable)
}
}
// The exact counter stripe doesn't matter here, so pick up the one
// that corresponds to the start value to avoid contention.
newTable.addSize(uint64(start), total)
}
}
// Doesn't acquire dest bucket lock.
func transferBucketUnsafe[K comparable, V any](
b *bucketPadded,
destTable *mapTable[K, V],
) (copied int) {
rootb := b
rootb.mu.Lock()
for {
for i := 0; i < entriesPerMapBucket; i++ {
if e := b.entries[i].Load(); e != nil {
if eptr := b.entries[i]; eptr != nil {
e := (*entry[K, V])(eptr)
hash := maphash.Comparable(destTable.seed, e.key)
bidx := uint64(len(destTable.buckets)-1) & h1(hash)
destb := &destTable.buckets[bidx]
appendToBucket(h2(hash), b.entries[i].Load(), destb)
appendToBucket(h2(hash), e, destb)
copied++
}
}
if next := b.next.Load(); next == nil {
if b.next == nil {
rootb.mu.Unlock()
return
} else {
b = next
}
b = (*bucketPadded)(b.next)
}
}
func transferBucket[K comparable, V any](
b *bucketPadded,
destTable *mapTable[K, V],
) (copied int) {
rootb := b
rootb.mu.Lock()
for {
for i := 0; i < entriesPerMapBucket; i++ {
if eptr := b.entries[i]; eptr != nil {
e := (*entry[K, V])(eptr)
hash := maphash.Comparable(destTable.seed, e.key)
bidx := uint64(len(destTable.buckets)-1) & h1(hash)
destb := &destTable.buckets[bidx]
destb.mu.Lock()
appendToBucket(h2(hash), e, destb)
destb.mu.Unlock()
copied++
}
}
if b.next == nil {
rootb.mu.Unlock()
return
}
b = (*bucketPadded)(b.next)
}
}
@@ -691,16 +859,15 @@ func (m *Map[K, V]) Range(f func(key K, value V) bool) {
rootb.mu.Lock()
for {
for i := 0; i < entriesPerMapBucket; i++ {
if entry := b.entries[i].Load(); entry != nil {
bentries = append(bentries, entry)
if b.entries[i] != nil {
bentries = append(bentries, (*entry[K, V])(b.entries[i]))
}
}
if next := b.next.Load(); next == nil {
if b.next == nil {
rootb.mu.Unlock()
break
} else {
b = next
}
b = (*bucketPadded)(b.next)
}
// Call the function for all copied entries.
for j, e := range bentries {
@@ -727,24 +894,25 @@ func (m *Map[K, V]) Size() int {
return int(m.table.Load().sumSize())
}
func appendToBucket[K comparable, V any](h2 uint8, e *entry[K, V], b *bucketPadded[K, V]) {
// It is safe to use plain stores here because the destination bucket must be
// either locked or exclusively written to by the helper during resize.
func appendToBucket[K comparable, V any](h2 uint8, e *entry[K, V], b *bucketPadded) {
for {
for i := 0; i < entriesPerMapBucket; i++ {
if b.entries[i].Load() == nil {
b.meta.Store(setByte(b.meta.Load(), h2, i))
b.entries[i].Store(e)
if b.entries[i] == nil {
b.meta = setByte(b.meta, h2, i)
b.entries[i] = unsafe.Pointer(e)
return
}
}
if next := b.next.Load(); next == nil {
newb := new(bucketPadded[K, V])
newb.meta.Store(setByte(defaultMeta, h2, 0))
newb.entries[0].Store(e)
b.next.Store(newb)
if b.next == nil {
newb := new(bucketPadded)
newb.meta = setByte(defaultMeta, h2, 0)
newb.entries[0] = unsafe.Pointer(e)
b.next = unsafe.Pointer(newb)
return
} else {
b = next
}
b = (*bucketPadded)(b.next)
}
}
@@ -753,11 +921,6 @@ func (table *mapTable[K, V]) addSize(bucketIdx uint64, delta int) {
atomic.AddInt64(&table.size[cidx].c, int64(delta))
}
func (table *mapTable[K, V]) addSizePlain(bucketIdx uint64, delta int) {
cidx := uint64(len(table.size)-1) & bucketIdx
table.size[cidx].c += int64(delta)
}
func (table *mapTable[K, V]) sumSize() int64 {
sum := int64(0)
for i := range table.size {
@@ -856,7 +1019,7 @@ func (m *Map[K, V]) Stats() MapStats {
nentriesLocal := 0
stats.Capacity += entriesPerMapBucket
for i := 0; i < entriesPerMapBucket; i++ {
if b.entries[i].Load() != nil {
if atomic.LoadPointer(&b.entries[i]) != nil {
stats.Size++
nentriesLocal++
}
@@ -865,11 +1028,10 @@ func (m *Map[K, V]) Stats() MapStats {
if nentriesLocal == 0 {
stats.EmptyBuckets++
}
if next := b.next.Load(); next == nil {
if b.next == nil {
break
} else {
b = next
}
b = (*bucketPadded)(atomic.LoadPointer(&b.next))
stats.TotalBuckets++
}
if nentries < stats.MinEntries {
@@ -906,6 +1068,15 @@ func nextPowOf2(v uint32) uint32 {
return v
}
func parallelism() uint32 {
maxProcs := uint32(runtime.GOMAXPROCS(0))
numCores := uint32(runtime.NumCPU())
if maxProcs < numCores {
return maxProcs
}
return numCores
}
func broadcast(b uint8) uint64 {
return 0x101010101010101 * uint64(b)
}
@@ -920,6 +1091,7 @@ func markZeroBytes(w uint64) uint64 {
return ((w - 0x0101010101010101) & (^w) & 0x8080808080808080)
}
// Sets byte of the input word at the specified index to the given value.
func setByte(w uint64, b uint8, idx int) uint64 {
shift := idx << 3
return (w &^ (0xff << shift)) | (uint64(b) << shift)

View File

@@ -3,6 +3,7 @@ package xsync
import (
"math"
"math/rand"
"runtime"
"strconv"
"sync"
"sync/atomic"
@@ -53,11 +54,11 @@ func runParallel(b *testing.B, benchFn func(pb *testing.PB)) {
}
func TestMap_BucketStructSize(t *testing.T) {
size := unsafe.Sizeof(bucketPadded[string, int64]{})
size := unsafe.Sizeof(bucketPadded{})
if size != 64 {
t.Fatalf("size of 64B (one cache line) is expected, got: %d", size)
}
size = unsafe.Sizeof(bucketPadded[struct{}, int32]{})
size = unsafe.Sizeof(bucketPadded{})
if size != 64 {
t.Fatalf("size of 64B (one cache line) is expected, got: %d", size)
}
@@ -743,10 +744,7 @@ func TestNewMapGrowOnly_OnlyShrinksOnClear(t *testing.T) {
}
func TestMapResize(t *testing.T) {
testMapResize(t, NewMap[string, int]())
}
func testMapResize(t *testing.T, m *Map[string, int]) {
m := NewMap[string, int]()
const numEntries = 100_000
for i := 0; i < numEntries; i++ {
@@ -810,6 +808,147 @@ func TestMapResize_CounterLenLimit(t *testing.T) {
}
}
// testParallelResize verifies that concurrent Store/Load operations stay
// correct while the map table grows, exercising the cooperative resize path
// with the given number of writer goroutines. Each goroutine writes a
// disjoint key range, so the final size is exactly predictable.
func testParallelResize(t *testing.T, numGoroutines int) {
	m := NewMap[int, int]()
	// Fill the map to trigger resizing
	const initialEntries = 10000
	const newEntries = 5000
	for i := 0; i < initialEntries; i++ {
		m.Store(i, i*2)
	}
	// Start concurrent operations that should trigger helping behavior
	var wg sync.WaitGroup
	// Launch goroutines that will encounter resize operations
	for g := 0; g < numGoroutines; g++ {
		wg.Add(1)
		go func(goroutineID int) {
			defer wg.Done()
			// Perform many operations to trigger resize and helping.
			// Keys start above initialEntries and are partitioned per
			// goroutine, so writers never collide on a key.
			for i := 0; i < newEntries; i++ {
				key := goroutineID*newEntries + i + initialEntries
				m.Store(key, key*2)
				// Verify the value immediately; a failed read here points at
				// a lost update during a concurrent resize.
				if val, ok := m.Load(key); !ok || val != key*2 {
					t.Errorf("Failed to load key %d: got %v, %v", key, val, ok)
					return
				}
			}
		}(g)
	}
	wg.Wait()
	// Verify all entries are present
	finalSize := m.Size()
	expectedSize := initialEntries + numGoroutines*newEntries
	if finalSize != expectedSize {
		t.Errorf("Expected size %d, got %d", expectedSize, finalSize)
	}
	stats := m.Stats()
	if stats.TotalGrowths == 0 {
		t.Error("Expected at least one table growth due to concurrent operations")
	}
}
// TestMapParallelResize exercises growth under a single writer, one writer
// per CPU, and heavy goroutine oversubscription.
func TestMapParallelResize(t *testing.T) {
	for _, goroutines := range []int{1, runtime.GOMAXPROCS(0), 100} {
		testParallelResize(t, goroutines)
	}
}
// testParallelResizeWithSameKeys stresses concurrent Store calls where every
// goroutine writes the same overlapping key range, so the same entries are
// repeatedly overwritten while the table grows.
func testParallelResizeWithSameKeys(t *testing.T, numGoroutines int) {
	m := NewMap[int, int]()
	// Fill the map to trigger resizing
	const entries = 1000
	for i := 0; i < entries; i++ {
		m.Store(2*i, 2*i)
	}
	// Start concurrent operations that should trigger helping behavior
	var wg sync.WaitGroup
	// Launch goroutines that will encounter resize operations
	for g := 0; g < numGoroutines; g++ {
		wg.Add(1)
		go func(goroutineID int) {
			defer wg.Done()
			// Every goroutine stores keys 0..10*entries-1, overlapping both
			// the pre-filled even keys and the other goroutines' writes.
			for i := 0; i < 10*entries; i++ {
				m.Store(i, i)
			}
		}(g)
	}
	wg.Wait()
	// Duplicates collapse, so exactly 10*entries distinct keys remain.
	finalSize := m.Size()
	expectedSize := 10 * entries
	if finalSize != expectedSize {
		t.Errorf("Expected size %d, got %d", expectedSize, finalSize)
	}
	stats := m.Stats()
	if stats.TotalGrowths == 0 {
		t.Error("Expected at least one table growth due to concurrent operations")
	}
}
// TestMapParallelResize_IntersectingKeys runs the overlapping-key stress at
// three concurrency levels: single writer, one per CPU, and oversubscribed.
func TestMapParallelResize_IntersectingKeys(t *testing.T) {
	for _, goroutines := range []int{1, runtime.GOMAXPROCS(0), 100} {
		testParallelResizeWithSameKeys(t, goroutines)
	}
}
// testParallelShrinking deletes every entry from multiple goroutines at once
// and checks that the emptied map shrinks its table at least once.
func testParallelShrinking(t *testing.T, numGoroutines int) {
	m := NewMap[int, int]()
	// Fill the map to trigger resizing
	const entries = 100000
	for i := 0; i < entries; i++ {
		m.Store(i, i)
	}
	// Start concurrent operations that should trigger helping behavior
	var wg sync.WaitGroup
	// Launch goroutines that will encounter resize operations
	for g := 0; g < numGoroutines; g++ {
		wg.Add(1)
		go func(goroutineID int) {
			defer wg.Done()
			// All goroutines delete the same key range; duplicate deletes
			// are no-ops, so the end state is an empty map regardless of
			// interleaving.
			for i := 0; i < entries; i++ {
				m.Delete(i)
			}
		}(g)
	}
	wg.Wait()
	// Verify all entries were removed
	finalSize := m.Size()
	if finalSize != 0 {
		t.Errorf("Expected size 0, got %d", finalSize)
	}
	stats := m.Stats()
	if stats.TotalShrinks == 0 {
		t.Error("Expected at least one table shrinking due to concurrent operations")
	}
}
// TestMapParallelShrinking verifies table shrinking with one deleter, one
// deleter per CPU, and an oversubscribed goroutine count.
func TestMapParallelShrinking(t *testing.T) {
	for _, goroutines := range []int{1, runtime.GOMAXPROCS(0), 100} {
		testParallelShrinking(t, goroutines)
	}
}
func parallelSeqMapGrower(m *Map[int, int], numEntries int, positive bool, cdone chan bool) {
for i := 0; i < numEntries; i++ {
if positive {
@@ -1459,7 +1598,7 @@ func BenchmarkMapRange(b *testing.B) {
}
// Benchmarks noop performance of Compute
func BenchmarkCompute(b *testing.B) {
func BenchmarkMapCompute(b *testing.B) {
tests := []struct {
Name string
Op ComputeOp
@@ -1487,6 +1626,57 @@ func BenchmarkCompute(b *testing.B) {
}
}
// BenchmarkMapParallelRehashing measures bulk-insert throughput while the
// table is repeatedly grown, at several levels of writer concurrency.
// Throughput is reported manually as entries/s so the metric covers only
// the Store loop, not map construction or verification.
func BenchmarkMapParallelRehashing(b *testing.B) {
	tests := []struct {
		name       string
		goroutines int
		numEntries int
	}{
		{"1goroutine_10M", 1, 10_000_000},
		{"4goroutines_10M", 4, 10_000_000},
		{"8goroutines_10M", 8, 10_000_000},
	}
	for _, test := range tests {
		b.Run(test.name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				m := NewMap[int, int]()
				var wg sync.WaitGroup
				entriesPerGoroutine := test.numEntries / test.goroutines
				// Time the fill phase explicitly rather than relying on b.N timing.
				start := time.Now()
				for g := 0; g < test.goroutines; g++ {
					wg.Add(1)
					go func(goroutineID int) {
						defer wg.Done()
						// Each goroutine inserts a disjoint, contiguous key range.
						base := goroutineID * entriesPerGoroutine
						for j := 0; j < entriesPerGoroutine; j++ {
							key := base + j
							m.Store(key, key)
						}
					}(g)
				}
				wg.Wait()
				duration := time.Since(start)
				b.ReportMetric(float64(test.numEntries)/duration.Seconds(), "entries/s")
				// Sanity check: nothing was lost and growth actually occurred.
				finalSize := m.Size()
				if finalSize != test.numEntries {
					b.Fatalf("Expected size %d, got %d", test.numEntries, finalSize)
				}
				stats := m.Stats()
				if stats.TotalGrowths == 0 {
					b.Error("Expected at least one table growth during rehashing")
				}
			}
		})
	}
}
func TestNextPowOf2(t *testing.T) {
if nextPowOf2(0) != 1 {
t.Error("nextPowOf2 failed")

View File

@@ -1,9 +1,9 @@
package memory
import (
"syscall"
"unsafe"
"github.com/ebitengine/purego"
_ "unsafe"
)
const PROC_PIDTASKINFO = 4
@@ -29,24 +29,12 @@ type ProcTaskInfo struct {
Priority int32
}
const System = "/usr/lib/libSystem.B.dylib"
type ProcPidInfoFunc func(pid, flavor int32, arg uint64, buffer uintptr, bufferSize int32) int32
const ProcPidInfoSym = "proc_pidinfo"
func GetMemoryInfo(pid int32) (*MemoryInfoStat, error) {
lib, err := purego.Dlopen(System, purego.RTLD_LAZY|purego.RTLD_GLOBAL)
if err != nil {
return nil, err
}
defer purego.Dlclose(lib)
var procPidInfo ProcPidInfoFunc
purego.RegisterLibFunc(&procPidInfo, lib, ProcPidInfoSym)
var ti ProcTaskInfo
procPidInfo(pid, PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), int32(unsafe.Sizeof(ti)))
_, _, errno := syscall_syscall6(proc_pidinfo_trampoline_addr, uintptr(pid), PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), unsafe.Sizeof(ti), 0)
if errno != 0 {
return nil, errno
}
ret := &MemoryInfoStat{
RSS: uint64(ti.Resident_size),
@@ -54,3 +42,26 @@ func GetMemoryInfo(pid int32) (*MemoryInfoStat, error) {
}
return ret, nil
}
var proc_pidinfo_trampoline_addr uintptr
//go:cgo_import_dynamic proc_pidinfo proc_pidinfo "/usr/lib/libSystem.B.dylib"
// from golang.org/x/sys@v0.30.0/unix/syscall_darwin_libSystem.go
// Implemented in the runtime package (runtime/sys_darwin.go)
func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno)
func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
func syscall_syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // 32-bit only
func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno)
func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
func syscall_syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno)
//go:linkname syscall_syscall syscall.syscall
//go:linkname syscall_syscall6 syscall.syscall6
//go:linkname syscall_syscall6X syscall.syscall6X
//go:linkname syscall_syscall9 syscall.syscall9
//go:linkname syscall_rawSyscall syscall.rawSyscall
//go:linkname syscall_rawSyscall6 syscall.rawSyscall6
//go:linkname syscall_syscallPtr syscall.syscallPtr

View File

@@ -0,0 +1,9 @@
// go run mkasm.go darwin amd64
// Code generated by the command above; DO NOT EDIT.
#include "textflag.h"
TEXT proc_pidinfo_trampoline<>(SB),NOSPLIT,$0-0
JMP proc_pidinfo(SB)
GLOBL ·proc_pidinfo_trampoline_addr(SB), RODATA, $8
DATA ·proc_pidinfo_trampoline_addr(SB)/8, $proc_pidinfo_trampoline<>(SB)

View File

@@ -0,0 +1,9 @@
// go run mkasm.go darwin arm64
// Code generated by the command above; DO NOT EDIT.
#include "textflag.h"
TEXT proc_pidinfo_trampoline<>(SB),NOSPLIT,$0-0
JMP proc_pidinfo(SB)
GLOBL ·proc_pidinfo_trampoline_addr(SB), RODATA, $8
DATA ·proc_pidinfo_trampoline_addr(SB)/8, $proc_pidinfo_trampoline<>(SB)

View File

@@ -162,6 +162,7 @@ type DNS struct {
FakeIPRange6 netip.Prefix
FakeIPPool6 *fakeip.Pool
FakeIPSkipper *fakeip.Skipper
FakeIPTTL int
NameServerPolicy []dns.Policy
ProxyServerNameserver []dns.NameServer
DirectNameServer []dns.NameServer
@@ -228,6 +229,7 @@ type RawDNS struct {
FakeIPRange6 string `yaml:"fake-ip-range6" json:"fake-ip-range6"`
FakeIPFilter []string `yaml:"fake-ip-filter" json:"fake-ip-filter"`
FakeIPFilterMode C.FilterMode `yaml:"fake-ip-filter-mode" json:"fake-ip-filter-mode"`
FakeIPTTL int `yaml:"fake-ip-ttl" json:"fake-ip-ttl"`
DefaultNameserver []string `yaml:"default-nameserver" json:"default-nameserver"`
CacheAlgorithm string `yaml:"cache-algorithm" json:"cache-algorithm"`
CacheMaxSize int `yaml:"cache-max-size" json:"cache-max-size"`
@@ -490,6 +492,7 @@ func DefaultRawConfig() *RawConfig {
IPv6Timeout: 100,
EnhancedMode: C.DNSMapping,
FakeIPRange: "198.18.0.1/16",
FakeIPTTL: 1,
FallbackFilter: RawFallbackFilter{
GeoIP: true,
GeoIPCode: "CN",
@@ -1458,6 +1461,7 @@ func parseDNS(rawCfg *RawConfig, ruleProviders map[string]P.RuleProvider) (*DNS,
Mode: cfg.FakeIPFilterMode,
}
dnsCfg.FakeIPSkipper = skipper
dnsCfg.FakeIPTTL = cfg.FakeIPTTL
if dnsCfg.FakeIPRange.IsValid() {
pool, err := fakeip.New(fakeip.Options{

View File

@@ -44,6 +44,7 @@ const (
Ssh
Mieru
AnyTLS
Sudoku
)
const (
@@ -230,6 +231,8 @@ func (at AdapterType) String() string {
return "Mieru"
case AnyTLS:
return "AnyTLS"
case Sudoku:
return "Sudoku"
case Relay:
return "Relay"
case Selector:

View File

@@ -39,6 +39,7 @@ const (
HYSTERIA2
ANYTLS
MIERU
SUDOKU
INNER
)
@@ -112,6 +113,8 @@ func (t Type) String() string {
return "AnyTLS"
case MIERU:
return "Mieru"
case SUDOKU:
return "Sudoku"
case INNER:
return "Inner"
default:
@@ -154,6 +157,8 @@ func ParseType(t string) (*Type, error) {
res = ANYTLS
case "MIERU":
res = MIERU
case "SUDOKU":
res = SUDOKU
case "INNER":
res = INNER
default:

View File

@@ -1,6 +1,7 @@
package dns
import (
"errors"
"net/netip"
"github.com/metacubex/mihomo/common/lru"
@@ -13,6 +14,7 @@ type ResolverEnhancer struct {
fakeIPPool *fakeip.Pool
fakeIPPool6 *fakeip.Pool
fakeIPSkipper *fakeip.Skipper
fakeIPTTL int
mapping *lru.LruCache[netip.Addr, string]
useHosts bool
}
@@ -31,11 +33,15 @@ func (h *ResolverEnhancer) IsExistFakeIP(ip netip.Addr) bool {
}
if pool := h.fakeIPPool; pool != nil {
return pool.Exist(ip)
if pool.Exist(ip) {
return true
}
}
if pool6 := h.fakeIPPool6; pool6 != nil {
return pool6.Exist(ip)
if pool6.Exist(ip) {
return true
}
}
return false
@@ -47,11 +53,15 @@ func (h *ResolverEnhancer) IsFakeIP(ip netip.Addr) bool {
}
if pool := h.fakeIPPool; pool != nil {
return pool.IPNet().Contains(ip) && ip != pool.Gateway() && ip != pool.Broadcast()
if pool.IPNet().Contains(ip) && ip != pool.Gateway() && ip != pool.Broadcast() {
return true
}
}
if pool6 := h.fakeIPPool6; pool6 != nil {
return pool6.IPNet().Contains(ip) && ip != pool6.Gateway() && ip != pool6.Broadcast()
if pool6.IPNet().Contains(ip) && ip != pool6.Gateway() && ip != pool6.Broadcast() {
return true
}
}
return false
@@ -63,11 +73,15 @@ func (h *ResolverEnhancer) IsFakeBroadcastIP(ip netip.Addr) bool {
}
if pool := h.fakeIPPool; pool != nil {
return pool.Broadcast() == ip
if pool.Broadcast() == ip {
return true
}
}
if pool6 := h.fakeIPPool6; pool6 != nil {
return pool6.Broadcast() == ip
if pool6.Broadcast() == ip {
return true
}
}
return false
@@ -102,11 +116,19 @@ func (h *ResolverEnhancer) InsertHostByIP(ip netip.Addr, host string) {
}
func (h *ResolverEnhancer) FlushFakeIP() error {
var errs []error
if pool := h.fakeIPPool; pool != nil {
return pool.FlushFakeIP()
if err := pool.FlushFakeIP(); err != nil {
errs = append(errs, err)
}
}
if pool6 := h.fakeIPPool6; pool6 != nil {
return pool6.FlushFakeIP()
if err := pool6.FlushFakeIP(); err != nil {
errs = append(errs, err)
}
}
if len(errs) > 0 {
return errors.Join(errs...)
}
return nil
}
@@ -141,6 +163,7 @@ type EnhancerConfig struct {
FakeIPPool *fakeip.Pool
FakeIPPool6 *fakeip.Pool
FakeIPSkipper *fakeip.Skipper
FakeIPTTL int
UseHosts bool
}
@@ -156,6 +179,10 @@ func NewEnhancer(cfg EnhancerConfig) *ResolverEnhancer {
e.fakeIPPool6 = cfg.FakeIPPool6
}
e.fakeIPSkipper = cfg.FakeIPSkipper
e.fakeIPTTL = cfg.FakeIPTTL
if e.fakeIPTTL < 1 {
e.fakeIPTTL = 1
}
e.mapping = lru.New(lru.WithSize[netip.Addr, string](4096))
}

View File

@@ -64,7 +64,7 @@ func withHosts(mapping *lru.LruCache[netip.Addr, string]) middleware {
if mapping != nil {
mapping.SetWithExpire(ipAddr, host, time.Now().Add(time.Second*10))
}
} else if q.Qtype == D.TypeAAAA {
} else if ipAddr.Is6() && q.Qtype == D.TypeAAAA {
rr := &D.AAAA{}
rr.Hdr = D.RR_Header{Name: q.Name, Rrtype: D.TypeAAAA, Class: D.ClassINET, Ttl: 10}
rr.AAAA = ipAddr.AsSlice()
@@ -146,7 +146,7 @@ func withMapping(mapping *lru.LruCache[netip.Addr, string]) middleware {
}
}
func withFakeIP(skipper *fakeip.Skipper, fakePool *fakeip.Pool, fakePool6 *fakeip.Pool) middleware {
func withFakeIP(skipper *fakeip.Skipper, fakePool *fakeip.Pool, fakePool6 *fakeip.Pool, fakeIPTTL int) middleware {
return func(next handler) handler {
return func(ctx *icontext.DNSContext, r *D.Msg) (*D.Msg, error) {
q := r.Question[0]
@@ -186,7 +186,7 @@ func withFakeIP(skipper *fakeip.Skipper, fakePool *fakeip.Pool, fakePool6 *fakei
msg.Answer = []D.RR{rr}
ctx.SetType(icontext.DNSTypeFakeIP)
setMsgTTL(msg, 1)
setMsgTTL(msg, uint32(fakeIPTTL))
msg.SetRcode(r, D.RcodeSuccess)
msg.Authoritative = true
msg.RecursionAvailable = true
@@ -238,7 +238,7 @@ func newHandler(resolver *Resolver, mapper *ResolverEnhancer) handler {
}
if mapper.mode == C.DNSFakeIP {
middlewares = append(middlewares, withFakeIP(mapper.fakeIPSkipper, mapper.fakeIPPool, mapper.fakeIPPool6))
middlewares = append(middlewares, withFakeIP(mapper.fakeIPSkipper, mapper.fakeIPPool, mapper.fakeIPPool6, mapper.fakeIPTTL))
}
if mapper.mode != C.DNSNormal {

View File

@@ -275,6 +275,8 @@ dns:
# 配置fake-ip-filter的匹配模式默认为blacklist即如果匹配成功不返回fake-ip
# 可设置为whitelist即只有匹配成功才返回fake-ip
fake-ip-filter-mode: blacklist
# 配置fakeip查询返回的TTL非必要情况下请勿修改
fake-ip-ttl: 1
# use-hosts: true # 查询 hosts
@@ -1027,7 +1029,7 @@ proxies: # socks5
server: 1.2.3.4
port: 2999
# port-range: 2090-2099 #(不可同时填写 port 和 port-range
transport: TCP # 支持 TCP
transport: TCP # 支持 TCP 或者 UDP
udp: true # 支持 UDP over TCP
username: user
password: password
@@ -1036,6 +1038,18 @@ proxies: # socks5
# 如果想开启 0-RTT 握手,请设置为 HANDSHAKE_NO_WAIT否则请设置为 HANDSHAKE_STANDARD。默认值为 HANDSHAKE_STANDARD
# handshake-mode: HANDSHAKE_STANDARD
# sudoku
- name: sudoku
type: sudoku
server: serverip # 1.2.3.4
port: 443
key: "<client_key>" # 如果你使用sudoku生成的ED25519密钥对请填写密钥对中的私钥否则填入和服务端相同的uuid
aead-method: chacha20-poly1305 # 可选值chacha20-poly1305、aes-128-gcm、none 我们保证在none的情况下sudoku混淆层仍然确保安全
padding-min: 2 # 最小填充字节数
padding-max: 7 # 最大填充字节数
table-type: prefer_ascii # 可选值prefer_ascii、prefer_entropy 前者全ascii映射后者保证熵值汉明1低于3
http-mask: true # 是否启用http掩码
# anytls
- name: anytls
type: anytls
@@ -1565,6 +1579,19 @@ listeners:
username1: password1
username2: password2
- name: sudoku-in-1
type: sudoku
port: 8443 # 仅支持单端口
listen: 0.0.0.0
key: "<server_key>" # 如果你使用sudoku生成的ED25519密钥对此处是密钥对中的公钥当然你也可以仅仅使用任意uuid充当key
aead-method: chacha20-poly1305 # 支持chacha20-poly1305或者aes-128-gcm以及nonesudoku的混淆层可以确保none情况下数据安全
padding-min: 1 # 填充最小长度
padding-max: 15 # 填充最大长度,均不建议过大
seed: "<seed-or-key>" # 如果你不使用ED25519密钥对就请填入uuid否则仍然是公钥
table-type: prefer_ascii # 可选值prefer_ascii、prefer_entropy 前者全ascii映射后者保证熵值汉明1低于3
handshake-timeout: 5 # optional
- name: trojan-in-1
type: trojan
port: 10819 # 支持使用ports格式例如200,302 or 200,204,401-429,501-503
@@ -1713,3 +1740,4 @@ listeners:
# alpn:
# - h3
# max-udp-relay-packet-size: 1500

13
go.mod
View File

@@ -6,8 +6,7 @@ require (
github.com/bahlo/generic-list-go v0.2.0
github.com/coreos/go-iptables v0.8.0
github.com/dlclark/regexp2 v1.11.5
github.com/ebitengine/purego v0.9.1
github.com/enfein/mieru/v3 v3.22.1
github.com/enfein/mieru/v3 v3.26.0
github.com/go-chi/chi/v5 v5.2.3
github.com/go-chi/render v1.0.3
github.com/gobwas/ws v1.4.0
@@ -23,7 +22,7 @@ require (
github.com/metacubex/chacha v0.1.5
github.com/metacubex/fswatch v0.1.1
github.com/metacubex/gopacket v1.1.20-0.20230608035415-7e2f98a3e759
github.com/metacubex/kcp-go v0.0.0-20251105084629-8c93f4bf37be
github.com/metacubex/kcp-go v0.0.0-20251111012849-7455698490e9
github.com/metacubex/quic-go v0.55.1-0.20251024060151-bd465f127128
github.com/metacubex/randv2 v0.2.0
github.com/metacubex/restls-client-go v0.1.7
@@ -33,17 +32,18 @@ require (
github.com/metacubex/sing-shadowsocks v0.2.12
github.com/metacubex/sing-shadowsocks2 v0.2.7
github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2
github.com/metacubex/sing-tun v0.4.9
github.com/metacubex/sing-tun v0.4.10
github.com/metacubex/sing-vmess v0.2.4
github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f
github.com/metacubex/smux v0.0.0-20250922175018-15c9a6a78719
github.com/metacubex/tfo-go v0.0.0-20251024101424-368b42b59148
github.com/metacubex/smux v0.0.0-20251111013112-03f8d12dafc1
github.com/metacubex/tfo-go v0.0.0-20251130171125-413e892ac443
github.com/metacubex/utls v1.8.3
github.com/metacubex/wireguard-go v0.0.0-20250820062549-a6cecdd7f57f
github.com/miekg/dns v1.1.63 // latest version compatible with golang1.20
github.com/mroth/weightedrand/v2 v2.1.0
github.com/openacid/low v0.1.21
github.com/oschwald/maxminddb-golang v1.12.0 // latest version compatible with golang1.20
github.com/saba-futai/sudoku v0.0.1-g
github.com/sagernet/cors v1.2.1
github.com/sagernet/netlink v0.0.0-20240612041022-b9a21c07ac6a
github.com/samber/lo v1.52.0
@@ -64,6 +64,7 @@ require (
)
require (
filippo.io/edwards25519 v1.1.0 // indirect
github.com/RyuaNerin/go-krypto v1.3.0 // indirect
github.com/Yawning/aez v0.0.0-20211027044916-e49e68abd344 // indirect
github.com/ajg/form v1.5.1 // indirect

26
go.sum
View File

@@ -1,3 +1,5 @@
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/RyuaNerin/go-krypto v1.3.0 h1:smavTzSMAx8iuVlGb4pEwl9MD2qicqMzuXR2QWp2/Pg=
github.com/RyuaNerin/go-krypto v1.3.0/go.mod h1:9R9TU936laAIqAmjcHo/LsaXYOZlymudOAxjaBf62UM=
github.com/RyuaNerin/testingutil v0.1.0 h1:IYT6JL57RV3U2ml3dLHZsVtPOP6yNK7WUVdzzlpNrss=
@@ -23,10 +25,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A=
github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/enfein/mieru/v3 v3.22.1 h1:/XGYYXpEhEJlxosmtbpEJkhtRLHB8IToG7LB8kU2ZDY=
github.com/enfein/mieru/v3 v3.22.1/go.mod h1:zJBUCsi5rxyvHM8fjFf+GLaEl4OEjjBXr1s5F6Qd3hM=
github.com/enfein/mieru/v3 v3.26.0 h1:ZsxCFkh3UfGSu9LL6EQ9+b97uxTJ7/AnJmLMyrbjSDI=
github.com/enfein/mieru/v3 v3.26.0/go.mod h1:zJBUCsi5rxyvHM8fjFf+GLaEl4OEjjBXr1s5F6Qd3hM=
github.com/ericlagergren/aegis v0.0.0-20250325060835-cd0defd64358 h1:kXYqH/sL8dS/FdoFjr12ePjnLPorPo2FsnrHNuXSDyo=
github.com/ericlagergren/aegis v0.0.0-20250325060835-cd0defd64358/go.mod h1:hkIFzoiIPZYxdFOOLyDho59b7SrDfo+w3h+yWdlg45I=
github.com/ericlagergren/polyval v0.0.0-20220411101811-e25bc10ba391 h1:8j2RH289RJplhA6WfdaPqzg1MjH2K8wX5e0uhAxrw2g=
@@ -108,8 +108,8 @@ github.com/metacubex/gopacket v1.1.20-0.20230608035415-7e2f98a3e759 h1:cjd4biTvO
github.com/metacubex/gopacket v1.1.20-0.20230608035415-7e2f98a3e759/go.mod h1:UHOv2xu+RIgLwpXca7TLrXleEd4oR3sPatW6IF8wU88=
github.com/metacubex/gvisor v0.0.0-20250919004547-6122b699a301 h1:N5GExQJqYAH3gOCshpp2u/J3CtNYzMctmlb0xK9wtbQ=
github.com/metacubex/gvisor v0.0.0-20250919004547-6122b699a301/go.mod h1:8LpS0IJW1VmWzUm3ylb0e2SK5QDm5lO/2qwWLZgRpBU=
github.com/metacubex/kcp-go v0.0.0-20251105084629-8c93f4bf37be h1:Y7SigZIqfv/+RIA/D7R6EbB9p+brPRoGOM6zobSmRIM=
github.com/metacubex/kcp-go v0.0.0-20251105084629-8c93f4bf37be/go.mod h1:HIJZW4QMhbBqXuqC1ly6Hn0TEYT2SzRw58ns1yGhXTs=
github.com/metacubex/kcp-go v0.0.0-20251111012849-7455698490e9 h1:7m3tRPrLpKOLOvZ/Lp4XCxz0t7rg9t9K35x6TahjR8o=
github.com/metacubex/kcp-go v0.0.0-20251111012849-7455698490e9/go.mod h1:HIJZW4QMhbBqXuqC1ly6Hn0TEYT2SzRw58ns1yGhXTs=
github.com/metacubex/nftables v0.0.0-20250503052935-30a69ab87793 h1:1Qpuy+sU3DmyX9HwI+CrBT/oLNJngvBorR2RbajJcqo=
github.com/metacubex/nftables v0.0.0-20250503052935-30a69ab87793/go.mod h1:RjRNb4G52yAgfR+Oe/kp9G4PJJ97Fnj89eY1BFO3YyA=
github.com/metacubex/quic-go v0.55.1-0.20251024060151-bd465f127128 h1:I1uvJl206/HbkzEAZpLgGkZgUveOZb+P+6oTUj7dN+o=
@@ -131,16 +131,16 @@ github.com/metacubex/sing-shadowsocks2 v0.2.7 h1:hSuuc0YpsfiqYqt1o+fP4m34BQz4e6w
github.com/metacubex/sing-shadowsocks2 v0.2.7/go.mod h1:vOEbfKC60txi0ca+yUlqEwOGc3Obl6cnSgx9Gf45KjE=
github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2 h1:gXU+MYPm7Wme3/OAY2FFzVq9d9GxPHOqu5AQfg/ddhI=
github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2/go.mod h1:mbfboaXauKJNIHJYxQRa+NJs4JU9NZfkA+I33dS2+9E=
github.com/metacubex/sing-tun v0.4.9 h1:jY0Yyt8nnN3yQRN/jTxgqNCmGi1dsFdxdIi7pQUlVVU=
github.com/metacubex/sing-tun v0.4.9/go.mod h1:L/TjQY5JEGy8nvsuYmy/XgMFMCPiF0+AWSFCYfS6r9w=
github.com/metacubex/sing-tun v0.4.10 h1:DllQTERAcqQyiEl4L/R7Ia0jCiSzZzikw2kL8N85p0E=
github.com/metacubex/sing-tun v0.4.10/go.mod h1:L/TjQY5JEGy8nvsuYmy/XgMFMCPiF0+AWSFCYfS6r9w=
github.com/metacubex/sing-vmess v0.2.4 h1:Tx6AGgCiEf400E/xyDuYyafsel6sGbR8oF7RkAaus6I=
github.com/metacubex/sing-vmess v0.2.4/go.mod h1:21R5R1u90uUvBQF0owoooEu96/SAYYD56nDrwm6nFaM=
github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f h1:Sr/DYKYofKHKc4GF3qkRGNuj6XA6c0eqPgEDN+VAsYU=
github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f/go.mod h1:jpAkVLPnCpGSfNyVmj6Cq4YbuZsFepm/Dc+9BAOcR80=
github.com/metacubex/smux v0.0.0-20250922175018-15c9a6a78719 h1:T6qCCfolRDAVJKeaPW/mXwNLjnlo65AYN7WS2jrBNaM=
github.com/metacubex/smux v0.0.0-20250922175018-15c9a6a78719/go.mod h1:4bPD8HWx9jPJ9aE4uadgyN7D1/Wz3KmPy+vale8sKLE=
github.com/metacubex/tfo-go v0.0.0-20251024101424-368b42b59148 h1:Zd0QqciLIhv9MKbGKTPEgN8WUFsgQGA1WJBy6spEnVU=
github.com/metacubex/tfo-go v0.0.0-20251024101424-368b42b59148/go.mod h1:l9oLnLoEXyGZ5RVLsh7QCC5XsouTUyKk4F2nLm2DHLw=
github.com/metacubex/smux v0.0.0-20251111013112-03f8d12dafc1 h1:a6DF0ze9miXes+rdwl8a4Wkvfpe0lXYU82sPJfDzz6s=
github.com/metacubex/smux v0.0.0-20251111013112-03f8d12dafc1/go.mod h1:4bPD8HWx9jPJ9aE4uadgyN7D1/Wz3KmPy+vale8sKLE=
github.com/metacubex/tfo-go v0.0.0-20251130171125-413e892ac443 h1:H6TnfM12tOoTizYE/qBHH3nEuibIelmHI+BVSxVJr8o=
github.com/metacubex/tfo-go v0.0.0-20251130171125-413e892ac443/go.mod h1:l9oLnLoEXyGZ5RVLsh7QCC5XsouTUyKk4F2nLm2DHLw=
github.com/metacubex/utls v1.8.3 h1:0m/yCxm3SK6kWve2lKiFb1pue1wHitJ8sQQD4Ikqde4=
github.com/metacubex/utls v1.8.3/go.mod h1:kncGGVhFaoGn5M3pFe3SXhZCzsbCJayNOH4UEqTKTko=
github.com/metacubex/wireguard-go v0.0.0-20250820062549-a6cecdd7f57f h1:FGBPRb1zUabhPhDrlKEjQ9lgIwQ6cHL4x8M9lrERhbk=
@@ -171,6 +171,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
github.com/saba-futai/sudoku v0.0.1-g h1:4q6OuAA6COaRW+CgoQtdim5AUPzzm0uOkvbYpJnOaBE=
github.com/saba-futai/sudoku v0.0.1-g/go.mod h1:2ZRzRwz93cS2K/o2yOG4CPJEltcvk5y6vbvUmjftGU0=
github.com/sagernet/cors v1.2.1 h1:Cv5Z8y9YSD6Gm+qSpNrL3LO4lD3eQVvbFYJSG7JCMHQ=
github.com/sagernet/cors v1.2.1/go.mod h1:O64VyOjjhrkLmQIjF4KGRrJO/5dVXFdpEmCW/eISRAI=
github.com/sagernet/netlink v0.0.0-20240612041022-b9a21c07ac6a h1:ObwtHN2VpqE0ZNjr6sGeT00J8uU7JF4cNUdb44/Duis=

View File

@@ -269,6 +269,7 @@ func updateDNS(c *config.DNS, generalIPv6 bool) {
FakeIPPool: c.FakeIPPool,
FakeIPPool6: c.FakeIPPool6,
FakeIPSkipper: c.FakeIPSkipper,
FakeIPTTL: c.FakeIPTTL,
UseHosts: c.UseHosts,
})
@@ -449,12 +450,7 @@ func patchSelectGroup(proxies map[string]C.Proxy) {
return
}
for name, proxy := range proxies {
outbound, ok := proxy.(C.Proxy)
if !ok {
continue
}
for name, outbound := range proxies {
selector, ok := outbound.Adapter().(outboundgroup.SelectAble)
if !ok {
continue

22
listener/config/sudoku.go Normal file
View File

@@ -0,0 +1,22 @@
package config
import "encoding/json"
// SudokuServer describes a Sudoku inbound server configuration.
// It is internal to the listener layer and mainly used for logging and wiring.
type SudokuServer struct {
Enable bool `json:"enable"`
Listen string `json:"listen"`
Key string `json:"key"`
AEADMethod string `json:"aead-method,omitempty"`
PaddingMin *int `json:"padding-min,omitempty"`
PaddingMax *int `json:"padding-max,omitempty"`
Seed string `json:"seed,omitempty"`
TableType string `json:"table-type,omitempty"`
HandshakeTimeoutSecond *int `json:"handshake-timeout,omitempty"`
}
func (s SudokuServer) String() string {
b, _ := json.Marshal(s)
return string(b)
}

View File

@@ -63,7 +63,11 @@ func removeExtraHTTPHostPort(req *http.Request) {
// parseBasicProxyAuthorization parse header Proxy-Authorization and return base64-encoded credential
func parseBasicProxyAuthorization(request *http.Request) string {
value := request.Header.Get("Proxy-Authorization")
if !strings.HasPrefix(value, "Basic ") {
const prefix = "Basic "
// According to RFC7617, the scheme should be case-insensitive.
// In practice, some implementations do use different case styles, causing authentication to fail
// eg: https://github.com/algesten/ureq/blob/381fd42cfcb80a5eb709d64860aa0ae726f17b8e/src/unversioned/transport/connect.rs#L118
if len(value) < len(prefix) || !strings.EqualFold(value[:len(prefix)], prefix) {
return ""
}

View File

@@ -59,11 +59,13 @@ func init() {
}
type TestTunnel struct {
HandleTCPConnFn func(conn net.Conn, metadata *C.Metadata)
HandleUDPPacketFn func(packet C.UDPPacket, metadata *C.Metadata)
NatTableFn func() C.NatTable
CloseFn func() error
DoTestFn func(t *testing.T, proxy C.ProxyAdapter)
HandleTCPConnFn func(conn net.Conn, metadata *C.Metadata)
HandleUDPPacketFn func(packet C.UDPPacket, metadata *C.Metadata)
NatTableFn func() C.NatTable
CloseFn func() error
DoTestFn func(t *testing.T, proxy C.ProxyAdapter)
DoSequentialTestFn func(t *testing.T, proxy C.ProxyAdapter)
DoConcurrentTestFn func(t *testing.T, proxy C.ProxyAdapter)
}
func (tt *TestTunnel) HandleTCPConn(conn net.Conn, metadata *C.Metadata) {
@@ -86,6 +88,14 @@ func (tt *TestTunnel) DoTest(t *testing.T, proxy C.ProxyAdapter) {
tt.DoTestFn(t, proxy)
}
// DoSequentialTest delegates to the configured DoSequentialTestFn hook.
// NOTE(review): panics if DoSequentialTestFn is nil — callers are expected
// to populate it (NewHttpTestTunnel does).
func (tt *TestTunnel) DoSequentialTest(t *testing.T, proxy C.ProxyAdapter) {
	tt.DoSequentialTestFn(t, proxy)
}
// DoConcurrentTest delegates to the configured DoConcurrentTestFn hook.
// NOTE(review): panics if DoConcurrentTestFn is nil — callers are expected
// to populate it (NewHttpTestTunnel does).
func (tt *TestTunnel) DoConcurrentTest(t *testing.T, proxy C.ProxyAdapter) {
	tt.DoConcurrentTestFn(t, proxy)
}
type TestTunnelListener struct {
ch chan net.Conn
ctx context.Context
@@ -213,6 +223,40 @@ func NewHttpTestTunnel() *TestTunnel {
}
assert.Equal(t, httpData[:size], data)
}
sequentialTestFn := func(t *testing.T, proxy C.ProxyAdapter) {
// Sequential testing for debugging
t.Run("Sequential", func(t *testing.T) {
testFn(t, proxy, "http", len(httpData))
testFn(t, proxy, "https", len(httpData))
})
}
concurrentTestFn := func(t *testing.T, proxy C.ProxyAdapter) {
// Concurrent testing to detect stress
t.Run("Concurrent", func(t *testing.T) {
wg := sync.WaitGroup{}
num := len(httpData) / 1024
for i := 1; i <= num; i++ {
i := i
wg.Add(1)
go func() {
testFn(t, proxy, "https", i*1024)
defer wg.Done()
}()
}
for i := 1; i <= num; i++ {
i := i
wg.Add(1)
go func() {
testFn(t, proxy, "http", i*1024)
defer wg.Done()
}()
}
wg.Wait()
})
}
tunnel := &TestTunnel{
HandleTCPConnFn: func(conn net.Conn, metadata *C.Metadata) {
defer conn.Close()
@@ -252,36 +296,11 @@ func NewHttpTestTunnel() *TestTunnel {
},
CloseFn: ln.Close,
DoTestFn: func(t *testing.T, proxy C.ProxyAdapter) {
// Sequential testing for debugging
t.Run("Sequential", func(t *testing.T) {
testFn(t, proxy, "http", len(httpData))
testFn(t, proxy, "https", len(httpData))
})
// Concurrent testing to detect stress
t.Run("Concurrent", func(t *testing.T) {
wg := sync.WaitGroup{}
num := len(httpData) / 1024
for i := 1; i <= num; i++ {
i := i
wg.Add(1)
go func() {
testFn(t, proxy, "https", i*1024)
defer wg.Done()
}()
}
for i := 1; i <= num; i++ {
i := i
wg.Add(1)
go func() {
testFn(t, proxy, "http", i*1024)
defer wg.Done()
}()
}
wg.Wait()
})
sequentialTestFn(t, proxy)
concurrentTestFn(t, proxy)
},
DoSequentialTestFn: sequentialTestFn,
DoConcurrentTestFn: concurrentTestFn,
}
return tunnel
}

View File

@@ -90,6 +90,8 @@ func (m *Mieru) Listen(tunnel C.Tunnel) error {
if err != nil {
if !m.server.IsRunning() {
break
} else {
continue
}
}
go mieru.Handle(c, tunnel, req, additions...)

View File

@@ -149,12 +149,18 @@ func TestNewMieru(t *testing.T) {
}
func TestInboundMieru(t *testing.T) {
t.Run("HANDSHAKE_STANDARD", func(t *testing.T) {
t.Run("TCP_HANDSHAKE_STANDARD", func(t *testing.T) {
testInboundMieruTCP(t, "HANDSHAKE_STANDARD")
})
t.Run("HANDSHAKE_NO_WAIT", func(t *testing.T) {
t.Run("TCP_HANDSHAKE_NO_WAIT", func(t *testing.T) {
testInboundMieruTCP(t, "HANDSHAKE_NO_WAIT")
})
t.Run("UDP_HANDSHAKE_STANDARD", func(t *testing.T) {
testInboundMieruUDP(t, "HANDSHAKE_STANDARD")
})
t.Run("UDP_HANDSHAKE_NO_WAIT", func(t *testing.T) {
testInboundMieruUDP(t, "HANDSHAKE_NO_WAIT")
})
}
func testInboundMieruTCP(t *testing.T, handshakeMode string) {
@@ -168,7 +174,7 @@ func testInboundMieruTCP(t *testing.T, handshakeMode string) {
inboundOptions := inbound.MieruOption{
BaseOption: inbound.BaseOption{
NameStr: "mieru_inbound",
NameStr: "mieru_inbound_tcp",
Listen: "127.0.0.1",
Port: strconv.Itoa(port),
},
@@ -194,7 +200,7 @@ func testInboundMieruTCP(t *testing.T, handshakeMode string) {
return
}
outboundOptions := outbound.MieruOption{
Name: "mieru_outbound",
Name: "mieru_outbound_tcp",
Server: addrPort.Addr().String(),
Port: int(addrPort.Port()),
Transport: "TCP",
@@ -210,3 +216,57 @@ func testInboundMieruTCP(t *testing.T, handshakeMode string) {
tunnel.DoTest(t, out)
}
// testInboundMieruUDP wires a mieru UDP inbound to a mieru UDP outbound on a
// loopback ephemeral port and runs the sequential HTTP tunnel test against it.
func testInboundMieruUDP(t *testing.T, handshakeMode string) {
	t.Parallel()
	// Reserve a free UDP port by binding an ephemeral one and releasing it.
	pc, err := net.ListenPacket("udp", "127.0.0.1:0")
	if !assert.NoError(t, err) {
		return
	}
	freePort := pc.LocalAddr().(*net.UDPAddr).Port
	pc.Close()
	in, err := inbound.NewMieru(&inbound.MieruOption{
		BaseOption: inbound.BaseOption{
			NameStr: "mieru_inbound_udp",
			Listen:  "127.0.0.1",
			Port:    strconv.Itoa(freePort),
		},
		Transport: "UDP",
		Users:     map[string]string{"test": "password"},
	})
	if !assert.NoError(t, err) {
		return
	}
	tunnel := NewHttpTestTunnel()
	defer tunnel.Close()
	if !assert.NoError(t, in.Listen(tunnel)) {
		return
	}
	defer in.Close()
	addrPort, err := netip.ParseAddrPort(in.Address())
	if !assert.NoError(t, err) {
		return
	}
	out, err := outbound.NewMieru(outbound.MieruOption{
		Name:          "mieru_outbound_udp",
		Server:        addrPort.Addr().String(),
		Port:          int(addrPort.Port()),
		Transport:     "UDP",
		UserName:      "test",
		Password:      "password",
		HandshakeMode: handshakeMode,
	})
	if !assert.NoError(t, err) {
		return
	}
	defer out.Close()
	tunnel.DoSequentialTest(t, out)
}

128
listener/inbound/sudoku.go Normal file
View File

@@ -0,0 +1,128 @@
package inbound
import (
"errors"
"fmt"
"strings"
"github.com/saba-futai/sudoku/apis"
C "github.com/metacubex/mihomo/constant"
LC "github.com/metacubex/mihomo/listener/config"
sudokuListener "github.com/metacubex/mihomo/listener/sudoku"
"github.com/metacubex/mihomo/log"
)
// SudokuOption is the user-facing configuration of a Sudoku protocol inbound,
// decoded from the listener config via the `inbound` struct tags.
type SudokuOption struct {
	BaseOption
	Key                    string `inbound:"key"`                          // shared secret; required (NewSudoku rejects an empty key)
	AEADMethod             string `inbound:"aead-method,omitempty"`        // AEAD cipher name; library default is used when empty
	PaddingMin             *int   `inbound:"padding-min,omitempty"`        // minimum obfuscation padding; nil means library default
	PaddingMax             *int   `inbound:"padding-max,omitempty"`        // maximum obfuscation padding; nil means library default
	Seed                   string `inbound:"seed,omitempty"`               // obfuscation table seed; the listener falls back to Key when empty
	TableType              string `inbound:"table-type,omitempty"`         // "prefer_ascii" or "prefer_entropy"
	HandshakeTimeoutSecond *int   `inbound:"handshake-timeout,omitempty"`  // handshake timeout in seconds; nil means library default
}

// Equal reports whether config describes the same inbound as o, compared via
// their canonical option-string forms.
func (o SudokuOption) Equal(config C.InboundConfig) bool {
	return optionToString(o) == optionToString(config)
}
// Sudoku is a C.InboundListener that serves the Sudoku protocol, possibly on
// several comma-separated listen addresses.
type Sudoku struct {
	*Base
	config     *SudokuOption              // original options, returned unchanged by Config
	listeners  []*sudokuListener.Listener // one per comma-separated address; populated by Listen
	serverConf LC.SudokuServer            // template config; Listen stamps the per-address Listen field
}
// NewSudoku validates options and builds a Sudoku inbound. The returned
// inbound does not listen yet; call Listen to start accepting connections.
// Unset handshake timeouts fall back to the Sudoku library default.
func NewSudoku(options *SudokuOption) (*Sudoku, error) {
	if options.Key == "" {
		return nil, fmt.Errorf("sudoku inbound requires key")
	}
	base, err := NewBase(&options.BaseOption)
	if err != nil {
		return nil, err
	}
	defaults := apis.DefaultConfig()
	timeout := options.HandshakeTimeoutSecond
	if timeout == nil {
		// Use Sudoku default if not specified.
		v := defaults.HandshakeTimeoutSeconds
		timeout = &v
	}
	return &Sudoku{
		Base:   base,
		config: options,
		serverConf: LC.SudokuServer{
			Enable:                 true,
			Listen:                 base.RawAddress(),
			Key:                    options.Key,
			AEADMethod:             options.AEADMethod,
			PaddingMin:             options.PaddingMin,
			PaddingMax:             options.PaddingMax,
			Seed:                   options.Seed,
			TableType:              options.TableType,
			HandshakeTimeoutSecond: timeout,
		},
	}, nil
}
// Config implements constant.InboundListener and returns the options this
// inbound was created from (used for change detection via Equal).
func (s *Sudoku) Config() C.InboundConfig {
	return s.config
}
// Address implements constant.InboundListener and reports the actual bound
// addresses of all active listeners, comma-separated.
func (s *Sudoku) Address() string {
	addrs := make([]string, 0, len(s.listeners))
	for _, listener := range s.listeners {
		addrs = append(addrs, listener.Address())
	}
	return strings.Join(addrs, ",")
}
// Listen implements constant.InboundListener. It starts one Sudoku listener
// per comma-separated address in RawAddress, feeding accepted connections to
// tunnel. On partial failure the successfully started listeners are kept and
// the accumulated errors are returned joined.
func (s *Sudoku) Listen(tunnel C.Tunnel) error {
	if s.serverConf.Key == "" {
		return fmt.Errorf("sudoku inbound requires key")
	}
	var errs []error
	for _, addr := range strings.Split(s.RawAddress(), ",") {
		conf := s.serverConf
		conf.Listen = addr
		if l, err := sudokuListener.New(conf, tunnel, s.Additions()...); err != nil {
			errs = append(errs, err)
		} else {
			s.listeners = append(s.listeners, l)
		}
	}
	// errors.Join returns nil when errs is empty.
	if err := errors.Join(errs...); err != nil {
		return err
	}
	log.Infoln("Sudoku[%s] inbound listening at: %s", s.Name(), s.Address())
	return nil
}
// Close implements constant.InboundListener. It closes every started
// listener and returns the joined close errors (nil when none failed).
func (s *Sudoku) Close() error {
	var errs []error
	for _, listener := range s.listeners {
		if err := listener.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	// errors.Join returns nil for an empty slice, matching the original
	// explicit length check.
	return errors.Join(errs...)
}
var _ C.InboundListener = (*Sudoku)(nil)

View File

@@ -0,0 +1,91 @@
package inbound_test
import (
"net/netip"
"testing"
"github.com/metacubex/mihomo/adapter/outbound"
"github.com/metacubex/mihomo/listener/inbound"
"github.com/stretchr/testify/assert"
)
// testInboundSudoku wires a Sudoku inbound to a Sudoku outbound over loopback
// (ephemeral port) and runs the full HTTP tunnel test suite against the pair.
func testInboundSudoku(t *testing.T, inboundOptions inbound.SudokuOption, outboundOptions outbound.SudokuOption) {
	t.Parallel()
	inboundOptions.BaseOption = inbound.BaseOption{
		NameStr: "sudoku_inbound",
		Listen:  "127.0.0.1",
		Port:    "0",
	}
	in, err := inbound.NewSudoku(&inboundOptions)
	if !assert.NoError(t, err) {
		return
	}
	tunnel := NewHttpTestTunnel()
	defer tunnel.Close()
	if !assert.NoError(t, in.Listen(tunnel)) {
		return
	}
	defer in.Close()
	addrPort, err := netip.ParseAddrPort(in.Address())
	if !assert.NoError(t, err) {
		return
	}
	outboundOptions.Name = "sudoku_outbound"
	outboundOptions.Server = addrPort.Addr().String()
	outboundOptions.Port = int(addrPort.Port())
	out, err := outbound.NewSudoku(outboundOptions)
	if !assert.NoError(t, err) {
		return
	}
	defer out.Close()
	tunnel.DoTest(t, out)
}
func TestInboundSudoku_Basic(t *testing.T) {
key := "test_key"
inboundOptions := inbound.SudokuOption{
Key: key,
}
outboundOptions := outbound.SudokuOption{
Key: key,
}
testInboundSudoku(t, inboundOptions, outboundOptions)
}
func TestInboundSudoku_Entropy(t *testing.T) {
key := "test_key_entropy"
inboundOptions := inbound.SudokuOption{
Key: key,
TableType: "prefer_entropy",
}
outboundOptions := outbound.SudokuOption{
Key: key,
TableType: "prefer_entropy",
}
testInboundSudoku(t, inboundOptions, outboundOptions)
}
func TestInboundSudoku_Padding(t *testing.T) {
key := "test_key_padding"
min := 10
max := 100
inboundOptions := inbound.SudokuOption{
Key: key,
PaddingMin: &min,
PaddingMax: &max,
}
outboundOptions := outbound.SudokuOption{
Key: key,
PaddingMin: &min,
PaddingMax: &max,
}
testInboundSudoku(t, inboundOptions, outboundOptions)
}

View File

@@ -31,12 +31,28 @@ func Handle(conn net.Conn, tunnel C.Tunnel, request *mierumodel.Request, additio
}
// Handle the connection with tunnel.
metadata := mieruRequestToMetadata(request)
inbound.ApplyAdditions(&metadata, additions...)
switch metadata.NetWork {
case C.TCP:
tunnel.HandleTCPConn(conn, &metadata)
case C.UDP:
switch request.Command {
case mieruconstant.Socks5ConnectCmd: // TCP
metadata := &C.Metadata{
NetWork: C.TCP,
Type: C.MIERU,
DstPort: uint16(request.DstAddr.Port),
}
if request.DstAddr.FQDN != "" {
metadata.Host = request.DstAddr.FQDN
} else if request.DstAddr.IP != nil {
metadata.DstIP, _ = netip.AddrFromSlice(request.DstAddr.IP)
metadata.DstIP = metadata.DstIP.Unmap()
}
inbound.ApplyAdditions(
metadata,
inbound.WithInName(conn.(mierucommon.UserContext).UserName()),
inbound.WithSrcAddr(conn.RemoteAddr()),
inbound.WithInAddr(conn.LocalAddr()),
)
inbound.ApplyAdditions(metadata, additions...)
tunnel.HandleTCPConn(conn, metadata)
case mieruconstant.Socks5UDPAssociateCmd: // UDP
pc := mierucommon.NewPacketOverStreamTunnel(conn)
ep := N.NewEnhancePacketConn(pc)
for {
@@ -67,24 +83,6 @@ func Handle(conn net.Conn, tunnel C.Tunnel, request *mierumodel.Request, additio
}
}
func mieruRequestToMetadata(request *mierumodel.Request) C.Metadata {
m := C.Metadata{
DstPort: uint16(request.DstAddr.Port),
}
switch request.Command {
case mieruconstant.Socks5ConnectCmd:
m.NetWork = C.TCP
case mieruconstant.Socks5UDPAssociateCmd:
m.NetWork = C.UDP
}
if request.DstAddr.FQDN != "" {
m.Host = request.DstAddr.FQDN
} else if request.DstAddr.IP != nil {
m.DstIP, _ = netip.AddrFromSlice(request.DstAddr.IP)
}
return m
}
type packet struct {
pc net.PacketConn
addr net.Addr // source (i.e. remote) IP & Port of the packet

View File

@@ -134,6 +134,13 @@ func ParseListener(mapping map[string]any) (C.InboundListener, error) {
return nil, err
}
listener, err = IN.NewMieru(mieruOption)
case "sudoku":
sudokuOption := &IN.SudokuOption{}
err = decoder.Decode(mapping, sudokuOption)
if err != nil {
return nil, err
}
listener, err = IN.NewSudoku(sudokuOption)
default:
return nil, fmt.Errorf("unsupport proxy type: %s", proxyType)
}

139
listener/sudoku/server.go Normal file
View File

@@ -0,0 +1,139 @@
package sudoku
import (
	"net"
	"strings"
	"sync/atomic"

	"github.com/saba-futai/sudoku/apis"
	sudokuobfs "github.com/saba-futai/sudoku/pkg/obfs/sudoku"

	"github.com/metacubex/mihomo/adapter/inbound"
	C "github.com/metacubex/mihomo/constant"
	LC "github.com/metacubex/mihomo/listener/config"
	"github.com/metacubex/mihomo/transport/socks5"
)
// Listener accepts raw TCP connections, performs the server side of the
// Sudoku protocol handshake, and hands each resulting stream to the tunnel.
type Listener struct {
	listener net.Listener
	addr     string // configured listen address, reported by RawAddress
	// closed is written by Close and read by the background accept loop;
	// atomic.Bool avoids the data race a plain bool would have here.
	closed    atomic.Bool
	protoConf apis.ProtocolConfig
}

// RawAddress implements C.Listener
func (l *Listener) RawAddress() string {
	return l.addr
}

// Address implements C.Listener
func (l *Listener) Address() string {
	if l.listener == nil {
		return ""
	}
	return l.listener.Addr().String()
}

// Close implements C.Listener
func (l *Listener) Close() error {
	// Mark closed before closing the net.Listener so the accept loop sees
	// the flag once Accept returns an error.
	l.closed.Store(true)
	if l.listener != nil {
		return l.listener.Close()
	}
	return nil
}

// handleConn runs the Sudoku server handshake on conn; on success the
// decoded target address and the tunneled stream are forwarded as a TCP
// connection. On any failure the connection is closed silently.
func (l *Listener) handleConn(conn net.Conn, tunnel C.Tunnel, additions ...inbound.Addition) {
	tunnelConn, target, err := apis.ServerHandshake(conn, &l.protoConf)
	if err != nil {
		_ = conn.Close()
		return
	}
	targetAddr := socks5.ParseAddr(target)
	if targetAddr == nil {
		_ = tunnelConn.Close()
		return
	}
	tunnel.HandleTCPConn(inbound.NewSocket(targetAddr, tunnelConn, C.SUDOKU, additions...))
}

// New creates a Sudoku listener bound to config.Listen and starts its accept
// loop in a background goroutine. Missing optional settings (seed, table
// type, padding bounds, AEAD method, handshake timeout) fall back to the
// Sudoku library defaults.
func New(config LC.SudokuServer, tunnel C.Tunnel, additions ...inbound.Addition) (*Listener, error) {
	if len(additions) == 0 {
		additions = []inbound.Addition{
			inbound.WithInName("DEFAULT-SUDOKU"),
			inbound.WithSpecialRules(""),
		}
	}
	l, err := inbound.Listen("tcp", config.Listen)
	if err != nil {
		return nil, err
	}
	// The obfuscation table seed falls back to the key when unset.
	seed := config.Seed
	if seed == "" {
		seed = config.Key
	}
	tableType := strings.ToLower(config.TableType)
	if tableType == "" {
		tableType = "prefer_ascii"
	}
	table := sudokuobfs.NewTable(seed, tableType)
	defaultConf := apis.DefaultConfig()
	paddingMin := defaultConf.PaddingMin
	paddingMax := defaultConf.PaddingMax
	if config.PaddingMin != nil {
		paddingMin = *config.PaddingMin
	}
	if config.PaddingMax != nil {
		paddingMax = *config.PaddingMax
	}
	// Clamp only when exactly one bound is user-supplied and it crosses the
	// other (default) bound. NOTE(review): if the user sets BOTH bounds
	// inverted, they are passed through unchanged — confirm the library
	// tolerates PaddingMax < PaddingMin.
	if config.PaddingMin == nil && config.PaddingMax != nil && paddingMax < paddingMin {
		paddingMin = paddingMax
	}
	if config.PaddingMax == nil && config.PaddingMin != nil && paddingMax < paddingMin {
		paddingMax = paddingMin
	}
	handshakeTimeout := defaultConf.HandshakeTimeoutSeconds
	if config.HandshakeTimeoutSecond != nil {
		handshakeTimeout = *config.HandshakeTimeoutSecond
	}
	protoConf := apis.ProtocolConfig{
		Key:                     config.Key,
		AEADMethod:              defaultConf.AEADMethod,
		Table:                   table,
		PaddingMin:              paddingMin,
		PaddingMax:              paddingMax,
		HandshakeTimeoutSeconds: handshakeTimeout,
	}
	if config.AEADMethod != "" {
		protoConf.AEADMethod = config.AEADMethod
	}
	sl := &Listener{
		listener:  l,
		addr:      config.Listen,
		protoConf: protoConf,
	}
	go func() {
		for {
			c, err := l.Accept()
			if err != nil {
				// Exit only after Close has been requested; other accept
				// errors are retried.
				if sl.closed.Load() {
					break
				}
				continue
			}
			go sl.handleConn(c, tunnel, additions...)
		}
	}()
	return sl, nil
}

View File

@@ -70,7 +70,9 @@ func (d *DNSDialer) DialContext(ctx context.Context, network, addr string) (net.
} else {
var ok bool
proxyAdapter, ok = Proxies()[proxyName]
if !ok {
if ok {
metadata.SpecialProxy = proxyName // just for log
} else {
opts = append(opts, dialer.WithInterface(proxyName))
}
}
@@ -158,7 +160,9 @@ func (d *DNSDialer) ListenPacket(ctx context.Context, network, addr string) (net
} else {
var ok bool
proxyAdapter, ok = Proxies()[proxyName]
if !ok {
if ok {
metadata.SpecialProxy = proxyName // just for log
} else {
opts = append(opts, dialer.WithInterface(proxyName))
}
}

View File

@@ -627,7 +627,7 @@ func logMetadataErr(metadata *C.Metadata, rule C.Rule, proxy C.ProxyAdapter, err
func logMetadata(metadata *C.Metadata, rule C.Rule, remoteConn C.Connection) {
switch {
case metadata.SpecialProxy != "":
log.Infoln("[%s] %s --> %s using %s", strings.ToUpper(metadata.NetWork.String()), metadata.SourceDetail(), metadata.RemoteAddress(), metadata.SpecialProxy)
log.Infoln("[%s] %s --> %s using %s", strings.ToUpper(metadata.NetWork.String()), metadata.SourceDetail(), metadata.RemoteAddress(), remoteConn.Chains().String())
case rule != nil:
if rule.Payload() != "" {
log.Infoln("[%s] %s --> %s match %s using %s", strings.ToUpper(metadata.NetWork.String()), metadata.SourceDetail(), metadata.RemoteAddress(), fmt.Sprintf("%s(%s)", rule.RuleType().String(), rule.Payload()), remoteConn.Chains().String())
@@ -639,7 +639,7 @@ func logMetadata(metadata *C.Metadata, rule C.Rule, remoteConn C.Connection) {
case mode == Direct:
log.Infoln("[%s] %s --> %s using DIRECT", strings.ToUpper(metadata.NetWork.String()), metadata.SourceDetail(), metadata.RemoteAddress())
default:
log.Infoln("[%s] %s --> %s doesn't match any rule using %s", strings.ToUpper(metadata.NetWork.String()), metadata.SourceDetail(), metadata.RemoteAddress(), remoteConn.Chains().Last())
log.Infoln("[%s] %s --> %s doesn't match any rule using %s", strings.ToUpper(metadata.NetWork.String()), metadata.SourceDetail(), metadata.RemoteAddress(), remoteConn.Chains().String())
}
}