Update On Sun May 25 20:34:13 CEST 2025

This commit is contained in:
github-action[bot]
2025-05-25 20:34:14 +02:00
parent 8a13333e48
commit 8b9af21a0e
56 changed files with 586 additions and 486 deletions

.github/update.log vendored

@@ -1009,3 +1009,4 @@ Update On Wed May 21 20:37:50 CEST 2025
Update On Thu May 22 20:37:12 CEST 2025
Update On Fri May 23 20:34:50 CEST 2025
Update On Sat May 24 20:33:25 CEST 2025
Update On Sun May 25 20:34:05 CEST 2025


@@ -5,7 +5,6 @@ import (
"encoding/json"
"net"
"runtime"
"strings"
"sync"
"syscall"
@@ -203,12 +202,21 @@ func NewBase(opt BaseOption) *Base {
type conn struct {
N.ExtendedConn
chain C.Chain
actualRemoteDestination string
chain C.Chain
adapterAddr string
}
func (c *conn) RemoteDestination() string {
return c.actualRemoteDestination
if remoteAddr := c.RemoteAddr(); remoteAddr != nil {
m := C.Metadata{}
if err := m.SetRemoteAddr(remoteAddr); err == nil {
if m.Valid() {
return m.String()
}
}
}
host, _, _ := net.SplitHostPort(c.adapterAddr)
return host
}
// Chains implements C.Connection
@@ -241,19 +249,20 @@ func NewConn(c net.Conn, a C.ProxyAdapter) C.Conn {
if _, ok := c.(syscall.Conn); !ok { // exclusion system conn like *net.TCPConn
c = N.NewDeadlineConn(c) // most conn from outbound can't handle readDeadline correctly
}
return &conn{N.NewExtendedConn(c), []string{a.Name()}, parseRemoteDestination(a.Addr())}
return &conn{N.NewExtendedConn(c), []string{a.Name()}, a.Addr()}
}
type packetConn struct {
N.EnhancePacketConn
chain C.Chain
adapterName string
connID string
actualRemoteDestination string
chain C.Chain
adapterName string
connID string
adapterAddr string
}
func (c *packetConn) RemoteDestination() string {
return c.actualRemoteDestination
host, _, _ := net.SplitHostPort(c.adapterAddr)
return host
}
// Chains implements C.Connection
@@ -292,19 +301,7 @@ func newPacketConn(pc net.PacketConn, a C.ProxyAdapter) C.PacketConn {
if _, ok := pc.(syscall.Conn); !ok { // exclusion system conn like *net.UDPConn
epc = N.NewDeadlineEnhancePacketConn(epc) // most conn from outbound can't handle readDeadline correctly
}
return &packetConn{epc, []string{a.Name()}, a.Name(), utils.NewUUIDV4().String(), parseRemoteDestination(a.Addr())}
}
func parseRemoteDestination(addr string) string {
if dst, _, err := net.SplitHostPort(addr); err == nil {
return dst
} else {
if addrError, ok := err.(*net.AddrError); ok && strings.Contains(addrError.Err, "missing port") {
return dst
} else {
return ""
}
}
return &packetConn{epc, []string{a.Name()}, a.Name(), utils.NewUUIDV4().String(), a.Addr()}
}
type AddRef interface {
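
For reference, a minimal standalone Go sketch (not part of this diff) of the fallback the new RemoteDestination uses when no usable metadata can be derived from RemoteAddr: it simply takes the host part of the adapter address, which is assumed here to be in host:port form.

```go
package main

import (
	"fmt"
	"net"
)

// remoteHost mirrors the fallback branch in the patched RemoteDestination:
// take the host part of an adapter address such as "example.com:443".
// As in the diff, the error from SplitHostPort is ignored, so a malformed
// address yields an empty string.
func remoteHost(adapterAddr string) string {
	host, _, _ := net.SplitHostPort(adapterAddr)
	return host
}

func main() {
	fmt.Println(remoteHost("example.com:443")) // example.com
	fmt.Println(remoteHost("10.0.0.1:8080"))   // 10.0.0.1
	fmt.Println(remoteHost("no-port"))         // "" (parse error ignored)
}
```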


@@ -20,7 +20,7 @@ type connReadResult struct {
type Conn struct {
network.ExtendedConn
deadline atomic.TypedValue[time.Time]
pipeDeadline pipeDeadline
pipeDeadline PipeDeadline
disablePipe atomic.Bool
inRead atomic.Bool
resultCh chan *connReadResult
@@ -34,7 +34,7 @@ func IsConn(conn any) bool {
func NewConn(conn net.Conn) *Conn {
c := &Conn{
ExtendedConn: bufio.NewExtendedConn(conn),
pipeDeadline: makePipeDeadline(),
pipeDeadline: MakePipeDeadline(),
resultCh: make(chan *connReadResult, 1),
}
c.resultCh <- nil
@@ -58,7 +58,7 @@ func (c *Conn) Read(p []byte) (n int, err error) {
c.resultCh <- nil
break
}
case <-c.pipeDeadline.wait():
case <-c.pipeDeadline.Wait():
return 0, os.ErrDeadlineExceeded
}
@@ -104,7 +104,7 @@ func (c *Conn) ReadBuffer(buffer *buf.Buffer) (err error) {
c.resultCh <- nil
break
}
case <-c.pipeDeadline.wait():
case <-c.pipeDeadline.Wait():
return os.ErrDeadlineExceeded
}
@@ -130,7 +130,7 @@ func (c *Conn) SetReadDeadline(t time.Time) error {
return c.ExtendedConn.SetReadDeadline(t)
}
c.deadline.Store(t)
c.pipeDeadline.set(t)
c.pipeDeadline.Set(t)
return nil
}


@@ -19,7 +19,7 @@ type readResult struct {
type NetPacketConn struct {
net.PacketConn
deadline atomic.TypedValue[time.Time]
pipeDeadline pipeDeadline
pipeDeadline PipeDeadline
disablePipe atomic.Bool
inRead atomic.Bool
resultCh chan any
@@ -28,7 +28,7 @@ type NetPacketConn struct {
func NewNetPacketConn(pc net.PacketConn) net.PacketConn {
npc := &NetPacketConn{
PacketConn: pc,
pipeDeadline: makePipeDeadline(),
pipeDeadline: MakePipeDeadline(),
resultCh: make(chan any, 1),
}
npc.resultCh <- nil
@@ -83,7 +83,7 @@ FOR:
c.resultCh <- nil
break FOR
}
case <-c.pipeDeadline.wait():
case <-c.pipeDeadline.Wait():
return 0, nil, os.ErrDeadlineExceeded
}
}
@@ -122,7 +122,7 @@ func (c *NetPacketConn) SetReadDeadline(t time.Time) error {
return c.PacketConn.SetReadDeadline(t)
}
c.deadline.Store(t)
c.pipeDeadline.set(t)
c.pipeDeadline.Set(t)
return nil
}


@@ -52,7 +52,7 @@ FOR:
c.netPacketConn.resultCh <- nil
break FOR
}
case <-c.netPacketConn.pipeDeadline.wait():
case <-c.netPacketConn.pipeDeadline.Wait():
return nil, nil, nil, os.ErrDeadlineExceeded
}
}


@@ -69,7 +69,7 @@ FOR:
c.netPacketConn.resultCh <- nil
break FOR
}
case <-c.netPacketConn.pipeDeadline.wait():
case <-c.netPacketConn.pipeDeadline.Wait():
return M.Socksaddr{}, os.ErrDeadlineExceeded
}
}
@@ -146,7 +146,7 @@ FOR:
c.netPacketConn.resultCh <- nil
break FOR
}
case <-c.netPacketConn.pipeDeadline.wait():
case <-c.netPacketConn.pipeDeadline.Wait():
return nil, M.Socksaddr{}, os.ErrDeadlineExceeded
}
}


@@ -9,24 +9,24 @@ import (
"time"
)
// pipeDeadline is an abstraction for handling timeouts.
type pipeDeadline struct {
// PipeDeadline is an abstraction for handling timeouts.
type PipeDeadline struct {
mu sync.Mutex // Guards timer and cancel
timer *time.Timer
cancel chan struct{} // Must be non-nil
}
func makePipeDeadline() pipeDeadline {
return pipeDeadline{cancel: make(chan struct{})}
func MakePipeDeadline() PipeDeadline {
return PipeDeadline{cancel: make(chan struct{})}
}
// set sets the point in time when the deadline will time out.
// Set sets the point in time when the deadline will time out.
// A timeout event is signaled by closing the channel returned by waiter.
// Once a timeout has occurred, the deadline can be refreshed by specifying a
// t value in the future.
//
// A zero value for t prevents timeout.
func (d *pipeDeadline) set(t time.Time) {
func (d *PipeDeadline) Set(t time.Time) {
d.mu.Lock()
defer d.mu.Unlock()
@@ -61,8 +61,8 @@ func (d *pipeDeadline) set(t time.Time) {
}
}
// wait returns a channel that is closed when the deadline is exceeded.
func (d *pipeDeadline) wait() chan struct{} {
// Wait returns a channel that is closed when the deadline is exceeded.
func (d *PipeDeadline) Wait() chan struct{} {
d.mu.Lock()
defer d.mu.Unlock()
return d.cancel
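
A minimal Go sketch of the deadline-channel pattern that the now-exported PipeDeadline (and the select statements on Wait() in the surrounding diffs) relies on. This is an illustration, not the project's implementation; for brevity it supports arming the deadline only once, whereas PipeDeadline can be re-armed.

```go
package main

import (
	"fmt"
	"os"
	"sync"
	"time"
)

// deadline closes a channel once the deadline passes; readers select on it.
type deadline struct {
	mu     sync.Mutex
	cancel chan struct{}
}

func makeDeadline() *deadline { return &deadline{cancel: make(chan struct{})} }

// set arms the deadline once (no re-arming in this sketch, unlike PipeDeadline).
func (d *deadline) set(t time.Time) {
	d.mu.Lock()
	cancel := d.cancel
	d.mu.Unlock()
	time.AfterFunc(time.Until(t), func() { close(cancel) })
}

// wait returns the channel that is closed when the deadline is exceeded.
func (d *deadline) wait() <-chan struct{} {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.cancel
}

func main() {
	d := makeDeadline()
	d.set(time.Now().Add(50 * time.Millisecond))

	data := make(chan []byte) // nothing ever arrives, so the deadline fires first
	select {
	case b := <-data:
		fmt.Println("read", len(b), "bytes")
	case <-d.wait():
		fmt.Println("read failed:", os.ErrDeadlineExceeded)
	}
}
```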


@@ -33,8 +33,8 @@ type pipe struct {
localDone chan struct{}
remoteDone <-chan struct{}
readDeadline pipeDeadline
writeDeadline pipeDeadline
readDeadline PipeDeadline
writeDeadline PipeDeadline
readWaitOptions N.ReadWaitOptions
}
@@ -56,15 +56,15 @@ func Pipe() (net.Conn, net.Conn) {
rdRx: cb1, rdTx: cn1,
wrTx: cb2, wrRx: cn2,
localDone: done1, remoteDone: done2,
readDeadline: makePipeDeadline(),
writeDeadline: makePipeDeadline(),
readDeadline: MakePipeDeadline(),
writeDeadline: MakePipeDeadline(),
}
p2 := &pipe{
rdRx: cb2, rdTx: cn2,
wrTx: cb1, wrRx: cn1,
localDone: done2, remoteDone: done1,
readDeadline: makePipeDeadline(),
writeDeadline: makePipeDeadline(),
readDeadline: MakePipeDeadline(),
writeDeadline: MakePipeDeadline(),
}
return p1, p2
}
@@ -86,7 +86,7 @@ func (p *pipe) read(b []byte) (n int, err error) {
return 0, io.ErrClosedPipe
case isClosedChan(p.remoteDone):
return 0, io.EOF
case isClosedChan(p.readDeadline.wait()):
case isClosedChan(p.readDeadline.Wait()):
return 0, os.ErrDeadlineExceeded
}
@@ -99,7 +99,7 @@ func (p *pipe) read(b []byte) (n int, err error) {
return 0, io.ErrClosedPipe
case <-p.remoteDone:
return 0, io.EOF
case <-p.readDeadline.wait():
case <-p.readDeadline.Wait():
return 0, os.ErrDeadlineExceeded
}
}
@@ -118,7 +118,7 @@ func (p *pipe) write(b []byte) (n int, err error) {
return 0, io.ErrClosedPipe
case isClosedChan(p.remoteDone):
return 0, io.ErrClosedPipe
case isClosedChan(p.writeDeadline.wait()):
case isClosedChan(p.writeDeadline.Wait()):
return 0, os.ErrDeadlineExceeded
}
@@ -134,7 +134,7 @@ func (p *pipe) write(b []byte) (n int, err error) {
return n, io.ErrClosedPipe
case <-p.remoteDone:
return n, io.ErrClosedPipe
case <-p.writeDeadline.wait():
case <-p.writeDeadline.Wait():
return n, os.ErrDeadlineExceeded
}
}
@@ -145,8 +145,8 @@ func (p *pipe) SetDeadline(t time.Time) error {
if isClosedChan(p.localDone) || isClosedChan(p.remoteDone) {
return io.ErrClosedPipe
}
p.readDeadline.set(t)
p.writeDeadline.set(t)
p.readDeadline.Set(t)
p.writeDeadline.Set(t)
return nil
}
@@ -154,7 +154,7 @@ func (p *pipe) SetReadDeadline(t time.Time) error {
if isClosedChan(p.localDone) || isClosedChan(p.remoteDone) {
return io.ErrClosedPipe
}
p.readDeadline.set(t)
p.readDeadline.Set(t)
return nil
}
@@ -162,7 +162,7 @@ func (p *pipe) SetWriteDeadline(t time.Time) error {
if isClosedChan(p.localDone) || isClosedChan(p.remoteDone) {
return io.ErrClosedPipe
}
p.writeDeadline.set(t)
p.writeDeadline.Set(t)
return nil
}
@@ -192,7 +192,7 @@ func (p *pipe) waitReadBuffer() (buffer *buf.Buffer, err error) {
return nil, io.ErrClosedPipe
case isClosedChan(p.remoteDone):
return nil, io.EOF
case isClosedChan(p.readDeadline.wait()):
case isClosedChan(p.readDeadline.Wait()):
return nil, os.ErrDeadlineExceeded
}
select {
@@ -211,7 +211,7 @@ func (p *pipe) waitReadBuffer() (buffer *buf.Buffer, err error) {
return nil, io.ErrClosedPipe
case <-p.remoteDone:
return nil, io.EOF
case <-p.readDeadline.wait():
case <-p.readDeadline.Wait():
return nil, os.ErrDeadlineExceeded
}
}


@@ -3,7 +3,6 @@ package statistic
import (
"io"
"net"
"net/netip"
"time"
"github.com/metacubex/mihomo/common/atomic"
@@ -116,20 +115,8 @@ func (tt *tcpTracker) Upstream() any {
return tt.Conn
}
func parseRemoteDestination(addr net.Addr, conn C.Connection) string {
if addr != nil {
if addrPort, err := netip.ParseAddrPort(addr.String()); err == nil && addrPort.Addr().IsValid() {
return addrPort.Addr().String()
}
}
if conn != nil {
return conn.RemoteDestination()
}
return ""
}
func NewTCPTracker(conn C.Conn, manager *Manager, metadata *C.Metadata, rule C.Rule, uploadTotal int64, downloadTotal int64, pushToManager bool) *tcpTracker {
metadata.RemoteDst = parseRemoteDestination(conn.RemoteAddr(), conn)
metadata.RemoteDst = conn.RemoteDestination()
t := &tcpTracker{
Conn: conn,
@@ -220,7 +207,7 @@ func (ut *udpTracker) Upstream() any {
}
func NewUDPTracker(conn C.PacketConn, manager *Manager, metadata *C.Metadata, rule C.Rule, uploadTotal int64, downloadTotal int64, pushToManager bool) *udpTracker {
metadata.RemoteDst = parseRemoteDestination(nil, conn)
metadata.RemoteDst = conn.RemoteDestination()
ut := &udpTracker{
PacketConn: conn,


@@ -413,11 +413,6 @@ func handleUDPConn(packet C.PacketAdapter) {
pc := statistic.NewUDPTracker(rawPc, statistic.DefaultManager, metadata, rule, 0, 0, true)
if rawPc.Chains().Last() == "REJECT-DROP" {
_ = pc.Close()
return nil, nil, errors.New("rejected drop packet")
}
oAddrPort := metadata.AddrPort()
writeBackProxy := nat.NewWriteBackProxy(packet)


@@ -7858,9 +7858,9 @@ dependencies = [
[[package]]
name = "rust-i18n"
version = "3.1.4"
version = "3.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2b6307cde881492032919adf26e254981604a6657b339ae23cce8358e9ee203"
checksum = "fda2551fdfaf6cc5ee283adc15e157047b92ae6535cf80f6d4962d05717dc332"
dependencies = [
"globwalk",
"once_cell",
@@ -9870,9 +9870,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
version = "1.45.0"
version = "1.45.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165"
checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779"
dependencies = [
"backtrace",
"bytes",


@@ -22,6 +22,6 @@
},
"devDependencies": {
"@types/lodash-es": "4.17.12",
"@types/react": "19.1.4"
"@types/react": "19.1.5"
}
}


@@ -62,16 +62,16 @@
"@tanstack/react-router-devtools": "1.120.10",
"@tanstack/router-plugin": "1.120.10",
"@tauri-apps/plugin-clipboard-manager": "2.2.2",
"@tauri-apps/plugin-dialog": "2.2.1",
"@tauri-apps/plugin-fs": "2.2.1",
"@tauri-apps/plugin-dialog": "2.2.2",
"@tauri-apps/plugin-fs": "2.3.0",
"@tauri-apps/plugin-notification": "2.2.2",
"@tauri-apps/plugin-os": "2.2.1",
"@tauri-apps/plugin-process": "2.2.1",
"@tauri-apps/plugin-shell": "2.2.1",
"@tauri-apps/plugin-updater": "2.7.1",
"@types/react": "19.1.4",
"@types/react": "19.1.5",
"@types/react-dom": "19.1.5",
"@types/validator": "13.15.0",
"@types/validator": "13.15.1",
"@vitejs/plugin-legacy": "6.1.1",
"@vitejs/plugin-react": "4.4.1",
"@vitejs/plugin-react-swc": "3.9.0",


@@ -19,7 +19,7 @@
"@radix-ui/react-scroll-area": "1.2.9",
"@tauri-apps/api": "2.5.0",
"@types/d3": "7.4.3",
"@types/react": "19.1.4",
"@types/react": "19.1.5",
"@vitejs/plugin-react": "4.4.1",
"ahooks": "3.8.5",
"d3": "7.9.0",


@@ -2,10 +2,10 @@
"manifest_version": 1,
"latest": {
"mihomo": "v1.19.9",
"mihomo_alpha": "alpha-989f4ec",
"mihomo_alpha": "alpha-34de62d",
"clash_rs": "v0.7.8",
"clash_premium": "2023-09-05-gdcc8d87",
"clash_rs_alpha": "0.7.8-alpha+sha.5e5a732"
"clash_rs_alpha": "0.7.8-alpha+sha.993dda8"
},
"arch_template": {
"mihomo": {
@@ -69,5 +69,5 @@
"linux-armv7hf": "clash-armv7-unknown-linux-gnueabihf"
}
},
"updated_at": "2025-05-23T22:21:03.987Z"
"updated_at": "2025-05-24T22:20:47.315Z"
}


@@ -65,7 +65,7 @@
"@tauri-apps/cli": "2.5.0",
"@types/fs-extra": "11.0.4",
"@types/lodash-es": "4.17.12",
"@types/node": "22.15.18",
"@types/node": "22.15.21",
"@typescript-eslint/eslint-plugin": "8.32.1",
"@typescript-eslint/parser": "8.32.1",
"autoprefixer": "10.4.21",

File diff suppressed because it is too large


@@ -2,23 +2,21 @@
#### 已知问题
- 仅在Ubuntu 22.04/24.04、Fedora 41 **Gnome桌面环境** 做过简单测试,不保证其他Linux发行版可用,将在未来做进一步适配和调优
- MacOS 自定义图标与速率显示,推荐图标尺寸为 256x256。其他尺寸可能会导致不正常图标和速率间隙
- MacOS 下墙贴主要为浅色,Tray 图标深色时图标闪烁;彩色 Tray 速率颜色淡
- Linux 下 Clash Verge Rev 内存占用显著高于 Windows / MacOS
- 窗口状态管理器已确定上游存在缺陷,暂时移除。当前不再内置窗口大小和位置记忆。
### 2.2.4 相对于 2.2.3
#### 修复了:
- 首页代理模式快速切换导致的卡死问题
- 首页"代理模式"快速切换导致的卡死问题
- 解锁测试报错信息
- Macos 快捷键关闭窗口无法启用自动轻量模式
- 静默启动异常窗口创建和关闭流程
- 使用 tauri window-state 管理窗口,尝试解决各种窗口异常
- Windows 错误的全局快捷键 'Ctrl+Q' 注册
- Windows 错误的全局快捷键 `Ctrl+Q` 注册
- Vless URL 解码时网络类型错误
- 切换自定义代理地址导致系统代理状态异常
- Macos TUN 默认无效网卡名称
- 托盘更改订阅后 UI 不同步的问题
- 修复提权漏洞,改用带认证的 IPC 通信(后续还可以加强完善认证密钥创建和管理机制)
- 修复提权漏洞,改用带认证的 IPC 通信
- 编辑器中连字符问题
- 安装服务模式后无法立即开启 TUN 模式
- 同步更新多语言翻译
@@ -28,14 +26,13 @@
- 使用外部扩展脚本覆写代理组时首页无法显示代理组
#### 新增了:
- Mihomo(Meta)内核升级至 1.19.8
- Mihomo(Meta)内核升级至 1.19.9
- 允许代理主机地址设置为非 127.0.0.1 对 WSL 代理友好
- 关闭系统代理时关闭已建立的网络连接
- 托盘显示当前轻量模式状态
- Webdav 请求加入 UA
- Webdav 支持目录重定向
- 移除 Webdav 跨平台备份恢复限制
- 增强 Webdav 备份目录检查和文件上传重试机制
- Webdav 备份目录检查和文件上传重试机制
- 系统代理守卫可检查意外设置变更并恢复
- 定时自动订阅更新也能自动回退使用代理
- 订阅请求超时机制,防止订阅更新的时候卡死
@@ -51,7 +48,8 @@
- 添加了Zashboard的一键跳转URL
- 使用操作系统默认的窗口管理器
- 切换、升级、重启内核的状态管理
- 更精细化控制自动日志清理新增1天选项
- 更精细化控制自动日志清理新增1天选项
- Windows 快捷键名称改为 `Clash Verge`
#### 优化了:
- 系统代理 Bypass 设置
@@ -59,7 +57,7 @@
- 切换到规则页面时自动刷新规则数据
- 重构更新失败回退机制,使用后端完成更新失败后回退到使用 Clash 代理再次尝试更新
- 编辑非激活订阅的时候不在触发当前订阅配置重载
- 改进核心功能防止主进程阻塞、改进MihomoManager实现以及优化窗口创建流程。减少应用程序可能出现的主进程卡死情况
- 改进核心功能防止主进程阻塞、改进MihomoManager实现以及优化窗口创建流程
- 优化系统代理设置更新逻辑
- 重构前端通知系统分离通知线程防止前端卡死
- 优化网络请求和错误处理
@@ -71,15 +69,20 @@
- Mihomo 内核默认日志等级为 warn
- Clash Verge Rev 应用默认日志等级为 warn
- 重构了原来的 IP 信息请求重试机制,采用轮询检测,解决了 Network Error 和超时问题
- 对轮询检测机制进行了优化,引入洗牌算法来增强随机性
- 对轮询检测机制进行了优化,引入洗牌算法来增强随机性
- 对获取系统信息的流程进行了优化,并添加了去重检测机制,确保剔除重复的信息
- 优化窗口状态初始化逻辑和添加缺失的权限设置
- 异步化配置:优化端口查找和配置保存逻辑
- 重构事件通知机制到独立线程,避免前端卡死
- 优化端口设置,每个端口可随机设置端口号
- 优化了保存机制,使用平滑函数,防止客户端卡死
- 优化端口设置退出和保存机制
- 强制为 Mihomo 配置补全并覆盖 external-controller-cors 字段,默认不允许跨域和仅本地请求,提升 cors 安全性,升级配置时自动覆盖
- 优化了保存机制,使用平滑函数,防止客户端卡死
- 优化端口设置退出和保存机制
- 强制为 Mihomo 配置补全并覆盖 external-controller-cors 字段,默认不允许跨域和仅本地请求,提升 cors 安全性,升级配置时自动覆盖
- 修改 端口检测范围 1111-65536
#### 移除了:
- 窗口状态管理器
- Webdav 跨平台备份恢复限制
## v2.2.3


@@ -761,10 +761,6 @@ SectionEnd
Section Install
;删除 .window-state.json 文件
SetShellVarContext current
Delete "$APPDATA\io.github.clash-verge-rev.clash-verge-rev\.window-state.json"
SetOutPath $INSTDIR
nsExec::Exec 'netsh int tcp res'
!insertmacro CheckIfAppIsRunning
@@ -983,16 +979,23 @@ Section Uninstall
RMDir "$INSTDIR"
!insertmacro DeleteAppUserModelId
!insertmacro UnpinShortcut "$SMPROGRAMS\$AppStartMenuFolder\${MAINBINARYNAME}.lnk"
!insertmacro UnpinShortcut "$DESKTOP\${MAINBINARYNAME}.lnk"
!insertmacro UnpinShortcut "$SMPROGRAMS\$AppStartMenuFolder\${PRODUCTNAME}.lnk"
!insertmacro UnpinShortcut "$DESKTOP\${PRODUCTNAME}.lnk"
; 兼容旧名称快捷方式
!insertmacro UnpinShortcut "$SMPROGRAMS\$AppStartMenuFolder\clash-verge.lnk"
!insertmacro UnpinShortcut "$DESKTOP\clash-verge.lnk"
; Remove start menu shortcut
!insertmacro MUI_STARTMENU_GETFOLDER Application $AppStartMenuFolder
Delete "$SMPROGRAMS\$AppStartMenuFolder\${MAINBINARYNAME}.lnk"
Delete "$SMPROGRAMS\$AppStartMenuFolder\${PRODUCTNAME}.lnk"
; 兼容旧名称快捷方式
Delete "$SMPROGRAMS\$AppStartMenuFolder\clash-verge.lnk"
RMDir "$SMPROGRAMS\$AppStartMenuFolder"
; Remove desktop shortcuts
Delete "$DESKTOP\${MAINBINARYNAME}.lnk"
Delete "$DESKTOP\${PRODUCTNAME}.lnk"
; 兼容旧名称快捷方式
Delete "$DESKTOP\clash-verge.lnk"
; Remove registry information for add/remove programs
!if "${INSTALLMODE}" == "both"
@@ -1058,12 +1061,12 @@ FunctionEnd
!macroend
Function CreateDesktopShortcut
CreateShortcut "$DESKTOP\${MAINBINARYNAME}.lnk" "$INSTDIR\${MAINBINARYNAME}.exe"
!insertmacro SetLnkAppUserModelId "$DESKTOP\${MAINBINARYNAME}.lnk"
CreateShortcut "$DESKTOP\${PRODUCTNAME}.lnk" "$INSTDIR\${MAINBINARYNAME}.exe"
!insertmacro SetLnkAppUserModelId "$DESKTOP\${PRODUCTNAME}.lnk"
FunctionEnd
Function CreateStartMenuShortcut
CreateDirectory "$SMPROGRAMS\$AppStartMenuFolder"
CreateShortcut "$SMPROGRAMS\$AppStartMenuFolder\${MAINBINARYNAME}.lnk" "$INSTDIR\${MAINBINARYNAME}.exe"
!insertmacro SetLnkAppUserModelId "$SMPROGRAMS\$AppStartMenuFolder\${MAINBINARYNAME}.lnk"
FunctionEnd
CreateShortcut "$SMPROGRAMS\$AppStartMenuFolder\${PRODUCTNAME}.lnk" "$INSTDIR\${MAINBINARYNAME}.exe"
!insertmacro SetLnkAppUserModelId "$SMPROGRAMS\$AppStartMenuFolder\${PRODUCTNAME}.lnk"
FunctionEnd


@@ -10,7 +10,6 @@ use crate::{
/// 获取配置文件列表
#[tauri::command]
pub fn get_profiles() -> CmdResult<IProfiles> {
let _ = Tray::global().update_menu();
Ok(Config::profiles().data().clone())
}
@@ -154,11 +153,25 @@ pub async fn patch_profiles_config(profiles: IProfiles) -> CmdResult<bool> {
match CoreManager::global().update_config().await {
Ok((true, _)) => {
logging!(info, Type::Cmd, true, "配置更新成功");
handle::Handle::refresh_clash();
let _ = Tray::global().update_tooltip();
Config::profiles().apply();
wrap_err!(Config::profiles().data().save_file())?;
handle::Handle::refresh_clash();
crate::process::AsyncHandler::spawn(|| async move {
if let Err(e) = Tray::global().update_tooltip() {
log::warn!(target: "app", "异步更新托盘提示失败: {}", e);
}
if let Err(e) = Tray::global().update_menu() {
log::warn!(target: "app", "异步更新托盘菜单失败: {}", e);
}
// 保存配置文件
if let Err(e) = Config::profiles().data().save_file() {
log::warn!(target: "app", "异步保存配置文件失败: {}", e);
}
});
// 立即通知前端配置变更
if let Some(current) = &current_value {
logging!(info, Type::Cmd, true, "向前端发送配置变更事件: {}", current);
handle::Handle::notify_profile_changed(current.clone());
@@ -185,7 +198,13 @@ pub async fn patch_profiles_config(profiles: IProfiles) -> CmdResult<bool> {
// 静默恢复,不触发验证
wrap_err!({ Config::profiles().draft().patch_config(restore_profiles) })?;
Config::profiles().apply();
wrap_err!(Config::profiles().data().save_file())?;
crate::process::AsyncHandler::spawn(|| async move {
if let Err(e) = Config::profiles().data().save_file() {
log::warn!(target: "app", "异步保存恢复配置文件失败: {}", e);
}
});
logging!(info, Type::Cmd, true, "成功恢复到之前的配置");
}


@@ -1,24 +1,69 @@
use super::CmdResult;
use crate::module::mihomo::MihomoManager;
use once_cell::sync::Lazy;
use parking_lot::Mutex;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::{Duration, Instant};
static LAST_REFRESH_TIME: Lazy<Mutex<Option<Instant>>> = Lazy::new(|| Mutex::new(None));
static IS_REFRESHING: AtomicBool = AtomicBool::new(false);
const REFRESH_INTERVAL: Duration = Duration::from_secs(5);
#[tauri::command]
pub async fn get_proxies() -> CmdResult<serde_json::Value> {
let mannager = MihomoManager::global();
let manager = MihomoManager::global();
mannager
manager
.refresh_proxies()
.await
.map(|_| mannager.get_proxies())
.or_else(|_| Ok(mannager.get_proxies()))
.map(|_| manager.get_proxies())
.or_else(|_| Ok(manager.get_proxies()))
}
#[tauri::command]
pub async fn get_providers_proxies() -> CmdResult<serde_json::Value> {
let mannager = MihomoManager::global();
let manager = MihomoManager::global();
let cached_data = manager.get_providers_proxies();
mannager
.refresh_providers_proxies()
.await
.map(|_| mannager.get_providers_proxies())
.or_else(|_| Ok(mannager.get_providers_proxies()))
let safe_data = if cached_data.is_null() {
serde_json::json!({
"providers": {}
})
} else {
cached_data
};
// 检查是否需要刷新
let should_refresh = {
let last_refresh = LAST_REFRESH_TIME.lock();
match *last_refresh {
Some(last_time) => last_time.elapsed() > REFRESH_INTERVAL,
None => true,
}
};
if should_refresh && !IS_REFRESHING.load(Ordering::Acquire) {
IS_REFRESHING.store(true, Ordering::Release);
crate::process::AsyncHandler::spawn(|| async move {
let manager = MihomoManager::global();
match manager.refresh_providers_proxies().await {
Ok(_) => {
log::debug!(target: "app", "providers_proxies静默后台刷新成功");
}
Err(e) => {
log::warn!(target: "app", "providers_proxies后台刷新失败: {}", e);
}
}
{
let mut last_refresh = LAST_REFRESH_TIME.lock();
*last_refresh = Some(Instant::now());
}
IS_REFRESHING.store(false, Ordering::Release);
});
}
Ok(safe_data)
}
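
The command above keeps serving cached provider data and refreshes it in the background at most once per interval, guarded by an in-flight flag. A rough Go re-sketch of that pattern for illustration only; the names and types here are invented, not the project's API.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

const refreshInterval = 5 * time.Second

type providerCache struct {
	mu          sync.Mutex
	lastRefresh time.Time
	refreshing  atomic.Bool
	data        atomic.Value // holds map[string]any
}

// Get always returns the current cached value immediately and, at most once per
// refreshInterval and with only one refresh in flight, updates it in the background.
func (c *providerCache) Get(refresh func() map[string]any) map[string]any {
	cached, _ := c.data.Load().(map[string]any)
	if cached == nil {
		cached = map[string]any{} // safe empty value when nothing is cached yet
	}

	c.mu.Lock()
	due := time.Since(c.lastRefresh) > refreshInterval
	c.mu.Unlock()

	if due && c.refreshing.CompareAndSwap(false, true) {
		go func() {
			defer c.refreshing.Store(false)
			c.data.Store(refresh())
			c.mu.Lock()
			c.lastRefresh = time.Now()
			c.mu.Unlock()
		}()
	}
	return cached
}

func main() {
	cache := &providerCache{}
	fetch := func() map[string]any { return map[string]any{"providers": 1} }

	fmt.Println(cache.Get(fetch)) // first call: empty map, refresh starts in background
	time.Sleep(10 * time.Millisecond)
	fmt.Println(cache.Get(fetch)) // later calls see the refreshed value
}
```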


@@ -18,15 +18,16 @@ use crate::{
use anyhow::Result;
#[cfg(target_os = "macos")]
use futures::StreamExt;
#[cfg(target_os = "macos")]
use parking_lot::Mutex;
#[cfg(target_os = "macos")]
use parking_lot::RwLock;
#[cfg(target_os = "macos")]
pub use speed_rate::{SpeedRate, Traffic};
use std::fs;
use std::sync::atomic::{AtomicBool, Ordering};
#[cfg(target_os = "macos")]
use std::sync::Arc;
use std::time::{Duration, Instant};
use tauri::{
menu::{CheckMenuItem, IsMenuItem, MenuEvent, MenuItem, PredefinedMenuItem, Submenu},
tray::{MouseButton, MouseButtonState, TrayIconEvent},
@@ -46,10 +47,15 @@ pub struct Tray {
shutdown_tx: Arc<RwLock<Option<broadcast::Sender<()>>>>,
is_subscribed: Arc<RwLock<bool>>,
pub rate_cache: Arc<Mutex<Option<Rate>>>,
last_menu_update: Mutex<Option<Instant>>,
menu_updating: AtomicBool,
}
#[cfg(not(target_os = "macos"))]
pub struct Tray {}
pub struct Tray {
last_menu_update: Mutex<Option<Instant>>,
menu_updating: AtomicBool,
}
impl TrayState {
pub fn get_common_tray_icon() -> (bool, Vec<u8>) {
@@ -164,10 +170,15 @@ impl Tray {
shutdown_tx: Arc::new(RwLock::new(None)),
is_subscribed: Arc::new(RwLock::new(false)),
rate_cache: Arc::new(Mutex::new(None)),
last_menu_update: Mutex::new(None),
menu_updating: AtomicBool::new(false),
});
#[cfg(not(target_os = "macos"))]
return TRAY.get_or_init(|| Tray {});
return TRAY.get_or_init(|| Tray {
last_menu_update: Mutex::new(None),
menu_updating: AtomicBool::new(false),
});
}
pub fn init(&self) -> Result<()> {
@@ -192,8 +203,28 @@ impl Tray {
Ok(())
}
/// 更新托盘菜单
/// 更新托盘菜单(带频率限制)
pub fn update_menu(&self) -> Result<()> {
// 检查是否正在更新或距离上次更新太近
const MIN_UPDATE_INTERVAL: Duration = Duration::from_millis(500);
// 检查是否已有更新任务在执行
if self.menu_updating.load(Ordering::Acquire) {
log::debug!(target: "app", "托盘菜单正在更新中,跳过本次更新");
return Ok(());
}
// 检查更新频率
{
let last_update = self.last_menu_update.lock();
if let Some(last_time) = *last_update {
if last_time.elapsed() < MIN_UPDATE_INTERVAL {
log::debug!(target: "app", "托盘菜单更新频率过高,跳过本次更新");
return Ok(());
}
}
}
let app_handle = match handle::Handle::global().app_handle() {
Some(handle) => handle,
None => {
@@ -202,6 +233,20 @@ impl Tray {
}
};
// 设置更新状态
self.menu_updating.store(true, Ordering::Release);
let result = self.update_menu_internal(&app_handle);
{
let mut last_update = self.last_menu_update.lock();
*last_update = Some(Instant::now());
}
self.menu_updating.store(false, Ordering::Release);
result
}
fn update_menu_internal(&self, app_handle: &AppHandle) -> Result<()> {
let verge = Config::verge().latest().clone();
let system_proxy = verge.enable_system_proxy.as_ref().unwrap_or(&false);
let tun_mode = verge.enable_tun_mode.as_ref().unwrap_or(&false);
@@ -230,6 +275,7 @@ impl Tray {
profile_uid_and_name,
is_lightweight_mode,
)?));
log::debug!(target: "app", "托盘菜单更新成功");
Ok(())
}
None => {


@@ -71,8 +71,8 @@ export const useClashInfo = () => {
if (patch["redir-port"]) {
const port = patch["redir-port"];
if (port < 1000) {
throw new Error("The port should not < 1000");
if (port < 1111) {
throw new Error("The port should not < 1111");
}
if (port > 65536) {
throw new Error("The port should not > 65536");
@@ -81,8 +81,8 @@ export const useClashInfo = () => {
if (patch["tproxy-port"]) {
const port = patch["tproxy-port"];
if (port < 1000) {
throw new Error("The port should not < 1000");
if (port < 1111) {
throw new Error("The port should not < 1111");
}
if (port > 65536) {
throw new Error("The port should not > 65536");
@@ -91,8 +91,8 @@ export const useClashInfo = () => {
if (patch["mixed-port"]) {
const port = patch["mixed-port"];
if (port < 1000) {
throw new Error("The port should not < 1000");
if (port < 1111) {
throw new Error("The port should not < 1111");
}
if (port > 65536) {
throw new Error("The port should not > 65536");
@@ -101,8 +101,8 @@ export const useClashInfo = () => {
if (patch["socks-port"]) {
const port = patch["socks-port"];
if (port < 1000) {
throw new Error("The port should not < 1000");
if (port < 1111) {
throw new Error("The port should not < 1111");
}
if (port > 65536) {
throw new Error("The port should not > 65536");
@@ -111,8 +111,8 @@ export const useClashInfo = () => {
if (patch["port"]) {
const port = patch["port"];
if (port < 1000) {
throw new Error("The port should not < 1000");
if (port < 1111) {
throw new Error("The port should not < 1111");
}
if (port > 65536) {
throw new Error("The port should not > 65536");


@@ -185,6 +185,7 @@ const Layout = () => {
mutate("getAutotemProxy");
}),
addListener("verge://notice-message", ({ payload }) =>
handleNotice(payload as [string, string]),
),


@@ -297,17 +297,28 @@ const ProfilePage = () => {
// 监听后端配置变更
useEffect(() => {
let unlistenPromise: Promise<() => void> | undefined;
let timeoutId: ReturnType<typeof setTimeout> | undefined;
const setupListener = async () => {
unlistenPromise = listen<string>('profile-changed', (event) => {
console.log('Profile changed event received:', event.payload);
mutateProfiles();
if (timeoutId) {
clearTimeout(timeoutId);
}
timeoutId = setTimeout(() => {
mutateProfiles();
timeoutId = undefined;
}, 300);
});
};
setupListener();
return () => {
if (timeoutId) {
clearTimeout(timeoutId);
}
unlistenPromise?.then(unlisten => unlisten());
};
}, [mutateProfiles, t]);


@@ -71,6 +71,9 @@ export const AppDataProvider = ({ children }: { children: React.ReactNode }) =>
getProxyProviders,
{
revalidateOnFocus: false,
revalidateOnReconnect: false,
refreshInterval: 30000,
dedupingInterval: 10000,
suspense: false,
errorRetryCount: 3
}


@@ -198,6 +198,11 @@ export const getProxyProviders = async () => {
const response = await invoke<{
providers: Record<string, IProxyProviderItem>;
}>("get_providers_proxies");
if (!response || !response.providers) {
console.warn("getProxyProviders: Invalid response structure, returning empty object");
return {};
}
const providers = response.providers as Record<string, IProxyProviderItem>;
return Object.fromEntries(


@@ -1,2 +1,2 @@
LINUX_VERSION-5.15 = .183
LINUX_KERNEL_HASH-5.15.183 = d06f7f629a4d61a87ebd0db285ace9ebf4fce0226b10b2c0ec235e3550c58ee8
LINUX_VERSION-5.15 = .184
LINUX_KERNEL_HASH-5.15.184 = 9c3e98c6dcc7dca7c2e9dd51423eaf0581f5e100d0f04c23bc29f21913dac1d9


@@ -1,2 +1,2 @@
LINUX_VERSION-6.1 = .139
LINUX_KERNEL_HASH-6.1.139 = f66affdfee8b6cf8a14cfa00cc7842f79af0e7a70a68604289074f3ecffc9f18
LINUX_VERSION-6.1 = .140
LINUX_KERNEL_HASH-6.1.140 = 5779f9caca77f7bfe3c3923b4d760041318db0303de93b2d4691d60e4d41eb74


@@ -1,2 +1,2 @@
LINUX_VERSION-6.12 = .29
LINUX_KERNEL_HASH-6.12.29 = e8b2ec7e2338ccb9c86de7154f6edcaadfce80907493c143e85a82776bb5064d
LINUX_VERSION-6.12 = .30
LINUX_KERNEL_HASH-6.12.30 = df046a48971e40ce0b2e003e7e55b6b1e7da2912120eb216d5d6c8450c9cf82e


@@ -1,2 +1,2 @@
LINUX_VERSION-6.6 = .91
LINUX_KERNEL_HASH-6.6.91 = d08d3d175407a52cd0b25fc95e149bbd2fd6922cd37816c8fcfad18f95e254f4
LINUX_VERSION-6.6 = .92
LINUX_KERNEL_HASH-6.6.92 = 1d82a82642d281c31d86f7301bc55e12a8a9f9c04532e249ef8ae6fe7dc237ec


@@ -12,10 +12,14 @@ include $(INCLUDE_DIR)/package.mk
define Package/$(PKG_NAME)
SECTION:=utils
CATEGORY:=Utilities
DEPENDS:=@USB_GADGET_SUPPORT +kmod-usb-gadget +kmod-usb-lib-composite
DEPENDS:=@USB_GADGET_SUPPORT +kmod-usb-gadget +kmod-fs-configfs +kmod-usb-lib-composite
TITLE:=init script to create USB gadgets
endef
define Package/$(PKG_NAME)/conffiles
/etc/config/usbgadget
endef
define Build/Compile
endef
@@ -35,7 +39,7 @@ define GadgetPreset
SECTION:=utils
CATEGORY:=Utilities
TITLE+= $(2) gadget preset
DEPENDS+= $(3)
DEPENDS+= +usbgadget $(3)
endef
define Package/$(PKG_NAME)-$(1)/description


@@ -5,7 +5,6 @@ import (
"encoding/json"
"net"
"runtime"
"strings"
"sync"
"syscall"
@@ -203,12 +202,21 @@ func NewBase(opt BaseOption) *Base {
type conn struct {
N.ExtendedConn
chain C.Chain
actualRemoteDestination string
chain C.Chain
adapterAddr string
}
func (c *conn) RemoteDestination() string {
return c.actualRemoteDestination
if remoteAddr := c.RemoteAddr(); remoteAddr != nil {
m := C.Metadata{}
if err := m.SetRemoteAddr(remoteAddr); err == nil {
if m.Valid() {
return m.String()
}
}
}
host, _, _ := net.SplitHostPort(c.adapterAddr)
return host
}
// Chains implements C.Connection
@@ -241,19 +249,20 @@ func NewConn(c net.Conn, a C.ProxyAdapter) C.Conn {
if _, ok := c.(syscall.Conn); !ok { // exclusion system conn like *net.TCPConn
c = N.NewDeadlineConn(c) // most conn from outbound can't handle readDeadline correctly
}
return &conn{N.NewExtendedConn(c), []string{a.Name()}, parseRemoteDestination(a.Addr())}
return &conn{N.NewExtendedConn(c), []string{a.Name()}, a.Addr()}
}
type packetConn struct {
N.EnhancePacketConn
chain C.Chain
adapterName string
connID string
actualRemoteDestination string
chain C.Chain
adapterName string
connID string
adapterAddr string
}
func (c *packetConn) RemoteDestination() string {
return c.actualRemoteDestination
host, _, _ := net.SplitHostPort(c.adapterAddr)
return host
}
// Chains implements C.Connection
@@ -292,19 +301,7 @@ func newPacketConn(pc net.PacketConn, a C.ProxyAdapter) C.PacketConn {
if _, ok := pc.(syscall.Conn); !ok { // exclusion system conn like *net.UDPConn
epc = N.NewDeadlineEnhancePacketConn(epc) // most conn from outbound can't handle readDeadline correctly
}
return &packetConn{epc, []string{a.Name()}, a.Name(), utils.NewUUIDV4().String(), parseRemoteDestination(a.Addr())}
}
func parseRemoteDestination(addr string) string {
if dst, _, err := net.SplitHostPort(addr); err == nil {
return dst
} else {
if addrError, ok := err.(*net.AddrError); ok && strings.Contains(addrError.Err, "missing port") {
return dst
} else {
return ""
}
}
return &packetConn{epc, []string{a.Name()}, a.Name(), utils.NewUUIDV4().String(), a.Addr()}
}
type AddRef interface {


@@ -20,7 +20,7 @@ type connReadResult struct {
type Conn struct {
network.ExtendedConn
deadline atomic.TypedValue[time.Time]
pipeDeadline pipeDeadline
pipeDeadline PipeDeadline
disablePipe atomic.Bool
inRead atomic.Bool
resultCh chan *connReadResult
@@ -34,7 +34,7 @@ func IsConn(conn any) bool {
func NewConn(conn net.Conn) *Conn {
c := &Conn{
ExtendedConn: bufio.NewExtendedConn(conn),
pipeDeadline: makePipeDeadline(),
pipeDeadline: MakePipeDeadline(),
resultCh: make(chan *connReadResult, 1),
}
c.resultCh <- nil
@@ -58,7 +58,7 @@ func (c *Conn) Read(p []byte) (n int, err error) {
c.resultCh <- nil
break
}
case <-c.pipeDeadline.wait():
case <-c.pipeDeadline.Wait():
return 0, os.ErrDeadlineExceeded
}
@@ -104,7 +104,7 @@ func (c *Conn) ReadBuffer(buffer *buf.Buffer) (err error) {
c.resultCh <- nil
break
}
case <-c.pipeDeadline.wait():
case <-c.pipeDeadline.Wait():
return os.ErrDeadlineExceeded
}
@@ -130,7 +130,7 @@ func (c *Conn) SetReadDeadline(t time.Time) error {
return c.ExtendedConn.SetReadDeadline(t)
}
c.deadline.Store(t)
c.pipeDeadline.set(t)
c.pipeDeadline.Set(t)
return nil
}


@@ -19,7 +19,7 @@ type readResult struct {
type NetPacketConn struct {
net.PacketConn
deadline atomic.TypedValue[time.Time]
pipeDeadline pipeDeadline
pipeDeadline PipeDeadline
disablePipe atomic.Bool
inRead atomic.Bool
resultCh chan any
@@ -28,7 +28,7 @@ type NetPacketConn struct {
func NewNetPacketConn(pc net.PacketConn) net.PacketConn {
npc := &NetPacketConn{
PacketConn: pc,
pipeDeadline: makePipeDeadline(),
pipeDeadline: MakePipeDeadline(),
resultCh: make(chan any, 1),
}
npc.resultCh <- nil
@@ -83,7 +83,7 @@ FOR:
c.resultCh <- nil
break FOR
}
case <-c.pipeDeadline.wait():
case <-c.pipeDeadline.Wait():
return 0, nil, os.ErrDeadlineExceeded
}
}
@@ -122,7 +122,7 @@ func (c *NetPacketConn) SetReadDeadline(t time.Time) error {
return c.PacketConn.SetReadDeadline(t)
}
c.deadline.Store(t)
c.pipeDeadline.set(t)
c.pipeDeadline.Set(t)
return nil
}


@@ -52,7 +52,7 @@ FOR:
c.netPacketConn.resultCh <- nil
break FOR
}
case <-c.netPacketConn.pipeDeadline.wait():
case <-c.netPacketConn.pipeDeadline.Wait():
return nil, nil, nil, os.ErrDeadlineExceeded
}
}


@@ -69,7 +69,7 @@ FOR:
c.netPacketConn.resultCh <- nil
break FOR
}
case <-c.netPacketConn.pipeDeadline.wait():
case <-c.netPacketConn.pipeDeadline.Wait():
return M.Socksaddr{}, os.ErrDeadlineExceeded
}
}
@@ -146,7 +146,7 @@ FOR:
c.netPacketConn.resultCh <- nil
break FOR
}
case <-c.netPacketConn.pipeDeadline.wait():
case <-c.netPacketConn.pipeDeadline.Wait():
return nil, M.Socksaddr{}, os.ErrDeadlineExceeded
}
}


@@ -9,24 +9,24 @@ import (
"time"
)
// pipeDeadline is an abstraction for handling timeouts.
type pipeDeadline struct {
// PipeDeadline is an abstraction for handling timeouts.
type PipeDeadline struct {
mu sync.Mutex // Guards timer and cancel
timer *time.Timer
cancel chan struct{} // Must be non-nil
}
func makePipeDeadline() pipeDeadline {
return pipeDeadline{cancel: make(chan struct{})}
func MakePipeDeadline() PipeDeadline {
return PipeDeadline{cancel: make(chan struct{})}
}
// set sets the point in time when the deadline will time out.
// Set sets the point in time when the deadline will time out.
// A timeout event is signaled by closing the channel returned by waiter.
// Once a timeout has occurred, the deadline can be refreshed by specifying a
// t value in the future.
//
// A zero value for t prevents timeout.
func (d *pipeDeadline) set(t time.Time) {
func (d *PipeDeadline) Set(t time.Time) {
d.mu.Lock()
defer d.mu.Unlock()
@@ -61,8 +61,8 @@ func (d *pipeDeadline) set(t time.Time) {
}
}
// wait returns a channel that is closed when the deadline is exceeded.
func (d *pipeDeadline) wait() chan struct{} {
// Wait returns a channel that is closed when the deadline is exceeded.
func (d *PipeDeadline) Wait() chan struct{} {
d.mu.Lock()
defer d.mu.Unlock()
return d.cancel


@@ -33,8 +33,8 @@ type pipe struct {
localDone chan struct{}
remoteDone <-chan struct{}
readDeadline pipeDeadline
writeDeadline pipeDeadline
readDeadline PipeDeadline
writeDeadline PipeDeadline
readWaitOptions N.ReadWaitOptions
}
@@ -56,15 +56,15 @@ func Pipe() (net.Conn, net.Conn) {
rdRx: cb1, rdTx: cn1,
wrTx: cb2, wrRx: cn2,
localDone: done1, remoteDone: done2,
readDeadline: makePipeDeadline(),
writeDeadline: makePipeDeadline(),
readDeadline: MakePipeDeadline(),
writeDeadline: MakePipeDeadline(),
}
p2 := &pipe{
rdRx: cb2, rdTx: cn2,
wrTx: cb1, wrRx: cn1,
localDone: done2, remoteDone: done1,
readDeadline: makePipeDeadline(),
writeDeadline: makePipeDeadline(),
readDeadline: MakePipeDeadline(),
writeDeadline: MakePipeDeadline(),
}
return p1, p2
}
@@ -86,7 +86,7 @@ func (p *pipe) read(b []byte) (n int, err error) {
return 0, io.ErrClosedPipe
case isClosedChan(p.remoteDone):
return 0, io.EOF
case isClosedChan(p.readDeadline.wait()):
case isClosedChan(p.readDeadline.Wait()):
return 0, os.ErrDeadlineExceeded
}
@@ -99,7 +99,7 @@ func (p *pipe) read(b []byte) (n int, err error) {
return 0, io.ErrClosedPipe
case <-p.remoteDone:
return 0, io.EOF
case <-p.readDeadline.wait():
case <-p.readDeadline.Wait():
return 0, os.ErrDeadlineExceeded
}
}
@@ -118,7 +118,7 @@ func (p *pipe) write(b []byte) (n int, err error) {
return 0, io.ErrClosedPipe
case isClosedChan(p.remoteDone):
return 0, io.ErrClosedPipe
case isClosedChan(p.writeDeadline.wait()):
case isClosedChan(p.writeDeadline.Wait()):
return 0, os.ErrDeadlineExceeded
}
@@ -134,7 +134,7 @@ func (p *pipe) write(b []byte) (n int, err error) {
return n, io.ErrClosedPipe
case <-p.remoteDone:
return n, io.ErrClosedPipe
case <-p.writeDeadline.wait():
case <-p.writeDeadline.Wait():
return n, os.ErrDeadlineExceeded
}
}
@@ -145,8 +145,8 @@ func (p *pipe) SetDeadline(t time.Time) error {
if isClosedChan(p.localDone) || isClosedChan(p.remoteDone) {
return io.ErrClosedPipe
}
p.readDeadline.set(t)
p.writeDeadline.set(t)
p.readDeadline.Set(t)
p.writeDeadline.Set(t)
return nil
}
@@ -154,7 +154,7 @@ func (p *pipe) SetReadDeadline(t time.Time) error {
if isClosedChan(p.localDone) || isClosedChan(p.remoteDone) {
return io.ErrClosedPipe
}
p.readDeadline.set(t)
p.readDeadline.Set(t)
return nil
}
@@ -162,7 +162,7 @@ func (p *pipe) SetWriteDeadline(t time.Time) error {
if isClosedChan(p.localDone) || isClosedChan(p.remoteDone) {
return io.ErrClosedPipe
}
p.writeDeadline.set(t)
p.writeDeadline.Set(t)
return nil
}
@@ -192,7 +192,7 @@ func (p *pipe) waitReadBuffer() (buffer *buf.Buffer, err error) {
return nil, io.ErrClosedPipe
case isClosedChan(p.remoteDone):
return nil, io.EOF
case isClosedChan(p.readDeadline.wait()):
case isClosedChan(p.readDeadline.Wait()):
return nil, os.ErrDeadlineExceeded
}
select {
@@ -211,7 +211,7 @@ func (p *pipe) waitReadBuffer() (buffer *buf.Buffer, err error) {
return nil, io.ErrClosedPipe
case <-p.remoteDone:
return nil, io.EOF
case <-p.readDeadline.wait():
case <-p.readDeadline.Wait():
return nil, os.ErrDeadlineExceeded
}
}


@@ -3,7 +3,6 @@ package statistic
import (
"io"
"net"
"net/netip"
"time"
"github.com/metacubex/mihomo/common/atomic"
@@ -116,20 +115,8 @@ func (tt *tcpTracker) Upstream() any {
return tt.Conn
}
func parseRemoteDestination(addr net.Addr, conn C.Connection) string {
if addr != nil {
if addrPort, err := netip.ParseAddrPort(addr.String()); err == nil && addrPort.Addr().IsValid() {
return addrPort.Addr().String()
}
}
if conn != nil {
return conn.RemoteDestination()
}
return ""
}
func NewTCPTracker(conn C.Conn, manager *Manager, metadata *C.Metadata, rule C.Rule, uploadTotal int64, downloadTotal int64, pushToManager bool) *tcpTracker {
metadata.RemoteDst = parseRemoteDestination(conn.RemoteAddr(), conn)
metadata.RemoteDst = conn.RemoteDestination()
t := &tcpTracker{
Conn: conn,
@@ -220,7 +207,7 @@ func (ut *udpTracker) Upstream() any {
}
func NewUDPTracker(conn C.PacketConn, manager *Manager, metadata *C.Metadata, rule C.Rule, uploadTotal int64, downloadTotal int64, pushToManager bool) *udpTracker {
metadata.RemoteDst = parseRemoteDestination(nil, conn)
metadata.RemoteDst = conn.RemoteDestination()
ut := &udpTracker{
PacketConn: conn,


@@ -413,11 +413,6 @@ func handleUDPConn(packet C.PacketAdapter) {
pc := statistic.NewUDPTracker(rawPc, statistic.DefaultManager, metadata, rule, 0, 0, true)
if rawPc.Chains().Last() == "REJECT-DROP" {
_ = pc.Close()
return nil, nil, errors.New("rejected drop packet")
}
oAddrPort := metadata.AddrPort()
writeBackProxy := nat.NewWriteBackProxy(packet)


@@ -397,9 +397,9 @@ dependencies = [
[[package]]
name = "bson"
version = "2.14.0"
version = "2.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af8113ff51309e2779e8785a246c10fb783e8c2452f134d6257fd71cc03ccd6c"
checksum = "7969a9ba84b0ff843813e7249eed1678d9b6607ce5a3b8f0a47af3fcf7978e6e"
dependencies = [
"ahash",
"base64",
@@ -3857,9 +3857,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
version = "1.45.0"
version = "1.45.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165"
checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779"
dependencies = [
"backtrace",
"bytes",


@@ -151,6 +151,7 @@ o:depends("pdnsd_enable", "2")
o:depends("pdnsd_enable", "3")
o.description = translate("Custom DNS Server format as IP:PORT (default: 8.8.4.4:53)")
o.datatype = "ip4addrport"
o.default = "8.8.4.4:53"
o = s:option(ListValue, "tunnel_forward_mosdns", translate("Anti-pollution DNS Server"))
o:value("tcp://8.8.4.4:53,tcp://8.8.8.8:53", translate("Google Public DNS"))


@@ -21,13 +21,13 @@ define Download/geoip
HASH:=8023379316bca4713dcfa5ba4ea2fe7f4c127fff64a0cb7859d4756142b2c4dc
endef
GEOSITE_VER:=20250523165307
GEOSITE_VER:=20250525112927
GEOSITE_FILE:=dlc.dat.$(GEOSITE_VER)
define Download/geosite
URL:=https://github.com/v2fly/domain-list-community/releases/download/$(GEOSITE_VER)/
URL_FILE:=dlc.dat
FILE:=$(GEOSITE_FILE)
HASH:=b1d02c3b4f90830e8b4bda83a552da6d218407fe6833ddc8bb2c8b5372998c9f
HASH:=564c9a35b05a1a3a5febfd9ee66235bc732d4dd7675f7f1e1c917b2ece409742
endef
GEOSITE_IRAN_VER:=202505230820


@@ -1,7 +1,7 @@
<Project>
<PropertyGroup>
<Version>7.12.4</Version>
<Version>7.12.5</Version>
</PropertyGroup>
<PropertyGroup>


@@ -388,7 +388,7 @@ public partial class MainWindow : ReactiveWindow<MainWindowViewModel>
private async void MenuClose_Click(object? sender, RoutedEventArgs e)
{
if (await UI.ShowYesNo(this, ResUI.menuExitTips) == ButtonResult.No)
if (await UI.ShowYesNo(this, ResUI.menuExitTips) != ButtonResult.Yes)
{
return;
}


@@ -138,7 +138,7 @@ public partial class ProfilesView : ReactiveUserControl<ProfilesViewModel>
break;
case EViewAction.ShowYesNo:
if (await UI.ShowYesNo(_window, ResUI.RemoveServer) == ButtonResult.No)
if (await UI.ShowYesNo(_window, ResUI.RemoveServer) != ButtonResult.Yes)
{
return false;
}


@@ -80,14 +80,14 @@ public partial class RoutingRuleSettingWindow : ReactiveWindow<RoutingRuleSettin
break;
case EViewAction.ShowYesNo:
if (await UI.ShowYesNo(this, ResUI.RemoveServer) == ButtonResult.No)
if (await UI.ShowYesNo(this, ResUI.RemoveServer) != ButtonResult.Yes)
{
return false;
}
break;
case EViewAction.AddBatchRoutingRulesYesNo:
if (await UI.ShowYesNo(this, ResUI.AddBatchRoutingRulesYesNo) == ButtonResult.No)
if (await UI.ShowYesNo(this, ResUI.AddBatchRoutingRulesYesNo) != ButtonResult.Yes)
{
return false;
}


@@ -68,7 +68,7 @@ public partial class RoutingSettingWindow : ReactiveWindow<RoutingSettingViewMod
break;
case EViewAction.ShowYesNo:
if (await UI.ShowYesNo(this, ResUI.RemoveRules) == ButtonResult.No)
if (await UI.ShowYesNo(this, ResUI.RemoveRules) != ButtonResult.Yes)
{
return false;
}


@@ -45,7 +45,7 @@ public partial class SubSettingWindow : ReactiveWindow<SubSettingViewModel>
break;
case EViewAction.ShowYesNo:
if (await UI.ShowYesNo(this, ResUI.RemoveServer) == ButtonResult.No)
if (await UI.ShowYesNo(this, ResUI.RemoveServer) != ButtonResult.Yes)
{
return false;
}


@@ -12,8 +12,8 @@ android {
applicationId = "com.v2ray.ang"
minSdk = 21
targetSdk = 35
versionCode = 653
versionName = "1.10.3"
versionCode = 654
versionName = "1.10.4"
multiDexEnabled = true
val abiFilterList = (properties["ABI_FILTERS"] as? String)?.split(';')


@@ -148,7 +148,7 @@
<string name="title_mux_settings">سامووا Mux</string>
<string name="title_pref_mux_enabled">ر وندن Mux</string>
<string name="summary_pref_mux_enabled">زل تر، ٱما گاشڌ منپیز زی قت بۊ بارت دؽوۉداری، TCP، UDP و QUIC ن ای لم سفارشی کۊنین.</string>
<string name="summary_pref_mux_enabled">زل تر، ٱما گاشڌ منپیز زی قت بۊ\nمخزن ترافیک TCP وا 8 منپیز پؽش فرز، بارت دؽوۉداری UDP وو QUIC ن ای لم سفارشی کۊنین.</string>
<string name="title_pref_mux_concurency">منپیزا TCP (تلایه منجا 1-1024)</string>
<string name="title_pref_mux_xudp_concurency">منپیزا XUDP (تلایه منجا 1-1024)</string>
<string name="title_pref_mux_xudp_quic">دؽوۉداری QUIC من تۊنل mux</string>
@@ -324,7 +324,7 @@
<string name="update_new_version_found">نوسخه نۊ ن جوست: %s</string>
<string name="update_now">سکو ورۊ رسۊوی کۊنین</string>
<string name="update_check_pre_release">واجۊری نوسخه یل پؽش ز تیجنیڌن</string>
<string name="update_checking_for_update">Checking for update…</string>
<string name="update_checking_for_update">ورۊ رسۊوی ن هونی واجۊری اکونه...</string>
<string-array name="share_method">
<item>QRcode</item>


@@ -321,7 +321,7 @@
<string name="update_new_version_found">نسخه جدید پیدا شد: %s</string>
<string name="update_now">اکنون به روز رسانی کنید</string>
<string name="update_check_pre_release">بررسی نسخه پیش از انتشار</string>
<string name="update_checking_for_update">Checking for update</string>
<string name="update_checking_for_update">در حال بررسی برای به‌روزرسانی</string>
<string-array name="share_method">
<item>QRcode</item>


@@ -323,7 +323,7 @@
<string name="update_new_version_found">Найдена новая версия: %s</string>
<string name="update_now">Обновить</string>
<string name="update_check_pre_release">Искать предварительный выпуск</string>
<string name="update_checking_for_update">Checking for update</string>
<string name="update_checking_for_update">Проверка обновления</string>
<string-array name="share_method">
<item>QR-код</item>


@@ -1,5 +1,5 @@
[versions]
agp = "8.9.3"
agp = "8.10.0"
desugarJdkLibs = "2.1.5"
gradleLicensePlugin = "0.9.8"
kotlin = "2.1.21"


@@ -1,6 +1,6 @@
#Thu Nov 14 12:42:51 BDT 2024
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-8.13-bin.zip
distributionUrl=https\://services.gradle.org/distributions/gradle-8.14.1-bin.zip
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists