Update On Tue Sep 17 20:34:17 CEST 2024

github-action[bot]
2024-09-17 20:34:17 +02:00
parent d3c6d73392
commit 8e1d5c1fdd
1241 changed files with 1492 additions and 176262 deletions

.github/update.log
View File

@@ -766,3 +766,4 @@ Update On Fri Sep 13 20:34:47 CEST 2024
 Update On Sat Sep 14 20:32:28 CEST 2024
 Update On Sun Sep 15 20:33:53 CEST 2024
 Update On Mon Sep 16 20:36:05 CEST 2024
+Update On Tue Sep 17 20:34:06 CEST 2024

View File

@@ -505,17 +505,14 @@ func NewVless(option VlessOption) (*Vless, error) {
 	var addons *vless.Addons
 	if option.Network != "ws" && len(option.Flow) >= 16 {
 		option.Flow = option.Flow[:16]
-		switch option.Flow {
-		case vless.XRV:
-			log.Warnln("To use %s, ensure your server is upgrade to Xray-core v1.8.0+", vless.XRV)
-			addons = &vless.Addons{
-				Flow: option.Flow,
-			}
-		case vless.XRO, vless.XRD, vless.XRS:
-			log.Fatalln("Legacy XTLS protocol %s is deprecated and no longer supported", option.Flow)
-		default:
+		if option.Flow != vless.XRV {
 			return nil, fmt.Errorf("unsupported xtls flow type: %s", option.Flow)
 		}
+		log.Warnln("To use %s, ensure your server is upgrade to Xray-core v1.8.0+", vless.XRV)
+		addons = &vless.Addons{
+			Flow: option.Flow,
+		}
 	}

 	switch option.PacketEncoding {
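Read back as one block, the replacement logic rejects every flow other than xtls-rprx-vision up front, so the legacy XRO/XRD/XRS flows now fall into the same error path instead of hitting log.Fatalln. A self-contained sketch of that check (the constant value and helper name are assumptions made only so the sketch runs standalone; in mihomo the constant is vless.XRV and the warning uses log.Warnln):

```go
package main

import (
	"fmt"
	"log"
)

// Assumed value of the vless.XRV constant, used here only for illustration.
const xrvFlow = "xtls-rprx-vision"

// validateFlow mirrors the simplified branch above: truncate to 16 characters,
// reject anything that is not xtls-rprx-vision, and warn about the server requirement.
func validateFlow(flow string) (string, error) {
	if len(flow) >= 16 {
		flow = flow[:16]
		if flow != xrvFlow {
			return "", fmt.Errorf("unsupported xtls flow type: %s", flow)
		}
		log.Printf("To use %s, ensure your server is upgraded to Xray-core v1.8.0+", xrvFlow)
	}
	return flow, nil
}

func main() {
	if _, err := validateFlow("xtls-rprx-origin"); err != nil {
		fmt.Println(err) // unsupported xtls flow type: xtls-rprx-origin
	}
}
```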

View File

@@ -696,7 +696,7 @@ dependencies = [
"bitflags 2.6.0", "bitflags 2.6.0",
"cexpr", "cexpr",
"clang-sys", "clang-sys",
"itertools 0.12.1", "itertools 0.11.0",
"lazy_static", "lazy_static",
"lazycell", "lazycell",
"log", "log",
@@ -7322,9 +7322,9 @@ checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1"
[[package]] [[package]]
name = "tauri" name = "tauri"
version = "2.0.0-rc.14" version = "2.0.0-rc.15"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fa32e2741bda64c1da02d93252a466893180052fc6de61c8803b0356504b70d" checksum = "eb3c3b1c7ac5b72d59da307b84af900a0098c74c9d7369f65018cd8ec0eb50fb"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"bytes", "bytes",
@@ -7360,7 +7360,7 @@ dependencies = [
"tauri-macros", "tauri-macros",
"tauri-runtime", "tauri-runtime",
"tauri-runtime-wry", "tauri-runtime-wry",
"tauri-utils 2.0.0-rc.11", "tauri-utils 2.0.0-rc.12",
"thiserror", "thiserror",
"tokio", "tokio",
"tray-icon", "tray-icon",
@@ -7374,9 +7374,9 @@ dependencies = [
[[package]] [[package]]
name = "tauri-build" name = "tauri-build"
version = "2.0.0-rc.11" version = "2.0.0-rc.12"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "148441d64674b2885c1ba5baf3ae61662bb8753859ffcfb541962cbc6b847f39" checksum = "6ff5713e81e02e0b99f5219b275abbd7d2c0cc0f30180e25b1b650e08feeac63"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"cargo_toml", "cargo_toml",
@@ -7388,7 +7388,7 @@ dependencies = [
"semver 1.0.23", "semver 1.0.23",
"serde", "serde",
"serde_json", "serde_json",
"tauri-utils 2.0.0-rc.11", "tauri-utils 2.0.0-rc.12",
"tauri-winres", "tauri-winres",
"toml 0.8.2", "toml 0.8.2",
"walkdir", "walkdir",
@@ -7396,9 +7396,9 @@ dependencies = [
[[package]] [[package]]
name = "tauri-codegen" name = "tauri-codegen"
version = "2.0.0-rc.11" version = "2.0.0-rc.12"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72a15c3f9282c82871c69ddb65d02ae552738bbac848c8adcab521bf14d8b9e6" checksum = "5370f2591dcc93d4ff08d9dd168f5097f79b34e859883586a409c627544190e3"
dependencies = [ dependencies = [
"base64 0.22.1", "base64 0.22.1",
"brotli", "brotli",
@@ -7413,7 +7413,7 @@ dependencies = [
"serde_json", "serde_json",
"sha2 0.10.8", "sha2 0.10.8",
"syn 2.0.77", "syn 2.0.77",
"tauri-utils 2.0.0-rc.11", "tauri-utils 2.0.0-rc.12",
"thiserror", "thiserror",
"time", "time",
"url", "url",
@@ -7423,16 +7423,16 @@ dependencies = [
[[package]] [[package]]
name = "tauri-macros" name = "tauri-macros"
version = "2.0.0-rc.10" version = "2.0.0-rc.11"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f12d1aa317bec56f78388cf6012d788876d838595a48f95cbd7835642db356a0" checksum = "19442dc8ee002ab1926586f6aecb90114f3a1226766008b0c9ac2d9fec9eeb7e"
dependencies = [ dependencies = [
"heck 0.5.0", "heck 0.5.0",
"proc-macro2", "proc-macro2",
"quote", "quote",
"syn 2.0.77", "syn 2.0.77",
"tauri-codegen", "tauri-codegen",
"tauri-utils 2.0.0-rc.11", "tauri-utils 2.0.0-rc.12",
] ]
[[package]] [[package]]
@@ -7447,7 +7447,7 @@ dependencies = [
"schemars", "schemars",
"serde", "serde",
"serde_json", "serde_json",
"tauri-utils 2.0.0-rc.11", "tauri-utils 2.0.0-rc.12",
"toml 0.8.2", "toml 0.8.2",
"walkdir", "walkdir",
] ]
@@ -7627,9 +7627,9 @@ dependencies = [
[[package]] [[package]]
name = "tauri-runtime" name = "tauri-runtime"
version = "2.0.0-rc.11" version = "2.0.0-rc.12"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "389f78c8e8e6eff3897d8d9581087943b5976ea96a0ab5036be691f28c2b0df0" checksum = "c5f38d8aaa1e81d20e8e208e3e317f81b59fb75c530fbae8a90e72d02001d687"
dependencies = [ dependencies = [
"dpi", "dpi",
"gtk", "gtk",
@@ -7638,7 +7638,7 @@ dependencies = [
"raw-window-handle", "raw-window-handle",
"serde", "serde",
"serde_json", "serde_json",
"tauri-utils 2.0.0-rc.11", "tauri-utils 2.0.0-rc.12",
"thiserror", "thiserror",
"url", "url",
"windows 0.58.0", "windows 0.58.0",
@@ -7646,9 +7646,9 @@ dependencies = [
[[package]] [[package]]
name = "tauri-runtime-wry" name = "tauri-runtime-wry"
version = "2.0.0-rc.12" version = "2.0.0-rc.13"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e17625b7cf63958d53945e199391d11c9f195fb3d1cb8aeb64dc3084d0091b92" checksum = "cf1ef5171e14c8fe3b5a63e75004c20d057747bc3e7fdc5f8ded625f0b29f5c7"
dependencies = [ dependencies = [
"gtk", "gtk",
"http 1.1.0", "http 1.1.0",
@@ -7662,7 +7662,7 @@ dependencies = [
"softbuffer", "softbuffer",
"tao", "tao",
"tauri-runtime", "tauri-runtime",
"tauri-utils 2.0.0-rc.11", "tauri-utils 2.0.0-rc.12",
"url", "url",
"webkit2gtk", "webkit2gtk",
"webview2-com", "webview2-com",
@@ -7697,9 +7697,9 @@ dependencies = [
[[package]] [[package]]
name = "tauri-utils" name = "tauri-utils"
version = "2.0.0-rc.11" version = "2.0.0-rc.12"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3019641087c9039b57ebfca95fa42a93c07056845b7d8d57c8966061bcee83b4" checksum = "31fe4c9148e1b35225e1c00753f24b517ce00041d02eb4b4d6fd10613a47736c"
dependencies = [ dependencies = [
"brotli", "brotli",
"cargo_metadata", "cargo_metadata",

View File

@@ -11,7 +11,7 @@
"build": "tsc" "build": "tsc"
}, },
"dependencies": { "dependencies": {
"@tauri-apps/api": "2.0.0-rc.4", "@tauri-apps/api": "2.0.0-rc.5",
"ahooks": "3.8.1", "ahooks": "3.8.1",
"ofetch": "1.3.4", "ofetch": "1.3.4",
"react": "rc", "react": "rc",

View File

@@ -16,12 +16,12 @@
"@generouted/react-router": "1.19.6", "@generouted/react-router": "1.19.6",
"@juggle/resize-observer": "3.4.0", "@juggle/resize-observer": "3.4.0",
"@material/material-color-utilities": "0.3.0", "@material/material-color-utilities": "0.3.0",
"@mui/icons-material": "6.0.2", "@mui/icons-material": "6.1.0",
"@mui/lab": "6.0.0-beta.9", "@mui/lab": "6.0.0-beta.9",
"@mui/material": "6.0.2", "@mui/material": "6.1.0",
"@nyanpasu/interface": "workspace:^", "@nyanpasu/interface": "workspace:^",
"@nyanpasu/ui": "workspace:^", "@nyanpasu/ui": "workspace:^",
"@tauri-apps/api": "2.0.0-rc.4", "@tauri-apps/api": "2.0.0-rc.5",
"@types/json-schema": "7.0.15", "@types/json-schema": "7.0.15",
"ahooks": "3.8.1", "ahooks": "3.8.1",
"allotment": "1.20.2", "allotment": "1.20.2",
@@ -76,7 +76,7 @@
"unplugin-auto-import": "0.18.3", "unplugin-auto-import": "0.18.3",
"unplugin-icons": "0.19.3", "unplugin-icons": "0.19.3",
"validator": "13.12.0", "validator": "13.12.0",
"vite": "5.4.5", "vite": "5.4.6",
"vite-plugin-monaco-editor": "1.1.3", "vite-plugin-monaco-editor": "1.1.3",
"vite-plugin-sass-dts": "1.3.29", "vite-plugin-sass-dts": "1.3.29",
"vite-plugin-svgr": "4.2.0", "vite-plugin-svgr": "4.2.0",

View File

@@ -17,12 +17,12 @@
   },
   "dependencies": {
     "@material/material-color-utilities": "0.3.0",
-    "@mui/icons-material": "6.0.2",
+    "@mui/icons-material": "6.1.0",
     "@mui/lab": "6.0.0-beta.9",
-    "@mui/material": "6.0.2",
+    "@mui/material": "6.1.0",
     "@radix-ui/react-portal": "1.1.1",
     "@radix-ui/react-scroll-area": "1.1.0",
-    "@tauri-apps/api": "2.0.0-rc.4",
+    "@tauri-apps/api": "2.0.0-rc.5",
     "@types/d3": "7.4.3",
     "@types/react": "18.3.5",
     "@vitejs/plugin-react": "4.3.1",
@@ -34,7 +34,7 @@
     "react-error-boundary": "4.0.13",
     "react-i18next": "15.0.2",
     "react-use": "17.5.1",
-    "vite": "5.4.5",
+    "vite": "5.4.6",
     "vite-tsconfig-paths": "5.0.1"
   },
   "devDependencies": {

View File

@@ -59,7 +59,7 @@
"@commitlint/cli": "19.4.1", "@commitlint/cli": "19.4.1",
"@commitlint/config-conventional": "19.4.1", "@commitlint/config-conventional": "19.4.1",
"@ianvs/prettier-plugin-sort-imports": "4.3.1", "@ianvs/prettier-plugin-sort-imports": "4.3.1",
"@tauri-apps/cli": "2.0.0-rc.15", "@tauri-apps/cli": "2.0.0-rc.16",
"@types/fs-extra": "11.0.4", "@types/fs-extra": "11.0.4",
"@types/lodash-es": "4.17.12", "@types/lodash-es": "4.17.12",
"@types/node": "22.5.4", "@types/node": "22.5.4",

File diff suppressed because it is too large.

View File

@@ -30,7 +30,7 @@ For an explanation of the mieru protocol, see [mieru Proxy Protocol](./docs/prot
 1. The server software supports socks5 outbound (proxy chain).
 1. The client software supports Windows, Mac OS, Linux and Android. Android clients include
    - [NekoBox](https://github.com/MatsuriDayo/NekoBoxForAndroid) version 1.3.1 or above, with [mieru plugin](https://github.com/enfein/NekoBoxPlugins).
-   - [Exclave](https://github.com/dyhkwong/Exclave), with mieru plugin.
+   - [Exclave](https://github.com/dyhkwong/Exclave), with [mieru plugin](https://github.com/dyhkwong/Exclave/releases?q=mieru-plugin).
 1. If you need advanced features like global proxy or customized routing rules, you can use mieru as the backend of a proxy platform such as [Xray](https://github.com/XTLS/Xray-core) and [sing-box](https://github.com/SagerNet/sing-box).

 ## User Guide

View File

@@ -28,7 +28,7 @@ mieru circumvents censorship in a way similar to shadowsocks / v2ray: between the client and ...
 1. The server software supports socks5 outbound (chained proxies).
 1. The client software supports Windows, Mac OS, Linux and Android. Android clients include
    - [NekoBox](https://github.com/MatsuriDayo/NekoBoxForAndroid) version 1.3.1 or above, with the [mieru plugin](https://github.com/enfein/NekoBoxPlugins) installed.
-   - [Exclave](https://github.com/dyhkwong/Exclave), with the mieru plugin installed.
+   - [Exclave](https://github.com/dyhkwong/Exclave), with the [mieru plugin](https://github.com/dyhkwong/Exclave/releases?q=mieru-plugin) installed
 1. If you need advanced features such as global proxy or custom routing rules, you can use mieru as the backend of a proxy platform such as [Xray](https://github.com/XTLS/Xray-core) and [sing-box](https://github.com/SagerNet/sing-box).

 ## User Guide

View File

@@ -30,7 +30,7 @@ import (
pb "github.com/enfein/mieru/pkg/appctl/appctlpb" pb "github.com/enfein/mieru/pkg/appctl/appctlpb"
"github.com/enfein/mieru/pkg/log" "github.com/enfein/mieru/pkg/log"
"github.com/enfein/mieru/pkg/metrics" "github.com/enfein/mieru/pkg/metrics"
"github.com/enfein/mieru/pkg/protocolv2" "github.com/enfein/mieru/pkg/protocol"
"github.com/enfein/mieru/pkg/socks5" "github.com/enfein/mieru/pkg/socks5"
"github.com/enfein/mieru/pkg/stderror" "github.com/enfein/mieru/pkg/stderror"
"github.com/enfein/mieru/pkg/util" "github.com/enfein/mieru/pkg/util"
@@ -67,7 +67,7 @@ var (
clientSocks5ServerRef atomic.Pointer[socks5.Server] clientSocks5ServerRef atomic.Pointer[socks5.Server]
// clientMuxRef holds a pointer to client multiplexier. // clientMuxRef holds a pointer to client multiplexier.
clientMuxRef atomic.Pointer[protocolv2.Mux] clientMuxRef atomic.Pointer[protocol.Mux]
) )
func SetClientRPCServerRef(server *grpc.Server) { func SetClientRPCServerRef(server *grpc.Server) {
@@ -78,7 +78,7 @@ func SetClientSocks5ServerRef(server *socks5.Server) {
clientSocks5ServerRef.Store(server) clientSocks5ServerRef.Store(server)
} }
func SetClientMuxRef(mux *protocolv2.Mux) { func SetClientMuxRef(mux *protocol.Mux) {
clientMuxRef.Store(mux) clientMuxRef.Store(mux)
} }

View File

@@ -31,7 +31,7 @@ import (
"github.com/enfein/mieru/pkg/egress" "github.com/enfein/mieru/pkg/egress"
"github.com/enfein/mieru/pkg/log" "github.com/enfein/mieru/pkg/log"
"github.com/enfein/mieru/pkg/metrics" "github.com/enfein/mieru/pkg/metrics"
"github.com/enfein/mieru/pkg/protocolv2" "github.com/enfein/mieru/pkg/protocol"
"github.com/enfein/mieru/pkg/socks5" "github.com/enfein/mieru/pkg/socks5"
"github.com/enfein/mieru/pkg/stderror" "github.com/enfein/mieru/pkg/stderror"
"github.com/enfein/mieru/pkg/util" "github.com/enfein/mieru/pkg/util"
@@ -57,7 +57,7 @@ var (
socks5ServerRef atomic.Pointer[socks5.Server] socks5ServerRef atomic.Pointer[socks5.Server]
// serverMuxRef holds a pointer to server multiplexier. // serverMuxRef holds a pointer to server multiplexier.
serverMuxRef atomic.Pointer[protocolv2.Mux] serverMuxRef atomic.Pointer[protocol.Mux]
) )
func SetServerRPCServerRef(server *grpc.Server) { func SetServerRPCServerRef(server *grpc.Server) {
@@ -68,7 +68,7 @@ func SetSocks5Server(server *socks5.Server) {
socks5ServerRef.Store(server) socks5ServerRef.Store(server)
} }
func SetServerMuxRef(mux *protocolv2.Mux) { func SetServerMuxRef(mux *protocol.Mux) {
serverMuxRef.Store(mux) serverMuxRef.Store(mux)
} }
@@ -112,7 +112,7 @@ func (s *serverLifecycleService) Start(ctx context.Context, req *pb.Empty) (*pb.
SetAppStatus(pb.AppStatus_STARTING) SetAppStatus(pb.AppStatus_STARTING)
mux := protocolv2.NewMux(false).SetServerUsers(UserListToMap(config.GetUsers())) mux := protocol.NewMux(false).SetServerUsers(UserListToMap(config.GetUsers()))
SetServerMuxRef(mux) SetServerMuxRef(mux)
mtu := util.DefaultMTU mtu := util.DefaultMTU
if config.GetMtu() != 0 { if config.GetMtu() != 0 {
@@ -653,8 +653,8 @@ func ValidateFullServerConfig(config *pb.ServerConfig) error {
} }
// PortBindingsToUnderlayProperties converts port bindings to underlay properties. // PortBindingsToUnderlayProperties converts port bindings to underlay properties.
func PortBindingsToUnderlayProperties(portBindings []*pb.PortBinding, mtu int) ([]protocolv2.UnderlayProperties, error) { func PortBindingsToUnderlayProperties(portBindings []*pb.PortBinding, mtu int) ([]protocol.UnderlayProperties, error) {
endpoints := make([]protocolv2.UnderlayProperties, 0) endpoints := make([]protocol.UnderlayProperties, 0)
listenIP := net.ParseIP(util.AllIPAddr()) listenIP := net.ParseIP(util.AllIPAddr())
ipVersion := util.GetIPVersion(listenIP.String()) ipVersion := util.GetIPVersion(listenIP.String())
if listenIP == nil { if listenIP == nil {
@@ -666,17 +666,17 @@ func PortBindingsToUnderlayProperties(portBindings []*pb.PortBinding, mtu int) (
} }
n := len(portBindings) n := len(portBindings)
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
protocol := portBindings[i].GetProtocol() proto := portBindings[i].GetProtocol()
port := portBindings[i].GetPort() port := portBindings[i].GetPort()
switch protocol { switch proto {
case pb.TransportProtocol_TCP: case pb.TransportProtocol_TCP:
endpoint := protocolv2.NewUnderlayProperties(mtu, ipVersion, util.TCPTransport, &net.TCPAddr{IP: listenIP, Port: int(port)}, nil) endpoint := protocol.NewUnderlayProperties(mtu, ipVersion, util.TCPTransport, &net.TCPAddr{IP: listenIP, Port: int(port)}, nil)
endpoints = append(endpoints, endpoint) endpoints = append(endpoints, endpoint)
case pb.TransportProtocol_UDP: case pb.TransportProtocol_UDP:
endpoint := protocolv2.NewUnderlayProperties(mtu, ipVersion, util.UDPTransport, &net.UDPAddr{IP: listenIP, Port: int(port)}, nil) endpoint := protocol.NewUnderlayProperties(mtu, ipVersion, util.UDPTransport, &net.UDPAddr{IP: listenIP, Port: int(port)}, nil)
endpoints = append(endpoints, endpoint) endpoints = append(endpoints, endpoint)
default: default:
return []protocolv2.UnderlayProperties{}, fmt.Errorf(stderror.InvalidTransportProtocol) return []protocol.UnderlayProperties{}, fmt.Errorf(stderror.InvalidTransportProtocol)
} }
} }
return endpoints, nil return endpoints, nil

View File

@@ -37,7 +37,7 @@ import (
"github.com/enfein/mieru/pkg/http2socks" "github.com/enfein/mieru/pkg/http2socks"
"github.com/enfein/mieru/pkg/log" "github.com/enfein/mieru/pkg/log"
"github.com/enfein/mieru/pkg/metrics" "github.com/enfein/mieru/pkg/metrics"
"github.com/enfein/mieru/pkg/protocolv2" "github.com/enfein/mieru/pkg/protocol"
"github.com/enfein/mieru/pkg/socks5" "github.com/enfein/mieru/pkg/socks5"
"github.com/enfein/mieru/pkg/socks5client" "github.com/enfein/mieru/pkg/socks5client"
"github.com/enfein/mieru/pkg/stderror" "github.com/enfein/mieru/pkg/stderror"
@@ -459,7 +459,7 @@ var clientRunFunc = func(s []string) error {
} }
// Collect remote proxy addresses and password. // Collect remote proxy addresses and password.
mux := protocolv2.NewMux(true) mux := protocol.NewMux(true)
appctl.SetClientMuxRef(mux) appctl.SetClientMuxRef(mux)
var hashedPassword []byte var hashedPassword []byte
activeProfile, err := appctl.GetActiveProfileFromConfig(config, config.GetActiveProfile()) activeProfile, err := appctl.GetActiveProfileFromConfig(config, config.GetActiveProfile())
@@ -492,7 +492,7 @@ var clientRunFunc = func(s []string) error {
multiplexFactor = 3 multiplexFactor = 3
} }
mux = mux.SetClientMultiplexFactor(multiplexFactor) mux = mux.SetClientMultiplexFactor(multiplexFactor)
endpoints := make([]protocolv2.UnderlayProperties, 0) endpoints := make([]protocol.UnderlayProperties, 0)
resolver := &util.DNSResolver{} resolver := &util.DNSResolver{}
for _, serverInfo := range activeProfile.GetServers() { for _, serverInfo := range activeProfile.GetServers() {
var proxyHost string var proxyHost string
@@ -519,10 +519,10 @@ var clientRunFunc = func(s []string) error {
proxyPort := bindingInfo.GetPort() proxyPort := bindingInfo.GetPort()
switch bindingInfo.GetProtocol() { switch bindingInfo.GetProtocol() {
case appctlpb.TransportProtocol_TCP: case appctlpb.TransportProtocol_TCP:
endpoint := protocolv2.NewUnderlayProperties(mtu, ipVersion, util.TCPTransport, nil, &net.TCPAddr{IP: proxyIP, Port: int(proxyPort)}) endpoint := protocol.NewUnderlayProperties(mtu, ipVersion, util.TCPTransport, nil, &net.TCPAddr{IP: proxyIP, Port: int(proxyPort)})
endpoints = append(endpoints, endpoint) endpoints = append(endpoints, endpoint)
case appctlpb.TransportProtocol_UDP: case appctlpb.TransportProtocol_UDP:
endpoint := protocolv2.NewUnderlayProperties(mtu, ipVersion, util.UDPTransport, nil, &net.UDPAddr{IP: proxyIP, Port: int(proxyPort)}) endpoint := protocol.NewUnderlayProperties(mtu, ipVersion, util.UDPTransport, nil, &net.UDPAddr{IP: proxyIP, Port: int(proxyPort)})
endpoints = append(endpoints, endpoint) endpoints = append(endpoints, endpoint)
default: default:
return fmt.Errorf(stderror.InvalidTransportProtocol) return fmt.Errorf(stderror.InvalidTransportProtocol)

View File

@@ -35,7 +35,7 @@ import (
"github.com/enfein/mieru/pkg/http2socks" "github.com/enfein/mieru/pkg/http2socks"
"github.com/enfein/mieru/pkg/log" "github.com/enfein/mieru/pkg/log"
"github.com/enfein/mieru/pkg/metrics" "github.com/enfein/mieru/pkg/metrics"
"github.com/enfein/mieru/pkg/protocolv2" "github.com/enfein/mieru/pkg/protocol"
"github.com/enfein/mieru/pkg/socks5" "github.com/enfein/mieru/pkg/socks5"
"github.com/enfein/mieru/pkg/stderror" "github.com/enfein/mieru/pkg/stderror"
"github.com/enfein/mieru/pkg/util" "github.com/enfein/mieru/pkg/util"
@@ -404,7 +404,7 @@ var serverRunFunc = func(s []string) error {
if err = appctl.ValidateFullServerConfig(config); err == nil { if err = appctl.ValidateFullServerConfig(config); err == nil {
appctl.SetAppStatus(appctlpb.AppStatus_STARTING) appctl.SetAppStatus(appctlpb.AppStatus_STARTING)
mux := protocolv2.NewMux(false).SetServerUsers(appctl.UserListToMap(config.GetUsers())) mux := protocol.NewMux(false).SetServerUsers(appctl.UserListToMap(config.GetUsers()))
appctl.SetServerMuxRef(mux) appctl.SetServerMuxRef(mux)
mtu := util.DefaultMTU mtu := util.DefaultMTU
if config.GetMtu() != 0 { if config.GetMtu() != 0 {

View File

@@ -53,9 +53,6 @@ const (
 	// Allow 1 extra outstanding byte for each byte acknowledged.
 	stateConservation
-	// Allow 1.5 extra outstanding bytes for each byte acknowledged.
-	stateMediumGrowth
 	// Allow 2 extra outstanding bytes for each byte acknowledged (slow start).
 	stateGrowth
 )
@@ -279,6 +276,7 @@ type BBRSender struct {
 	minRTTSinceLastProbeRTT time.Duration
 }

+// NewBBRSender constructs a new BBR sender object.
 func NewBBRSender(loggingContext string, rttStats *RTTStats) *BBRSender {
 	s := &BBRSender{
 		loggingContext: loggingContext,
@@ -309,6 +307,7 @@ func NewBBRSender(loggingContext string, rttStats *RTTStats) *BBRSender {
 	return s
 }

+// OnPacketSent updates BBR sender state when a packet is being sent.
 func (b *BBRSender) OnPacketSent(sentTime time.Time, bytesInFlight int64, packetNumber int64, bytes int64, hasRetransmittableData bool) {
 	b.mu.Lock()
 	defer b.mu.Unlock()
@@ -325,50 +324,13 @@ func (b *BBRSender) OnPacketSent(sentTime time.Time, bytesInFlight int64, packetNumber int64, bytes int64, hasRetransmittableData bool) {
 	}

 	b.sampler.OnPacketSent(sentTime, packetNumber, bytes, bytesInFlight, hasRetransmittableData)
-	b.pacer.OnPacketSent(sentTime, bytes, b.PacingRate(bytesInFlight))
+	b.pacer.OnPacketSent(sentTime, bytes, b.getPacingRate())
 	if log.IsLevelEnabled(log.TraceLevel) {
-		log.Tracef("[BBRSender %s] OnPacketSent(bytesInFlight=%d, packetNumber=%d, bytes=%d), pacingRate=%d => pacingBudget=%d", b.loggingContext, bytesInFlight, packetNumber, bytes, b.PacingRate(bytesInFlight), b.pacer.Budget(sentTime, b.PacingRate(bytesInFlight)))
+		log.Tracef("[BBRSender %s] OnPacketSent(bytesInFlight=%d, packetNumber=%d, bytes=%d), pacingRate=%d => pacingBudget=%d", b.loggingContext, bytesInFlight, packetNumber, bytes, b.getPacingRate(), b.pacer.Budget(sentTime, b.getPacingRate()))
 	}
 }

-func (b *BBRSender) CanSend(bytesInFlight, bytes int64) bool {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	pacerCanSend := b.pacer.CanSend(time.Now(), bytes, b.PacingRate(bytesInFlight))
-	return bytesInFlight < b.GetCongestionWindow() && pacerCanSend
-}
-
-func (b *BBRSender) PacingRate(bytesInFlight int64) int64 {
-	if b.pacingRate <= 0 {
-		return int64(highGain * float64(BandwidthFromBytesAndTimeDelta(b.initialCongestionWindow, b.GetMinRTT())))
-	}
-	return b.pacingRate
-}
-
-func (b *BBRSender) BandwidthEstimate() int64 {
-	return b.maxBandwidth.GetBest()
-}
-
-func (b *BBRSender) GetCongestionWindow() int64 {
-	if b.mode == modeProbeRTT {
-		return b.ProbeRTTCongestionWindow()
-	}
-	if b.InRecovery() && !b.rateBasedRecovery && !(b.mode == modeStartUp && b.rateBasedStartup) {
-		return mathext.Min(b.congestionWindow, b.recoveryWindow)
-	}
-	return b.congestionWindow
-}
-
-func (b *BBRSender) InRecovery() bool {
-	return b.recoveryState != stateNotInRecovery
-}
-
-func (b *BBRSender) IsProbingForMoreBandwidth() bool {
-	return (b.mode == modeProbeBW && b.pacingGain > 1) || b.mode == modeStartUp
-}
-
+// OnCongestionEvent updates BBR sender state from acknowledged and lost packets.
 func (b *BBRSender) OnCongestionEvent(priorInFlight int64, eventTime time.Time, ackedPackets []AckedPacketInfo, lostPackets []LostPacketInfo) {
 	b.mu.Lock()
 	defer b.mu.Unlock()
@@ -386,20 +348,20 @@ func (b *BBRSender) OnCongestionEvent(priorInFlight int64, eventTime time.Time,
 	}
 	b.bytesInFlight = mathext.Max(b.bytesInFlight, 0)

-	b.DiscardLostPackets(lostPackets)
+	b.discardLostPackets(lostPackets)

 	// Input the new data into the BBR model of the connection.
 	if len(ackedPackets) > 0 {
 		lastAckedPacket := ackedPackets[len(ackedPackets)-1].PacketNumber
-		isRoundStart = b.UpdateRoundTripCounter(lastAckedPacket)
-		isMinRTTExpired = b.UpdateBandwidthAndMinRTT(eventTime, ackedPackets)
-		b.UpdateRecoveryState(lastAckedPacket, len(lostPackets) > 0, isRoundStart)
+		isRoundStart = b.updateRoundTripCounter(lastAckedPacket)
+		isMinRTTExpired = b.updateBandwidthAndMinRTT(eventTime, ackedPackets)
+		b.updateRecoveryState(lastAckedPacket, len(lostPackets) > 0, isRoundStart)

 		bytesAcked := b.sampler.TotalBytesAcked() - totalBytesAckedBefore
-		b.UpdateAckAggregationBytes(eventTime, bytesAcked)
+		b.updateAckAggregationBytes(eventTime, bytesAcked)
 		if b.maxAggregationBytesMultiplier > 0 {
-			if b.bytesInFlight <= int64(1.25*float64(b.GetTargetCongestionWindow(b.pacingGain))) {
+			if b.bytesInFlight <= int64(1.25*float64(b.getTargetCongestionWindow(b.pacingGain))) {
 				b.bytesAckedSinceQueueDrained = 0
 			} else {
 				b.bytesAckedSinceQueueDrained += bytesAcked
@@ -409,17 +371,17 @@

 	// Handle logic specific to PROBE BW mode.
 	if b.mode == modeProbeBW {
-		b.UpdateGainCyclePhase(eventTime, priorInFlight, len(lostPackets) > 0)
+		b.updateGainCyclePhase(eventTime, priorInFlight, len(lostPackets) > 0)
 	}

 	// Handle logic specific to STARTUP and DRAIN modes.
 	if isRoundStart && !b.isAtFullBandwidth {
-		b.CheckIfFullBandwidthReached()
+		b.checkIfFullBandwidthReached()
 	}
-	b.MaybeExitStartupOrDrain(eventTime)
+	b.maybeExitStartupOrDrain(eventTime)

 	// Handle logic specific to PROBE RTT.
-	b.MaybeEnterOrExitProbeRTT(eventTime, isRoundStart, isMinRTTExpired)
+	b.maybeEnterOrExitProbeRTT(eventTime, isRoundStart, isMinRTTExpired)

 	// Calculate number of packets acked and lost.
 	bytesAcked := b.sampler.TotalBytesAcked() - totalBytesAckedBefore
@@ -430,9 +392,9 @@

 	// After the model is updated, recalculate the pacing rate and congestion
 	// window.
-	b.CalculatePacingRate()
-	b.CalculateCongestionWindow(bytesAcked)
-	b.CalculateRecoveryWindow(bytesAcked, bytesLost)
+	b.calculatePacingRate()
+	b.calculateCongestionWindow(bytesAcked)
+	b.calculateRecoveryWindow(bytesAcked, bytesLost)

 	// Cleanup internal state.
 	// This is where we clean up obsolete (acked or lost) packets from the bandwidth sampler.
@@ -448,15 +410,62 @@ func (b *BBRSender) OnCongestionEvent(priorInFlight int64, eventTime time.Time,
 	b.sampler.RemoveObsoletePackets(leastUnacked)
 }

-func (b *BBRSender) GetMinRTT() time.Duration {
+// OnApplicationLimited updates BBR sender state when there is no application
+// data to send.
+func (b *BBRSender) OnApplicationLimited(bytesInFlight int64) {
+	if bytesInFlight >= b.getCongestionWindow() {
+		return
+	}
+	b.appLimitedSinceLastProbeRTT = true
+	b.sampler.OnAppLimited()
+}
+
+// CanSend returns true if a packet can be sent based on the congestion window.
+func (b *BBRSender) CanSend(bytesInFlight, bytes int64) bool {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	pacerCanSend := b.pacer.CanSend(time.Now(), bytes, b.getPacingRate())
+	return bytesInFlight < b.getCongestionWindow() && pacerCanSend
+}
+
+// BandwidthEstimate returns the estimate of maximum bandwidth.
+func (b *BBRSender) BandwidthEstimate() int64 {
+	return b.maxBandwidth.GetBest()
+}
+
+func (b *BBRSender) getPacingRate() int64 {
+	if b.pacingRate <= 0 {
+		return int64(highGain * float64(BandwidthFromBytesAndTimeDelta(b.initialCongestionWindow, b.getMinRTT())))
+	}
+	return b.pacingRate
+}
+
+func (b *BBRSender) getCongestionWindow() int64 {
+	if b.mode == modeProbeRTT {
+		return b.probeRTTCongestionWindow()
+	}
+	if b.inRecovery() && !b.rateBasedRecovery && !(b.mode == modeStartUp && b.rateBasedStartup) {
+		return mathext.Min(b.congestionWindow, b.recoveryWindow)
+	}
+	return b.congestionWindow
+}
+
+func (b *BBRSender) inRecovery() bool {
+	return b.recoveryState != stateNotInRecovery
+}
+
+func (b *BBRSender) getMinRTT() time.Duration {
 	if b.minRTT > 0 {
 		return b.minRTT
 	}
 	return defaultInitialRTT
 }

-func (b *BBRSender) GetTargetCongestionWindow(gain float64) int64 {
-	bdp := (b.GetMinRTT().Nanoseconds() * b.BandwidthEstimate()) / int64(time.Second)
+func (b *BBRSender) getTargetCongestionWindow(gain float64) int64 {
+	bdp := (b.getMinRTT().Nanoseconds() * b.BandwidthEstimate()) / int64(time.Second)
 	congestionWindow := int64(gain * float64(bdp))

 	// BDP estimate will be zero if no bandwidth samples are available yet.
@@ -467,11 +476,11 @@ func (b *BBRSender) GetTargetCongestionWindow(gain float64) int64 {
 	return mathext.Max(congestionWindow, b.minCongestionWindow)
 }

-func (b *BBRSender) ProbeRTTCongestionWindow() int64 {
+func (b *BBRSender) probeRTTCongestionWindow() int64 {
 	return b.minCongestionWindow
 }

-func (b *BBRSender) EnterStartupMode() {
+func (b *BBRSender) enterStartupMode() {
 	b.mode = modeStartUp
 	if log.IsLevelEnabled(log.TraceLevel) {
 		log.Tracef("[BBRSender %s] Enter start up mode", b.loggingContext)
@@ -480,7 +489,7 @@ func (b *BBRSender) EnterStartupMode() {
 	b.congestionWindowGain = highGain
 }

-func (b *BBRSender) EnterProbeBandwidthMode(now time.Time) {
+func (b *BBRSender) enterProbeBandwidthMode(now time.Time) {
 	b.mode = modeProbeBW
 	if log.IsLevelEnabled(log.TraceLevel) {
 		log.Tracef("[BBRSender %s] Enter probe bandwidth mode", b.loggingContext)
@@ -498,13 +507,13 @@ func (b *BBRSender) EnterProbeBandwidthMode(now time.Time) {
 	b.pacingGain = pacingGainList[cycleOffset]
 }

-func (b *BBRSender) DiscardLostPackets(lostPackets []LostPacketInfo) {
+func (b *BBRSender) discardLostPackets(lostPackets []LostPacketInfo) {
 	for _, lost := range lostPackets {
 		b.sampler.OnPacketLost(lost.PacketNumber)
 	}
 }

-func (b *BBRSender) UpdateRoundTripCounter(lastAckedPacket int64) bool {
+func (b *BBRSender) updateRoundTripCounter(lastAckedPacket int64) bool {
 	if lastAckedPacket > b.currentRoundTripEnd {
 		b.roundTripCount++
 		b.currentRoundTripEnd = lastAckedPacket
@@ -513,7 +522,7 @@ func (b *BBRSender) UpdateRoundTripCounter(lastAckedPacket int64) bool {
 	return false
 }

-func (b *BBRSender) UpdateBandwidthAndMinRTT(now time.Time, ackedPackets []AckedPacketInfo) bool {
+func (b *BBRSender) updateBandwidthAndMinRTT(now time.Time, ackedPackets []AckedPacketInfo) bool {
 	sampleMinRTT := infDuration
 	for _, acked := range ackedPackets {
 		bandwidthSample := b.sampler.OnPacketAcknowledged(now, acked.PacketNumber)
@@ -545,16 +554,16 @@ func (b *BBRSender) UpdateBandwidthAndMinRTT(now time.Time, ackedPackets []AckedPacketInfo) bool {
 	return minRTTExpired
 }

-func (b *BBRSender) UpdateGainCyclePhase(now time.Time, priorInFlight int64, hasLosses bool) {
+func (b *BBRSender) updateGainCyclePhase(now time.Time, priorInFlight int64, hasLosses bool) {
 	// In most cases, the cycle is advanced after an RTT passes.
-	shouldAdvanceGainCycling := now.Sub(b.lastCycleStart) > b.GetMinRTT()
+	shouldAdvanceGainCycling := now.Sub(b.lastCycleStart) > b.getMinRTT()

 	// If the pacing gain is above 1.0, the connection is trying to probe the
 	// bandwidth by increasing the number of bytes in flight to at least
 	// pacing gain * BDP. Make sure that it actually reaches the target, as long
 	// as there are no losses suggesting that the buffers are not able to hold
 	// that much.
-	if b.pacingGain > 1.0 && !hasLosses && priorInFlight < b.GetTargetCongestionWindow(b.pacingGain) {
+	if b.pacingGain > 1.0 && !hasLosses && priorInFlight < b.getTargetCongestionWindow(b.pacingGain) {
 		shouldAdvanceGainCycling = false
 	}
@@ -562,7 +571,7 @@ func (b *BBRSender) UpdateGainCyclePhase(now time.Time, priorInFlight int64, hasLosses bool) {
 	// queue which could have been incurred by probing prior to it. If the number
 	// of bytes in flight falls down to the estimated BDP value earlier, conclude
 	// that the queue has been successfully drained and exit this cycle early.
-	if b.pacingGain < 1.0 && priorInFlight <= b.GetTargetCongestionWindow(1.0) {
+	if b.pacingGain < 1.0 && priorInFlight <= b.getTargetCongestionWindow(1.0) {
 		shouldAdvanceGainCycling = true
 	}
@@ -573,7 +582,7 @@ func (b *BBRSender) UpdateGainCyclePhase(now time.Time, priorInFlight int64, hasLosses bool) {
 	}
 }

-func (b *BBRSender) CheckIfFullBandwidthReached() {
+func (b *BBRSender) checkIfFullBandwidthReached() {
 	if b.lastSampleIsAppLimited {
 		return
 	}
@@ -586,12 +595,12 @@ func (b *BBRSender) CheckIfFullBandwidthReached() {
 	}

 	b.roundsWithoutBandwidthGain++
-	if b.roundsWithoutBandwidthGain >= b.numStartupRTTs || (b.exitStartupOnLoss && b.InRecovery()) {
+	if b.roundsWithoutBandwidthGain >= b.numStartupRTTs || (b.exitStartupOnLoss && b.inRecovery()) {
 		b.isAtFullBandwidth = true
 	}
 }

-func (b *BBRSender) MaybeExitStartupOrDrain(now time.Time) {
+func (b *BBRSender) maybeExitStartupOrDrain(now time.Time) {
 	if b.mode == modeStartUp && b.isAtFullBandwidth {
 		b.mode = modeDrain
 		if log.IsLevelEnabled(log.TraceLevel) {
@@ -601,12 +610,12 @@ func (b *BBRSender) MaybeExitStartupOrDrain(now time.Time) {
 		b.congestionWindowGain = highGain
 	}

-	if b.mode == modeDrain && b.bytesInFlight <= b.GetTargetCongestionWindow(1) {
-		b.EnterProbeBandwidthMode(now)
+	if b.mode == modeDrain && b.bytesInFlight <= b.getTargetCongestionWindow(1) {
+		b.enterProbeBandwidthMode(now)
 	}
 }

-func (b *BBRSender) MaybeEnterOrExitProbeRTT(now time.Time, isRoundStart bool, minRTTExpired bool) {
+func (b *BBRSender) maybeEnterOrExitProbeRTT(now time.Time, isRoundStart bool, minRTTExpired bool) {
 	if minRTTExpired && !b.exitingQuiescence && b.mode != modeProbeRTT {
 		b.mode = modeProbeRTT
 		if log.IsLevelEnabled(log.TraceLevel) {
@@ -623,7 +632,7 @@ func (b *BBRSender) MaybeEnterOrExitProbeRTT(now time.Time, isRoundStart bool, minRTTExpired bool) {
 		if b.exitProbeRTTAt.IsZero() {
 			// If the window has reached the appropriate size, schedule exiting
 			// PROBE RTT.
-			if b.bytesInFlight < b.ProbeRTTCongestionWindow()+maxDatagramSize {
+			if b.bytesInFlight < b.probeRTTCongestionWindow()+maxDatagramSize {
 				b.exitProbeRTTAt = now.Add(probeRTTTime)
 				b.probeRTTRoundPassed = false
 			}
@@ -634,9 +643,9 @@ func (b *BBRSender) MaybeEnterOrExitProbeRTT(now time.Time, isRoundStart bool, minRTTExpired bool) {
 			if now.After(b.exitProbeRTTAt) && b.probeRTTRoundPassed {
 				b.minRTTTimestamp = now
 				if !b.isAtFullBandwidth {
-					b.EnterStartupMode()
+					b.enterStartupMode()
 				} else {
-					b.EnterProbeBandwidthMode(now)
+					b.enterProbeBandwidthMode(now)
 				}
 			}
 		}
@@ -645,7 +654,7 @@ func (b *BBRSender) MaybeEnterOrExitProbeRTT(now time.Time, isRoundStart bool, minRTTExpired bool) {
 	b.exitingQuiescence = false
 }

-func (b *BBRSender) UpdateRecoveryState(lastAckedPacket int64, hasLosses bool, isRoundStart bool) {
+func (b *BBRSender) updateRecoveryState(lastAckedPacket int64, hasLosses bool, isRoundStart bool) {
 	// Exit recovery when there are no losses for a round.
 	if hasLosses {
 		b.endRecoveryAt = b.lastSentPacket
@@ -667,8 +676,6 @@ func (b *BBRSender) UpdateRecoveryState(lastAckedPacket int64, hasLosses bool, isRoundStart bool) {
 			b.currentRoundTripEnd = b.lastSentPacket
 		}
 	case stateConservation:
-		fallthrough
-	case stateMediumGrowth:
 		if isRoundStart {
 			b.recoveryState = stateGrowth
 		}
@@ -681,7 +688,7 @@ func (b *BBRSender) UpdateRecoveryState(lastAckedPacket int64, hasLosses bool, isRoundStart bool) {
 	}
 }

-func (b *BBRSender) UpdateAckAggregationBytes(ackTime time.Time, newlyAckedBytes int64) {
+func (b *BBRSender) updateAckAggregationBytes(ackTime time.Time, newlyAckedBytes int64) {
 	// Compute how many bytes are expected to be delivered, assuming max bandwidth
 	// is correct.
 	expectedBytesAcked := b.maxBandwidth.GetBest() * int64(ackTime.Sub(b.aggregationEpochStartTime)) / int64(time.Second)
@@ -700,13 +707,13 @@ func (b *BBRSender) UpdateAckAggregationBytes(ackTime time.Time, newlyAckedBytes int64) {
 	b.maxAckHeight.Update(b.aggregationEpochBytes-expectedBytesAcked, b.roundTripCount)
 }

-func (b *BBRSender) CalculatePacingRate() {
+func (b *BBRSender) calculatePacingRate() {
 	if b.BandwidthEstimate() <= 0 {
 		return
 	}

 	targetRate := int64(b.pacingGain * float64(b.BandwidthEstimate()))
-	if b.rateBasedRecovery && b.InRecovery() {
+	if b.rateBasedRecovery && b.inRecovery() {
 		b.pacingRate = int64(b.pacingGain * float64(b.maxBandwidth.GetThirdBest()))
 	}

 	if b.isAtFullBandwidth {
@@ -731,12 +738,12 @@ func (b *BBRSender) CalculatePacingRate() {
 	b.pacingRate = mathext.Max(b.pacingRate, targetRate)
 }

-func (b *BBRSender) CalculateCongestionWindow(bytesAcked int64) {
+func (b *BBRSender) calculateCongestionWindow(bytesAcked int64) {
 	if b.mode == modeProbeRTT {
 		return
 	}

-	targetWindow := b.GetTargetCongestionWindow(b.congestionWindowGain)
+	targetWindow := b.getTargetCongestionWindow(b.congestionWindowGain)
 	if log.IsLevelEnabled(log.TraceLevel) {
 		log.Tracef("[BBRSender %s] targetCongestionWindow=%d", b.loggingContext, targetWindow)
 	}
@@ -756,7 +763,7 @@ func (b *BBRSender) CalculateCongestionWindow(bytesAcked int64) {
 	b.congestionWindow = mathext.Min(b.congestionWindow, b.maxCongestionWindow)
 }

-func (b *BBRSender) CalculateRecoveryWindow(bytesAcked int64, bytesLost int64) {
+func (b *BBRSender) calculateRecoveryWindow(bytesAcked int64, bytesLost int64) {
 	if b.rateBasedRecovery || (b.mode == modeStartUp && b.rateBasedStartup) {
 		return
 	}
@@ -781,11 +788,8 @@ func (b *BBRSender) CalculateRecoveryWindow(bytesAcked int64, bytesLost int64) {
 	// In CONSERVATION mode, just subtracting losses is sufficient. In GROWTH,
 	// release additional bytesAcked to achieve a slow-start-like behavior.
-	// In MEDIUM_GROWTH, release bytesAcked / 2 to split the difference.
 	if b.recoveryState == stateGrowth {
 		b.recoveryWindow += bytesAcked
-	} else if b.recoveryState == stateMediumGrowth {
-		b.recoveryWindow += bytesAcked / 2
 	}
 	// Sanity checks. Ensure that we always allow to send at least
@@ -793,12 +797,3 @@ func (b *BBRSender) CalculateRecoveryWindow(bytesAcked int64, bytesLost int64) {
 	b.recoveryWindow = mathext.Max(b.recoveryWindow, b.bytesInFlight+bytesAcked)
 	b.recoveryWindow = mathext.Max(b.recoveryWindow, b.minCongestionWindow)
 }
-
-func (b *BBRSender) OnApplicationLimited(bytesInFlight int64) {
-	if bytesInFlight >= b.GetCongestionWindow() {
-		return
-	}
-	b.appLimitedSinceLastProbeRTT = true
-	b.sampler.OnAppLimited()
-}
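Taken together, the BBR hunks above unexport the internal helpers (pacing rate, congestion window, recovery and mode transitions), drop the MEDIUM_GROWTH recovery state, and add doc comments to the entry points. A sketch of the exported surface visible in these hunks after the refactor; the interface and the stand-in packet-info types below are illustrative only, not something mieru itself defines:

```go
package main

import "time"

// Stand-in types; the real AckedPacketInfo / LostPacketInfo live in mieru's congestion package.
type AckedPacketInfo struct{ PacketNumber int64 }
type LostPacketInfo struct{ PacketNumber int64 }

// congestionController lists the BBRSender methods that remain exported in the hunks above.
type congestionController interface {
	OnPacketSent(sentTime time.Time, bytesInFlight, packetNumber, bytes int64, hasRetransmittableData bool)
	OnCongestionEvent(priorInFlight int64, eventTime time.Time, acked []AckedPacketInfo, lost []LostPacketInfo)
	OnApplicationLimited(bytesInFlight int64)
	CanSend(bytesInFlight, bytes int64) bool
	BandwidthEstimate() int64
}

func main() {}
```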

View File

@@ -15,7 +15,7 @@
 //go:build !android

-package protocolv2
+package protocol

 import "time"

View File

@@ -15,7 +15,7 @@
 //go:build android

-package protocolv2
+package protocol

 import "time"

View File

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.

-package protocolv2
+package protocol

 import (
 	"encoding/binary"

View File

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.

-package protocolv2
+package protocol

 import (
 	mrand "math/rand"

View File

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.

-package protocolv2
+package protocol

 import (
 	"context"

View File

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.

-package protocolv2
+package protocol

 import (
 	"bytes"

View File

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>

-package protocolv2
+package protocol

 import (
 	crand "crypto/rand"

View File

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>

-package protocolv2
+package protocol

 import (
 	"testing"

View File

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.

-package protocolv2
+package protocol

 import (
 	"sync"

View File

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.

-package protocolv2
+package protocol

 import (
 	"fmt"

View File

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.

-package protocolv2
+package protocol

 import (
 	mrand "math/rand"

View File

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.

-package protocolv2
+package protocol

 import (
 	"context"

View File

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>

-package protocolv2
+package protocol

 import (
 	"context"

View File

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>

-package protocolv2
+package protocol

 import (
 	"context"

View File

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>

-package protocolv2
+package protocol

 import (
 	"context"

View File

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>

-package protocolv2
+package protocol

 import (
 	"context"

View File

@@ -12,7 +12,7 @@ import (
 	"github.com/enfein/mieru/pkg/egress"
 	"github.com/enfein/mieru/pkg/log"
 	"github.com/enfein/mieru/pkg/metrics"
-	"github.com/enfein/mieru/pkg/protocolv2"
+	"github.com/enfein/mieru/pkg/protocol"
 	"github.com/enfein/mieru/pkg/stderror"
 	"github.com/enfein/mieru/pkg/util"
 )
@@ -40,7 +40,7 @@ var (
 // Config is used to setup and configure a socks5 server.
 type Config struct {
 	// Mieru proxy multiplexer.
-	ProxyMux *protocolv2.Mux
+	ProxyMux *protocol.Mux
 	// Egress controller.
 	EgressController egress.Controller

View File

@@ -505,17 +505,14 @@ func NewVless(option VlessOption) (*Vless, error) {
 	var addons *vless.Addons
 	if option.Network != "ws" && len(option.Flow) >= 16 {
 		option.Flow = option.Flow[:16]
-		switch option.Flow {
-		case vless.XRV:
-			log.Warnln("To use %s, ensure your server is upgrade to Xray-core v1.8.0+", vless.XRV)
-			addons = &vless.Addons{
-				Flow: option.Flow,
-			}
-		case vless.XRO, vless.XRD, vless.XRS:
-			log.Fatalln("Legacy XTLS protocol %s is deprecated and no longer supported", option.Flow)
-		default:
+		if option.Flow != vless.XRV {
 			return nil, fmt.Errorf("unsupported xtls flow type: %s", option.Flow)
 		}
+		log.Warnln("To use %s, ensure your server is upgrade to Xray-core v1.8.0+", vless.XRV)
+		addons = &vless.Addons{
+			Flow: option.Flow,
+		}
 	}

 	switch option.PacketEncoding {

View File

@@ -619,22 +619,26 @@ function gen_config_server(node)
 	end

 	if node.protocol == "tuic" then
-		tls.alpn = (node.tuic_alpn and node.tuic_alpn ~= "") and {
-			node.tuic_alpn
-		} or nil
-		protocol_table = {
-			users = {
-				{
-					name = "user1",
-					uuid = node.uuid,
+		if node.uuid then
+			local users = {}
+			for i = 1, #node.uuid do
+				users[i] = {
+					name = node.uuid[i],
+					uuid = node.uuid[i],
 					password = node.password
 				}
-			},
-			congestion_control = node.tuic_congestion_control or "cubic",
-			zero_rtt_handshake = (node.tuic_zero_rtt_handshake == "1") and true or false,
-			heartbeat = node.tuic_heartbeat .. "s",
-			tls = tls
-		}
+			end
+			tls.alpn = (node.tuic_alpn and node.tuic_alpn ~= "") and {
+				node.tuic_alpn
+			} or nil
+			protocol_table = {
+				users = users,
+				congestion_control = node.tuic_congestion_control or "cubic",
+				zero_rtt_handshake = (node.tuic_zero_rtt_handshake == "1") and true or false,
+				heartbeat = node.tuic_heartbeat .. "s",
+				tls = tls
+			}
+		end
 	end

 	if node.protocol == "hysteria2" then
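The rewritten block treats node.uuid as a list and emits one TUIC user per entry, using the UUID for both the name and uuid fields while every user shares node.password. The same transformation as a standalone sketch (Go is used here purely to illustrate the data shape; the field names simply mirror the Lua table):

```go
package main

import "fmt"

// tuicUser mirrors one entry of the users table built by the Lua loop above.
type tuicUser struct {
	Name     string
	UUID     string
	Password string
}

// usersFromUUIDs builds one user per UUID, all sharing the node-level password.
func usersFromUUIDs(uuids []string, password string) []tuicUser {
	users := make([]tuicUser, 0, len(uuids))
	for _, id := range uuids {
		users = append(users, tuicUser{Name: id, UUID: id, Password: password})
	}
	return users
}

func main() {
	fmt.Println(usersFromUUIDs([]string{"uuid-a", "uuid-b"}, "secret"))
}
```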

View File

@@ -5,5 +5,6 @@ namespace Ryujinx.Graphics.GAL
 		Bilinear,
 		Nearest,
 		Fsr,
+		Area,
 	}
 }

View File

@@ -0,0 +1,106 @@
using OpenTK.Graphics.OpenGL;
using Ryujinx.Common;
using Ryujinx.Graphics.GAL;
using Ryujinx.Graphics.OpenGL.Image;
using System;
using static Ryujinx.Graphics.OpenGL.Effects.ShaderHelper;
namespace Ryujinx.Graphics.OpenGL.Effects
{
internal class AreaScalingFilter : IScalingFilter
{
private readonly OpenGLRenderer _renderer;
private int _inputUniform;
private int _outputUniform;
private int _srcX0Uniform;
private int _srcX1Uniform;
private int _srcY0Uniform;
private int _scalingShaderProgram;
private int _srcY1Uniform;
private int _dstX0Uniform;
private int _dstX1Uniform;
private int _dstY0Uniform;
private int _dstY1Uniform;
public float Level { get; set; }
public AreaScalingFilter(OpenGLRenderer renderer)
{
Initialize();
_renderer = renderer;
}
public void Dispose()
{
if (_scalingShaderProgram != 0)
{
GL.DeleteProgram(_scalingShaderProgram);
}
}
private void Initialize()
{
var scalingShader = EmbeddedResources.ReadAllText("Ryujinx.Graphics.OpenGL/Effects/Shaders/area_scaling.glsl");
_scalingShaderProgram = CompileProgram(scalingShader, ShaderType.ComputeShader);
_inputUniform = GL.GetUniformLocation(_scalingShaderProgram, "Source");
_outputUniform = GL.GetUniformLocation(_scalingShaderProgram, "imgOutput");
_srcX0Uniform = GL.GetUniformLocation(_scalingShaderProgram, "srcX0");
_srcX1Uniform = GL.GetUniformLocation(_scalingShaderProgram, "srcX1");
_srcY0Uniform = GL.GetUniformLocation(_scalingShaderProgram, "srcY0");
_srcY1Uniform = GL.GetUniformLocation(_scalingShaderProgram, "srcY1");
_dstX0Uniform = GL.GetUniformLocation(_scalingShaderProgram, "dstX0");
_dstX1Uniform = GL.GetUniformLocation(_scalingShaderProgram, "dstX1");
_dstY0Uniform = GL.GetUniformLocation(_scalingShaderProgram, "dstY0");
_dstY1Uniform = GL.GetUniformLocation(_scalingShaderProgram, "dstY1");
}
public void Run(
TextureView view,
TextureView destinationTexture,
int width,
int height,
Extents2D source,
Extents2D destination)
{
int previousProgram = GL.GetInteger(GetPName.CurrentProgram);
int previousUnit = GL.GetInteger(GetPName.ActiveTexture);
GL.ActiveTexture(TextureUnit.Texture0);
int previousTextureBinding = GL.GetInteger(GetPName.TextureBinding2D);
GL.BindImageTexture(0, destinationTexture.Handle, 0, false, 0, TextureAccess.ReadWrite, SizedInternalFormat.Rgba8);
int threadGroupWorkRegionDim = 16;
int dispatchX = (width + (threadGroupWorkRegionDim - 1)) / threadGroupWorkRegionDim;
int dispatchY = (height + (threadGroupWorkRegionDim - 1)) / threadGroupWorkRegionDim;
// Scaling pass
GL.UseProgram(_scalingShaderProgram);
view.Bind(0);
GL.Uniform1(_inputUniform, 0);
GL.Uniform1(_outputUniform, 0);
GL.Uniform1(_srcX0Uniform, (float)source.X1);
GL.Uniform1(_srcX1Uniform, (float)source.X2);
GL.Uniform1(_srcY0Uniform, (float)source.Y1);
GL.Uniform1(_srcY1Uniform, (float)source.Y2);
GL.Uniform1(_dstX0Uniform, (float)destination.X1);
GL.Uniform1(_dstX1Uniform, (float)destination.X2);
GL.Uniform1(_dstY0Uniform, (float)destination.Y1);
GL.Uniform1(_dstY1Uniform, (float)destination.Y2);
GL.DispatchCompute(dispatchX, dispatchY, 1);
GL.UseProgram(previousProgram);
GL.MemoryBarrier(MemoryBarrierFlags.ShaderImageAccessBarrierBit);
(_renderer.Pipeline as Pipeline).RestoreImages1And2();
GL.ActiveTexture(TextureUnit.Texture0);
GL.BindTexture(TextureTarget.Texture2D, previousTextureBinding);
GL.ActiveTexture((TextureUnit)previousUnit);
}
}
}
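The Run method above dispatches one 16×16 work group per 16×16 tile of output pixels, using a ceiling division so partial tiles at the right and bottom edges are still covered. A small illustration with hypothetical dimensions (1280×720 is not taken from the diff):

// Illustration only; 1280x720 is a hypothetical output size.
int threadGroupWorkRegionDim = 16;
int width = 1280, height = 720;
int dispatchX = (width + (threadGroupWorkRegionDim - 1)) / threadGroupWorkRegionDim;  // 80
int dispatchY = (height + (threadGroupWorkRegionDim - 1)) / threadGroupWorkRegionDim; // 45
// GL.DispatchCompute(dispatchX, dispatchY, 1) then covers every 1280x720 output pixel.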

View File

@@ -18,7 +18,7 @@ namespace Ryujinx.Graphics.OpenGL.Effects
private int _srcY0Uniform; private int _srcY0Uniform;
private int _scalingShaderProgram; private int _scalingShaderProgram;
private int _sharpeningShaderProgram; private int _sharpeningShaderProgram;
private float _scale = 1; private float _sharpeningLevel = 1;
private int _srcY1Uniform; private int _srcY1Uniform;
private int _dstX0Uniform; private int _dstX0Uniform;
private int _dstX1Uniform; private int _dstX1Uniform;
@@ -30,10 +30,10 @@ namespace Ryujinx.Graphics.OpenGL.Effects
public float Level public float Level
{ {
get => _scale; get => _sharpeningLevel;
set set
{ {
_scale = MathF.Max(0.01f, value); _sharpeningLevel = MathF.Max(0.01f, value);
} }
} }

View File

@@ -1,4 +1,5 @@
using OpenTK.Graphics.OpenGL; using OpenTK.Graphics.OpenGL;
using Ryujinx.Common.Logging;
namespace Ryujinx.Graphics.OpenGL.Effects namespace Ryujinx.Graphics.OpenGL.Effects
{ {
@@ -6,18 +7,7 @@ namespace Ryujinx.Graphics.OpenGL.Effects
{ {
public static int CompileProgram(string shaderCode, ShaderType shaderType) public static int CompileProgram(string shaderCode, ShaderType shaderType)
{ {
var shader = GL.CreateShader(shaderType); return CompileProgram(new string[] { shaderCode }, shaderType);
GL.ShaderSource(shader, shaderCode);
GL.CompileShader(shader);
var program = GL.CreateProgram();
GL.AttachShader(program, shader);
GL.LinkProgram(program);
GL.DetachShader(program, shader);
GL.DeleteShader(shader);
return program;
} }
public static int CompileProgram(string[] shaders, ShaderType shaderType) public static int CompileProgram(string[] shaders, ShaderType shaderType)
@@ -26,6 +16,15 @@ namespace Ryujinx.Graphics.OpenGL.Effects
GL.ShaderSource(shader, shaders.Length, shaders, (int[])null); GL.ShaderSource(shader, shaders.Length, shaders, (int[])null);
GL.CompileShader(shader); GL.CompileShader(shader);
GL.GetShader(shader, ShaderParameter.CompileStatus, out int isCompiled);
if (isCompiled == 0)
{
string log = GL.GetShaderInfoLog(shader);
Logger.Error?.Print(LogClass.Gpu, $"Failed to compile effect shader:\n\n{log}\n");
GL.DeleteShader(shader);
return 0;
}
var program = GL.CreateProgram(); var program = GL.CreateProgram();
GL.AttachShader(program, shader); GL.AttachShader(program, shader);
GL.LinkProgram(program); GL.LinkProgram(program);
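The hunk above only validates the shader object's compile status; linking the program object can fail in the same way. A possible companion check, not part of this change, sketched with the same OpenTK and logging calls used above:

// Hypothetical follow-up check (not in the diff): validate link status as well.
GL.GetProgram(program, GetProgramParameterName.LinkStatus, out int isLinked);
if (isLinked == 0)
{
    string log = GL.GetProgramInfoLog(program);
    Logger.Error?.Print(LogClass.Gpu, $"Failed to link effect shader program:\n\n{log}\n");
}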

View File

@@ -0,0 +1,119 @@
#version 430 core
precision mediump float;
layout (local_size_x = 16, local_size_y = 16) in;
layout(rgba8, binding = 0, location=0) uniform image2D imgOutput;
layout( location=1 ) uniform sampler2D Source;
layout( location=2 ) uniform float srcX0;
layout( location=3 ) uniform float srcX1;
layout( location=4 ) uniform float srcY0;
layout( location=5 ) uniform float srcY1;
layout( location=6 ) uniform float dstX0;
layout( location=7 ) uniform float dstX1;
layout( location=8 ) uniform float dstY0;
layout( location=9 ) uniform float dstY1;
/***** Area Sampling *****/
// By Sam Belliveau and Filippo Tarpini. Public Domain license.
// Effectively a more accurate sharp bilinear filter when upscaling,
// that also works as a mathematically perfect downscale filter.
// https://entropymine.com/imageworsener/pixelmixing/
// https://github.com/obsproject/obs-studio/pull/1715
// https://legacy.imagemagick.org/Usage/filter/
vec4 AreaSampling(vec2 xy)
{
// Determine the sizes of the source and target images.
vec2 source_size = vec2(abs(srcX1 - srcX0), abs(srcY1 - srcY0));
vec2 target_size = vec2(abs(dstX1 - dstX0), abs(dstY1 - dstY0));
vec2 inverted_target_size = vec2(1.0) / target_size;
// Compute the top-left and bottom-right corners of the target pixel box.
vec2 t_beg = floor(xy - vec2(dstX0 < dstX1 ? dstX0 : dstX1, dstY0 < dstY1 ? dstY0 : dstY1));
vec2 t_end = t_beg + vec2(1.0, 1.0);
// Convert the target pixel box to source pixel box.
vec2 beg = t_beg * inverted_target_size * source_size;
vec2 end = t_end * inverted_target_size * source_size;
// Compute the top-left and bottom-right corners of the pixel box.
ivec2 f_beg = ivec2(beg);
ivec2 f_end = ivec2(end);
// Compute how much of the start and end pixels are covered horizontally & vertically.
float area_w = 1.0 - fract(beg.x);
float area_n = 1.0 - fract(beg.y);
float area_e = fract(end.x);
float area_s = fract(end.y);
// Compute the areas of the corner pixels in the pixel box.
float area_nw = area_n * area_w;
float area_ne = area_n * area_e;
float area_sw = area_s * area_w;
float area_se = area_s * area_e;
// Initialize the color accumulator.
vec4 avg_color = vec4(0.0, 0.0, 0.0, 0.0);
// Accumulate corner pixels.
avg_color += area_nw * texelFetch(Source, ivec2(f_beg.x, f_beg.y), 0);
avg_color += area_ne * texelFetch(Source, ivec2(f_end.x, f_beg.y), 0);
avg_color += area_sw * texelFetch(Source, ivec2(f_beg.x, f_end.y), 0);
avg_color += area_se * texelFetch(Source, ivec2(f_end.x, f_end.y), 0);
// Determine the size of the pixel box.
int x_range = int(f_end.x - f_beg.x - 0.5);
int y_range = int(f_end.y - f_beg.y - 0.5);
// Accumulate top and bottom edge pixels.
for (int x = f_beg.x + 1; x <= f_beg.x + x_range; ++x)
{
avg_color += area_n * texelFetch(Source, ivec2(x, f_beg.y), 0);
avg_color += area_s * texelFetch(Source, ivec2(x, f_end.y), 0);
}
// Accumulate left and right edge pixels and all the pixels in between.
for (int y = f_beg.y + 1; y <= f_beg.y + y_range; ++y)
{
avg_color += area_w * texelFetch(Source, ivec2(f_beg.x, y), 0);
avg_color += area_e * texelFetch(Source, ivec2(f_end.x, y), 0);
for (int x = f_beg.x + 1; x <= f_beg.x + x_range; ++x)
{
avg_color += texelFetch(Source, ivec2(x, y), 0);
}
}
// Compute the area of the pixel box that was sampled.
float area_corners = area_nw + area_ne + area_sw + area_se;
float area_edges = float(x_range) * (area_n + area_s) + float(y_range) * (area_w + area_e);
float area_center = float(x_range) * float(y_range);
// Return the normalized average color.
return avg_color / (area_corners + area_edges + area_center);
}
float insideBox(vec2 v, vec2 bLeft, vec2 tRight) {
vec2 s = step(bLeft, v) - step(tRight, v);
return s.x * s.y;
}
vec2 translateDest(vec2 pos) {
vec2 translatedPos = vec2(pos.x, pos.y);
translatedPos.x = dstX1 < dstX0 ? dstX1 - translatedPos.x : translatedPos.x;
translatedPos.y = dstY0 > dstY1 ? dstY0 + dstY1 - translatedPos.y - 1 : translatedPos.y;
return translatedPos;
}
void main()
{
vec2 bLeft = vec2(dstX0 < dstX1 ? dstX0 : dstX1, dstY0 < dstY1 ? dstY0 : dstY1);
vec2 tRight = vec2(dstX1 > dstX0 ? dstX1 : dstX0, dstY1 > dstY0 ? dstY1 : dstY0);
ivec2 loc = ivec2(gl_GlobalInvocationID.x, gl_GlobalInvocationID.y);
if (insideBox(loc, bLeft, tRight) == 0) {
imageStore(imgOutput, loc, vec4(0, 0, 0, 1));
return;
}
vec4 outColor = AreaSampling(loc);
imageStore(imgOutput, ivec2(translateDest(loc)), vec4(outColor.rgb, 1));
}
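The comments in the shader describe the idea: each target pixel is mapped to a box in source space, and every covered source pixel contributes with a weight equal to the area it covers, normalized by the total area. A minimal 1-D sketch of the same weighting (my own illustration, not taken from the shader):

// 1-D illustration: downscale 3 source pixels to 2 target pixels; target pixel 0
// maps to the source interval [0.0, 1.5).
float beg = 0.0f, end = 1.5f;           // source-space extent of the target pixel
int fBeg = (int)beg, fEnd = (int)end;   // first/last source pixel indices touched (0 and 1)
float areaW = 1.0f - (beg - fBeg);      // coverage of the first pixel  -> 1.0
float areaE = end - fEnd;               // coverage of the last pixel   -> 0.5
// result = (areaW * src[0] + areaE * src[1]) / (areaW + areaE)
//        = (src[0] + 0.5f * src[1]) / 1.5f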

View File

@@ -85,4 +85,4 @@ void main() {
CurrFilter(gxy); CurrFilter(gxy);
gxy.x -= 8u; gxy.x -= 8u;
CurrFilter(gxy); CurrFilter(gxy);
} }

View File

@@ -21,6 +21,7 @@
<EmbeddedResource Include="Effects\Shaders\ffx_fsr1.h" /> <EmbeddedResource Include="Effects\Shaders\ffx_fsr1.h" />
<EmbeddedResource Include="Effects\Shaders\ffx_a.h" /> <EmbeddedResource Include="Effects\Shaders\ffx_a.h" />
<EmbeddedResource Include="Effects\Shaders\fsr_scaling.glsl" /> <EmbeddedResource Include="Effects\Shaders\fsr_scaling.glsl" />
<EmbeddedResource Include="Effects\Shaders\area_scaling.glsl" />
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>

View File

@@ -373,6 +373,16 @@ namespace Ryujinx.Graphics.OpenGL
_isLinear = false; _isLinear = false;
_scalingFilter.Level = _scalingFilterLevel; _scalingFilter.Level = _scalingFilterLevel;
RecreateUpscalingTexture();
break;
case ScalingFilter.Area:
if (_scalingFilter is not AreaScalingFilter)
{
_scalingFilter?.Dispose();
_scalingFilter = new AreaScalingFilter(_renderer);
}
_isLinear = false;
RecreateUpscalingTexture(); RecreateUpscalingTexture();
break; break;
} }

View File

@@ -0,0 +1,101 @@
using Ryujinx.Common;
using Ryujinx.Graphics.GAL;
using Ryujinx.Graphics.Shader;
using Ryujinx.Graphics.Shader.Translation;
using Silk.NET.Vulkan;
using System;
using Extent2D = Ryujinx.Graphics.GAL.Extents2D;
using Format = Silk.NET.Vulkan.Format;
using SamplerCreateInfo = Ryujinx.Graphics.GAL.SamplerCreateInfo;
namespace Ryujinx.Graphics.Vulkan.Effects
{
internal class AreaScalingFilter : IScalingFilter
{
private readonly VulkanRenderer _renderer;
private PipelineHelperShader _pipeline;
private ISampler _sampler;
private ShaderCollection _scalingProgram;
private Device _device;
public float Level { get; set; }
public AreaScalingFilter(VulkanRenderer renderer, Device device)
{
_device = device;
_renderer = renderer;
Initialize();
}
public void Dispose()
{
_pipeline.Dispose();
_scalingProgram.Dispose();
_sampler.Dispose();
}
public void Initialize()
{
_pipeline = new PipelineHelperShader(_renderer, _device);
_pipeline.Initialize();
var scalingShader = EmbeddedResources.Read("Ryujinx.Graphics.Vulkan/Effects/Shaders/AreaScaling.spv");
var scalingResourceLayout = new ResourceLayoutBuilder()
.Add(ResourceStages.Compute, ResourceType.UniformBuffer, 2)
.Add(ResourceStages.Compute, ResourceType.TextureAndSampler, 1)
.Add(ResourceStages.Compute, ResourceType.Image, 0, true).Build();
_sampler = _renderer.CreateSampler(SamplerCreateInfo.Create(MinFilter.Linear, MagFilter.Linear));
_scalingProgram = _renderer.CreateProgramWithMinimalLayout(new[]
{
new ShaderSource(scalingShader, ShaderStage.Compute, TargetLanguage.Spirv),
}, scalingResourceLayout);
}
public void Run(
TextureView view,
CommandBufferScoped cbs,
Auto<DisposableImageView> destinationTexture,
Format format,
int width,
int height,
Extent2D source,
Extent2D destination)
{
_pipeline.SetCommandBuffer(cbs);
_pipeline.SetProgram(_scalingProgram);
_pipeline.SetTextureAndSampler(ShaderStage.Compute, 1, view, _sampler);
ReadOnlySpan<float> dimensionsBuffer = stackalloc float[]
{
source.X1,
source.X2,
source.Y1,
source.Y2,
destination.X1,
destination.X2,
destination.Y1,
destination.Y2,
};
int rangeSize = dimensionsBuffer.Length * sizeof(float);
using var buffer = _renderer.BufferManager.ReserveOrCreate(_renderer, cbs, rangeSize);
buffer.Holder.SetDataUnchecked(buffer.Offset, dimensionsBuffer);
int threadGroupWorkRegionDim = 16;
int dispatchX = (width + (threadGroupWorkRegionDim - 1)) / threadGroupWorkRegionDim;
int dispatchY = (height + (threadGroupWorkRegionDim - 1)) / threadGroupWorkRegionDim;
_pipeline.SetUniformBuffers(stackalloc[] { new BufferAssignment(2, buffer.Range) });
_pipeline.SetImage(0, destinationTexture);
_pipeline.DispatchCompute(dispatchX, dispatchY, 1);
_pipeline.ComputeBarrier();
_pipeline.Finish();
}
}
}
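The eight floats packed into dimensionsBuffer are read back in the same order by the dimensions uniform block of the SPIR-V shader shown below. Purely as an illustration of that packing (this struct is hypothetical and not part of the diff), an equivalent C# layout would be:

using System.Runtime.InteropServices;

// Hypothetical mirror of the shader's "dimensions" uniform block; eight consecutive
// floats need no padding under std140, so a sequential layout matches byte-for-byte.
[StructLayout(LayoutKind.Sequential)]
struct AreaScalingDimensions
{
    public float SrcX0, SrcX1, SrcY0, SrcY1;
    public float DstX0, DstX1, DstY0, DstY1;
}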

View File

@@ -0,0 +1,122 @@
// Scaling
#version 430 core
layout (local_size_x = 16, local_size_y = 16) in;
layout( rgba8, binding = 0, set = 3) uniform image2D imgOutput;
layout( binding = 1, set = 2) uniform sampler2D Source;
layout( binding = 2 ) uniform dimensions{
float srcX0;
float srcX1;
float srcY0;
float srcY1;
float dstX0;
float dstX1;
float dstY0;
float dstY1;
};
/***** Area Sampling *****/
// By Sam Belliveau and Filippo Tarpini. Public Domain license.
// Effectively a more accurate sharp bilinear filter when upscaling,
// that also works as a mathematically perfect downscale filter.
// https://entropymine.com/imageworsener/pixelmixing/
// https://github.com/obsproject/obs-studio/pull/1715
// https://legacy.imagemagick.org/Usage/filter/
vec4 AreaSampling(vec2 xy)
{
// Determine the sizes of the source and target images.
vec2 source_size = vec2(abs(srcX1 - srcX0), abs(srcY1 - srcY0));
vec2 target_size = vec2(abs(dstX1 - dstX0), abs(dstY1 - dstY0));
vec2 inverted_target_size = vec2(1.0) / target_size;
// Compute the top-left and bottom-right corners of the target pixel box.
vec2 t_beg = floor(xy - vec2(dstX0 < dstX1 ? dstX0 : dstX1, dstY0 < dstY1 ? dstY0 : dstY1));
vec2 t_end = t_beg + vec2(1.0, 1.0);
// Convert the target pixel box to source pixel box.
vec2 beg = t_beg * inverted_target_size * source_size;
vec2 end = t_end * inverted_target_size * source_size;
// Compute the top-left and bottom-right corners of the pixel box.
ivec2 f_beg = ivec2(beg);
ivec2 f_end = ivec2(end);
// Compute how much of the start and end pixels are covered horizontally & vertically.
float area_w = 1.0 - fract(beg.x);
float area_n = 1.0 - fract(beg.y);
float area_e = fract(end.x);
float area_s = fract(end.y);
// Compute the areas of the corner pixels in the pixel box.
float area_nw = area_n * area_w;
float area_ne = area_n * area_e;
float area_sw = area_s * area_w;
float area_se = area_s * area_e;
// Initialize the color accumulator.
vec4 avg_color = vec4(0.0, 0.0, 0.0, 0.0);
// Accumulate corner pixels.
avg_color += area_nw * texelFetch(Source, ivec2(f_beg.x, f_beg.y), 0);
avg_color += area_ne * texelFetch(Source, ivec2(f_end.x, f_beg.y), 0);
avg_color += area_sw * texelFetch(Source, ivec2(f_beg.x, f_end.y), 0);
avg_color += area_se * texelFetch(Source, ivec2(f_end.x, f_end.y), 0);
// Determine the size of the pixel box.
int x_range = int(f_end.x - f_beg.x - 0.5);
int y_range = int(f_end.y - f_beg.y - 0.5);
// Accumulate top and bottom edge pixels.
for (int x = f_beg.x + 1; x <= f_beg.x + x_range; ++x)
{
avg_color += area_n * texelFetch(Source, ivec2(x, f_beg.y), 0);
avg_color += area_s * texelFetch(Source, ivec2(x, f_end.y), 0);
}
// Accumulate left and right edge pixels and all the pixels in between.
for (int y = f_beg.y + 1; y <= f_beg.y + y_range; ++y)
{
avg_color += area_w * texelFetch(Source, ivec2(f_beg.x, y), 0);
avg_color += area_e * texelFetch(Source, ivec2(f_end.x, y), 0);
for (int x = f_beg.x + 1; x <= f_beg.x + x_range; ++x)
{
avg_color += texelFetch(Source, ivec2(x, y), 0);
}
}
// Compute the area of the pixel box that was sampled.
float area_corners = area_nw + area_ne + area_sw + area_se;
float area_edges = float(x_range) * (area_n + area_s) + float(y_range) * (area_w + area_e);
float area_center = float(x_range) * float(y_range);
// Return the normalized average color.
return avg_color / (area_corners + area_edges + area_center);
}
float insideBox(vec2 v, vec2 bLeft, vec2 tRight) {
vec2 s = step(bLeft, v) - step(tRight, v);
return s.x * s.y;
}
vec2 translateDest(vec2 pos) {
vec2 translatedPos = vec2(pos.x, pos.y);
translatedPos.x = dstX1 < dstX0 ? dstX1 - translatedPos.x : translatedPos.x;
translatedPos.y = dstY0 < dstY1 ? dstY1 + dstY0 - translatedPos.y - 1 : translatedPos.y;
return translatedPos;
}
void main()
{
vec2 bLeft = vec2(dstX0 < dstX1 ? dstX0 : dstX1, dstY0 < dstY1 ? dstY0 : dstY1);
vec2 tRight = vec2(dstX1 > dstX0 ? dstX1 : dstX0, dstY1 > dstY0 ? dstY1 : dstY0);
ivec2 loc = ivec2(gl_GlobalInvocationID.x, gl_GlobalInvocationID.y);
if (insideBox(loc, bLeft, tRight) == 0) {
imageStore(imgOutput, loc, vec4(0, 0, 0, 1));
return;
}
vec4 outColor = AreaSampling(loc);
imageStore(imgOutput, ivec2(translateDest(loc)), vec4(outColor.rgb, 1));
}

View File

@@ -15,6 +15,7 @@
<ItemGroup> <ItemGroup>
<EmbeddedResource Include="Effects\Textures\SmaaAreaTexture.bin" /> <EmbeddedResource Include="Effects\Textures\SmaaAreaTexture.bin" />
<EmbeddedResource Include="Effects\Textures\SmaaSearchTexture.bin" /> <EmbeddedResource Include="Effects\Textures\SmaaSearchTexture.bin" />
<EmbeddedResource Include="Effects\Shaders\AreaScaling.spv" />
<EmbeddedResource Include="Effects\Shaders\FsrScaling.spv" /> <EmbeddedResource Include="Effects\Shaders\FsrScaling.spv" />
<EmbeddedResource Include="Effects\Shaders\FsrSharpening.spv" /> <EmbeddedResource Include="Effects\Shaders\FsrSharpening.spv" />
<EmbeddedResource Include="Effects\Shaders\Fxaa.spv" /> <EmbeddedResource Include="Effects\Shaders\Fxaa.spv" />

View File

@@ -568,6 +568,13 @@ namespace Ryujinx.Graphics.Vulkan
_scalingFilter.Level = _scalingFilterLevel; _scalingFilter.Level = _scalingFilterLevel;
break; break;
case ScalingFilter.Area:
if (_scalingFilter is not AreaScalingFilter)
{
_scalingFilter?.Dispose();
_scalingFilter = new AreaScalingFilter(_gd, _device);
}
break;
} }
} }
} }

View File

@@ -647,7 +647,7 @@ namespace Ryujinx.UI
} }
var memoryConfiguration = ConfigurationState.Instance.System.ExpandRam.Value var memoryConfiguration = ConfigurationState.Instance.System.ExpandRam.Value
? HLE.MemoryConfiguration.MemoryConfiguration6GiB ? HLE.MemoryConfiguration.MemoryConfiguration8GiB
: HLE.MemoryConfiguration.MemoryConfiguration4GiB; : HLE.MemoryConfiguration.MemoryConfiguration4GiB;
IntegrityCheckLevel fsIntegrityCheckLevel = ConfigurationState.Instance.System.EnableFsIntegrityChecks ? IntegrityCheckLevel.ErrorOnInvalid : IntegrityCheckLevel.None; IntegrityCheckLevel fsIntegrityCheckLevel = ConfigurationState.Instance.System.EnableFsIntegrityChecks ? IntegrityCheckLevel.ErrorOnInvalid : IntegrityCheckLevel.None;

View File

@@ -28,8 +28,8 @@ namespace Ryujinx.HLE.HOS.Kernel.Common
MemoryArrange.MemoryArrange4GiBSystemDev or MemoryArrange.MemoryArrange4GiBSystemDev or
MemoryArrange.MemoryArrange6GiBAppletDev => 3285 * MiB, MemoryArrange.MemoryArrange6GiBAppletDev => 3285 * MiB,
MemoryArrange.MemoryArrange4GiBAppletDev => 2048 * MiB, MemoryArrange.MemoryArrange4GiBAppletDev => 2048 * MiB,
MemoryArrange.MemoryArrange6GiB or MemoryArrange.MemoryArrange6GiB => 4916 * MiB,
MemoryArrange.MemoryArrange8GiB => 4916 * MiB, MemoryArrange.MemoryArrange8GiB => 6964 * MiB,
_ => throw new ArgumentException($"Invalid memory arrange \"{arrange}\"."), _ => throw new ArgumentException($"Invalid memory arrange \"{arrange}\"."),
}; };
} }
@@ -42,8 +42,8 @@ namespace Ryujinx.HLE.HOS.Kernel.Common
MemoryArrange.MemoryArrange4GiBAppletDev => 1554 * MiB, MemoryArrange.MemoryArrange4GiBAppletDev => 1554 * MiB,
MemoryArrange.MemoryArrange4GiBSystemDev => 448 * MiB, MemoryArrange.MemoryArrange4GiBSystemDev => 448 * MiB,
MemoryArrange.MemoryArrange6GiB => 562 * MiB, MemoryArrange.MemoryArrange6GiB => 562 * MiB,
MemoryArrange.MemoryArrange6GiBAppletDev or MemoryArrange.MemoryArrange6GiBAppletDev => 2193 * MiB,
MemoryArrange.MemoryArrange8GiB => 2193 * MiB, MemoryArrange.MemoryArrange8GiB => 562 * MiB,
_ => throw new ArgumentException($"Invalid memory arrange \"{arrange}\"."), _ => throw new ArgumentException($"Invalid memory arrange \"{arrange}\"."),
}; };
} }
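As a quick arithmetic check on the new pool sizes (my own calculation, not part of the diff, assuming nominal DRAM totals of 6144 MiB and 8192 MiB): both retail arrangements leave the same remainder once the application and applet pools are subtracted.

// Remaining DRAM after application + applet pools, in MiB (values from the hunk above).
int remainder6GiB = 6144 - (4916 + 562);   // 666
int remainder8GiB = 8192 - (6964 + 562);   // 666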

View File

@@ -219,7 +219,7 @@ namespace Ryujinx.Headless.SDL2
// Hacks // Hacks
[Option("expand-ram", Required = false, Default = false, HelpText = "Expands the RAM amount on the emulated system from 4GiB to 6GiB.")] [Option("expand-ram", Required = false, Default = false, HelpText = "Expands the RAM amount on the emulated system from 4GiB to 8GiB.")]
public bool ExpandRAM { get; set; } public bool ExpandRAM { get; set; }
[Option("ignore-missing-services", Required = false, Default = false, HelpText = "Enable ignoring missing services.")] [Option("ignore-missing-services", Required = false, Default = false, HelpText = "Enable ignoring missing services.")]

View File

@@ -562,7 +562,7 @@ namespace Ryujinx.Headless.SDL2
_userChannelPersistence, _userChannelPersistence,
renderer, renderer,
new SDL2HardwareDeviceDriver(), new SDL2HardwareDeviceDriver(),
options.ExpandRAM ? MemoryConfiguration.MemoryConfiguration6GiB : MemoryConfiguration.MemoryConfiguration4GiB, options.ExpandRAM ? MemoryConfiguration.MemoryConfiguration8GiB : MemoryConfiguration.MemoryConfiguration4GiB,
window, window,
options.SystemLanguage, options.SystemLanguage,
options.SystemRegion, options.SystemRegion,

View File

@@ -238,7 +238,7 @@ namespace Ryujinx.UI.Common.Configuration
public MemoryManagerMode MemoryManagerMode { get; set; } public MemoryManagerMode MemoryManagerMode { get; set; }
/// <summary> /// <summary>
/// Expands the RAM amount on the emulated system from 4GiB to 6GiB /// Expands the RAM amount on the emulated system from 4GiB to 8GiB
/// </summary> /// </summary>
public bool ExpandRam { get; set; } public bool ExpandRam { get; set; }

View File

@@ -845,7 +845,7 @@ namespace Ryujinx.Ava
Logger.Info?.PrintMsg(LogClass.Gpu, $"Backend Threading ({threadingMode}): {isGALThreaded}"); Logger.Info?.PrintMsg(LogClass.Gpu, $"Backend Threading ({threadingMode}): {isGALThreaded}");
// Initialize Configuration. // Initialize Configuration.
var memoryConfiguration = ConfigurationState.Instance.System.ExpandRam.Value ? MemoryConfiguration.MemoryConfiguration6GiB : MemoryConfiguration.MemoryConfiguration4GiB; var memoryConfiguration = ConfigurationState.Instance.System.ExpandRam.Value ? MemoryConfiguration.MemoryConfiguration8GiB : MemoryConfiguration.MemoryConfiguration4GiB;
HLEConfiguration configuration = new(VirtualFileSystem, HLEConfiguration configuration = new(VirtualFileSystem,
_viewModel.LibHacHorizonManager, _viewModel.LibHacHorizonManager,

View File

@@ -145,7 +145,7 @@
"SettingsTabSystemAudioBackendSDL2": "SDL2", "SettingsTabSystemAudioBackendSDL2": "SDL2",
"SettingsTabSystemHacks": "Hacks", "SettingsTabSystemHacks": "Hacks",
"SettingsTabSystemHacksNote": "May cause instability", "SettingsTabSystemHacksNote": "May cause instability",
"SettingsTabSystemExpandDramSize": "Use alternative memory layout (Developers)", "SettingsTabSystemExpandDramSize": "Expand DRAM to 8GiB",
"SettingsTabSystemIgnoreMissingServices": "Ignore Missing Services", "SettingsTabSystemIgnoreMissingServices": "Ignore Missing Services",
"SettingsTabGraphics": "Graphics", "SettingsTabGraphics": "Graphics",
"SettingsTabGraphicsAPI": "Graphics API", "SettingsTabGraphicsAPI": "Graphics API",
@@ -575,7 +575,7 @@
"MemoryManagerHostTooltip": "Directly map memory in the host address space. Much faster JIT compilation and execution.", "MemoryManagerHostTooltip": "Directly map memory in the host address space. Much faster JIT compilation and execution.",
"MemoryManagerUnsafeTooltip": "Directly map memory, but do not mask the address within the guest address space before access. Faster, but at the cost of safety. The guest application can access memory from anywhere in Ryujinx, so only run programs you trust with this mode.", "MemoryManagerUnsafeTooltip": "Directly map memory, but do not mask the address within the guest address space before access. Faster, but at the cost of safety. The guest application can access memory from anywhere in Ryujinx, so only run programs you trust with this mode.",
"UseHypervisorTooltip": "Use Hypervisor instead of JIT. Greatly improves performance when available, but can be unstable in its current state.", "UseHypervisorTooltip": "Use Hypervisor instead of JIT. Greatly improves performance when available, but can be unstable in its current state.",
"DRamTooltip": "Utilizes an alternative MemoryMode layout to mimic a Switch development model.\n\nThis is only useful for higher-resolution texture packs or 4k resolution mods. Does NOT improve performance.\n\nLeave OFF if unsure.", "DRamTooltip": "Utilizes an alternative memory mode with 8GiB of DRAM to mimic a Switch development model.\n\nThis is only useful for higher-resolution texture packs or 4k resolution mods. Does NOT improve performance.\n\nLeave OFF if unsure.",
"IgnoreMissingServicesTooltip": "Ignores unimplemented Horizon OS services. This may help in bypassing crashes when booting certain games.\n\nLeave OFF if unsure.", "IgnoreMissingServicesTooltip": "Ignores unimplemented Horizon OS services. This may help in bypassing crashes when booting certain games.\n\nLeave OFF if unsure.",
"GraphicsBackendThreadingTooltip": "Executes graphics backend commands on a second thread.\n\nSpeeds up shader compilation, reduces stuttering, and improves performance on GPU drivers without multithreading support of their own. Slightly better performance on drivers with multithreading.\n\nSet to AUTO if unsure.", "GraphicsBackendThreadingTooltip": "Executes graphics backend commands on a second thread.\n\nSpeeds up shader compilation, reduces stuttering, and improves performance on GPU drivers without multithreading support of their own. Slightly better performance on drivers with multithreading.\n\nSet to AUTO if unsure.",
"GalThreadingTooltip": "Executes graphics backend commands on a second thread.\n\nSpeeds up shader compilation, reduces stuttering, and improves performance on GPU drivers without multithreading support of their own. Slightly better performance on drivers with multithreading.\n\nSet to AUTO if unsure.", "GalThreadingTooltip": "Executes graphics backend commands on a second thread.\n\nSpeeds up shader compilation, reduces stuttering, and improves performance on GPU drivers without multithreading support of their own. Slightly better performance on drivers with multithreading.\n\nSet to AUTO if unsure.",
@@ -758,10 +758,11 @@
"GraphicsAATooltip": "Applies anti-aliasing to the game render.\n\nFXAA will blur most of the image, while SMAA will attempt to find jagged edges and smooth them out.\n\nNot recommended to use in conjunction with the FSR scaling filter.\n\nThis option can be changed while a game is running by clicking \"Apply\" below; you can simply move the settings window aside and experiment until you find your preferred look for a game.\n\nLeave on NONE if unsure.", "GraphicsAATooltip": "Applies anti-aliasing to the game render.\n\nFXAA will blur most of the image, while SMAA will attempt to find jagged edges and smooth them out.\n\nNot recommended to use in conjunction with the FSR scaling filter.\n\nThis option can be changed while a game is running by clicking \"Apply\" below; you can simply move the settings window aside and experiment until you find your preferred look for a game.\n\nLeave on NONE if unsure.",
"GraphicsAALabel": "Anti-Aliasing:", "GraphicsAALabel": "Anti-Aliasing:",
"GraphicsScalingFilterLabel": "Scaling Filter:", "GraphicsScalingFilterLabel": "Scaling Filter:",
"GraphicsScalingFilterTooltip": "Choose the scaling filter that will be applied when using resolution scale.\n\nBilinear works well for 3D games and is a safe default option.\n\nNearest is recommended for pixel art games.\n\nFSR 1.0 is merely a sharpening filter, not recommended for use with FXAA or SMAA.\n\nThis option can be changed while a game is running by clicking \"Apply\" below; you can simply move the settings window aside and experiment until you find your preferred look for a game.\n\nLeave on BILINEAR if unsure.", "GraphicsScalingFilterTooltip": "Choose the scaling filter that will be applied when using resolution scale.\n\nBilinear works well for 3D games and is a safe default option.\n\nNearest is recommended for pixel art games.\n\nFSR 1.0 is merely a sharpening filter, not recommended for use with FXAA or SMAA.\n\nArea scaling is recommended when downscaling resolutions that are larger than the output window. It can be used to achieve a supersampled anti-aliasing effect when downscaling by more than 2x.\n\nThis option can be changed while a game is running by clicking \"Apply\" below; you can simply move the settings window aside and experiment until you find your preferred look for a game.\n\nLeave on BILINEAR if unsure.",
"GraphicsScalingFilterBilinear": "Bilinear", "GraphicsScalingFilterBilinear": "Bilinear",
"GraphicsScalingFilterNearest": "Nearest", "GraphicsScalingFilterNearest": "Nearest",
"GraphicsScalingFilterFsr": "FSR", "GraphicsScalingFilterFsr": "FSR",
"GraphicsScalingFilterArea": "Area",
"GraphicsScalingFilterLevelLabel": "Level", "GraphicsScalingFilterLevelLabel": "Level",
"GraphicsScalingFilterLevelTooltip": "Set FSR 1.0 sharpening level. Higher is sharper.", "GraphicsScalingFilterLevelTooltip": "Set FSR 1.0 sharpening level. Higher is sharper.",
"SmaaLow": "SMAA Low", "SmaaLow": "SMAA Low",

View File

@@ -1,4 +1,4 @@
<UserControl <UserControl
x:Class="Ryujinx.Ava.UI.Views.Settings.SettingsGraphicsView" x:Class="Ryujinx.Ava.UI.Views.Settings.SettingsGraphicsView"
xmlns="https://github.com/avaloniaui" xmlns="https://github.com/avaloniaui"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
@@ -173,6 +173,9 @@
<ComboBoxItem> <ComboBoxItem>
<TextBlock Text="{locale:Locale GraphicsScalingFilterFsr}" /> <TextBlock Text="{locale:Locale GraphicsScalingFilterFsr}" />
</ComboBoxItem> </ComboBoxItem>
<ComboBoxItem>
<TextBlock Text="{locale:Locale GraphicsScalingFilterArea}" />
</ComboBoxItem>
</ComboBox> </ComboBox>
<controls:SliderScroll Value="{Binding ScalingFilterLevel}" <controls:SliderScroll Value="{Binding ScalingFilterLevel}"
ToolTip.Tip="{locale:Locale GraphicsScalingFilterLevelTooltip}" ToolTip.Tip="{locale:Locale GraphicsScalingFilterLevelTooltip}"

View File

@@ -13,6 +13,7 @@ use crate::{
}; };
/// Service context /// Service context
#[derive(Debug)]
pub struct Context { pub struct Context {
// Protector against replay attack // Protector against replay attack
// The actual replay detection behavior is implemented in ReplayProtector // The actual replay detection behavior is implemented in ReplayProtector

View File

@@ -38,6 +38,7 @@ pub trait DnsResolve {
} }
#[cfg(feature = "hickory-dns")] #[cfg(feature = "hickory-dns")]
#[derive(Debug)]
pub struct HickoryDnsSystemResolver { pub struct HickoryDnsSystemResolver {
resolver: ArcSwap<HickoryDnsResolver>, resolver: ArcSwap<HickoryDnsResolver>,
#[cfg_attr(any(windows, target_os = "android"), allow(dead_code))] #[cfg_attr(any(windows, target_os = "android"), allow(dead_code))]

View File

@@ -46,6 +46,7 @@ impl fmt::Display for ManagerSocketAddr {
/// Datagram socket for manager /// Datagram socket for manager
/// ///
/// For *nix system, this is a wrapper for both UDP socket and Unix socket /// For *nix system, this is a wrapper for both UDP socket and Unix socket
#[derive(Debug)]
pub enum ManagerDatagram { pub enum ManagerDatagram {
UdpDatagram(UdpSocket), UdpDatagram(UdpSocket),
#[cfg(unix)] #[cfg(unix)]

View File

@@ -13,6 +13,7 @@ use super::{
}; };
/// Manager server Listener /// Manager server Listener
#[derive(Debug)]
pub struct ManagerListener { pub struct ManagerListener {
socket: ManagerDatagram, socket: ManagerDatagram,
} }

View File

@@ -121,6 +121,7 @@ impl AsyncWrite for TcpStream {
} }
/// `TcpListener` for accepting inbound connections /// `TcpListener` for accepting inbound connections
#[derive(Debug)]
pub struct TcpListener { pub struct TcpListener {
inner: TokioTcpListener, inner: TokioTcpListener,
accept_opts: AcceptOpts, accept_opts: AcceptOpts,

View File

@@ -85,6 +85,7 @@ fn make_mtu_error(packet_size: usize, mtu: usize) -> io::Error {
} }
/// Wrappers for outbound `UdpSocket` /// Wrappers for outbound `UdpSocket`
#[derive(Debug)]
#[pin_project] #[pin_project]
pub struct UdpSocket { pub struct UdpSocket {
#[pin] #[pin]

View File

@@ -58,6 +58,7 @@ pub enum PluginMode {
} }
/// A shadowsocks SIP004 Plugin /// A shadowsocks SIP004 Plugin
#[derive(Debug)]
pub struct Plugin { pub struct Plugin {
process: Child, process: Child,
local_addr: SocketAddr, local_addr: SocketAddr,

View File

@@ -80,6 +80,7 @@ impl From<ProtocolError> for io::Error {
} }
} }
#[derive(Debug)]
enum DecryptReadState { enum DecryptReadState {
WaitSalt { key: Bytes }, WaitSalt { key: Bytes },
ReadLength, ReadLength,
@@ -320,6 +321,7 @@ impl DecryptedReader {
} }
} }
#[derive(Debug)]
enum EncryptWriteState { enum EncryptWriteState {
AssemblePacket, AssemblePacket,
Writing { pos: usize }, Writing { pos: usize },

View File

@@ -1,7 +1,7 @@
//! IO facilities for TCP relay //! IO facilities for TCP relay
use std::{ use std::{
io, fmt, io,
marker::Unpin, marker::Unpin,
pin::Pin, pin::Pin,
sync::Arc, sync::Arc,
@@ -313,6 +313,15 @@ pub struct CryptoStream<S> {
has_handshaked: bool, has_handshaked: bool,
} }
impl<S> fmt::Debug for CryptoStream<S> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("CryptoStream")
.field("method", &self.method)
.field("has_handshaked", &self.has_handshaked)
.finish()
}
}
impl<S> CryptoStream<S> { impl<S> CryptoStream<S> {
/// Create a new CryptoStream with the underlying stream connection /// Create a new CryptoStream with the underlying stream connection
pub fn from_stream( pub fn from_stream(

View File

@@ -17,6 +17,7 @@ use crate::{
}; };
/// A TCP listener for accepting shadowsocks' client connection /// A TCP listener for accepting shadowsocks' client connection
#[derive(Debug)]
pub struct ProxyListener { pub struct ProxyListener {
listener: TcpListener, listener: TcpListener,
method: CipherKind, method: CipherKind,

View File

@@ -30,12 +30,14 @@ use crate::{
}, },
}; };
#[derive(Debug)]
enum ProxyClientStreamWriteState { enum ProxyClientStreamWriteState {
Connect(Address), Connect(Address),
Connecting(BytesMut), Connecting(BytesMut),
Connected, Connected,
} }
#[derive(Debug)]
enum ProxyClientStreamReadState { enum ProxyClientStreamReadState {
#[cfg(feature = "aead-cipher-2022")] #[cfg(feature = "aead-cipher-2022")]
CheckRequestNonce, CheckRequestNonce,
@@ -43,6 +45,7 @@ enum ProxyClientStreamReadState {
} }
/// A stream for sending / receiving data stream from remote server via shadowsocks' proxy server /// A stream for sending / receiving data stream from remote server via shadowsocks' proxy server
#[derive(Debug)]
#[pin_project] #[pin_project]
pub struct ProxyClientStream<S> { pub struct ProxyClientStream<S> {
#[pin] #[pin]

View File

@@ -18,6 +18,7 @@ pub mod v1;
#[cfg(feature = "aead-cipher-2022")] #[cfg(feature = "aead-cipher-2022")]
pub mod v2; pub mod v2;
#[derive(Debug)]
pub enum TcpRequestHeader { pub enum TcpRequestHeader {
Stream(StreamTcpRequestHeader), Stream(StreamTcpRequestHeader),
#[cfg(feature = "aead-cipher-2022")] #[cfg(feature = "aead-cipher-2022")]
@@ -74,6 +75,7 @@ impl TcpRequestHeader {
} }
} }
#[derive(Debug)]
pub enum TcpRequestHeaderRef<'a> { pub enum TcpRequestHeaderRef<'a> {
Stream(StreamTcpRequestHeaderRef<'a>), Stream(StreamTcpRequestHeaderRef<'a>),
#[cfg(feature = "aead-cipher-2022")] #[cfg(feature = "aead-cipher-2022")]

View File

@@ -7,6 +7,7 @@ use tokio::io::AsyncRead;
use crate::relay::socks5::Address; use crate::relay::socks5::Address;
#[derive(Debug)]
pub struct StreamTcpRequestHeader { pub struct StreamTcpRequestHeader {
pub addr: Address, pub addr: Address,
} }
@@ -27,6 +28,7 @@ impl StreamTcpRequestHeader {
} }
} }
#[derive(Debug)]
pub struct StreamTcpRequestHeaderRef<'a> { pub struct StreamTcpRequestHeaderRef<'a> {
pub addr: &'a Address, pub addr: &'a Address,
} }

View File

@@ -66,6 +66,7 @@ impl Aead2022TcpRequestHeader {
} }
} }
#[derive(Debug)]
pub struct Aead2022TcpRequestHeaderRef<'a> { pub struct Aead2022TcpRequestHeaderRef<'a> {
pub addr: &'a Address, pub addr: &'a Address,
pub padding_size: u16, pub padding_size: u16,

View File

@@ -25,6 +25,7 @@ use crate::{
}, },
}; };
#[derive(Debug)]
enum ProxyServerStreamWriteState { enum ProxyServerStreamWriteState {
#[cfg(feature = "aead-cipher-2022")] #[cfg(feature = "aead-cipher-2022")]
PrepareHeader(Option<std::task::Waker>), PrepareHeader(Option<std::task::Waker>),
@@ -32,6 +33,7 @@ enum ProxyServerStreamWriteState {
} }
/// A stream for communicating with shadowsocks' proxy client /// A stream for communicating with shadowsocks' proxy client
#[derive(Debug)]
#[pin_project] #[pin_project]
pub struct ProxyServerStream<S> { pub struct ProxyServerStream<S> {
#[pin] #[pin]

View File

@@ -69,6 +69,7 @@ impl From<ProxySocketError> for io::Error {
pub type ProxySocketResult<T> = Result<T, ProxySocketError>; pub type ProxySocketResult<T> = Result<T, ProxySocketError>;
/// UDP client for communicating with ShadowSocks' server /// UDP client for communicating with ShadowSocks' server
#[derive(Debug)]
pub struct ProxySocket { pub struct ProxySocket {
socket_type: UdpSocketType, socket_type: UdpSocketType,
socket: ShadowUdpSocket, socket: ShadowUdpSocket,

View File

@@ -1,3 +1,5 @@
use std::fmt;
#[cfg(feature = "aead-cipher-2022")] #[cfg(feature = "aead-cipher-2022")]
use std::time::Duration; use std::time::Duration;
@@ -29,6 +31,12 @@ pub struct ReplayProtector {
nonce_set: spin::Mutex<LruCache<Vec<u8>, ()>>, nonce_set: spin::Mutex<LruCache<Vec<u8>, ()>>,
} }
impl fmt::Debug for ReplayProtector {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("ReplayProtector").finish()
}
}
impl ReplayProtector { impl ReplayProtector {
/// Create a new ReplayProtector /// Create a new ReplayProtector
#[allow(unused_variables)] #[allow(unused_variables)]

View File

@@ -27,6 +27,7 @@ const BF_ERROR_RATE_FOR_CLIENT: f64 = 1e-15;
// //
// It contains 2 bloom filters and each one holds 1/2 entries. // It contains 2 bloom filters and each one holds 1/2 entries.
// Use them as a ring buffer. // Use them as a ring buffer.
#[derive(Debug)]
pub struct PingPongBloom { pub struct PingPongBloom {
blooms: [Bloom<[u8]>; 2], blooms: [Bloom<[u8]>; 2],
bloom_count: [usize; 2], bloom_count: [usize; 2],

View File

@@ -619,22 +619,26 @@ function gen_config_server(node)
end end
if node.protocol == "tuic" then if node.protocol == "tuic" then
tls.alpn = (node.tuic_alpn and node.tuic_alpn ~= "") and { if node.uuid then
node.tuic_alpn local users = {}
} or nil for i = 1, #node.uuid do
protocol_table = { users[i] = {
users = { name = node.uuid[i],
{ uuid = node.uuid[i],
name = "user1",
uuid = node.uuid,
password = node.password password = node.password
} }
}, end
congestion_control = node.tuic_congestion_control or "cubic", tls.alpn = (node.tuic_alpn and node.tuic_alpn ~= "") and {
zero_rtt_handshake = (node.tuic_zero_rtt_handshake == "1") and true or false, node.tuic_alpn
heartbeat = node.tuic_heartbeat .. "s", } or nil
tls = tls protocol_table = {
} users = users,
congestion_control = node.tuic_congestion_control or "cubic",
zero_rtt_handshake = (node.tuic_zero_rtt_handshake == "1") and true or false,
heartbeat = node.tuic_heartbeat .. "s",
tls = tls
}
end
end end
if node.protocol == "hysteria2" then if node.protocol == "hysteria2" then

Binary file not shown. (image updated; new file size: 249 KiB)

View File

@@ -18,7 +18,7 @@ namespace ServiceLib.Common
Uri uri = new(url); Uri uri = new(url);
//Authorization Header //Authorization Header
var headers = new WebHeaderCollection(); var headers = new WebHeaderCollection();
if (!Utils.IsNullOrEmpty(uri.UserInfo)) if (Utils.IsNotEmpty(uri.UserInfo))
{ {
headers.Add(HttpRequestHeader.Authorization, "Basic " + Utils.Base64Encode(uri.UserInfo)); headers.Add(HttpRequestHeader.Authorization, "Basic " + Utils.Base64Encode(uri.UserInfo));
} }

View File

@@ -82,7 +82,7 @@ namespace ServiceLib.Common
} }
try try
{ {
if (!Utils.IsNullOrEmpty(ignoredName) && entry.Name.Contains(ignoredName)) if (Utils.IsNotEmpty(ignoredName) && entry.Name.Contains(ignoredName))
{ {
continue; continue;
} }
@@ -157,7 +157,7 @@ namespace ServiceLib.Common
// Get the files in the source directory and copy to the destination directory // Get the files in the source directory and copy to the destination directory
foreach (FileInfo file in dir.GetFiles()) foreach (FileInfo file in dir.GetFiles())
{ {
if (!Utils.IsNullOrEmpty(ignoredName) && file.Name.Contains(ignoredName)) if (Utils.IsNotEmpty(ignoredName) && file.Name.Contains(ignoredName))
{ {
continue; continue;
} }

View File

@@ -22,7 +22,7 @@ namespace ServiceLib.Common
public async Task<string?> TryGetAsync(string url) public async Task<string?> TryGetAsync(string url)
{ {
if (string.IsNullOrEmpty(url)) if (Utils.IsNullOrEmpty(url))
return null; return null;
try try

View File

@@ -14,6 +14,11 @@ namespace ServiceLib.Common
return string.IsNullOrWhiteSpace(value); return string.IsNullOrWhiteSpace(value);
} }
public static bool IsNotEmpty([NotNullWhen(true)] this string? value)
{
return !string.IsNullOrEmpty(value);
}
public static bool BeginWithAny(this string s, IEnumerable<char> chars) public static bool BeginWithAny(this string s, IEnumerable<char> chars)
{ {
if (s.IsNullOrEmpty()) return false; if (s.IsNullOrEmpty()) return false;

View File

@@ -417,6 +417,11 @@ namespace ServiceLib.Common
return false; return false;
} }
public static bool IsNotEmpty(string? text)
{
return !string.IsNullOrEmpty(text);
}
/// <summary> /// <summary>
/// Validates whether the IP address is valid /// Validates whether the IP address is valid
/// </summary> /// </summary>
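Both helpers added above exist so call sites can drop the double negative !Utils.IsNullOrEmpty(x). A minimal usage sketch (values are hypothetical; assumes ServiceLib.Common is in scope):

// Usage sketch for the new helpers.
string? subid = "sub-01";
bool viaStatic = Utils.IsNotEmpty(subid);   // true; replaces !Utils.IsNullOrEmpty(subid)
bool viaExtension = subid.IsNotEmpty();     // true; extension form added in StringEx
bool empty = Utils.IsNotEmpty("");          // false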

View File

@@ -23,7 +23,7 @@ namespace ServiceLib.Handler
{ {
//Load the configuration file //Load the configuration file
var result = Utils.LoadResource(Utils.GetConfigPath(configRes)); var result = Utils.LoadResource(Utils.GetConfigPath(configRes));
if (!Utils.IsNullOrEmpty(result)) if (Utils.IsNotEmpty(result))
{ {
//Convert to Json //Convert to Json
config = JsonUtils.Deserialize<Config>(result); config = JsonUtils.Deserialize<Config>(result);
@@ -1007,7 +1007,7 @@ namespace ServiceLib.Handler
{ {
return -1; return -1;
} }
if (!Utils.IsNullOrEmpty(profileItem.security) && profileItem.security != Global.None) if (Utils.IsNotEmpty(profileItem.security) && profileItem.security != Global.None)
{ {
profileItem.security = Global.None; profileItem.security = Global.None;
} }
@@ -1045,7 +1045,7 @@ namespace ServiceLib.Handler
{ {
profileItem.configVersion = 2; profileItem.configVersion = 2;
if (!Utils.IsNullOrEmpty(profileItem.streamSecurity)) if (Utils.IsNotEmpty(profileItem.streamSecurity))
{ {
if (profileItem.streamSecurity != Global.StreamSecurity if (profileItem.streamSecurity != Global.StreamSecurity
&& profileItem.streamSecurity != Global.StreamSecurityReality) && profileItem.streamSecurity != Global.StreamSecurityReality)
@@ -1065,7 +1065,7 @@ namespace ServiceLib.Handler
} }
} }
if (!Utils.IsNullOrEmpty(profileItem.network) && !Global.Networks.Contains(profileItem.network)) if (Utils.IsNotEmpty(profileItem.network) && !Global.Networks.Contains(profileItem.network))
{ {
profileItem.network = Global.DefaultNetwork; profileItem.network = Global.DefaultNetwork;
} }
@@ -1186,7 +1186,7 @@ namespace ServiceLib.Handler
string subFilter = string.Empty; string subFilter = string.Empty;
//remove sub items //remove sub items
if (isSub && !Utils.IsNullOrEmpty(subid)) if (isSub && Utils.IsNotEmpty(subid))
{ {
RemoveServerViaSubid(config, subid, isSub); RemoveServerViaSubid(config, subid, isSub);
subFilter = LazyConfig.Instance.GetSubItem(subid)?.filter ?? ""; subFilter = LazyConfig.Instance.GetSubItem(subid)?.filter ?? "";
@@ -1219,7 +1219,7 @@ namespace ServiceLib.Handler
} }
//exist sub items //exist sub items
if (isSub && !Utils.IsNullOrEmpty(subid)) if (isSub && Utils.IsNotEmpty(subid))
{ {
var existItem = lstOriSub?.FirstOrDefault(t => t.isSub == isSub var existItem = lstOriSub?.FirstOrDefault(t => t.isSub == isSub
&& config.uiItem.enableUpdateSubOnlyRemarksExist ? t.remarks == profileItem.remarks : CompareProfileItem(t, profileItem, true)); && config.uiItem.enableUpdateSubOnlyRemarksExist ? t.remarks == profileItem.remarks : CompareProfileItem(t, profileItem, true));
@@ -1241,7 +1241,7 @@ namespace ServiceLib.Handler
} }
} }
//filter //filter
if (!Utils.IsNullOrEmpty(subFilter)) if (Utils.IsNotEmpty(subFilter))
{ {
if (!Regex.IsMatch(profileItem.remarks, subFilter)) if (!Regex.IsMatch(profileItem.remarks, subFilter))
{ {
@@ -1305,7 +1305,7 @@ namespace ServiceLib.Handler
} }
if (lstProfiles != null && lstProfiles.Count > 0) if (lstProfiles != null && lstProfiles.Count > 0)
{ {
if (isSub && !Utils.IsNullOrEmpty(subid)) if (isSub && Utils.IsNotEmpty(subid))
{ {
RemoveServerViaSubid(config, subid, isSub); RemoveServerViaSubid(config, subid, isSub);
} }
@@ -1361,7 +1361,7 @@ namespace ServiceLib.Handler
return -1; return -1;
} }
if (isSub && !Utils.IsNullOrEmpty(subid)) if (isSub && Utils.IsNotEmpty(subid))
{ {
RemoveServerViaSubid(config, subid, isSub); RemoveServerViaSubid(config, subid, isSub);
} }
@@ -1389,7 +1389,7 @@ namespace ServiceLib.Handler
return -1; return -1;
} }
if (isSub && !Utils.IsNullOrEmpty(subid)) if (isSub && Utils.IsNotEmpty(subid))
{ {
RemoveServerViaSubid(config, subid, isSub); RemoveServerViaSubid(config, subid, isSub);
} }
@@ -1421,7 +1421,7 @@ namespace ServiceLib.Handler
return -1; return -1;
} }
List<ProfileItem>? lstOriSub = null; List<ProfileItem>? lstOriSub = null;
if (isSub && !Utils.IsNullOrEmpty(subid)) if (isSub && Utils.IsNotEmpty(subid))
{ {
lstOriSub = LazyConfig.Instance.ProfileItems(subid); lstOriSub = LazyConfig.Instance.ProfileItems(subid);
} }

View File

@@ -43,7 +43,7 @@
} }
string addressFileName = node.address; string addressFileName = node.address;
if (string.IsNullOrEmpty(addressFileName)) if (Utils.IsNullOrEmpty(addressFileName))
{ {
msg = ResUI.FailedGetDefaultConfiguration; msg = ResUI.FailedGetDefaultConfiguration;
return -1; return -1;
@@ -117,7 +117,7 @@
if (_config.tunModeItem.enableTun) if (_config.tunModeItem.enableTun)
{ {
string tun = Utils.GetEmbedText(Global.ClashTunYaml); string tun = Utils.GetEmbedText(Global.ClashTunYaml);
if (!string.IsNullOrEmpty(tun)) if (Utils.IsNotEmpty(tun))
{ {
var tunContent = YamlUtils.FromYaml<Dictionary<string, object>>(tun); var tunContent = YamlUtils.FromYaml<Dictionary<string, object>>(tun);
if (tunContent != null) if (tunContent != null)

View File

@@ -370,7 +370,7 @@ namespace ServiceLib.Handler.CoreConfig
} }
string addressFileName = node.address; string addressFileName = node.address;
if (string.IsNullOrEmpty(addressFileName)) if (Utils.IsNullOrEmpty(addressFileName))
{ {
msg = ResUI.FailedGetDefaultConfiguration; msg = ResUI.FailedGetDefaultConfiguration;
return -1; return -1;
@@ -489,7 +489,7 @@ namespace ServiceLib.Handler.CoreConfig
if (_config.routingBasicItem.enableRoutingAdvanced) if (_config.routingBasicItem.enableRoutingAdvanced)
{ {
var routing = ConfigHandler.GetDefaultRouting(_config); var routing = ConfigHandler.GetDefaultRouting(_config);
if (!Utils.IsNullOrEmpty(routing.domainStrategy4Singbox)) if (Utils.IsNotEmpty(routing.domainStrategy4Singbox))
{ {
inbound.domain_strategy = routing.domainStrategy4Singbox; inbound.domain_strategy = routing.domainStrategy4Singbox;
} }
@@ -512,7 +512,7 @@ namespace ServiceLib.Handler.CoreConfig
singboxConfig.inbounds.Add(inbound4); singboxConfig.inbounds.Add(inbound4);
//auth //auth
if (!Utils.IsNullOrEmpty(_config.inbound[0].user) && !Utils.IsNullOrEmpty(_config.inbound[0].pass)) if (Utils.IsNotEmpty(_config.inbound[0].user) && Utils.IsNotEmpty(_config.inbound[0].pass))
{ {
inbound3.users = new() { new() { username = _config.inbound[0].user, password = _config.inbound[0].pass } }; inbound3.users = new() { new() { username = _config.inbound[0].user, password = _config.inbound[0].pass } };
inbound4.users = new() { new() { username = _config.inbound[0].user, password = _config.inbound[0].pass } }; inbound4.users = new() { new() { username = _config.inbound[0].user, password = _config.inbound[0].pass } };
@@ -604,8 +604,8 @@ namespace ServiceLib.Handler.CoreConfig
case EConfigType.Socks: case EConfigType.Socks:
{ {
outbound.version = "5"; outbound.version = "5";
if (!Utils.IsNullOrEmpty(node.security) if (Utils.IsNotEmpty(node.security)
&& !Utils.IsNullOrEmpty(node.id)) && Utils.IsNotEmpty(node.id))
{ {
outbound.username = node.security; outbound.username = node.security;
outbound.password = node.id; outbound.password = node.id;
@@ -614,8 +614,8 @@ namespace ServiceLib.Handler.CoreConfig
} }
case EConfigType.Http: case EConfigType.Http:
{ {
if (!Utils.IsNullOrEmpty(node.security) if (Utils.IsNotEmpty(node.security)
&& !Utils.IsNullOrEmpty(node.id)) && Utils.IsNotEmpty(node.id))
{ {
outbound.username = node.security; outbound.username = node.security;
outbound.password = node.id; outbound.password = node.id;
@@ -649,7 +649,7 @@ namespace ServiceLib.Handler.CoreConfig
{ {
outbound.password = node.id; outbound.password = node.id;
if (!Utils.IsNullOrEmpty(node.path)) if (Utils.IsNotEmpty(node.path))
{ {
outbound.obfs = new() outbound.obfs = new()
{ {
@@ -695,7 +695,7 @@ namespace ServiceLib.Handler.CoreConfig
{ {
try try
{ {
if (_config.coreBasicItem.muxEnabled && !Utils.IsNullOrEmpty(_config.mux4SboxItem.protocol)) if (_config.coreBasicItem.muxEnabled && Utils.IsNotEmpty(_config.mux4SboxItem.protocol))
{ {
var mux = new Multiplex4Sbox() var mux = new Multiplex4Sbox()
{ {
@@ -721,11 +721,11 @@ namespace ServiceLib.Handler.CoreConfig
if (node.streamSecurity == Global.StreamSecurityReality || node.streamSecurity == Global.StreamSecurity) if (node.streamSecurity == Global.StreamSecurityReality || node.streamSecurity == Global.StreamSecurity)
{ {
var server_name = string.Empty; var server_name = string.Empty;
if (!Utils.IsNullOrEmpty(node.sni)) if (Utils.IsNotEmpty(node.sni))
{ {
server_name = node.sni; server_name = node.sni;
} }
else if (!Utils.IsNullOrEmpty(node.requestHost)) else if (Utils.IsNotEmpty(node.requestHost))
{ {
server_name = Utils.String2List(node.requestHost)[0]; server_name = Utils.String2List(node.requestHost)[0];
} }
@@ -736,7 +736,7 @@ namespace ServiceLib.Handler.CoreConfig
insecure = Utils.ToBool(node.allowInsecure.IsNullOrEmpty() ? _config.coreBasicItem.defAllowInsecure.ToString().ToLower() : node.allowInsecure), insecure = Utils.ToBool(node.allowInsecure.IsNullOrEmpty() ? _config.coreBasicItem.defAllowInsecure.ToString().ToLower() : node.allowInsecure),
alpn = node.GetAlpn(), alpn = node.GetAlpn(),
}; };
if (!Utils.IsNullOrEmpty(node.fingerprint)) if (Utils.IsNotEmpty(node.fingerprint))
{ {
tls.utls = new Utls4Sbox() tls.utls = new Utls4Sbox()
{ {
@@ -798,7 +798,7 @@ namespace ServiceLib.Handler.CoreConfig
case nameof(ETransport.ws): case nameof(ETransport.ws):
transport.type = nameof(ETransport.ws); transport.type = nameof(ETransport.ws);
transport.path = Utils.IsNullOrEmpty(node.path) ? null : node.path; transport.path = Utils.IsNullOrEmpty(node.path) ? null : node.path;
if (!Utils.IsNullOrEmpty(node.requestHost)) if (Utils.IsNotEmpty(node.requestHost))
{ {
transport.headers = new() transport.headers = new()
{ {
@@ -1020,7 +1020,7 @@ namespace ServiceLib.Handler.CoreConfig
outbound = item.outboundTag, outbound = item.outboundTag,
}; };
if (!Utils.IsNullOrEmpty(item.port)) if (Utils.IsNotEmpty(item.port))
{ {
if (item.port.Contains("-")) if (item.port.Contains("-"))
{ {
@@ -1031,7 +1031,7 @@ namespace ServiceLib.Handler.CoreConfig
rule.port = new List<int> { Utils.ToInt(item.port) }; rule.port = new List<int> { Utils.ToInt(item.port) };
} }
} }
if (!Utils.IsNullOrEmpty(item.network)) if (Utils.IsNotEmpty(item.network))
{ {
rule.network = Utils.String2List(item.network); rule.network = Utils.String2List(item.network);
} }
@@ -1221,7 +1221,7 @@ namespace ServiceLib.Handler.CoreConfig
}); });
var lstDomain = singboxConfig.outbounds var lstDomain = singboxConfig.outbounds
.Where(t => !Utils.IsNullOrEmpty(t.server) && Utils.IsDomain(t.server)) .Where(t => Utils.IsNotEmpty(t.server) && Utils.IsDomain(t.server))
.Select(t => t.server) .Select(t => t.server)
.Distinct() .Distinct()
.ToList(); .ToList();
@@ -1324,10 +1324,10 @@ namespace ServiceLib.Handler.CoreConfig
if (_config.routingBasicItem.enableRoutingAdvanced) if (_config.routingBasicItem.enableRoutingAdvanced)
{ {
var routing = ConfigHandler.GetDefaultRouting(_config); var routing = ConfigHandler.GetDefaultRouting(_config);
if (!Utils.IsNullOrEmpty(routing.customRulesetPath4Singbox)) if (Utils.IsNotEmpty(routing.customRulesetPath4Singbox))
{ {
var result = Utils.LoadResource(routing.customRulesetPath4Singbox); var result = Utils.LoadResource(routing.customRulesetPath4Singbox);
if (!Utils.IsNullOrEmpty(result)) if (Utils.IsNotEmpty(result))
{ {
customRulesets = (JsonUtils.Deserialize<List<Ruleset4Sbox>>(result) ?? []) customRulesets = (JsonUtils.Deserialize<List<Ruleset4Sbox>>(result) ?? [])
.Where(t => t.tag != null) .Where(t => t.tag != null)

View File

@@ -392,7 +392,7 @@ namespace ServiceLib.Handler.CoreConfig
v2rayConfig.inbounds.Add(inbound4); v2rayConfig.inbounds.Add(inbound4);
//auth //auth
if (!Utils.IsNullOrEmpty(_config.inbound[0].user) && !Utils.IsNullOrEmpty(_config.inbound[0].pass)) if (Utils.IsNotEmpty(_config.inbound[0].user) && Utils.IsNotEmpty(_config.inbound[0].pass))
{ {
inbound3.settings.auth = "password"; inbound3.settings.auth = "password";
inbound3.settings.accounts = new List<AccountsItem4Ray> { new AccountsItem4Ray() { user = _config.inbound[0].user, pass = _config.inbound[0].pass } }; inbound3.settings.accounts = new List<AccountsItem4Ray> { new AccountsItem4Ray() { user = _config.inbound[0].user, pass = _config.inbound[0].pass } };
@@ -453,7 +453,7 @@ namespace ServiceLib.Handler.CoreConfig
var routing = ConfigHandler.GetDefaultRouting(_config); var routing = ConfigHandler.GetDefaultRouting(_config);
if (routing != null) if (routing != null)
{ {
if (!Utils.IsNullOrEmpty(routing.domainStrategy)) if (Utils.IsNotEmpty(routing.domainStrategy))
{ {
v2rayConfig.routing.domainStrategy = routing.domainStrategy; v2rayConfig.routing.domainStrategy = routing.domainStrategy;
} }
@@ -550,7 +550,7 @@ namespace ServiceLib.Handler.CoreConfig
} }
if (!hasDomainIp) if (!hasDomainIp)
{ {
if (!Utils.IsNullOrEmpty(rule.port) if (Utils.IsNotEmpty(rule.port)
|| rule.protocol?.Count > 0 || rule.protocol?.Count > 0
|| rule.inboundTag?.Count > 0 || rule.inboundTag?.Count > 0
) )
@@ -660,8 +660,8 @@ namespace ServiceLib.Handler.CoreConfig
serversItem.method = null; serversItem.method = null;
serversItem.password = null; serversItem.password = null;
if (!Utils.IsNullOrEmpty(node.security) if (Utils.IsNotEmpty(node.security)
&& !Utils.IsNullOrEmpty(node.id)) && Utils.IsNotEmpty(node.id))
{ {
SocksUsersItem4Ray socksUsersItem = new() SocksUsersItem4Ray socksUsersItem = new()
{ {
@@ -712,7 +712,7 @@ namespace ServiceLib.Handler.CoreConfig
if (node.streamSecurity == Global.StreamSecurityReality if (node.streamSecurity == Global.StreamSecurityReality
|| node.streamSecurity == Global.StreamSecurity) || node.streamSecurity == Global.StreamSecurity)
{ {
-if (!Utils.IsNullOrEmpty(node.flow))
+if (Utils.IsNotEmpty(node.flow))
{
usersItem.flow = node.flow;
@@ -818,11 +818,11 @@ namespace ServiceLib.Handler.CoreConfig
alpn = node.GetAlpn(),
fingerprint = node.fingerprint.IsNullOrEmpty() ? _config.coreBasicItem.defFingerprint : node.fingerprint
};
-if (!Utils.IsNullOrEmpty(sni))
+if (Utils.IsNotEmpty(sni))
{
tlsSettings.serverName = sni;
}
-else if (!Utils.IsNullOrEmpty(host))
+else if (Utils.IsNotEmpty(host))
{
tlsSettings.serverName = Utils.String2List(host)[0];
}
@@ -867,7 +867,7 @@ namespace ServiceLib.Handler.CoreConfig
{
type = node.headerType
};
-if (!Utils.IsNullOrEmpty(node.path))
+if (Utils.IsNotEmpty(node.path))
{
kcpSettings.seed = node.path;
}
@@ -878,15 +878,15 @@ namespace ServiceLib.Handler.CoreConfig
WsSettings4Ray wsSettings = new();
wsSettings.headers = new Headers4Ray();
string path = node.path;
-if (!Utils.IsNullOrEmpty(host))
+if (Utils.IsNotEmpty(host))
{
wsSettings.headers.Host = host;
}
-if (!Utils.IsNullOrEmpty(path))
+if (Utils.IsNotEmpty(path))
{
wsSettings.path = path;
}
-if (!Utils.IsNullOrEmpty(useragent))
+if (Utils.IsNotEmpty(useragent))
{
wsSettings.headers.UserAgent = useragent;
}
@@ -897,11 +897,11 @@ namespace ServiceLib.Handler.CoreConfig
case nameof(ETransport.httpupgrade):
HttpupgradeSettings4Ray httpupgradeSettings = new();
-if (!Utils.IsNullOrEmpty(node.path))
+if (Utils.IsNotEmpty(node.path))
{
httpupgradeSettings.path = node.path;
}
-if (!Utils.IsNullOrEmpty(host))
+if (Utils.IsNotEmpty(host))
{
httpupgradeSettings.host = host;
}
@@ -916,11 +916,11 @@ namespace ServiceLib.Handler.CoreConfig
maxConcurrentUploads = 10
};
-if (!Utils.IsNullOrEmpty(node.path))
+if (Utils.IsNotEmpty(node.path))
{
splithttpSettings.path = node.path;
}
-if (!Utils.IsNullOrEmpty(host))
+if (Utils.IsNotEmpty(host))
{
splithttpSettings.host = host;
}
@@ -931,7 +931,7 @@ namespace ServiceLib.Handler.CoreConfig
case nameof(ETransport.h2):
HttpSettings4Ray httpSettings = new();
-if (!Utils.IsNullOrEmpty(host))
+if (Utils.IsNotEmpty(host))
{
httpSettings.host = Utils.String2List(host);
}
@@ -954,7 +954,7 @@ namespace ServiceLib.Handler.CoreConfig
streamSettings.quicSettings = quicsettings;
if (node.streamSecurity == Global.StreamSecurity)
{
-if (!Utils.IsNullOrEmpty(sni))
+if (Utils.IsNotEmpty(sni))
{
streamSettings.tlsSettings.serverName = sni;
}
@@ -1000,7 +1000,7 @@ namespace ServiceLib.Handler.CoreConfig
request = request.Replace("$requestUserAgent$", $"\"{useragent}\"");
//Path
string pathHttp = @"/";
-if (!Utils.IsNullOrEmpty(node.path))
+if (Utils.IsNotEmpty(node.path))
{
string[] arrPath = node.path.Split(',');
pathHttp = string.Join("\",\"", arrPath);
@@ -1033,7 +1033,7 @@ namespace ServiceLib.Handler.CoreConfig
}
//Outbound Freedom domainStrategy
-if (!Utils.IsNullOrEmpty(domainStrategy4Freedom))
+if (Utils.IsNotEmpty(domainStrategy4Freedom))
{
var outbound = v2rayConfig.outbounds[1];
outbound.settings.domainStrategy = domainStrategy4Freedom;
@@ -1157,7 +1157,7 @@ namespace ServiceLib.Handler.CoreConfig
{
//fragment proxy
if (_config.coreBasicItem.enableFragment
-&& !Utils.IsNullOrEmpty(v2rayConfig.outbounds[0].streamSettings?.security))
+&& Utils.IsNotEmpty(v2rayConfig.outbounds[0].streamSettings?.security))
{
var fragmentOutbound = new Outbounds4Ray
{
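Every hunk in this file makes the same mechanical substitution: call sites that negated Utils.IsNullOrEmpty(...) now call Utils.IsNotEmpty(...). The helper itself is not part of this diff; a minimal sketch of what such a pair presumably looks like (an illustrative assumption, not the repository's actual implementation):

// Sketch only (assumed shape): a positive-reading wrapper so call sites avoid "!IsNullOrEmpty(...)".
public static class Utils
{
    public static bool IsNullOrEmpty(string? text) => string.IsNullOrEmpty(text);

    public static bool IsNotEmpty(string? text) => !string.IsNullOrEmpty(text);
}

Assuming that shape, if (Utils.IsNotEmpty(node.flow)) is behaviorally identical to the removed if (!Utils.IsNullOrEmpty(node.flow)); the change is about readability only.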
View File
@@ -302,7 +302,7 @@ namespace ServiceLib.Handler
{
proc.OutputDataReceived += (sender, e) =>
{
-if (!Utils.IsNullOrEmpty(e.Data))
+if (Utils.IsNotEmpty(e.Data))
{
string msg = e.Data + Environment.NewLine;
ShowMsg(false, msg);
@@ -310,7 +310,7 @@ namespace ServiceLib.Handler
};
proc.ErrorDataReceived += (sender, e) =>
{
-if (!Utils.IsNullOrEmpty(e.Data))
+if (Utils.IsNotEmpty(e.Data))
{
string msg = e.Data + Environment.NewLine;
ShowMsg(false, msg);
View File
@@ -117,7 +117,7 @@ namespace ServiceLib.Handler
try
{
var result1 = await DownloadStringAsync(url, blProxy, userAgent);
-if (!Utils.IsNullOrEmpty(result1))
+if (Utils.IsNotEmpty(result1))
{
return result1;
}
@@ -135,7 +135,7 @@ namespace ServiceLib.Handler
try
{
var result2 = await DownloadStringViaDownloader(url, blProxy, userAgent);
-if (!Utils.IsNullOrEmpty(result2))
+if (Utils.IsNotEmpty(result2))
{
return result2;
}
@@ -155,7 +155,7 @@ namespace ServiceLib.Handler
using var wc = new WebClient();
wc.Proxy = GetWebProxy(blProxy);
var result3 = await wc.DownloadStringTaskAsync(url);
-if (!Utils.IsNullOrEmpty(result3))
+if (Utils.IsNotEmpty(result3))
{
return result3;
}
@@ -197,7 +197,7 @@ namespace ServiceLib.Handler
Uri uri = new(url);
//Authorization Header
-if (!Utils.IsNullOrEmpty(uri.UserInfo))
+if (Utils.IsNotEmpty(uri.UserInfo))
{
client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Basic", Utils.Base64Encode(uri.UserInfo));
}
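The last hunk above attaches basic authentication when the subscription URL embeds credentials (user:password in the URL). A small self-contained sketch of that step using only BCL types, where Convert.ToBase64String stands in for the project's Utils.Base64Encode and the URL is illustrative:

using System;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;

class BasicAuthSketch
{
    static void Main()
    {
        using var client = new HttpClient();
        var uri = new Uri("https://user:pass@example.com/subscription"); // illustrative URL
        // Mirrors the hunk: if the URL carries user info, send it as a Basic Authorization header.
        if (!string.IsNullOrEmpty(uri.UserInfo))
        {
            var token = Convert.ToBase64String(Encoding.UTF8.GetBytes(uri.UserInfo));
            client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Basic", token);
        }
        Console.WriteLine(client.DefaultRequestHeaders.Authorization);
    }
}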
View File
@@ -16,12 +16,12 @@ namespace ServiceLib.Handler.Fmt
protected static int GetStdTransport(ProfileItem item, string? securityDef, ref Dictionary<string, string> dicQuery)
{
-if (!Utils.IsNullOrEmpty(item.flow))
+if (Utils.IsNotEmpty(item.flow))
{
dicQuery.Add("flow", item.flow);
}
-if (!Utils.IsNullOrEmpty(item.streamSecurity))
+if (Utils.IsNotEmpty(item.streamSecurity))
{
dicQuery.Add("security", item.streamSecurity);
}
@@ -32,27 +32,27 @@ namespace ServiceLib.Handler.Fmt
dicQuery.Add("security", securityDef);
}
}
-if (!Utils.IsNullOrEmpty(item.sni))
+if (Utils.IsNotEmpty(item.sni))
{
dicQuery.Add("sni", item.sni);
}
-if (!Utils.IsNullOrEmpty(item.alpn))
+if (Utils.IsNotEmpty(item.alpn))
{
dicQuery.Add("alpn", Utils.UrlEncode(item.alpn));
}
-if (!Utils.IsNullOrEmpty(item.fingerprint))
+if (Utils.IsNotEmpty(item.fingerprint))
{
dicQuery.Add("fp", Utils.UrlEncode(item.fingerprint));
}
-if (!Utils.IsNullOrEmpty(item.publicKey))
+if (Utils.IsNotEmpty(item.publicKey))
{
dicQuery.Add("pbk", Utils.UrlEncode(item.publicKey));
}
-if (!Utils.IsNullOrEmpty(item.shortId))
+if (Utils.IsNotEmpty(item.shortId))
{
dicQuery.Add("sid", Utils.UrlEncode(item.shortId));
}
-if (!Utils.IsNullOrEmpty(item.spiderX))
+if (Utils.IsNotEmpty(item.spiderX))
{
dicQuery.Add("spx", Utils.UrlEncode(item.spiderX));
}
@@ -61,21 +61,21 @@ namespace ServiceLib.Handler.Fmt
dicQuery.Add("allowInsecure", "1");
}
-dicQuery.Add("type", !Utils.IsNullOrEmpty(item.network) ? item.network : nameof(ETransport.tcp));
+dicQuery.Add("type", Utils.IsNotEmpty(item.network) ? item.network : nameof(ETransport.tcp));
switch (item.network)
{
case nameof(ETransport.tcp):
-dicQuery.Add("headerType", !Utils.IsNullOrEmpty(item.headerType) ? item.headerType : Global.None);
+dicQuery.Add("headerType", Utils.IsNotEmpty(item.headerType) ? item.headerType : Global.None);
-if (!Utils.IsNullOrEmpty(item.requestHost))
+if (Utils.IsNotEmpty(item.requestHost))
{
dicQuery.Add("host", Utils.UrlEncode(item.requestHost));
}
break;
case nameof(ETransport.kcp):
-dicQuery.Add("headerType", !Utils.IsNullOrEmpty(item.headerType) ? item.headerType : Global.None);
+dicQuery.Add("headerType", Utils.IsNotEmpty(item.headerType) ? item.headerType : Global.None);
-if (!Utils.IsNullOrEmpty(item.path))
+if (Utils.IsNotEmpty(item.path))
{
dicQuery.Add("seed", Utils.UrlEncode(item.path));
}
@@ -84,11 +84,11 @@ namespace ServiceLib.Handler.Fmt
case nameof(ETransport.ws):
case nameof(ETransport.httpupgrade):
case nameof(ETransport.splithttp):
-if (!Utils.IsNullOrEmpty(item.requestHost))
+if (Utils.IsNotEmpty(item.requestHost))
{
dicQuery.Add("host", Utils.UrlEncode(item.requestHost));
}
-if (!Utils.IsNullOrEmpty(item.path))
+if (Utils.IsNotEmpty(item.path))
{
dicQuery.Add("path", Utils.UrlEncode(item.path));
}
@@ -97,24 +97,24 @@ namespace ServiceLib.Handler.Fmt
case nameof(ETransport.http):
case nameof(ETransport.h2):
dicQuery["type"] = nameof(ETransport.http);
-if (!Utils.IsNullOrEmpty(item.requestHost))
+if (Utils.IsNotEmpty(item.requestHost))
{
dicQuery.Add("host", Utils.UrlEncode(item.requestHost));
}
-if (!Utils.IsNullOrEmpty(item.path))
+if (Utils.IsNotEmpty(item.path))
{
dicQuery.Add("path", Utils.UrlEncode(item.path));
}
break;
case nameof(ETransport.quic):
-dicQuery.Add("headerType", !Utils.IsNullOrEmpty(item.headerType) ? item.headerType : Global.None);
+dicQuery.Add("headerType", Utils.IsNotEmpty(item.headerType) ? item.headerType : Global.None);
dicQuery.Add("quicSecurity", Utils.UrlEncode(item.requestHost));
dicQuery.Add("key", Utils.UrlEncode(item.path));
break;
case nameof(ETransport.grpc):
-if (!Utils.IsNullOrEmpty(item.path))
+if (Utils.IsNotEmpty(item.path))
{
dicQuery.Add("authority", Utils.UrlEncode(item.requestHost));
dicQuery.Add("serviceName", Utils.UrlEncode(item.path));
View File
@@ -31,20 +31,20 @@
string url = string.Empty;
string remark = string.Empty;
-if (!Utils.IsNullOrEmpty(item.remarks))
+if (Utils.IsNotEmpty(item.remarks))
{
remark = "#" + Utils.UrlEncode(item.remarks);
}
var dicQuery = new Dictionary<string, string>();
-if (!Utils.IsNullOrEmpty(item.sni))
+if (Utils.IsNotEmpty(item.sni))
{
dicQuery.Add("sni", item.sni);
}
-if (!Utils.IsNullOrEmpty(item.alpn))
+if (Utils.IsNotEmpty(item.alpn))
{
dicQuery.Add("alpn", Utils.UrlEncode(item.alpn));
}
-if (!Utils.IsNullOrEmpty(item.path))
+if (Utils.IsNotEmpty(item.path))
{
dicQuery.Add("obfs", "salamander");
dicQuery.Add("obfs-password", Utils.UrlEncode(item.path));
View File
@@ -30,7 +30,7 @@ namespace ServiceLib.Handler.Fmt
string url = string.Empty;
string remark = string.Empty;
-if (!Utils.IsNullOrEmpty(item.remarks))
+if (Utils.IsNotEmpty(item.remarks))
{
remark = "#" + Utils.UrlEncode(item.remarks);
}
@@ -59,7 +59,7 @@ namespace ServiceLib.Handler.Fmt
ProfileItem item = new();
var base64 = match.Groups["base64"].Value.TrimEnd('/');
var tag = match.Groups["tag"].Value;
-if (!Utils.IsNullOrEmpty(tag))
+if (Utils.IsNotEmpty(tag))
{
item.remarks = Utils.UrlDecode(tag);
}
@@ -128,7 +128,7 @@ namespace ServiceLib.Handler.Fmt
{
//obfs-host exists
var obfsHost = queryParameters["plugin"]?.Split(';').FirstOrDefault(t => t.Contains("obfs-host"));
-if (queryParameters["plugin"].Contains("obfs=http") && !Utils.IsNullOrEmpty(obfsHost))
+if (queryParameters["plugin"].Contains("obfs=http") && Utils.IsNotEmpty(obfsHost))
{
obfsHost = obfsHost?.Replace("obfs-host=", "");
item.network = Global.DefaultNetwork;
View File
@@ -28,7 +28,7 @@
string url = string.Empty;
string remark = string.Empty;
-if (!Utils.IsNullOrEmpty(item.remarks))
+if (Utils.IsNotEmpty(item.remarks))
{
remark = "#" + Utils.UrlEncode(item.remarks);
}
View File
@@ -30,7 +30,7 @@
string url = string.Empty;
string remark = string.Empty;
-if (!Utils.IsNullOrEmpty(item.remarks))
+if (Utils.IsNotEmpty(item.remarks))
{
remark = "#" + Utils.UrlEncode(item.remarks);
}
View File
@@ -36,16 +36,16 @@
string url = string.Empty;
string remark = string.Empty;
-if (!Utils.IsNullOrEmpty(item.remarks))
+if (Utils.IsNotEmpty(item.remarks))
{
remark = "#" + Utils.UrlEncode(item.remarks);
}
var dicQuery = new Dictionary<string, string>();
-if (!Utils.IsNullOrEmpty(item.sni))
+if (Utils.IsNotEmpty(item.sni))
{
dicQuery.Add("sni", item.sni);
}
-if (!Utils.IsNullOrEmpty(item.alpn))
+if (Utils.IsNotEmpty(item.alpn))
{
dicQuery.Add("alpn", Utils.UrlEncode(item.alpn));
}
View File
@@ -33,12 +33,12 @@
string url = string.Empty;
string remark = string.Empty;
-if (!Utils.IsNullOrEmpty(item.remarks))
+if (Utils.IsNotEmpty(item.remarks))
{
remark = "#" + Utils.UrlEncode(item.remarks);
}
var dicQuery = new Dictionary<string, string>();
-if (!Utils.IsNullOrEmpty(item.security))
+if (Utils.IsNotEmpty(item.security))
{
dicQuery.Add("encryption", item.security);
}
View File
@@ -78,12 +78,12 @@
item.alterId = Utils.ToInt(vmessQRCode.aid);
item.security = Utils.ToString(vmessQRCode.scy);
-item.security = !Utils.IsNullOrEmpty(vmessQRCode.scy) ? vmessQRCode.scy : Global.DefaultSecurity;
+item.security = Utils.IsNotEmpty(vmessQRCode.scy) ? vmessQRCode.scy : Global.DefaultSecurity;
-if (!Utils.IsNullOrEmpty(vmessQRCode.net))
+if (Utils.IsNotEmpty(vmessQRCode.net))
{
item.network = vmessQRCode.net;
}
-if (!Utils.IsNullOrEmpty(vmessQRCode.type))
+if (Utils.IsNotEmpty(vmessQRCode.type))
{
item.headerType = vmessQRCode.type;
}
View File
@@ -34,25 +34,25 @@
string url = string.Empty;
string remark = string.Empty;
-if (!Utils.IsNullOrEmpty(item.remarks))
+if (Utils.IsNotEmpty(item.remarks))
{
remark = "#" + Utils.UrlEncode(item.remarks);
}
var dicQuery = new Dictionary<string, string>();
-if (!Utils.IsNullOrEmpty(item.publicKey))
+if (Utils.IsNotEmpty(item.publicKey))
{
dicQuery.Add("publickey", Utils.UrlEncode(item.publicKey));
}
-if (!Utils.IsNullOrEmpty(item.path))
+if (Utils.IsNotEmpty(item.path))
{
dicQuery.Add("reserved", Utils.UrlEncode(item.path));
}
-if (!Utils.IsNullOrEmpty(item.requestHost))
+if (Utils.IsNotEmpty(item.requestHost))
{
dicQuery.Add("address", Utils.UrlEncode(item.requestHost));
}
-if (!Utils.IsNullOrEmpty(item.shortId))
+if (Utils.IsNotEmpty(item.shortId))
{
dicQuery.Add("mtu", Utils.UrlEncode(item.shortId));
}
View File
@@ -104,11 +104,11 @@
from ProfileItem a
left join SubItem b on a.subid = b.id
where 1=1 ";
-if (!Utils.IsNullOrEmpty(subid))
+if (Utils.IsNotEmpty(subid))
{
sql += $" and a.subid = '{subid}'";
}
-if (!Utils.IsNullOrEmpty(filter))
+if (Utils.IsNotEmpty(filter))
{
if (filter.Contains('\''))
{
View File
@@ -35,7 +35,7 @@ namespace ServiceLib.Handler
private void IndexIdEnqueue(string indexId)
{
-if (!Utils.IsNullOrEmpty(indexId) && !_queIndexIds.Contains(indexId))
+if (Utils.IsNotEmpty(indexId) && !_queIndexIds.Contains(indexId))
{
_queIndexIds.Enqueue(indexId);
}
View File
@@ -88,7 +88,7 @@ namespace ServiceLib.Handler.Statistics
while (!res.CloseStatus.HasValue)
{
var result = Encoding.UTF8.GetString(buffer, 0, res.Count);
-if (!Utils.IsNullOrEmpty(result))
+if (Utils.IsNotEmpty(result))
{
ParseOutput(result, out ulong up, out ulong down);
View File
@@ -143,7 +143,7 @@ namespace ServiceLib.Handler
string url = item.url.TrimEx();
string userAgent = item.userAgent.TrimEx();
string hashCode = $"{item.remarks}->";
-if (Utils.IsNullOrEmpty(id) || Utils.IsNullOrEmpty(url) || (!Utils.IsNullOrEmpty(subId) && item.id != subId))
+if (Utils.IsNullOrEmpty(id) || Utils.IsNullOrEmpty(url) || (Utils.IsNotEmpty(subId) && item.id != subId))
{
//_updateFunc(false, $"{hashCode}{ResUI.MsgNoValidSubscription}");
continue;
@@ -169,7 +169,7 @@ namespace ServiceLib.Handler
//one url
url = Utils.GetPunycode(url);
//convert
-if (!Utils.IsNullOrEmpty(item.convertTarget))
+if (Utils.IsNotEmpty(item.convertTarget))
{
var subConvertUrl = Utils.IsNullOrEmpty(config.constItem.subConvertUrl) ? Global.SubConvertUrls.FirstOrDefault() : config.constItem.subConvertUrl;
url = string.Format(subConvertUrl!, Utils.UrlEncode(url));
@@ -189,9 +189,9 @@ namespace ServiceLib.Handler
}
//more url
-if (Utils.IsNullOrEmpty(item.convertTarget) && !Utils.IsNullOrEmpty(item.moreUrl.TrimEx()))
+if (Utils.IsNullOrEmpty(item.convertTarget) && Utils.IsNotEmpty(item.moreUrl.TrimEx()))
{
-if (!Utils.IsNullOrEmpty(result) && Utils.IsBase64String(result!))
+if (Utils.IsNotEmpty(result) && Utils.IsBase64String(result!))
{
result = Utils.Base64Decode(result);
}
@@ -210,7 +210,7 @@ namespace ServiceLib.Handler
{
result2 = await downloadHandle.TryDownloadString(url2, false, userAgent);
}
-if (!Utils.IsNullOrEmpty(result2))
+if (Utils.IsNotEmpty(result2))
{
if (Utils.IsBase64String(result2!))
{
@@ -277,7 +277,7 @@ namespace ServiceLib.Handler
var url = coreInfo?.coreReleaseApiUrl;
var result = await downloadHandle.DownloadStringAsync(url, true, Global.AppName);
-if (!Utils.IsNullOrEmpty(result))
+if (Utils.IsNotEmpty(result))
{
return await ParseDownloadUrl(type, result, preRelease);
}
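One hunk above routes the subscription URL through a converter endpoint when item.convertTarget is set, by formatting the encoded URL into subConvertUrl. A tiny self-contained illustration of that substitution (the template URL is hypothetical, and Uri.EscapeDataString stands in for Utils.UrlEncode):

using System;

class SubConvertSketch
{
    static void Main()
    {
        // Hypothetical converter template; {0} receives the URL-encoded subscription link.
        const string subConvertUrl = "https://sub-converter.example/sub?target=clash&url={0}";
        var url = "https://example.com/my-subscription";
        url = string.Format(subConvertUrl, Uri.EscapeDataString(url));
        Console.WriteLine(url);
    }
}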
View File
@@ -105,6 +105,15 @@ namespace ServiceLib.Resx {
}
}
+/// <summary>
+/// Looks up a localized string similar to Host filter.
+/// </summary>
+public static string ConnectionsHostFilterTitle {
+get {
+return ResourceManager.GetString("ConnectionsHostFilterTitle", resourceCulture);
+}
+}
/// <summary>
/// Looks up a localized string similar to Note that custom configuration relies entirely on your own configuration and does not work with all settings. If you want to use the system proxy, please modify the listening port manually.
/// </summary>
Some files were not shown because too many files have changed in this diff.