Update On Tue Jun 11 20:31:39 CEST 2024

This commit is contained in:
github-action[bot]
2024-06-11 20:31:39 +02:00
parent ec45d11fc1
commit 01ac10ab55
87 changed files with 2648 additions and 1404 deletions

1
.github/update.log vendored
View File

@@ -669,3 +669,4 @@ Update On Fri Jun 7 20:34:28 CEST 2024
Update On Sat Jun 8 20:29:12 CEST 2024
Update On Sun Jun 9 20:29:25 CEST 2024
Update On Mon Jun 10 20:32:26 CEST 2024
Update On Tue Jun 11 20:31:29 CEST 2024

View File

@@ -1,7 +1,7 @@
{
"version": "20240606",
"text": "Clean the web with Brook",
"link": "https://www.txthinking.com/talks/articles/brook-clean-the-web-en.article",
"text_zh": "使用 Brook 净化互联网",
"link_zh": "https://www.txthinking.com/talks/articles/brook-clean-the-web.article"
"text": "Refer to get Brook Plus for free",
"link": "https://www.txthinking.com/brook.html#referrals",
"text_zh": "邀请以免费获得 Brook Plus",
"link_zh": "https://www.txthinking.com/brook.html#referrals"
}

View File

@@ -6,6 +6,11 @@ modules = append(modules, {
dnsquery: func(m) {
text := import("text")
l := [
"analytics.google.com",
"apple.com",
"comodoca.com",
"autonavi.com",
"giphy.com",
"facebook.com",
"fbcdn.net",
"facebook.net",

View File

@@ -10,6 +10,7 @@ import (
"github.com/metacubex/mihomo/component/loopback"
"github.com/metacubex/mihomo/component/resolver"
C "github.com/metacubex/mihomo/constant"
"github.com/metacubex/mihomo/constant/features"
)
type Direct struct {
@@ -24,9 +25,11 @@ type DirectOption struct {
// DialContext implements C.ProxyAdapter
func (d *Direct) DialContext(ctx context.Context, metadata *C.Metadata, opts ...dialer.Option) (C.Conn, error) {
if !features.CMFA {
if err := d.loopBack.CheckConn(metadata); err != nil {
return nil, err
}
}
opts = append(opts, dialer.WithResolver(resolver.DefaultResolver))
c, err := dialer.DialContext(ctx, "tcp", metadata.RemoteAddress(), d.Base.DialOptions(opts...)...)
if err != nil {
@@ -38,9 +41,11 @@ func (d *Direct) DialContext(ctx context.Context, metadata *C.Metadata, opts ...
// ListenPacketContext implements C.ProxyAdapter
func (d *Direct) ListenPacketContext(ctx context.Context, metadata *C.Metadata, opts ...dialer.Option) (C.PacketConn, error) {
if !features.CMFA {
if err := d.loopBack.CheckPacketConn(metadata); err != nil {
return nil, err
}
}
// net.UDPConn.WriteTo only working with *net.UDPAddr, so we need a net.UDPAddr
if !metadata.Resolved() {
ip, err := resolver.ResolveIPWithResolver(ctx, metadata.Host, resolver.DefaultResolver)

View File

@@ -819,9 +819,9 @@ dependencies = [
[[package]]
name = "clap"
version = "4.5.6"
version = "4.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9689a29b593160de5bc4aacab7b5d54fb52231de70122626c178e6a368994c7"
checksum = "5db83dced34638ad474f39f250d7fea9598bdd239eaced1bdf45d597da0f433f"
dependencies = [
"clap_builder",
"clap_derive",
@@ -829,9 +829,9 @@ dependencies = [
[[package]]
name = "clap_builder"
version = "4.5.6"
version = "4.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e5387378c84f6faa26890ebf9f0a92989f8873d4d380467bcd0d8d8620424df"
checksum = "f7e204572485eb3fbf28f871612191521df159bc3e15a9f5064c66dba3a8c05f"
dependencies = [
"anstream",
"anstyle",

View File

@@ -50,6 +50,18 @@ export const useClashCore = () => {
await mutate();
};
const setGlobalProxy = async (name: string) => {
const group = data?.global;
if (!group) {
return;
}
await tauri.selectProxy(group?.name, name);
await mutate();
};
const getRules = useSWR("getRules", clash.getRules);
const getRulesProviders = useSWR<{ [name: string]: ProviderRules }>(
@@ -80,6 +92,7 @@ export const useClashCore = () => {
updateGroupDelay,
updateProxiesDelay,
setGroupProxy,
setGlobalProxy,
getRules,
getRulesProviders,
updateRulesProviders,

View File

@@ -53,9 +53,9 @@
"@types/react": "18.3.3",
"@types/react-dom": "18.3.0",
"@types/react-transition-group": "4.4.10",
"@typescript-eslint/eslint-plugin": "7.12.0",
"@typescript-eslint/parser": "7.12.0",
"@vitejs/plugin-react": "4.3.0",
"@typescript-eslint/eslint-plugin": "7.13.0",
"@typescript-eslint/parser": "7.13.0",
"@vitejs/plugin-react": "4.3.1",
"sass": "1.77.4",
"shiki": "1.6.3",
"vite": "5.2.13",

View File

@@ -10,7 +10,14 @@ import { Clash, useClashCore, useNyanpasu } from "@nyanpasu/interface";
import { useBreakpoint } from "@nyanpasu/ui";
import { useAtom, useAtomValue } from "jotai";
import { proxyGroupAtom, proxyGroupSortAtom } from "@/store";
import { CSSProperties, memo, useEffect, useMemo, useState } from "react";
import {
CSSProperties,
memo,
useEffect,
useMemo,
useState,
useTransition,
} from "react";
import { classNames } from "@/utils";
import { VList } from "virtua";
import { AnimatePresence, motion } from "framer-motion";
@@ -31,10 +38,11 @@ const getColorForDelay = (delay: number): string => {
const { palette } = useTheme();
const delayColorMapping: { [key: string]: string } = {
"0": palette.text.secondary,
"0": palette.error.main,
"1": palette.text.secondary,
"100": palette.success.main,
"500": palette.warning.main,
"1000": palette.error.main,
"10000": palette.error.main,
};
let color: string = palette.text.secondary;
@@ -102,7 +110,7 @@ const DelayChip = memo(function DelayChip({
loading ? "opacity-0" : "opacity-1",
)}
>
{`${delay} ms`}
{delay ? `${delay} ms` : "timeout"}
</span>
<CircularProgress
@@ -168,7 +176,10 @@ const NodeCard = memo(function NodeCard({
});
export const NodeList = () => {
const { data, setGroupProxy, updateProxiesDelay } = useClashCore();
const { data, setGroupProxy, setGlobalProxy, updateProxiesDelay } =
useClashCore();
const [isPending, startTransition] = useTransition();
const { getCurrentMode } = useNyanpasu();
@@ -194,6 +205,9 @@ export const NodeList = () => {
if (delayA === -1 || delayA === -2) return 1;
if (delayB === -1 || delayB === -2) return -1;
if (delayA === 0) return 1;
if (delayB === 0) return -1;
return delayA - delayB;
});
} else if (proxyGroupSort === "name") {
@@ -223,7 +237,7 @@ export const NodeList = () => {
const [renderList, setRenderList] = useState<RenderClashProxy[][]>([]);
useEffect(() => {
const updateRenderList = () => {
if (!group?.all) return;
const nodeNames: string[] = [];
@@ -257,15 +271,32 @@ export const NodeList = () => {
);
setRenderList(list);
};
useEffect(() => {
startTransition(() => {
updateRenderList();
});
}, [group?.all, column]);
const hendleClick = (node: string) => {
if (!getCurrentMode.global) {
setGroupProxy(proxyGroup.selector as number, node);
} else {
setGlobalProxy(node);
}
};
return (
<AnimatePresence initial={false}>
<VList style={{ flex: 1 }} className="p-2">
<AnimatePresence initial={false} mode="sync">
<VList
style={{ flex: 1 }}
className={classNames(
"transition-opacity",
"p-2",
isPending ? "opacity-0" : "opacity-1",
)}
>
{renderList?.map((node, index) => {
return (
<div
@@ -280,9 +311,9 @@ export const NodeList = () => {
layoutId={`node-${render.renderLayoutKey}`}
className="relative overflow-hidden"
layout="position"
initial={false}
animate="center"
exit="exit"
initial={{ scale: 0.7, opacity: 0 }}
animate={{ scale: 1, opacity: 1 }}
exit={{ opacity: 0 }}
>
<NodeCard
node={render}

View File

@@ -2,7 +2,7 @@
"manifest_version": 1,
"latest": {
"mihomo": "v1.18.5",
"mihomo_alpha": "alpha-0d4e57c",
"mihomo_alpha": "alpha-cacfefa",
"clash_rs": "v0.1.18",
"clash_premium": "2023-09-05-gdcc8d87"
},
@@ -36,5 +36,5 @@
"darwin-x64": "clash-darwin-amd64-n{}.gz"
}
},
"updated_at": "2024-06-07T22:20:07.011Z"
"updated_at": "2024-06-10T22:20:47.739Z"
}

View File

@@ -95,7 +95,7 @@
"postcss-html": "1.7.0",
"postcss-import": "16.1.0",
"postcss-scss": "4.0.9",
"prettier": "3.3.1",
"prettier": "3.3.2",
"prettier-plugin-toml": "2.0.1",
"react-devtools": "5.2.0",
"stylelint": "16.6.1",
@@ -106,10 +106,10 @@
"stylelint-order": "6.0.4",
"stylelint-scss": "6.3.1",
"tailwindcss": "3.4.4",
"tsx": "4.15.1",
"tsx": "4.15.2",
"typescript": "5.4.5"
},
"packageManager": "pnpm@9.2.0",
"packageManager": "pnpm@9.3.0",
"engines": {
"node": "22.2.0"
},

View File

@@ -75,7 +75,7 @@ importers:
version: 16.6.2(eslint@8.57.0)
eslint-plugin-prettier:
specifier: 5.1.3
version: 5.1.3(eslint-config-prettier@9.1.0(eslint@8.57.0))(eslint@8.57.0)(prettier@3.3.1)
version: 5.1.3(eslint-config-prettier@9.1.0(eslint@8.57.0))(eslint@8.57.0)(prettier@3.3.2)
eslint-plugin-promise:
specifier: 6.2.0
version: 6.2.0(eslint@8.57.0)
@@ -101,11 +101,11 @@ importers:
specifier: 4.0.9
version: 4.0.9(postcss@8.4.38)
prettier:
specifier: 3.3.1
version: 3.3.1
specifier: 3.3.2
version: 3.3.2
prettier-plugin-toml:
specifier: 2.0.1
version: 2.0.1(prettier@3.3.1)
version: 2.0.1(prettier@3.3.2)
react-devtools:
specifier: 5.2.0
version: 5.2.0(bufferutil@4.0.8)(utf-8-validate@5.0.10)
@@ -134,8 +134,8 @@ importers:
specifier: 3.4.4
version: 3.4.4
tsx:
specifier: 4.15.1
version: 4.15.1
specifier: 4.15.2
version: 4.15.2
typescript:
specifier: 5.4.5
version: 5.4.5
@@ -250,7 +250,7 @@ importers:
version: 7.51.5(react@19.0.0-rc-9d4fba0788-20240530)
react-hook-form-mui:
specifier: 7.0.0
version: 7.0.0(@mui/icons-material@5.15.19(@mui/material@5.15.19(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(@emotion/styled@11.11.5(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react-dom@19.0.0-rc-9d4fba0788-20240530(react@19.0.0-rc-9d4fba0788-20240530))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(@mui/material@5.15.19(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(@emotion/styled@11.11.5(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react-dom@19.0.0-rc-9d4fba0788-20240530(react@19.0.0-rc-9d4fba0788-20240530))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react-hook-form@7.51.5(react@19.0.0-rc-9d4fba0788-20240530))(react@19.0.0-rc-9d4fba0788-20240530)
version: 7.0.0(szplwmyfv5kdrzoa2ayly36z7i)
react-i18next:
specifier: 14.1.2
version: 14.1.2(i18next@23.11.5)(react-dom@19.0.0-rc-9d4fba0788-20240530(react@19.0.0-rc-9d4fba0788-20240530))(react@19.0.0-rc-9d4fba0788-20240530)
@@ -292,14 +292,14 @@ importers:
specifier: 4.4.10
version: 4.4.10
'@typescript-eslint/eslint-plugin':
specifier: 7.12.0
version: 7.12.0(@typescript-eslint/parser@7.12.0(eslint@8.57.0)(typescript@5.4.5))(eslint@8.57.0)(typescript@5.4.5)
specifier: 7.13.0
version: 7.13.0(@typescript-eslint/parser@7.13.0(eslint@8.57.0)(typescript@5.4.5))(eslint@8.57.0)(typescript@5.4.5)
'@typescript-eslint/parser':
specifier: 7.12.0
version: 7.12.0(eslint@8.57.0)(typescript@5.4.5)
specifier: 7.13.0
version: 7.13.0(eslint@8.57.0)(typescript@5.4.5)
'@vitejs/plugin-react':
specifier: 4.3.0
version: 4.3.0(vite@5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0))
specifier: 4.3.1
version: 4.3.1(vite@5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0))
sass:
specifier: 1.77.4
version: 1.77.4
@@ -314,7 +314,7 @@ importers:
version: vite-plugin-monaco-editor-new@1.1.3(monaco-editor@0.49.0)
vite-plugin-sass-dts:
specifier: 1.3.22
version: 1.3.22(postcss@8.4.38)(prettier@3.3.1)(sass@1.77.4)(vite@5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0))
version: 1.3.22(postcss@8.4.38)(prettier@3.3.2)(sass@1.77.4)(vite@5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0))
vite-plugin-svgr:
specifier: 4.2.0
version: 4.2.0(rollup@4.17.2)(typescript@5.4.5)(vite@5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0))
@@ -1175,6 +1175,7 @@ packages:
'@humanwhocodes/config-array@0.11.14':
resolution: {integrity: sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==}
engines: {node: '>=10.10.0'}
deprecated: Use @eslint/config-array instead
'@humanwhocodes/module-importer@1.0.1':
resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==}
@@ -1182,6 +1183,7 @@ packages:
'@humanwhocodes/object-schema@2.0.3':
resolution: {integrity: sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==}
deprecated: Use @eslint/object-schema instead
'@isaacs/cliui@8.0.2':
resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==}
@@ -1756,8 +1758,8 @@ packages:
'@types/yauzl@2.10.3':
resolution: {integrity: sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==}
'@typescript-eslint/eslint-plugin@7.12.0':
resolution: {integrity: sha512-7F91fcbuDf/d3S8o21+r3ZncGIke/+eWk0EpO21LXhDfLahriZF9CGj4fbAetEjlaBdjdSm9a6VeXbpbT6Z40Q==}
'@typescript-eslint/eslint-plugin@7.13.0':
resolution: {integrity: sha512-FX1X6AF0w8MdVFLSdqwqN/me2hyhuQg4ykN6ZpVhh1ij/80pTvDKclX1sZB9iqex8SjQfVhwMKs3JtnnMLzG9w==}
engines: {node: ^18.18.0 || >=20.0.0}
peerDependencies:
'@typescript-eslint/parser': ^7.0.0
@@ -1767,8 +1769,8 @@ packages:
typescript:
optional: true
'@typescript-eslint/parser@7.12.0':
resolution: {integrity: sha512-dm/J2UDY3oV3TKius2OUZIFHsomQmpHtsV0FTh1WO8EKgHLQ1QCADUqscPgTpU+ih1e21FQSRjXckHn3txn6kQ==}
'@typescript-eslint/parser@7.13.0':
resolution: {integrity: sha512-EjMfl69KOS9awXXe83iRN7oIEXy9yYdqWfqdrFAYAAr6syP8eLEFI7ZE4939antx2mNgPRW/o1ybm2SFYkbTVA==}
engines: {node: ^18.18.0 || >=20.0.0}
peerDependencies:
eslint: ^8.56.0
@@ -1777,12 +1779,12 @@ packages:
typescript:
optional: true
'@typescript-eslint/scope-manager@7.12.0':
resolution: {integrity: sha512-itF1pTnN6F3unPak+kutH9raIkL3lhH1YRPGgt7QQOh43DQKVJXmWkpb+vpc/TiDHs6RSd9CTbDsc/Y+Ygq7kg==}
'@typescript-eslint/scope-manager@7.13.0':
resolution: {integrity: sha512-ZrMCe1R6a01T94ilV13egvcnvVJ1pxShkE0+NDjDzH4nvG1wXpwsVI5bZCvE7AEDH1mXEx5tJSVR68bLgG7Dng==}
engines: {node: ^18.18.0 || >=20.0.0}
'@typescript-eslint/type-utils@7.12.0':
resolution: {integrity: sha512-lib96tyRtMhLxwauDWUp/uW3FMhLA6D0rJ8T7HmH7x23Gk1Gwwu8UZ94NMXBvOELn6flSPiBrCKlehkiXyaqwA==}
'@typescript-eslint/type-utils@7.13.0':
resolution: {integrity: sha512-xMEtMzxq9eRkZy48XuxlBFzpVMDurUAfDu5Rz16GouAtXm0TaAoTFzqWUFPPuQYXI/CDaH/Bgx/fk/84t/Bc9A==}
engines: {node: ^18.18.0 || >=20.0.0}
peerDependencies:
eslint: ^8.56.0
@@ -1791,12 +1793,12 @@ packages:
typescript:
optional: true
'@typescript-eslint/types@7.12.0':
resolution: {integrity: sha512-o+0Te6eWp2ppKY3mLCU+YA9pVJxhUJE15FV7kxuD9jgwIAa+w/ycGJBMrYDTpVGUM/tgpa9SeMOugSabWFq7bg==}
'@typescript-eslint/types@7.13.0':
resolution: {integrity: sha512-QWuwm9wcGMAuTsxP+qz6LBBd3Uq8I5Nv8xb0mk54jmNoCyDspnMvVsOxI6IsMmway5d1S9Su2+sCKv1st2l6eA==}
engines: {node: ^18.18.0 || >=20.0.0}
'@typescript-eslint/typescript-estree@7.12.0':
resolution: {integrity: sha512-5bwqLsWBULv1h6pn7cMW5dXX/Y2amRqLaKqsASVwbBHMZSnHqE/HN4vT4fE0aFsiwxYvr98kqOWh1a8ZKXalCQ==}
'@typescript-eslint/typescript-estree@7.13.0':
resolution: {integrity: sha512-cAvBvUoobaoIcoqox1YatXOnSl3gx92rCZoMRPzMNisDiM12siGilSM4+dJAekuuHTibI2hVC2fYK79iSFvWjw==}
engines: {node: ^18.18.0 || >=20.0.0}
peerDependencies:
typescript: '*'
@@ -1804,21 +1806,21 @@ packages:
typescript:
optional: true
'@typescript-eslint/utils@7.12.0':
resolution: {integrity: sha512-Y6hhwxwDx41HNpjuYswYp6gDbkiZ8Hin9Bf5aJQn1bpTs3afYY4GX+MPYxma8jtoIV2GRwTM/UJm/2uGCVv+DQ==}
'@typescript-eslint/utils@7.13.0':
resolution: {integrity: sha512-jceD8RgdKORVnB4Y6BqasfIkFhl4pajB1wVxrF4akxD2QPM8GNYjgGwEzYS+437ewlqqrg7Dw+6dhdpjMpeBFQ==}
engines: {node: ^18.18.0 || >=20.0.0}
peerDependencies:
eslint: ^8.56.0
'@typescript-eslint/visitor-keys@7.12.0':
resolution: {integrity: sha512-uZk7DevrQLL3vSnfFl5bj4sL75qC9D6EdjemIdbtkuUmIheWpuiiylSY01JxJE7+zGrOWDZrp1WxOuDntvKrHQ==}
'@typescript-eslint/visitor-keys@7.13.0':
resolution: {integrity: sha512-nxn+dozQx+MK61nn/JP+M4eCkHDSxSLDpgE3WcQo0+fkjEolnaB5jswvIKC4K56By8MMgIho7f1PVxERHEo8rw==}
engines: {node: ^18.18.0 || >=20.0.0}
'@ungap/structured-clone@1.2.0':
resolution: {integrity: sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==}
'@vitejs/plugin-react@4.3.0':
resolution: {integrity: sha512-KcEbMsn4Dpk+LIbHMj7gDPRKaTMStxxWRkRmxsg/jVdFdJCZWt1SchZcf0M4t8lIKdwwMsEyzhrcOXRrDPtOBw==}
'@vitejs/plugin-react@4.3.1':
resolution: {integrity: sha512-m/V2syj5CuVnaxcUJOQRel/Wr31FFXRFlnOoq1TVtkCxsY5veGMTEmpWHndrhB2U8ScHtCQB1e+4hWYExQc6Lg==}
engines: {node: ^14.18.0 || >=16.0.0}
peerDependencies:
vite: ^4.2.0 || ^5.0.0
@@ -4160,8 +4162,8 @@ packages:
peerDependencies:
prettier: ^3.0.3
prettier@3.3.1:
resolution: {integrity: sha512-7CAwy5dRsxs8PHXT3twixW9/OEll8MLE0VRPCJyl7CkS6VHGPSlsVaWTiASPTyGyYRyApxlaWTzwUxVNrhcwDg==}
prettier@3.3.2:
resolution: {integrity: sha512-rAVeHYMcv8ATV5d508CFdn+8/pHPpXeIid1DdrPwXnaAdH7cqjVbpJaT5eq4yRAFU/lsbwYwSF/n5iNrdJHPQA==}
engines: {node: '>=14'}
hasBin: true
@@ -4819,8 +4821,8 @@ packages:
tslib@2.6.2:
resolution: {integrity: sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==}
tsx@4.15.1:
resolution: {integrity: sha512-k/6h17jA1KfUR7SpcteOa880zGmF56s8gMIcSqUR5avyNFi9nlCEKpMiHLrzrqyARGr52A/JablmGey1DEWbCA==}
tsx@4.15.2:
resolution: {integrity: sha512-kIZTOCmR37nEw0qxQks2dR+eZWSXydhTGmz7yx94vEiJtJGBTkUl0D/jt/5fey+CNdm6i3Cp+29WKRay9ScQUw==}
engines: {node: '>=18.0.0'}
hasBin: true
@@ -6411,14 +6413,14 @@ snapshots:
'@types/node': 20.14.2
optional: true
'@typescript-eslint/eslint-plugin@7.12.0(@typescript-eslint/parser@7.12.0(eslint@8.57.0)(typescript@5.4.5))(eslint@8.57.0)(typescript@5.4.5)':
'@typescript-eslint/eslint-plugin@7.13.0(@typescript-eslint/parser@7.13.0(eslint@8.57.0)(typescript@5.4.5))(eslint@8.57.0)(typescript@5.4.5)':
dependencies:
'@eslint-community/regexpp': 4.10.0
'@typescript-eslint/parser': 7.12.0(eslint@8.57.0)(typescript@5.4.5)
'@typescript-eslint/scope-manager': 7.12.0
'@typescript-eslint/type-utils': 7.12.0(eslint@8.57.0)(typescript@5.4.5)
'@typescript-eslint/utils': 7.12.0(eslint@8.57.0)(typescript@5.4.5)
'@typescript-eslint/visitor-keys': 7.12.0
'@typescript-eslint/parser': 7.13.0(eslint@8.57.0)(typescript@5.4.5)
'@typescript-eslint/scope-manager': 7.13.0
'@typescript-eslint/type-utils': 7.13.0(eslint@8.57.0)(typescript@5.4.5)
'@typescript-eslint/utils': 7.13.0(eslint@8.57.0)(typescript@5.4.5)
'@typescript-eslint/visitor-keys': 7.13.0
eslint: 8.57.0
graphemer: 1.4.0
ignore: 5.3.1
@@ -6429,12 +6431,12 @@ snapshots:
transitivePeerDependencies:
- supports-color
'@typescript-eslint/parser@7.12.0(eslint@8.57.0)(typescript@5.4.5)':
'@typescript-eslint/parser@7.13.0(eslint@8.57.0)(typescript@5.4.5)':
dependencies:
'@typescript-eslint/scope-manager': 7.12.0
'@typescript-eslint/types': 7.12.0
'@typescript-eslint/typescript-estree': 7.12.0(typescript@5.4.5)
'@typescript-eslint/visitor-keys': 7.12.0
'@typescript-eslint/scope-manager': 7.13.0
'@typescript-eslint/types': 7.13.0
'@typescript-eslint/typescript-estree': 7.13.0(typescript@5.4.5)
'@typescript-eslint/visitor-keys': 7.13.0
debug: 4.3.4
eslint: 8.57.0
optionalDependencies:
@@ -6442,15 +6444,15 @@ snapshots:
transitivePeerDependencies:
- supports-color
'@typescript-eslint/scope-manager@7.12.0':
'@typescript-eslint/scope-manager@7.13.0':
dependencies:
'@typescript-eslint/types': 7.12.0
'@typescript-eslint/visitor-keys': 7.12.0
'@typescript-eslint/types': 7.13.0
'@typescript-eslint/visitor-keys': 7.13.0
'@typescript-eslint/type-utils@7.12.0(eslint@8.57.0)(typescript@5.4.5)':
'@typescript-eslint/type-utils@7.13.0(eslint@8.57.0)(typescript@5.4.5)':
dependencies:
'@typescript-eslint/typescript-estree': 7.12.0(typescript@5.4.5)
'@typescript-eslint/utils': 7.12.0(eslint@8.57.0)(typescript@5.4.5)
'@typescript-eslint/typescript-estree': 7.13.0(typescript@5.4.5)
'@typescript-eslint/utils': 7.13.0(eslint@8.57.0)(typescript@5.4.5)
debug: 4.3.4
eslint: 8.57.0
ts-api-utils: 1.3.0(typescript@5.4.5)
@@ -6459,12 +6461,12 @@ snapshots:
transitivePeerDependencies:
- supports-color
'@typescript-eslint/types@7.12.0': {}
'@typescript-eslint/types@7.13.0': {}
'@typescript-eslint/typescript-estree@7.12.0(typescript@5.4.5)':
'@typescript-eslint/typescript-estree@7.13.0(typescript@5.4.5)':
dependencies:
'@typescript-eslint/types': 7.12.0
'@typescript-eslint/visitor-keys': 7.12.0
'@typescript-eslint/types': 7.13.0
'@typescript-eslint/visitor-keys': 7.13.0
debug: 4.3.4
globby: 11.1.0
is-glob: 4.0.3
@@ -6476,25 +6478,25 @@ snapshots:
transitivePeerDependencies:
- supports-color
'@typescript-eslint/utils@7.12.0(eslint@8.57.0)(typescript@5.4.5)':
'@typescript-eslint/utils@7.13.0(eslint@8.57.0)(typescript@5.4.5)':
dependencies:
'@eslint-community/eslint-utils': 4.4.0(eslint@8.57.0)
'@typescript-eslint/scope-manager': 7.12.0
'@typescript-eslint/types': 7.12.0
'@typescript-eslint/typescript-estree': 7.12.0(typescript@5.4.5)
'@typescript-eslint/scope-manager': 7.13.0
'@typescript-eslint/types': 7.13.0
'@typescript-eslint/typescript-estree': 7.13.0(typescript@5.4.5)
eslint: 8.57.0
transitivePeerDependencies:
- supports-color
- typescript
'@typescript-eslint/visitor-keys@7.12.0':
'@typescript-eslint/visitor-keys@7.13.0':
dependencies:
'@typescript-eslint/types': 7.12.0
'@typescript-eslint/types': 7.13.0
eslint-visitor-keys: 3.4.3
'@ungap/structured-clone@1.2.0': {}
'@vitejs/plugin-react@4.3.0(vite@5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0))':
'@vitejs/plugin-react@4.3.1(vite@5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0))':
dependencies:
'@babel/core': 7.24.5
'@babel/plugin-transform-react-jsx-self': 7.24.5(@babel/core@7.24.5)
@@ -7490,10 +7492,10 @@ snapshots:
resolve: 1.22.8
semver: 7.6.1
eslint-plugin-prettier@5.1.3(eslint-config-prettier@9.1.0(eslint@8.57.0))(eslint@8.57.0)(prettier@3.3.1):
eslint-plugin-prettier@5.1.3(eslint-config-prettier@9.1.0(eslint@8.57.0))(eslint@8.57.0)(prettier@3.3.2):
dependencies:
eslint: 8.57.0
prettier: 3.3.1
prettier: 3.3.2
prettier-linter-helpers: 1.0.0
synckit: 0.8.8
optionalDependencies:
@@ -9130,12 +9132,12 @@ snapshots:
dependencies:
fast-diff: 1.3.0
prettier-plugin-toml@2.0.1(prettier@3.3.1):
prettier-plugin-toml@2.0.1(prettier@3.3.2):
dependencies:
'@taplo/lib': 0.4.0-alpha.2
prettier: 3.3.1
prettier: 3.3.2
prettier@3.3.1: {}
prettier@3.3.2: {}
progress@2.0.3: {}
@@ -9210,8 +9212,8 @@ snapshots:
react: 19.0.0-rc-9d4fba0788-20240530
react-dom: 19.0.0-rc-9d4fba0788-20240530(react@19.0.0-rc-9d4fba0788-20240530)
? react-hook-form-mui@7.0.0(@mui/icons-material@5.15.19(@mui/material@5.15.19(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(@emotion/styled@11.11.5(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react-dom@19.0.0-rc-9d4fba0788-20240530(react@19.0.0-rc-9d4fba0788-20240530))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(@mui/material@5.15.19(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(@emotion/styled@11.11.5(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react-dom@19.0.0-rc-9d4fba0788-20240530(react@19.0.0-rc-9d4fba0788-20240530))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react-hook-form@7.51.5(react@19.0.0-rc-9d4fba0788-20240530))(react@19.0.0-rc-9d4fba0788-20240530)
: dependencies:
react-hook-form-mui@7.0.0(szplwmyfv5kdrzoa2ayly36z7i):
dependencies:
'@mui/material': 5.15.19(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(@emotion/styled@11.11.5(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react-dom@19.0.0-rc-9d4fba0788-20240530(react@19.0.0-rc-9d4fba0788-20240530))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0)
react: 19.0.0-rc-9d4fba0788-20240530
react-hook-form: 7.51.5(react@19.0.0-rc-9d4fba0788-20240530)
@@ -9912,7 +9914,7 @@ snapshots:
tslib@2.6.2: {}
tsx@4.15.1:
tsx@4.15.2:
dependencies:
esbuild: 0.21.4
get-tsconfig: 4.7.5
@@ -10132,11 +10134,11 @@ snapshots:
esbuild: 0.19.12
monaco-editor: 0.49.0
vite-plugin-sass-dts@1.3.22(postcss@8.4.38)(prettier@3.3.1)(sass@1.77.4)(vite@5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0)):
vite-plugin-sass-dts@1.3.22(postcss@8.4.38)(prettier@3.3.2)(sass@1.77.4)(vite@5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0)):
dependencies:
postcss: 8.4.38
postcss-js: 4.0.1(postcss@8.4.38)
prettier: 3.3.1
prettier: 3.3.2
sass: 1.77.4
vite: 5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0)

View File

@@ -69,7 +69,7 @@ export const ProviderButton = () => {
sx={{ textTransform: "capitalize" }}
onClick={() => setOpen(true)}
>
{t("Provider")}
{t("Proxy Provider")}
</Button>
<BaseDialog
@@ -171,7 +171,7 @@ export const ProviderButton = () => {
<IconButton
size="small"
color="inherit"
title="Update Provider"
title={`${t("Update")}${t("Proxy Provider")}`}
onClick={() => handleUpdate(key, index)}
sx={{
animation: updating[index]

View File

@@ -67,7 +67,7 @@ export const ProviderButton = () => {
sx={{ textTransform: "capitalize" }}
onClick={() => setOpen(true)}
>
{t("Provider")}
{t("Rule Provider")}
</Button>
<BaseDialog
@@ -145,7 +145,7 @@ export const ProviderButton = () => {
<IconButton
size="small"
color="inherit"
title="Update Provider"
title={`${t("Update")}${t("Rule Provider")}`}
onClick={() => handleUpdate(key, index)}
sx={{
animation: updating[index]

View File

@@ -21,6 +21,9 @@
"Proxies": "Proxies",
"Proxy Groups": "Proxy Groups",
"Proxy Provider": "Proxy Provider",
"Update All": "Update All",
"Update At": "Update At",
"rule": "rule",
"global": "global",
"direct": "direct",
@@ -68,6 +71,8 @@
"To End": "To End",
"Connections": "Connections",
"Table View": "Table View",
"List View": "List View",
"Close All": "Close All",
"Default": "Default",
"Download Speed": "Download Speed",
@@ -86,8 +91,7 @@
"Close Connection": "Close Connection",
"Rules": "Rules",
"Update All": "Update All",
"Update At": "Update At",
"Rule Provider": "Rule Provider",
"Logs": "Logs",
"Pause": "Pause",

View File

@@ -21,6 +21,9 @@
"Proxies": "پراکسی‌ها",
"Proxy Groups": "گروه‌های پراکسی",
"Proxy Provider": "تأمین‌کننده پروکسی",
"Update All": "به‌روزرسانی همه",
"Update At": "به‌روزرسانی در",
"rule": "قانون",
"global": "جهانی",
"direct": "مستقیم",
@@ -68,6 +71,8 @@
"To End": "به پایان",
"Connections": "اتصالات",
"Table View": "نمای جدولی",
"List View": "نمای لیستی",
"Close All": "بستن همه",
"Default": "پیش‌فرض",
"Download Speed": "سرعت دانلود",
@@ -86,8 +91,7 @@
"Close Connection": "بستن اتصال",
"Rules": "قوانین",
"Update All": "به‌روزرسانی همه",
"Update At": "به‌روزرسانی در",
"Rule Provider": "تأمین‌کننده قانون",
"Logs": "لاگ‌ها",
"Pause": "توقف",

View File

@@ -21,6 +21,9 @@
"Proxies": "Прокси",
"Proxy Groups": "Группы прокси",
"Proxy Provider": "Провайдер прокси",
"Update All": "Обновить все",
"Update At": "Обновлено в",
"rule": "правила",
"global": "глобальный",
"direct": "прямой",
@@ -68,6 +71,8 @@
"To End": "Вниз",
"Connections": "Соединения",
"Table View": "Табличный вид",
"List View": "Списковый вид",
"Close All": "Закрыть всё",
"Default": "По умолчанию",
"Download Speed": "Скорость загрузки",
@@ -86,8 +91,7 @@
"Close Connection": "Закрыть соединение",
"Rules": "Правила",
"Update All": "Обновить все",
"Update At": "Обновлено в",
"Rule Provider": "Провайдер правил",
"Logs": "Логи",
"Pause": "Пауза",

View File

@@ -21,6 +21,9 @@
"Proxies": "代理",
"Proxy Groups": "代理组",
"Proxy Provider": "代理集合",
"Update All": "更新全部",
"Update At": "更新于",
"rule": "规则",
"global": "全局",
"direct": "直连",
@@ -68,6 +71,8 @@
"To End": "移到末尾",
"Connections": "连接",
"Table View": "表格视图",
"List View": "列表视图",
"Close All": "关闭全部",
"Default": "默认",
"Download Speed": "下载速度",
@@ -86,8 +91,7 @@
"Close Connection": "关闭连接",
"Rules": "规则",
"Update All": "更新全部",
"Update At": "更新于",
"Rule Provider": "规则集合",
"Logs": "日志",
"Pause": "暂停",

View File

@@ -143,9 +143,13 @@ const ConnectionsPage = () => {
}
>
{isTableLayout ? (
<TableChartRounded fontSize="inherit" />
) : (
<span title={t("List View")}>
<TableRowsRounded fontSize="inherit" />
</span>
) : (
<span title={t("Table View")}>
<TableChartRounded fontSize="inherit" />
</span>
)}
</IconButton>

View File

@@ -0,0 +1,46 @@
From 97eb5d51b4a584a60e5d096bdb6b33edc9f50d8d Mon Sep 17 00:00:00 2001
From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
Date: Mon, 15 Jan 2024 12:43:38 +0000
Subject: [PATCH] net: sfp-bus: fix SFP mode detect from bitrate
The referenced commit moved the setting of the Autoneg and pause bits
early in sfp_parse_support(). However, we check whether the modes are
empty before using the bitrate to set some modes. Setting these bits
so early causes that test to always be false, preventing this working,
and thus some modules that used to work no longer do.
Move them just before the call to the quirk.
Fixes: 8110633db49d ("net: sfp-bus: allow SFP quirks to override Autoneg and pause bits")
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Reviewed-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Link: https://lore.kernel.org/r/E1rPMJW-001Ahf-L0@rmk-PC.armlinux.org.uk
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
drivers/net/phy/sfp-bus.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -151,10 +151,6 @@ void sfp_parse_support(struct sfp_bus *b
unsigned int br_min, br_nom, br_max;
__ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
- phylink_set(modes, Autoneg);
- phylink_set(modes, Pause);
- phylink_set(modes, Asym_Pause);
-
/* Decode the bitrate information to MBd */
br_min = br_nom = br_max = 0;
if (id->base.br_nominal) {
@@ -339,6 +335,10 @@ void sfp_parse_support(struct sfp_bus *b
}
}
+ phylink_set(modes, Autoneg);
+ phylink_set(modes, Pause);
+ phylink_set(modes, Asym_Pause);
+
if (bus->sfp_quirk && bus->sfp_quirk->modes)
bus->sfp_quirk->modes(id, modes, interfaces);

View File

@@ -0,0 +1,61 @@
From 629c701fc39f1ada9416e0766a86729e83bde86c Mon Sep 17 00:00:00 2001
Message-ID: <629c701fc39f1ada9416e0766a86729e83bde86c.1694465766.git.daniel@makrotopia.org>
From: Daniel Golle <daniel@makrotopia.org>
Date: Mon, 11 Sep 2023 21:27:44 +0100
Subject: [PATCH] serial: 8250_mtk: track busclk state to avoid bus error
To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
Jiri Slaby <jirislaby@kernel.org>,
Matthias Brugger <matthias.bgg@gmail.com>,
AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>,
Daniel Golle <daniel@makrotopia.org>,
John Ogness <john.ogness@linutronix.de>,
Chen-Yu Tsai <wenst@chromium.org>,
Changqi Hu <changqi.hu@mediatek.com>,
linux-kernel@vger.kernel.org,
linux-serial@vger.kernel.org,
linux-arm-kernel@lists.infradead.org,
linux-mediatek@lists.infradead.org
Commit e32a83c70cf9 ("serial: 8250-mtk: modify mtk uart power and
clock management") introduced polling a debug register to make sure
the UART is idle before disabling the bus clock. However, at least on
some MediaTek SoCs access to that very debug register requires the bus
clock being enabled. Hence calling the suspend function while already
in suspended state results in that register access triggering a bus
error. In order to avoid that, track the state of the bus clock and
only poll the debug register if not already in suspended state.
Fixes: e32a83c70cf9 ("serial: 8250-mtk: modify mtk uart power and clock management")
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
drivers/tty/serial/8250/8250_mtk.c | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -32,7 +32,7 @@
#define MTK_UART_RXTRI_AD 0x14 /* RX Trigger address */
#define MTK_UART_FRACDIV_L 0x15 /* Fractional divider LSB address */
#define MTK_UART_FRACDIV_M 0x16 /* Fractional divider MSB address */
-#define MTK_UART_DEBUG0 0x18
+#define MTK_UART_DEBUG0 0x18
#define MTK_UART_IER_XOFFI 0x20 /* Enable XOFF character interrupt */
#define MTK_UART_IER_RTSI 0x40 /* Enable RTS Modem status interrupt */
#define MTK_UART_IER_CTSI 0x80 /* Enable CTS Modem status interrupt */
@@ -418,13 +418,12 @@ static int __maybe_unused mtk8250_runtim
struct mtk8250_data *data = dev_get_drvdata(dev);
struct uart_8250_port *up = serial8250_get_port(data->line);
- /* wait until UART in idle status */
- while
- (serial_in(up, MTK_UART_DEBUG0));
-
if (data->clk_count == 0U) {
dev_dbg(dev, "%s clock count is 0\n", __func__);
} else {
+ /* wait until UART in idle status */
+ while
+ (serial_in(up, MTK_UART_DEBUG0));
clk_disable_unprepare(data->bus_clk);
data->clk_count--;
}

View File

@@ -0,0 +1,61 @@
From 629c701fc39f1ada9416e0766a86729e83bde86c Mon Sep 17 00:00:00 2001
Message-ID: <629c701fc39f1ada9416e0766a86729e83bde86c.1694465766.git.daniel@makrotopia.org>
From: Daniel Golle <daniel@makrotopia.org>
Date: Mon, 11 Sep 2023 21:27:44 +0100
Subject: [PATCH] serial: 8250_mtk: track busclk state to avoid bus error
To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
Jiri Slaby <jirislaby@kernel.org>,
Matthias Brugger <matthias.bgg@gmail.com>,
AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>,
Daniel Golle <daniel@makrotopia.org>,
John Ogness <john.ogness@linutronix.de>,
Chen-Yu Tsai <wenst@chromium.org>,
Changqi Hu <changqi.hu@mediatek.com>,
linux-kernel@vger.kernel.org,
linux-serial@vger.kernel.org,
linux-arm-kernel@lists.infradead.org,
linux-mediatek@lists.infradead.org
Commit e32a83c70cf9 ("serial: 8250-mtk: modify mtk uart power and
clock management") introduced polling a debug register to make sure
the UART is idle before disabling the bus clock. However, at least on
some MediaTek SoCs access to that very debug register requires the bus
clock being enabled. Hence calling the suspend function while already
in suspended state results in that register access triggering a bus
error. In order to avoid that, track the state of the bus clock and
only poll the debug register if not already in suspended state.
Fixes: e32a83c70cf9 ("serial: 8250-mtk: modify mtk uart power and clock management")
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
drivers/tty/serial/8250/8250_mtk.c | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -32,7 +32,7 @@
#define MTK_UART_RXTRI_AD 0x14 /* RX Trigger address */
#define MTK_UART_FRACDIV_L 0x15 /* Fractional divider LSB address */
#define MTK_UART_FRACDIV_M 0x16 /* Fractional divider MSB address */
-#define MTK_UART_DEBUG0 0x18
+#define MTK_UART_DEBUG0 0x18
#define MTK_UART_IER_XOFFI 0x20 /* Enable XOFF character interrupt */
#define MTK_UART_IER_RTSI 0x40 /* Enable RTS Modem status interrupt */
#define MTK_UART_IER_CTSI 0x80 /* Enable CTS Modem status interrupt */
@@ -418,13 +418,12 @@ static int __maybe_unused mtk8250_runtim
struct mtk8250_data *data = dev_get_drvdata(dev);
struct uart_8250_port *up = serial8250_get_port(data->line);
- /* wait until UART in idle status */
- while
- (serial_in(up, MTK_UART_DEBUG0));
-
if (data->clk_count == 0U) {
dev_dbg(dev, "%s clock count is 0\n", __func__);
} else {
+ /* wait until UART in idle status */
+ while
+ (serial_in(up, MTK_UART_DEBUG0));
clk_disable_unprepare(data->bus_clk);
data->clk_count--;
}

View File

@@ -10,6 +10,7 @@ import (
"github.com/metacubex/mihomo/component/loopback"
"github.com/metacubex/mihomo/component/resolver"
C "github.com/metacubex/mihomo/constant"
"github.com/metacubex/mihomo/constant/features"
)
type Direct struct {
@@ -24,9 +25,11 @@ type DirectOption struct {
// DialContext implements C.ProxyAdapter
func (d *Direct) DialContext(ctx context.Context, metadata *C.Metadata, opts ...dialer.Option) (C.Conn, error) {
if !features.CMFA {
if err := d.loopBack.CheckConn(metadata); err != nil {
return nil, err
}
}
opts = append(opts, dialer.WithResolver(resolver.DefaultResolver))
c, err := dialer.DialContext(ctx, "tcp", metadata.RemoteAddress(), d.Base.DialOptions(opts...)...)
if err != nil {
@@ -38,9 +41,11 @@ func (d *Direct) DialContext(ctx context.Context, metadata *C.Metadata, opts ...
// ListenPacketContext implements C.ProxyAdapter
func (d *Direct) ListenPacketContext(ctx context.Context, metadata *C.Metadata, opts ...dialer.Option) (C.PacketConn, error) {
if !features.CMFA {
if err := d.loopBack.CheckPacketConn(metadata); err != nil {
return nil, err
}
}
// net.UDPConn.WriteTo only working with *net.UDPAddr, so we need a net.UDPAddr
if !metadata.Resolved() {
ip, err := resolver.ResolveIPWithResolver(ctx, metadata.Host, resolver.DefaultResolver)

View File

@@ -8,7 +8,6 @@
#
START=99
STOP=99
USE_PROCD=1
CONFIG=koolproxy

View File

@@ -532,18 +532,18 @@ dependencies = [
[[package]]
name = "clap"
version = "4.5.6"
version = "4.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9689a29b593160de5bc4aacab7b5d54fb52231de70122626c178e6a368994c7"
checksum = "5db83dced34638ad474f39f250d7fea9598bdd239eaced1bdf45d597da0f433f"
dependencies = [
"clap_builder",
]
[[package]]
name = "clap_builder"
version = "4.5.6"
version = "4.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e5387378c84f6faa26890ebf9f0a92989f8873d4d380467bcd0d8d8620424df"
checksum = "f7e204572485eb3fbf28f871612191521df159bc3e15a9f5064c66dba3a8c05f"
dependencies = [
"anstream",
"anstyle",
@@ -1397,12 +1397,12 @@ dependencies = [
[[package]]
name = "http-body-util"
version = "0.1.1"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d"
checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f"
dependencies = [
"bytes",
"futures-core",
"futures-util",
"http 1.1.0",
"http-body",
"pin-project-lite",
@@ -1410,9 +1410,9 @@ dependencies = [
[[package]]
name = "httparse"
version = "1.9.1"
version = "1.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8720bf4c5bfb5b6c350840c4cd14b787bf00ed51c148c857fbf7a6ddb7062764"
checksum = "9f3935c160d00ac752e09787e6e6bfc26494c2183cc922f1bc678a60d4733bc2"
[[package]]
name = "httpdate"
@@ -2653,7 +2653,6 @@ dependencies = [
"futures-core",
"futures-util",
"h2 0.4.5",
"hickory-resolver",
"http 1.1.0",
"http-body",
"http-body-util",
@@ -3220,6 +3219,7 @@ dependencies = [
"etherparse",
"futures",
"hickory-resolver",
"http 1.1.0",
"http-body-util",
"hyper",
"idna 1.0.0",
@@ -3229,6 +3229,7 @@ dependencies = [
"libc",
"log",
"lru_time_cache",
"mime",
"native-tls",
"nix",
"once_cell",

View File

@@ -121,7 +121,7 @@ service = ["local", "server", "manager"]
winservice = ["service", "windows-service"]
# Enables Hickory-DNS for replacing tokio's builtin DNS resolver
hickory-dns = ["shadowsocks-service/hickory-dns", "reqwest/hickory-dns"]
hickory-dns = ["shadowsocks-service/hickory-dns"]
# Hickory-DNS was renamed from Trust-DNS, keep compatibility.
trust-dns = ["hickory-dns"]
dns-over-tls = ["shadowsocks-service/dns-over-tls"]
@@ -161,12 +161,11 @@ local-socks4 = ["local", "shadowsocks-service/local-socks4"]
# Enable Tun interface protocol for sslocal
local-tun = ["local", "shadowsocks-service/local-tun", "ipnet"]
# Enable Fake DNS for sslocal
local-fake-dns = ["local", "shadowsocks-service/local-fake-dns"]
local-fake-dns = ["local", "shadowsocks-service/local-fake-dns", "ipnet"]
# sslocal support online URL (SIP008 Online Configuration Delivery)
# https://shadowsocks.org/doc/sip008.html
local-online-config = [
"local",
"reqwest",
"mime",
"shadowsocks-service/local-online-config",
]

View File

@@ -98,7 +98,7 @@ local-tun = ["local", "etherparse", "tun2", "smoltcp"]
local-fake-dns = ["local", "trust-dns", "sled", "bson"]
# sslocal support online URL (SIP008 Online Configuration Delivery)
# https://shadowsocks.org/doc/sip008.html
local-online-config = ["local"]
local-online-config = ["local", "local-http", "mime", "http"]
# Enable Stream Cipher Protocol
# WARN: Stream Cipher Protocol is proved to be insecure
@@ -157,6 +157,7 @@ libc = "0.2.141"
hyper = { version = "1.3", optional = true, features = ["full"] }
http-body-util = { version = "0.1", optional = true }
http = { version = "1.1", optional = true }
hickory-resolver = { version = "0.24", optional = true, features = [
"serde-config",
@@ -166,6 +167,7 @@ idna = "1.0"
ipnet = "2.9"
iprange = "0.6"
regex = "1.4"
mime = { version = "0.3", optional = true }
tun2 = { version = "1", optional = true, features = ["async"] }
etherparse = { version = "0.15", optional = true }

View File

@@ -70,14 +70,7 @@ use serde::{Deserialize, Serialize};
use shadowsocks::relay::socks5::Address;
use shadowsocks::{
config::{
ManagerAddr,
Mode,
ReplayAttackPolicy,
ServerAddr,
ServerConfig,
ServerSource,
ServerUser,
ServerUserManager,
ManagerAddr, Mode, ReplayAttackPolicy, ServerAddr, ServerConfig, ServerSource, ServerUser, ServerUserManager,
ServerWeight,
},
crypto::CipherKind,
@@ -234,6 +227,10 @@ struct SSConfig {
#[cfg(feature = "local-online-config")]
#[serde(skip_serializing_if = "Option::is_none")]
version: Option<u32>,
#[cfg(feature = "local-online-config")]
#[serde(skip_serializing_if = "Option::is_none")]
online_config: Option<SSOnlineConfig>,
}
#[derive(Serialize, Deserialize, Debug, Default)]
@@ -406,6 +403,13 @@ struct SSServerExtConfig {
outbound_bind_interface: Option<String>,
}
#[cfg(feature = "local-online-config")]
#[derive(Serialize, Deserialize, Debug, Default)]
struct SSOnlineConfig {
config_url: String,
update_interval: Option<u64>,
}
/// Server config type
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ConfigType {
@@ -1237,6 +1241,17 @@ impl LocalInstanceConfig {
}
}
/// OnlineConfiguration (SIP008)
/// https://shadowsocks.org/doc/sip008.html
#[cfg(feature = "local-online-config")]
#[derive(Debug, Clone)]
pub struct OnlineConfig {
/// SIP008 URL
pub config_url: String,
/// Update interval, 3600s by default
pub update_interval: Option<Duration>,
}
/// Configuration
#[derive(Clone, Debug)]
pub struct Config {
@@ -1341,6 +1356,11 @@ pub struct Config {
/// Workers in runtime
/// It should be replaced with metrics APIs: https://github.com/tokio-rs/tokio/issues/4073
pub worker_count: usize,
/// OnlineConfiguration (SIP008)
/// https://shadowsocks.org/doc/sip008.html
#[cfg(feature = "local-online-config")]
pub online_config: Option<OnlineConfig>,
}
/// Configuration parsing error kind
@@ -1462,6 +1482,9 @@ impl Config {
config_path: None,
worker_count: 1,
#[cfg(feature = "local-online-config")]
online_config: None,
}
}
@@ -2352,6 +2375,14 @@ impl Config {
nconfig.acl = Some(acl);
}
#[cfg(feature = "local-online-config")]
if let Some(online_config) = config.online_config {
nconfig.online_config = Some(OnlineConfig {
config_url: online_config.config_url,
update_interval: online_config.update_interval.map(Duration::from_secs),
});
}
Ok(nconfig)
}
@@ -3090,6 +3121,15 @@ impl fmt::Display for Config {
jconf.acl = Some(acl.file_path().to_str().unwrap().to_owned());
}
// OnlineConfig
#[cfg(feature = "local-online-config")]
if let Some(ref online_config) = self.online_config {
jconf.online_config = Some(SSOnlineConfig {
config_url: online_config.config_url.clone(),
update_interval: online_config.update_interval.as_ref().map(Duration::as_secs),
});
}
write!(f, "{}", json5::to_string(&jconf).unwrap())
}
}

View File

@@ -2,20 +2,26 @@
use std::{
collections::VecDeque,
fmt::Debug,
future::Future,
io::{self, ErrorKind},
pin::Pin,
sync::Arc,
task::{Context, Poll},
time::{Duration, Instant},
};
use hyper::{
body,
body::{self, Body},
client::conn::{http1, http2},
http::uri::Scheme,
rt::{Sleep, Timer},
Request,
Response,
};
use log::{error, trace};
use lru_time_cache::LruCache;
use pin_project::pin_project;
use shadowsocks::relay::Address;
use tokio::sync::Mutex;
@@ -29,33 +35,96 @@ use super::{
const CONNECTION_EXPIRE_DURATION: Duration = Duration::from_secs(20);
/// HTTPClient API request errors
#[derive(thiserror::Error, Debug)]
pub enum HttpClientError {
/// Errors from hyper
#[error("{0}")]
Hyper(#[from] hyper::Error),
/// std::io::Error
#[error("{0}")]
Io(#[from] io::Error),
}
#[derive(Clone)]
pub struct HttpClient {
#[allow(clippy::type_complexity)]
cache_conn: Arc<Mutex<LruCache<Address, VecDeque<(HttpConnection, Instant)>>>>,
#[derive(Clone, Debug)]
pub struct TokioTimer;
impl Timer for TokioTimer {
fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>> {
Box::pin(TokioSleep {
inner: tokio::time::sleep(duration),
})
}
fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>> {
Box::pin(TokioSleep {
inner: tokio::time::sleep_until(deadline.into()),
})
}
fn reset(&self, sleep: &mut Pin<Box<dyn Sleep>>, new_deadline: Instant) {
if let Some(sleep) = sleep.as_mut().downcast_mut_pin::<TokioSleep>() {
sleep.reset(new_deadline.into())
}
}
}
impl HttpClient {
pub fn new() -> HttpClient {
#[pin_project]
pub(crate) struct TokioSleep {
#[pin]
pub(crate) inner: tokio::time::Sleep,
}
impl Future for TokioSleep {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.project().inner.poll(cx)
}
}
impl Sleep for TokioSleep {}
impl TokioSleep {
pub fn reset(self: Pin<&mut Self>, deadline: Instant) {
self.project().inner.as_mut().reset(deadline.into());
}
}
/// HTTPClient, supporting HTTP/1.1 and H2, HTTPS.
pub struct HttpClient<B> {
#[allow(clippy::type_complexity)]
cache_conn: Arc<Mutex<LruCache<Address, VecDeque<(HttpConnection<B>, Instant)>>>>,
}
impl<B> Clone for HttpClient<B> {
fn clone(&self) -> Self {
HttpClient {
cache_conn: self.cache_conn.clone(),
}
}
}
impl<B> HttpClient<B>
where
B: Body + Send + Unpin + Debug + 'static,
B::Data: Send,
B::Error: Into<Box<dyn ::std::error::Error + Send + Sync>>,
{
/// Create a new HttpClient
pub fn new() -> HttpClient<B> {
HttpClient {
cache_conn: Arc::new(Mutex::new(LruCache::with_expiry_duration(CONNECTION_EXPIRE_DURATION))),
}
}
/// Make HTTP requests
#[inline]
pub async fn send_request(
&self,
context: Arc<ServiceContext>,
req: Request<body::Incoming>,
balancer: &PingBalancer,
req: Request<B>,
balancer: Option<&PingBalancer>,
) -> Result<Response<body::Incoming>, HttpClientError> {
let host = match host_addr(req.uri()) {
Some(h) => h,
@@ -96,7 +165,7 @@ impl HttpClient {
self.send_request_conn(host, c, req).await.map_err(Into::into)
}
async fn get_cached_connection(&self, host: &Address) -> Option<HttpConnection> {
async fn get_cached_connection(&self, host: &Address) -> Option<HttpConnection<B>> {
if let Some(q) = self.cache_conn.lock().await.get_mut(host) {
while let Some((c, inst)) = q.pop_front() {
let now = Instant::now();
@@ -115,8 +184,8 @@ impl HttpClient {
async fn send_request_conn(
&self,
host: Address,
mut c: HttpConnection,
req: Request<body::Incoming>,
mut c: HttpConnection<B>,
req: Request<B>,
) -> hyper::Result<Response<body::Incoming>> {
trace!("HTTP making request to host: {}, request: {:?}", host, req);
let response = c.send_request(req).await?;
@@ -141,19 +210,24 @@ impl HttpClient {
}
}
enum HttpConnection {
Http1(http1::SendRequest<body::Incoming>),
Http2(http2::SendRequest<body::Incoming>),
enum HttpConnection<B> {
Http1(http1::SendRequest<B>),
Http2(http2::SendRequest<B>),
}
impl HttpConnection {
impl<B> HttpConnection<B>
where
B: Body + Send + Unpin + 'static,
B::Data: Send,
B::Error: Into<Box<dyn ::std::error::Error + Send + Sync>>,
{
async fn connect(
context: Arc<ServiceContext>,
scheme: &Scheme,
host: Address,
domain: &str,
balancer: &PingBalancer,
) -> io::Result<HttpConnection> {
balancer: Option<&PingBalancer>,
) -> io::Result<HttpConnection<B>> {
if *scheme != Scheme::HTTP && *scheme != Scheme::HTTPS {
return Err(io::Error::new(ErrorKind::InvalidInput, "invalid scheme"));
}
@@ -173,7 +247,7 @@ impl HttpConnection {
scheme: &Scheme,
host: Address,
stream: AutoProxyClientStream,
) -> io::Result<HttpConnection> {
) -> io::Result<HttpConnection<B>> {
trace!(
"HTTP making new HTTP/1.1 connection to host: {}, scheme: {}",
host,
@@ -207,7 +281,7 @@ impl HttpConnection {
host: Address,
domain: &str,
stream: AutoProxyClientStream,
) -> io::Result<HttpConnection> {
) -> io::Result<HttpConnection<B>> {
trace!("HTTP making new TLS connection to host: {}, scheme: {}", host, scheme);
// TLS handshake, check alpn for h2 support.
@@ -216,6 +290,7 @@ impl HttpConnection {
if stream.negotiated_http2() {
// H2 connnection
let (send_request, connection) = match http2::Builder::new(TokioExecutor)
.timer(TokioTimer)
.keep_alive_interval(Duration::from_secs(15))
.handshake(TokioIo::new(stream))
.await
@@ -254,7 +329,7 @@ impl HttpConnection {
}
#[inline]
pub async fn send_request(&mut self, req: Request<body::Incoming>) -> hyper::Result<Response<body::Incoming>> {
pub async fn send_request(&mut self, req: Request<B>) -> hyper::Result<Response<body::Incoming>> {
match self {
HttpConnection::Http1(r) => r.send_request(req).await,
HttpConnection::Http2(r) => r.send_request(req).await,

View File

@@ -35,7 +35,7 @@ use super::{
pub struct HttpService {
context: Arc<ServiceContext>,
peer_addr: SocketAddr,
http_client: HttpClient,
http_client: HttpClient<body::Incoming>,
balancer: PingBalancer,
}
@@ -43,7 +43,7 @@ impl HttpService {
pub fn new(
context: Arc<ServiceContext>,
peer_addr: SocketAddr,
http_client: HttpClient,
http_client: HttpClient<body::Incoming>,
balancer: PingBalancer,
) -> HttpService {
HttpService {
@@ -90,7 +90,7 @@ impl HttpService {
// Connect to Shadowsocks' remote
//
// FIXME: What STATUS should I return for connection error?
let (mut stream, server_opt) = match connect_host(self.context, &host, &self.balancer).await {
let (mut stream, server_opt) = match connect_host(self.context, &host, Some(&self.balancer)).await {
Ok(s) => s,
Err(err) => {
error!("failed to CONNECT host: {}, error: {}", host, err);
@@ -153,7 +153,11 @@ impl HttpService {
// Set keep-alive for connection with remote
set_conn_keep_alive(version, req.headers_mut(), conn_keep_alive);
let mut res = match self.http_client.send_request(self.context, req, &self.balancer).await {
let mut res = match self
.http_client
.send_request(self.context, req, Some(&self.balancer))
.await
{
Ok(resp) => resp,
Err(HttpClientError::Hyper(e)) => return Err(e),
Err(HttpClientError::Io(err)) => {

View File

@@ -70,26 +70,20 @@ impl ProxyHttpStream {
static TLS_CONFIG: Lazy<Arc<ClientConfig>> = Lazy::new(|| {
let mut config = ClientConfig::builder()
.with_root_certificates(match rustls_native_certs::load_native_certs() {
Ok(certs) => {
.with_root_certificates({
// Load WebPKI roots (Mozilla's root certificates)
let mut store = RootCertStore::empty();
store.extend(webpki_roots::TLS_SERVER_ROOTS.iter().cloned());
if let Ok(certs) = rustls_native_certs::load_native_certs() {
for cert in certs {
if let Err(err) = store.add(cert) {
warn!("failed to add cert (native), error: {}", err);
}
}
}
store
}
Err(err) => {
warn!("failed to load native certs, {}, going to load from webpki-roots", err);
let mut store = RootCertStore::empty();
store.extend(webpki_roots::TLS_SERVER_ROOTS.iter().cloned());
store
}
})
.with_no_client_auth();

View File

@@ -2,7 +2,10 @@
//!
//! https://www.ietf.org/rfc/rfc2068.txt
pub use self::server::{Http, HttpBuilder, HttpConnectionHandler};
pub use self::{
http_client::{HttpClient, HttpClientError},
server::{Http, HttpBuilder, HttpConnectionHandler},
};
mod http_client;
mod http_service;

View File

@@ -4,7 +4,7 @@
use std::{io, net::SocketAddr, sync::Arc, time::Duration};
use hyper::{server::conn::http1, service};
use hyper::{body, server::conn::http1, service};
use log::{error, info, trace};
use shadowsocks::{config::ServerAddr, net::TcpListener};
use tokio::{
@@ -138,7 +138,7 @@ impl Http {
pub struct HttpConnectionHandler {
context: Arc<ServiceContext>,
balancer: PingBalancer,
http_client: HttpClient,
http_client: HttpClient<body::Incoming>,
}
impl HttpConnectionHandler {

View File

@@ -118,20 +118,28 @@ pub fn check_keep_alive(version: Version, headers: &HeaderMap<HeaderValue>, chec
pub async fn connect_host(
context: Arc<ServiceContext>,
host: &Address,
balancer: &PingBalancer,
balancer: Option<&PingBalancer>,
) -> io::Result<(AutoProxyClientStream, Option<Arc<ServerIdent>>)> {
if balancer.is_empty() {
match AutoProxyClientStream::connect_bypassed(context, host).await {
match balancer {
None => match AutoProxyClientStream::connect_bypassed(context, host).await {
Ok(s) => Ok((s, None)),
Err(err) => {
error!("failed to connect host {} bypassed, err: {}", host, err);
Err(err)
}
},
Some(balancer) if balancer.is_empty() => match AutoProxyClientStream::connect_bypassed(context, host).await {
Ok(s) => Ok((s, None)),
Err(err) => {
error!("failed to connect host {} bypassed, err: {}", host, err);
Err(err)
}
} else {
},
Some(balancer) => {
let server = balancer.best_tcp_server();
match AutoProxyClientStream::connect_with_opts(context, server.as_ref(), host, server.connect_opts_ref()).await
match AutoProxyClientStream::connect_with_opts(context, server.as_ref(), host, server.connect_opts_ref())
.await
{
Ok(s) => Ok((s, Some(server))),
Err(err) => {
@@ -145,4 +153,5 @@ pub async fn connect_host(
}
}
}
}
}

View File

@@ -18,7 +18,7 @@ use byte_string::ByteStr;
use futures::future;
use log::{debug, error, info, trace, warn};
use shadowsocks::{
config::Mode,
config::{Mode, ServerSource},
plugin::{Plugin, PluginMode},
relay::{
socks5::Address,
@@ -721,10 +721,27 @@ impl PingBalancer {
}
/// Reset servers in load balancer. Designed for auto-reloading configuration file.
pub async fn reset_servers(&self, servers: Vec<ServerInstanceConfig>) -> io::Result<()> {
pub async fn reset_servers(
&self,
servers: Vec<ServerInstanceConfig>,
replace_server_sources: &[ServerSource],
) -> io::Result<()> {
let old_context = self.inner.context.load();
let servers = servers
let mut old_servers = old_context.servers.clone();
let mut idx = 0;
while idx < old_servers.len() {
let source_match = replace_server_sources
.iter()
.any(|src| *src == old_servers[idx].server_config().source());
if source_match {
old_servers.swap_remove(idx);
} else {
idx += 1;
}
}
let mut servers = servers
.into_iter()
.map(|s| {
Arc::new(ServerIdent::new(
@@ -736,6 +753,16 @@ impl PingBalancer {
})
.collect::<Vec<Arc<ServerIdent>>>();
// Recreate a new instance for old servers (old server instance may still being held by clients)
for old_server in old_servers {
servers.push(Arc::new(ServerIdent::new(
old_context.context.clone(),
old_server.server_instance_config().clone(),
old_context.max_server_rtt,
old_context.check_interval * EXPECTED_CHECK_POINTS_IN_CHECK_WINDOW,
)));
}
let (shared_context, task_abortable) = PingBalancerContext::new(
servers,
old_context.context.clone(),

View File

@@ -114,6 +114,10 @@ impl ServerIdent {
&mut self.svr_cfg.config
}
pub fn server_instance_config(&self) -> &ServerInstanceConfig {
&self.svr_cfg
}
pub fn tcp_score(&self) -> &ServerScore {
&self.tcp_score
}

View File

@@ -35,6 +35,8 @@ use self::dns::{Dns, DnsBuilder};
use self::fake_dns::{FakeDns, FakeDnsBuilder};
#[cfg(feature = "local-http")]
use self::http::{Http, HttpBuilder};
#[cfg(feature = "local-online-config")]
use self::online_config::{OnlineConfigService, OnlineConfigServiceBuilder};
#[cfg(feature = "local-redir")]
use self::redir::{Redir, RedirBuilder};
use self::socks::{Socks, SocksBuilder};
@@ -52,6 +54,8 @@ pub mod fake_dns;
pub mod http;
pub mod loadbalancing;
pub mod net;
#[cfg(feature = "local-online-config")]
pub mod online_config;
#[cfg(feature = "local-redir")]
pub mod redir;
pub mod socks;
@@ -107,6 +111,8 @@ pub struct Server {
local_stat_addr: Option<LocalFlowStatAddress>,
#[cfg(feature = "local-flow-stat")]
flow_stat: Arc<FlowStat>,
#[cfg(feature = "local-online-config")]
online_config: Option<OnlineConfigService>,
}
impl Server {
@@ -117,6 +123,7 @@ impl Server {
trace!("{:?}", config);
// Warning for Stream Ciphers
// NOTE: This will only check servers in config.
#[cfg(feature = "stream-cipher")]
for inst in config.server.iter() {
let server = &inst.config;
@@ -225,8 +232,8 @@ impl Server {
balancer_builder.check_best_interval(intv);
}
for server in config.server {
balancer_builder.add_server(server);
for server in &config.server {
balancer_builder.add_server(server.clone());
}
balancer_builder.build().await?
@@ -251,6 +258,21 @@ impl Server {
local_stat_addr: config.local_stat_addr,
#[cfg(feature = "local-flow-stat")]
flow_stat: context.flow_stat(),
#[cfg(feature = "local-online-config")]
online_config: match config.online_config {
None => None,
Some(online_config) => {
let mut builder = OnlineConfigServiceBuilder::new(
Arc::new(context.clone()),
online_config.config_url,
balancer.clone(),
);
if let Some(update_interval) = online_config.update_interval {
builder.set_update_interval(update_interval);
}
Some(builder.build().await?)
}
},
};
for local_instance in config.local {
@@ -567,6 +589,11 @@ impl Server {
vfut.push(ServerHandle(tokio::spawn(report_fut)));
}
#[cfg(feature = "local-online-config")]
if let Some(online_config) = self.online_config {
vfut.push(ServerHandle(tokio::spawn(online_config.run())));
}
let (res, ..) = future::select_all(vfut).await;
res
}

View File

@@ -0,0 +1,233 @@
//! Online Config (SIP008)
//!
//! Online Configuration Delivery URL (https://shadowsocks.org/doc/sip008.html)
use std::{
io,
sync::Arc,
time::{Duration, Instant},
};
use crate::{
config::{Config, ConfigType},
local::{context::ServiceContext, http::HttpClient, loadbalancing::PingBalancer},
};
use futures::StreamExt;
use http_body_util::BodyExt;
use log::{debug, error, trace, warn};
use mime::Mime;
use shadowsocks::config::ServerSource;
use tokio::time;
/// OnlineConfigService builder pattern
pub struct OnlineConfigServiceBuilder {
context: Arc<ServiceContext>,
config_url: String,
balancer: PingBalancer,
config_update_interval: Duration,
}
impl OnlineConfigServiceBuilder {
/// Create a Builder
pub fn new(context: Arc<ServiceContext>, config_url: String, balancer: PingBalancer) -> OnlineConfigServiceBuilder {
OnlineConfigServiceBuilder {
context,
config_url,
balancer,
config_update_interval: Duration::from_secs(3600),
}
}
/// Set update interval. Default is 3600s
pub fn set_update_interval(&mut self, update_interval: Duration) {
self.config_update_interval = update_interval;
}
/// Build OnlineConfigService
pub async fn build(self) -> io::Result<OnlineConfigService> {
let mut service = OnlineConfigService {
context: self.context,
http_client: HttpClient::new(),
config_url: self.config_url,
config_update_interval: self.config_update_interval,
balancer: self.balancer,
};
// Run once after creation.
service.run_once().await?;
Ok(service)
}
}
pub struct OnlineConfigService {
context: Arc<ServiceContext>,
http_client: HttpClient<String>,
config_url: String,
config_update_interval: Duration,
balancer: PingBalancer,
}
impl OnlineConfigService {
async fn run_once(&mut self) -> io::Result<()> {
match time::timeout(Duration::from_secs(30), self.run_once_impl()).await {
Ok(o) => o,
Err(..) => {
error!("server-loader task timeout, url: {}", self.config_url);
Err(io::ErrorKind::TimedOut.into())
}
}
}
async fn run_once_impl(&mut self) -> io::Result<()> {
static SHADOWSOCKS_USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"));
let start_time = Instant::now();
let req = match hyper::Request::builder()
.header("User-Agent", SHADOWSOCKS_USER_AGENT)
.method("GET")
.uri(&self.config_url)
.body(String::new())
{
Ok(r) => r,
Err(err) => {
error!("server-loader task failed to make hyper::Request, error: {}", err);
return Err(io::Error::new(io::ErrorKind::Other, err));
}
};
let rsp = match self.http_client.send_request(self.context.clone(), req, None).await {
Ok(r) => r,
Err(err) => {
error!("server-loader task failed to get {}, error: {}", self.config_url, err);
return Err(io::Error::new(io::ErrorKind::Other, err));
}
};
let fetch_time = Instant::now();
// Content-Type: application/json; charset=utf-8
// mandatory in standard SIP008
match rsp.headers().get("Content-Type") {
Some(h) => match h.to_str() {
Ok(hstr) => match hstr.parse::<Mime>() {
Ok(content_type) => {
if content_type.type_() == mime::APPLICATION
&& content_type.subtype() == mime::JSON
&& content_type.get_param(mime::CHARSET) == Some(mime::UTF_8)
{
trace!("checked Content-Type: {:?}", h);
} else {
warn!(
"Content-Type is not \"application/json; charset=utf-8\", which is mandatory in standard SIP008. found {:?}",
h
);
}
}
Err(err) => {
warn!("Content-Type parse failed, value: {:?}, error: {}", h, err);
}
},
Err(..) => {
warn!("Content-Type is not a UTF-8 string: {:?}", h);
}
},
None => {
warn!("missing Content-Type in SIP008 response from {}", self.config_url);
}
}
let mut collected_body = Vec::new();
if let Some(content_length) = rsp.headers().get(http::header::CONTENT_LENGTH) {
if let Ok(content_length) = content_length.to_str() {
if let Ok(content_length) = content_length.parse::<usize>() {
collected_body.reserve(content_length);
}
}
};
let mut body = rsp.into_data_stream();
while let Some(data) = body.next().await {
match data {
Ok(data) => collected_body.extend_from_slice(&data),
Err(err) => {
error!(
"server-loader task failed to read body, url: {}, error: {}",
self.config_url, err
);
return Err(io::Error::new(io::ErrorKind::Other, err));
}
}
}
let parsed_body = match String::from_utf8(collected_body) {
Ok(b) => b,
Err(..) => return Err(io::Error::new(io::ErrorKind::Other, "body contains non-utf8 bytes").into()),
};
let online_config = match Config::load_from_str(&parsed_body, ConfigType::OnlineConfig) {
Ok(c) => c,
Err(err) => {
error!(
"server-loader task failed to load from url: {}, error: {}",
self.config_url, err
);
return Err(io::Error::new(io::ErrorKind::Other, err).into());
}
};
if let Err(err) = online_config.check_integrity() {
error!(
"server-loader task failed to load from url: {}, error: {}",
self.config_url, err
);
return Err(io::Error::new(io::ErrorKind::Other, err).into());
}
let after_read_time = Instant::now();
// Merge with static servers
let server_len = online_config.server.len();
// Update into ping balancers
if let Err(err) = self
.balancer
.reset_servers(online_config.server, &[ServerSource::OnlineConfig])
.await
{
error!(
"server-loader task failed to reset balancer, url: {}, error: {}",
self.config_url, err
);
return Err(err);
};
let finish_time = Instant::now();
debug!("server-loader task finished loading {} servers from url: {}, fetch time: {:?}, read time: {:?}, load time: {:?}, total time: {:?}",
server_len,
self.config_url,
fetch_time - start_time,
after_read_time - fetch_time,
finish_time - after_read_time,
finish_time - start_time,
);
Ok(())
}
/// Start service loop
pub async fn run(mut self) -> io::Result<()> {
debug!(
"server-loader task started, url: {}, update interval: {:?}",
self.config_url, self.config_update_interval
);
loop {
time::sleep(self.config_update_interval).await;
let _ = self.run_once().await;
}
}
}

View File

@@ -1,7 +1,5 @@
//! Common configuration utilities
#[cfg(feature = "local-online-config")]
use std::time::Duration;
use std::{
env,
fs::OpenOptions,
@@ -104,10 +102,6 @@ pub struct Config {
/// Runtime configuration
pub runtime: RuntimeConfig,
/// Online Configuration Delivery (SIP008)
#[cfg(feature = "local-online-config")]
pub online_config: Option<OnlineConfig>,
}
impl Config {
@@ -171,14 +165,6 @@ impl Config {
config.runtime = nruntime;
}
#[cfg(feature = "local-online-config")]
if let Some(online_config) = ssconfig.online_config {
config.online_config = Some(OnlineConfig {
config_url: online_config.config_url,
update_interval: online_config.update_interval.map(Duration::from_secs),
});
}
Ok(config)
}
@@ -272,24 +258,11 @@ pub struct RuntimeConfig {
pub mode: RuntimeMode,
}
/// OnlineConfiguration (SIP008)
/// https://shadowsocks.org/doc/sip008.html
#[cfg(feature = "local-online-config")]
#[derive(Debug, Clone)]
pub struct OnlineConfig {
/// SIP008 URL
pub config_url: String,
/// Update interval, 3600s by default
pub update_interval: Option<Duration>,
}
#[derive(Deserialize)]
struct SSConfig {
#[cfg(feature = "logging")]
log: Option<SSLogConfig>,
runtime: Option<SSRuntimeConfig>,
#[cfg(feature = "local-online-config")]
online_config: Option<SSOnlineConfig>,
}
#[cfg(feature = "logging")]
@@ -312,10 +285,3 @@ struct SSRuntimeConfig {
worker_count: Option<usize>,
mode: Option<String>,
}
#[cfg(feature = "local-online-config")]
#[derive(Deserialize, Debug, Default)]
struct SSOnlineConfig {
config_url: String,
update_interval: Option<u64>,
}

View File

@@ -1,7 +1,6 @@
//! Local server launchers
use std::{
fmt::{self, Display},
future::Future,
net::IpAddr,
path::PathBuf,
@@ -11,7 +10,7 @@ use std::{
};
use clap::{builder::PossibleValuesParser, Arg, ArgAction, ArgGroup, ArgMatches, Command, ValueHint};
use futures::future::{self, BoxFuture, FutureExt};
use futures::future::{self, FutureExt};
use log::{error, info, trace};
use tokio::{
self,
@@ -25,12 +24,7 @@ use shadowsocks_service::shadowsocks::relay::socks5::Address;
use shadowsocks_service::{
acl::AccessControl,
config::{
read_variable_field_value,
Config,
ConfigType,
LocalConfig,
LocalInstanceConfig,
ProtocolType,
read_variable_field_value, Config, ConfigType, LocalConfig, LocalInstanceConfig, ProtocolType,
ServerInstanceConfig,
},
local::{loadbalancing::PingBalancer, Server},
@@ -45,8 +39,7 @@ use shadowsocks_service::{
use crate::logging;
use crate::{
config::{Config as ServiceConfig, RuntimeMode},
monitor,
vparser,
monitor, vparser,
};
#[cfg(feature = "local-dns")]
@@ -576,7 +569,7 @@ pub fn define_command_line_options(mut app: Command) -> Command {
/// Create `Runtime` and `main` entry
pub fn create(matches: &ArgMatches) -> Result<(Runtime, impl Future<Output = ExitCode>), ExitCode> {
#[cfg_attr(not(feature = "local-online-config"), allow(unused_mut))]
let (mut config, service_config, runtime) = {
let (config, _, runtime) = {
let config_path_opt = matches.get_one::<PathBuf>("CONFIG").cloned().or_else(|| {
if !matches.contains_id("SERVER_CONFIG") {
match crate::config::get_default_config_path("local.json") {
@@ -930,10 +923,10 @@ pub fn create(matches: &ArgMatches) -> Result<(Runtime, impl Future<Output = Exi
#[cfg(feature = "local-online-config")]
if let Some(online_config_url) = matches.get_one::<String>("ONLINE_CONFIG_URL") {
use crate::config::OnlineConfig;
use shadowsocks_service::config::OnlineConfig;
let online_config_update_interval = matches.get_one::<u64>("ONLINE_CONFIG_UPDATE_INTERVAL").cloned();
service_config.online_config = Some(OnlineConfig {
config.online_config = Some(OnlineConfig {
config_url: online_config_url.clone(),
update_interval: online_config_update_interval.map(Duration::from_secs),
});
@@ -991,45 +984,17 @@ pub fn create(matches: &ArgMatches) -> Result<(Runtime, impl Future<Output = Exi
let main_fut = async move {
let config_path = config.config_path.clone();
let mut static_servers = Vec::new();
for server in config.server.iter() {
match server.config.source() {
ServerSource::Default | ServerSource::CommandLine => {
static_servers.push(server.clone());
}
_ => {}
}
}
#[cfg(not(feature = "local-online-config"))]
let _ = service_config;
// Fetch servers from remote for the first time
#[cfg(feature = "local-online-config")]
if let Some(ref online_config) = service_config.online_config {
if let Ok(mut servers) = get_online_config_servers(&online_config.config_url).await {
config.server.append(&mut servers);
}
}
// Double check
if config.server.is_empty() {
eprintln!("local server cannot run without any valid servers");
return crate::EXIT_CODE_LOAD_CONFIG_FAILURE.into();
}
let instance = Server::new(config).await.expect("create local");
let reload_task = ServerReloader {
config_path,
let reload_task = match config_path {
Some(config_path) => ServerReloader {
config_path: config_path.clone(),
balancer: instance.server_balancer().clone(),
static_servers,
#[cfg(feature = "local-online-config")]
online_config_url: service_config.online_config.as_ref().map(|c| c.config_url.clone()),
#[cfg(feature = "local-online-config")]
online_config_update_interval: service_config.online_config.as_ref().and_then(|c| c.update_interval),
}
.launch_reload_server_task();
.launch_reload_server_task()
.boxed(),
None => future::pending().boxed(),
};
let abort_signal = monitor::create_signal_monitor();
let server = instance.run();
@@ -1083,110 +1048,8 @@ pub fn main(matches: &ArgMatches) -> ExitCode {
}
struct ServerReloader {
config_path: Option<PathBuf>,
static_servers: Vec<ServerInstanceConfig>,
config_path: PathBuf,
balancer: PingBalancer,
#[cfg(feature = "local-online-config")]
online_config_url: Option<String>,
#[cfg(feature = "local-online-config")]
online_config_update_interval: Option<Duration>,
}
#[cfg(feature = "local-online-config")]
async fn get_online_config_servers(
online_config_url: &str,
) -> Result<Vec<ServerInstanceConfig>, Box<dyn std::error::Error>> {
use log::warn;
use mime::Mime;
use reqwest::{redirect::Policy, Client};
#[inline]
async fn get_online_config(online_config_url: &str) -> reqwest::Result<String> {
static SHADOWSOCKS_USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"));
let client = Client::builder()
.user_agent(SHADOWSOCKS_USER_AGENT)
.deflate(true)
.gzip(true)
.brotli(true)
.zstd(true)
.redirect(Policy::limited(3))
.timeout(Duration::from_secs(30))
.build()?;
let response = client.get(online_config_url).send().await?;
if response.url().scheme() != "https" {
warn!(
"SIP008 suggests configuration URL should use https, but current URL is {}",
response.url().scheme()
);
}
// Content-Type: application/json; charset=utf-8
// mandatory in standard SIP008
match response.headers().get("Content-Type") {
Some(h) => match h.to_str() {
Ok(hstr) => match hstr.parse::<Mime>() {
Ok(content_type) => {
if content_type.type_() == mime::APPLICATION
&& content_type.subtype() == mime::JSON
&& content_type.get_param(mime::CHARSET) == Some(mime::UTF_8)
{
trace!("checked Content-Type: {:?}", h);
} else {
warn!(
"Content-Type is not \"application/json; charset=utf-8\", which is mandatory in standard SIP008. found {:?}",
h
);
}
}
Err(err) => {
warn!("Content-Type parse failed, value: {:?}, error: {}", h, err);
}
},
Err(..) => {
warn!("Content-Type is not a UTF-8 string: {:?}", h);
}
},
None => {
warn!("missing Content-Type in SIP008 response from {}", online_config_url);
}
}
response.text().await
}
let body = match get_online_config(online_config_url).await {
Ok(b) => b,
Err(err) => {
error!(
"server-loader task failed to load from url: {}, error: {:?}",
online_config_url, err
);
return Err(Box::new(err));
}
};
let online_config = match Config::load_from_str(&body, ConfigType::OnlineConfig) {
Ok(c) => c,
Err(err) => {
error!(
"server-loader task failed to load from url: {}, error: {}",
online_config_url, err
);
return Err(Box::new(err));
}
};
if let Err(err) = online_config.check_integrity() {
error!(
"server-loader task failed to load from url: {}, error: {}",
online_config_url, err
);
return Err(Box::new(err));
}
Ok(online_config.server)
}
impl ServerReloader {
@@ -1194,59 +1057,28 @@ impl ServerReloader {
async fn run_once(&self) -> Result<(), Box<dyn std::error::Error>> {
let start_time = Instant::now();
let mut servers = self.static_servers.clone();
// Load servers from source
if let Some(ref config_path) = self.config_path {
let mut source_config = match Config::load_from_file(config_path, ConfigType::Local) {
let source_config = match Config::load_from_file(&self.config_path, ConfigType::Local) {
Ok(c) => c,
Err(err) => {
error!(
"server-loader task failed to load from file: {}, error: {}",
config_path.display(),
self.config_path.display(),
err
);
return Err(Box::new(err));
}
};
servers.append(&mut source_config.server);
}
// Load servers from online-config (SIP008)
#[cfg(feature = "local-online-config")]
if let Some(ref online_config_url) = self.online_config_url {
let mut online_servers = get_online_config_servers(online_config_url).await?;
servers.append(&mut online_servers);
}
let server_len = servers.len();
struct ConfigDisplay<'a>(&'a ServerReloader);
impl Display for ConfigDisplay<'_> {
#[cfg_attr(not(feature = "local-online-config"), allow(unused_assignments, unused_variables))]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut is_first = true;
if let Some(ref config_path) = self.0.config_path {
config_path.display().fmt(f)?;
is_first = false;
}
#[cfg(feature = "local-online-config")]
if let Some(ref online_config_url) = self.0.online_config_url {
if !is_first {
f.write_str(", ")?;
f.write_str(online_config_url)?;
}
}
Ok(())
}
}
let server_len = source_config.server.len();
let fetch_end_time = Instant::now();
if let Err(err) = self.balancer.reset_servers(servers).await {
if let Err(err) = self
.balancer
.reset_servers(source_config.server, &[ServerSource::Configuration])
.await
{
error!("server-loader task {} servers but found error: {}", server_len, err);
return Err(Box::new(err));
}
@@ -1255,7 +1087,7 @@ impl ServerReloader {
info!(
"server-loader task load from {} with {} servers, fetch costs: {:?}, total costs: {:?}",
ConfigDisplay(self),
self.config_path.display(),
server_len,
fetch_end_time - start_time,
total_end_time - start_time,
@@ -1278,53 +1110,16 @@ impl ServerReloader {
}
}
#[cfg(feature = "local-online-config")]
async fn launch_online_reload_server_task(self: Arc<Self>) {
use log::debug;
use tokio::time;
let update_interval = self
.online_config_update_interval
.unwrap_or(Duration::from_secs(60 * 60));
debug!("server-loader task updating in interval {:?}", update_interval);
loop {
time::sleep(update_interval).await;
let _ = self.run_once().await;
}
}
#[cfg(unix)]
async fn launch_reload_server_task(self) {
let arc_self = Arc::new(self);
#[allow(unused_mut)]
let mut futs: Vec<BoxFuture<()>> = Vec::new();
#[cfg(unix)]
{
#[cfg_attr(not(feature = "local-online-config"), allow(unused_mut))]
let mut has_things_to_do = arc_self.config_path.is_some();
#[cfg(feature = "local-online-config")]
{
has_things_to_do = has_things_to_do || arc_self.online_config_url.is_some();
arc_self.launch_signal_reload_server_task().await
}
if has_things_to_do {
futs.push(arc_self.clone().launch_signal_reload_server_task().boxed());
}
}
#[cfg(feature = "local-online-config")]
if arc_self.online_config_url.is_some() {
futs.push(arc_self.clone().launch_online_reload_server_task().boxed());
}
if !futs.is_empty() {
future::join_all(futs.into_iter()).await;
}
drop(arc_self);
#[cfg(windows)]
async fn launch_reload_server_task(self) {
let _ = self.config_path;
let _ = self.balancer;
}
}

View File

@@ -4,7 +4,7 @@
use std::net::{IpAddr, SocketAddr};
#[cfg(feature = "local-tun")]
#[cfg(any(feature = "local-tun", feature = "local-fake-dns"))]
use ipnet::IpNet;
#[cfg(feature = "local-redir")]
use shadowsocks_service::config::RedirType;
@@ -56,7 +56,7 @@ pub fn parse_server_url(v: &str) -> Result<ServerConfig, String> {
}
}
#[cfg(feature = "local-tun")]
#[cfg(any(feature = "local-tun", feature = "local-fake-dns"))]
pub fn parse_ipnet(v: &str) -> Result<IpNet, String> {
match v.parse::<IpNet>() {
Err(..) => Err("should be a CIDR address like 10.1.2.3/24".to_owned()),

View File

@@ -111,6 +111,7 @@ func New(options Options) (*Box, error) {
ctx,
router,
logFactory.NewLogger(F.ToString("inbound/", inboundOptions.Type, "[", tag, "]")),
tag,
inboundOptions,
options.PlatformInterface,
)

View File

@@ -32,6 +32,12 @@ const (
func ProxyDisplayName(proxyType string) string {
switch proxyType {
case TypeTun:
return "TUN"
case TypeRedirect:
return "Redirect"
case TypeTProxy:
return "TProxy"
case TypeDirect:
return "Direct"
case TypeBlock:
@@ -42,6 +48,8 @@ func ProxyDisplayName(proxyType string) string {
return "SOCKS"
case TypeHTTP:
return "HTTP"
case TypeMixed:
return "Mixed"
case TypeShadowsocks:
return "Shadowsocks"
case TypeVMess:

View File

@@ -2,8 +2,9 @@
icon: material/alert-decagram
---
#### 1.10.0-alpha.11
#### 1.10.0-alpha.12
* Fix auto-redirect not configuring nftables forward chain correctly
* Fixes and improvements
### 1.9.3

View File

@@ -14,6 +14,7 @@ import (
"github.com/go-chi/chi/v5"
"github.com/go-chi/render"
"github.com/gofrs/uuid/v5"
)
func connectionRouter(router adapter.Router, trafficManager *trafficontrol.Manager) http.Handler {
@@ -76,10 +77,10 @@ func getConnections(trafficManager *trafficontrol.Manager) func(w http.ResponseW
func closeConnection(trafficManager *trafficontrol.Manager) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
id := chi.URLParam(r, "id")
id := uuid.FromStringOrNil(chi.URLParam(r, "id"))
snapshot := trafficManager.Snapshot()
for _, c := range snapshot.Connections {
if id == c.ID() {
if id == c.Metadata().ID {
c.Close()
break
}

View File

@@ -19,7 +19,6 @@ import (
"github.com/sagernet/sing-box/option"
"github.com/sagernet/sing/common"
E "github.com/sagernet/sing/common/exceptions"
F "github.com/sagernet/sing/common/format"
"github.com/sagernet/sing/common/json"
N "github.com/sagernet/sing/common/network"
"github.com/sagernet/sing/service"
@@ -218,58 +217,15 @@ func (s *Server) TrafficManager() *trafficontrol.Manager {
}
func (s *Server) RoutedConnection(ctx context.Context, conn net.Conn, metadata adapter.InboundContext, matchedRule adapter.Rule) (net.Conn, adapter.Tracker) {
tracker := trafficontrol.NewTCPTracker(conn, s.trafficManager, castMetadata(metadata), s.router, matchedRule)
tracker := trafficontrol.NewTCPTracker(conn, s.trafficManager, metadata, s.router, matchedRule)
return tracker, tracker
}
func (s *Server) RoutedPacketConnection(ctx context.Context, conn N.PacketConn, metadata adapter.InboundContext, matchedRule adapter.Rule) (N.PacketConn, adapter.Tracker) {
tracker := trafficontrol.NewUDPTracker(conn, s.trafficManager, castMetadata(metadata), s.router, matchedRule)
tracker := trafficontrol.NewUDPTracker(conn, s.trafficManager, metadata, s.router, matchedRule)
return tracker, tracker
}
func castMetadata(metadata adapter.InboundContext) trafficontrol.Metadata {
var inbound string
if metadata.Inbound != "" {
inbound = metadata.InboundType + "/" + metadata.Inbound
} else {
inbound = metadata.InboundType
}
var domain string
if metadata.Domain != "" {
domain = metadata.Domain
} else {
domain = metadata.Destination.Fqdn
}
var processPath string
if metadata.ProcessInfo != nil {
if metadata.ProcessInfo.ProcessPath != "" {
processPath = metadata.ProcessInfo.ProcessPath
} else if metadata.ProcessInfo.PackageName != "" {
processPath = metadata.ProcessInfo.PackageName
}
if processPath == "" {
if metadata.ProcessInfo.UserId != -1 {
processPath = F.ToString(metadata.ProcessInfo.UserId)
}
} else if metadata.ProcessInfo.User != "" {
processPath = F.ToString(processPath, " (", metadata.ProcessInfo.User, ")")
} else if metadata.ProcessInfo.UserId != -1 {
processPath = F.ToString(processPath, " (", metadata.ProcessInfo.UserId, ")")
}
}
return trafficontrol.Metadata{
NetWork: metadata.Network,
Type: inbound,
SrcIP: metadata.Source.Addr,
DstIP: metadata.Destination.Addr,
SrcPort: F.ToString(metadata.Source.Port),
DstPort: F.ToString(metadata.Destination.Port),
Host: domain,
DNSMode: "normal",
ProcessPath: processPath,
}
}
func authentication(serverSecret string) func(next http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {

View File

@@ -2,10 +2,17 @@ package trafficontrol
import (
"runtime"
"sync"
"time"
C "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing-box/experimental/clashapi/compatible"
"github.com/sagernet/sing/common"
"github.com/sagernet/sing/common/atomic"
"github.com/sagernet/sing/common/json"
"github.com/sagernet/sing/common/x/list"
"github.com/gofrs/uuid/v5"
)
type Manager struct {
@@ -16,7 +23,9 @@ type Manager struct {
uploadTotal atomic.Int64
downloadTotal atomic.Int64
connections compatible.Map[string, tracker]
connections compatible.Map[uuid.UUID, Tracker]
closedConnectionsAccess sync.Mutex
closedConnections list.List[TrackerMetadata]
ticker *time.Ticker
done chan struct{}
// process *process.Process
@@ -33,12 +42,22 @@ func NewManager() *Manager {
return manager
}
func (m *Manager) Join(c tracker) {
m.connections.Store(c.ID(), c)
func (m *Manager) Join(c Tracker) {
m.connections.Store(c.Metadata().ID, c)
}
func (m *Manager) Leave(c tracker) {
m.connections.Delete(c.ID())
func (m *Manager) Leave(c Tracker) {
metadata := c.Metadata()
_, loaded := m.connections.LoadAndDelete(metadata.ID)
if loaded {
metadata.ClosedAt = time.Now()
m.closedConnectionsAccess.Lock()
defer m.closedConnectionsAccess.Unlock()
if m.closedConnections.Len() >= 1000 {
m.closedConnections.PopFront()
}
m.closedConnections.PushBack(metadata)
}
}
func (m *Manager) PushUploaded(size int64) {
@@ -59,14 +78,39 @@ func (m *Manager) Total() (up int64, down int64) {
return m.uploadTotal.Load(), m.downloadTotal.Load()
}
func (m *Manager) Connections() int {
func (m *Manager) ConnectionsLen() int {
return m.connections.Len()
}
func (m *Manager) Connections() []TrackerMetadata {
var connections []TrackerMetadata
m.connections.Range(func(_ uuid.UUID, value Tracker) bool {
connections = append(connections, value.Metadata())
return true
})
return connections
}
func (m *Manager) ClosedConnections() []TrackerMetadata {
m.closedConnectionsAccess.Lock()
defer m.closedConnectionsAccess.Unlock()
return m.closedConnections.Array()
}
func (m *Manager) Connection(id uuid.UUID) Tracker {
connection, loaded := m.connections.Load(id)
if !loaded {
return nil
}
return connection
}
func (m *Manager) Snapshot() *Snapshot {
var connections []tracker
m.connections.Range(func(_ string, value tracker) bool {
var connections []Tracker
m.connections.Range(func(_ uuid.UUID, value Tracker) bool {
if value.Metadata().OutboundType != C.TypeDNS {
connections = append(connections, value)
}
return true
})
@@ -75,8 +119,8 @@ func (m *Manager) Snapshot() *Snapshot {
m.memory = memStats.StackInuse + memStats.HeapInuse + memStats.HeapIdle - memStats.HeapReleased
return &Snapshot{
UploadTotal: m.uploadTotal.Load(),
DownloadTotal: m.downloadTotal.Load(),
Upload: m.uploadTotal.Load(),
Download: m.downloadTotal.Load(),
Connections: connections,
Memory: m.memory,
}
@@ -114,8 +158,17 @@ func (m *Manager) Close() error {
}
type Snapshot struct {
DownloadTotal int64 `json:"downloadTotal"`
UploadTotal int64 `json:"uploadTotal"`
Connections []tracker `json:"connections"`
Memory uint64 `json:"memory"`
Download int64
Upload int64
Connections []Tracker
Memory uint64
}
func (s *Snapshot) MarshalJSON() ([]byte, error) {
return json.Marshal(map[string]any{
"downloadTotal": s.Download,
"uploadTotal": s.Upload,
"connections": common.Map(s.Connections, func(t Tracker) TrackerMetadata { return t.Metadata() }),
"memory": s.Memory,
})
}

View File

@@ -2,97 +2,135 @@ package trafficontrol
import (
"net"
"net/netip"
"time"
"github.com/sagernet/sing-box/adapter"
"github.com/sagernet/sing/common"
"github.com/sagernet/sing/common/atomic"
"github.com/sagernet/sing/common/bufio"
F "github.com/sagernet/sing/common/format"
"github.com/sagernet/sing/common/json"
N "github.com/sagernet/sing/common/network"
"github.com/gofrs/uuid/v5"
)
type Metadata struct {
NetWork string `json:"network"`
Type string `json:"type"`
SrcIP netip.Addr `json:"sourceIP"`
DstIP netip.Addr `json:"destinationIP"`
SrcPort string `json:"sourcePort"`
DstPort string `json:"destinationPort"`
Host string `json:"host"`
DNSMode string `json:"dnsMode"`
ProcessPath string `json:"processPath"`
type TrackerMetadata struct {
ID uuid.UUID
Metadata adapter.InboundContext
CreatedAt time.Time
ClosedAt time.Time
Upload *atomic.Int64
Download *atomic.Int64
Chain []string
Rule adapter.Rule
Outbound string
OutboundType string
}
type tracker interface {
ID() string
Close() error
Leave()
}
type trackerInfo struct {
UUID uuid.UUID `json:"id"`
Metadata Metadata `json:"metadata"`
UploadTotal *atomic.Int64 `json:"upload"`
DownloadTotal *atomic.Int64 `json:"download"`
Start time.Time `json:"start"`
Chain []string `json:"chains"`
Rule string `json:"rule"`
RulePayload string `json:"rulePayload"`
}
func (t trackerInfo) MarshalJSON() ([]byte, error) {
func (t TrackerMetadata) MarshalJSON() ([]byte, error) {
var inbound string
if t.Metadata.Inbound != "" {
inbound = t.Metadata.InboundType + "/" + t.Metadata.Inbound
} else {
inbound = t.Metadata.InboundType
}
var domain string
if t.Metadata.Domain != "" {
domain = t.Metadata.Domain
} else {
domain = t.Metadata.Destination.Fqdn
}
var processPath string
if t.Metadata.ProcessInfo != nil {
if t.Metadata.ProcessInfo.ProcessPath != "" {
processPath = t.Metadata.ProcessInfo.ProcessPath
} else if t.Metadata.ProcessInfo.PackageName != "" {
processPath = t.Metadata.ProcessInfo.PackageName
}
if processPath == "" {
if t.Metadata.ProcessInfo.UserId != -1 {
processPath = F.ToString(t.Metadata.ProcessInfo.UserId)
}
} else if t.Metadata.ProcessInfo.User != "" {
processPath = F.ToString(processPath, " (", t.Metadata.ProcessInfo.User, ")")
} else if t.Metadata.ProcessInfo.UserId != -1 {
processPath = F.ToString(processPath, " (", t.Metadata.ProcessInfo.UserId, ")")
}
}
var rule string
if t.Rule != nil {
rule = F.ToString(t.Rule, " => ", t.Rule.Outbound())
} else {
rule = "final"
}
return json.Marshal(map[string]any{
"id": t.UUID.String(),
"metadata": t.Metadata,
"upload": t.UploadTotal.Load(),
"download": t.DownloadTotal.Load(),
"start": t.Start,
"id": t.ID,
"metadata": map[string]any{
"network": t.Metadata.Network,
"type": inbound,
"sourceIP": t.Metadata.Source.Addr,
"destinationIP": t.Metadata.Destination.Addr,
"sourcePort": t.Metadata.Source.Port,
"destinationPort": t.Metadata.Destination.Port,
"host": domain,
"dnsMode": "normal",
"processPath": processPath,
},
"upload": t.Upload.Load(),
"download": t.Download.Load(),
"start": t.CreatedAt,
"chains": t.Chain,
"rule": t.Rule,
"rulePayload": t.RulePayload,
"rule": rule,
"rulePayload": "",
})
}
type tcpTracker struct {
N.ExtendedConn `json:"-"`
*trackerInfo
type Tracker interface {
adapter.Tracker
Metadata() TrackerMetadata
Close() error
}
type TCPConn struct {
N.ExtendedConn
metadata TrackerMetadata
manager *Manager
}
func (tt *tcpTracker) ID() string {
return tt.UUID.String()
func (tt *TCPConn) Metadata() TrackerMetadata {
return tt.metadata
}
func (tt *tcpTracker) Close() error {
func (tt *TCPConn) Close() error {
tt.manager.Leave(tt)
return tt.ExtendedConn.Close()
}
func (tt *tcpTracker) Leave() {
func (tt *TCPConn) Leave() {
tt.manager.Leave(tt)
}
func (tt *tcpTracker) Upstream() any {
func (tt *TCPConn) Upstream() any {
return tt.ExtendedConn
}
func (tt *tcpTracker) ReaderReplaceable() bool {
func (tt *TCPConn) ReaderReplaceable() bool {
return true
}
func (tt *tcpTracker) WriterReplaceable() bool {
func (tt *TCPConn) WriterReplaceable() bool {
return true
}
func NewTCPTracker(conn net.Conn, manager *Manager, metadata Metadata, router adapter.Router, rule adapter.Rule) *tcpTracker {
uuid, _ := uuid.NewV4()
var chain []string
var next string
func NewTCPTracker(conn net.Conn, manager *Manager, metadata adapter.InboundContext, router adapter.Router, rule adapter.Rule) *TCPConn {
id, _ := uuid.NewV4()
var (
chain []string
next string
outbound string
outboundType string
)
if rule == nil {
if defaultOutbound, err := router.DefaultOutbound(N.NetworkTCP); err == nil {
next = defaultOutbound.Tag()
@@ -106,17 +144,17 @@ func NewTCPTracker(conn net.Conn, manager *Manager, metadata Metadata, router ad
if !loaded {
break
}
outbound = detour.Tag()
outboundType = detour.Type()
group, isGroup := detour.(adapter.OutboundGroup)
if !isGroup {
break
}
next = group.Now()
}
upload := new(atomic.Int64)
download := new(atomic.Int64)
t := &tcpTracker{
tracker := &TCPConn{
ExtendedConn: bufio.NewCounterConn(conn, []N.CountFunc{func(n int64) {
upload.Add(n)
manager.PushUploaded(n)
@@ -124,64 +162,62 @@ func NewTCPTracker(conn net.Conn, manager *Manager, metadata Metadata, router ad
download.Add(n)
manager.PushDownloaded(n)
}}),
manager: manager,
trackerInfo: &trackerInfo{
UUID: uuid,
Start: time.Now(),
metadata: TrackerMetadata{
ID: id,
Metadata: metadata,
CreatedAt: time.Now(),
Upload: upload,
Download: download,
Chain: common.Reverse(chain),
Rule: "",
UploadTotal: upload,
DownloadTotal: download,
Rule: rule,
Outbound: outbound,
OutboundType: outboundType,
},
manager: manager,
}
if rule != nil {
t.trackerInfo.Rule = rule.String() + " => " + rule.Outbound()
} else {
t.trackerInfo.Rule = "final"
}
manager.Join(t)
return t
manager.Join(tracker)
return tracker
}
type udpTracker struct {
type UDPConn struct {
N.PacketConn `json:"-"`
*trackerInfo
metadata TrackerMetadata
manager *Manager
}
func (ut *udpTracker) ID() string {
return ut.UUID.String()
func (ut *UDPConn) Metadata() TrackerMetadata {
return ut.metadata
}
func (ut *udpTracker) Close() error {
func (ut *UDPConn) Close() error {
ut.manager.Leave(ut)
return ut.PacketConn.Close()
}
func (ut *udpTracker) Leave() {
func (ut *UDPConn) Leave() {
ut.manager.Leave(ut)
}
func (ut *udpTracker) Upstream() any {
func (ut *UDPConn) Upstream() any {
return ut.PacketConn
}
func (ut *udpTracker) ReaderReplaceable() bool {
func (ut *UDPConn) ReaderReplaceable() bool {
return true
}
func (ut *udpTracker) WriterReplaceable() bool {
func (ut *UDPConn) WriterReplaceable() bool {
return true
}
func NewUDPTracker(conn N.PacketConn, manager *Manager, metadata Metadata, router adapter.Router, rule adapter.Rule) *udpTracker {
uuid, _ := uuid.NewV4()
var chain []string
var next string
func NewUDPTracker(conn N.PacketConn, manager *Manager, metadata adapter.InboundContext, router adapter.Router, rule adapter.Rule) *UDPConn {
id, _ := uuid.NewV4()
var (
chain []string
next string
outbound string
outboundType string
)
if rule == nil {
if defaultOutbound, err := router.DefaultOutbound(N.NetworkUDP); err == nil {
next = defaultOutbound.Tag()
@@ -195,17 +231,17 @@ func NewUDPTracker(conn N.PacketConn, manager *Manager, metadata Metadata, route
if !loaded {
break
}
outbound = detour.Tag()
outboundType = detour.Type()
group, isGroup := detour.(adapter.OutboundGroup)
if !isGroup {
break
}
next = group.Now()
}
upload := new(atomic.Int64)
download := new(atomic.Int64)
ut := &udpTracker{
trackerConn := &UDPConn{
PacketConn: bufio.NewCounterPacketConn(conn, []N.CountFunc{func(n int64) {
upload.Add(n)
manager.PushUploaded(n)
@@ -213,24 +249,19 @@ func NewUDPTracker(conn N.PacketConn, manager *Manager, metadata Metadata, route
download.Add(n)
manager.PushDownloaded(n)
}}),
manager: manager,
trackerInfo: &trackerInfo{
UUID: uuid,
Start: time.Now(),
metadata: TrackerMetadata{
ID: id,
Metadata: metadata,
CreatedAt: time.Now(),
Upload: upload,
Download: download,
Chain: common.Reverse(chain),
Rule: "",
UploadTotal: upload,
DownloadTotal: download,
Rule: rule,
Outbound: outbound,
OutboundType: outboundType,
},
manager: manager,
}
if rule != nil {
ut.trackerInfo.Rule = rule.String() + " => " + rule.Outbound()
} else {
ut.trackerInfo.Rule = "final"
}
manager.Join(ut)
return ut
manager.Join(trackerConn)
return trackerConn
}

View File

@@ -14,4 +14,6 @@ const (
CommandSetClashMode
CommandGetSystemProxyStatus
CommandSetSystemProxyEnabled
CommandConnections
CommandCloseConnection
)

View File

@@ -31,6 +31,7 @@ type CommandClientHandler interface {
WriteGroups(message OutboundGroupIterator)
InitializeClashMode(modeList StringIterator, currentMode string)
UpdateClashMode(newMode string)
WriteConnections(message *Connections)
}
func NewStandaloneCommandClient() *CommandClient {
@@ -116,6 +117,13 @@ func (c *CommandClient) Connect() error {
return nil
}
go c.handleModeConn(conn)
case CommandConnections:
err = binary.Write(conn, binary.BigEndian, c.options.StatusInterval)
if err != nil {
return E.Cause(err, "write interval")
}
c.handler.Connected()
go c.handleConnectionsConn(conn)
}
return nil
}

View File

@@ -0,0 +1,53 @@
package libbox
import (
"bufio"
"net"
"github.com/sagernet/sing-box/experimental/clashapi"
"github.com/sagernet/sing/common/binary"
E "github.com/sagernet/sing/common/exceptions"
"github.com/gofrs/uuid/v5"
)
func (c *CommandClient) CloseConnection(connId string) error {
conn, err := c.directConnect()
if err != nil {
return err
}
defer conn.Close()
writer := bufio.NewWriter(conn)
err = binary.WriteData(writer, binary.BigEndian, connId)
if err != nil {
return err
}
err = writer.Flush()
if err != nil {
return err
}
return readError(conn)
}
func (s *CommandServer) handleCloseConnection(conn net.Conn) error {
reader := bufio.NewReader(conn)
var connId string
err := binary.ReadData(reader, binary.BigEndian, &connId)
if err != nil {
return E.Cause(err, "read connection id")
}
service := s.service
if service == nil {
return writeError(conn, E.New("service not ready"))
}
clashServer := service.instance.Router().ClashServer()
if clashServer == nil {
return writeError(conn, E.New("Clash API disabled"))
}
targetConn := clashServer.(*clashapi.Server).TrafficManager().Connection(uuid.FromStringOrNil(connId))
if targetConn == nil {
return writeError(conn, E.New("connection already closed"))
}
targetConn.Close()
return writeError(conn, nil)
}

View File

@@ -0,0 +1,268 @@
package libbox
import (
"bufio"
"net"
"slices"
"strings"
"time"
"github.com/sagernet/sing-box/experimental/clashapi"
"github.com/sagernet/sing-box/experimental/clashapi/trafficontrol"
"github.com/sagernet/sing/common/binary"
E "github.com/sagernet/sing/common/exceptions"
M "github.com/sagernet/sing/common/metadata"
"github.com/gofrs/uuid/v5"
)
func (c *CommandClient) handleConnectionsConn(conn net.Conn) {
defer conn.Close()
reader := bufio.NewReader(conn)
var connections Connections
for {
err := binary.ReadData(reader, binary.BigEndian, &connections.connections)
if err != nil {
c.handler.Disconnected(err.Error())
return
}
c.handler.WriteConnections(&connections)
}
}
func (s *CommandServer) handleConnectionsConn(conn net.Conn) error {
var interval int64
err := binary.Read(conn, binary.BigEndian, &interval)
if err != nil {
return E.Cause(err, "read interval")
}
ticker := time.NewTicker(time.Duration(interval))
defer ticker.Stop()
ctx := connKeepAlive(conn)
var trafficManager *trafficontrol.Manager
for {
service := s.service
if service != nil {
clashServer := service.instance.Router().ClashServer()
if clashServer == nil {
return E.New("Clash API disabled")
}
trafficManager = clashServer.(*clashapi.Server).TrafficManager()
break
}
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
}
}
var (
connections = make(map[uuid.UUID]*Connection)
outConnections []Connection
)
writer := bufio.NewWriter(conn)
for {
outConnections = outConnections[:0]
for _, connection := range trafficManager.Connections() {
outConnections = append(outConnections, newConnection(connections, connection, false))
}
for _, connection := range trafficManager.ClosedConnections() {
outConnections = append(outConnections, newConnection(connections, connection, true))
}
err = binary.WriteData(writer, binary.BigEndian, outConnections)
if err != nil {
return err
}
err = writer.Flush()
if err != nil {
return err
}
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
}
}
}
const (
ConnectionStateAll = iota
ConnectionStateActive
ConnectionStateClosed
)
type Connections struct {
connections []Connection
filteredConnections []Connection
outConnections *[]Connection
}
func (c *Connections) FilterState(state int32) {
c.filteredConnections = c.filteredConnections[:0]
switch state {
case ConnectionStateAll:
c.filteredConnections = append(c.filteredConnections, c.connections...)
case ConnectionStateActive:
for _, connection := range c.connections {
if connection.ClosedAt == 0 {
c.filteredConnections = append(c.filteredConnections, connection)
}
}
case ConnectionStateClosed:
for _, connection := range c.connections {
if connection.ClosedAt != 0 {
c.filteredConnections = append(c.filteredConnections, connection)
}
}
}
}
// compareConnectionsDesc orders two connections so the larger key sorts
// first (descending); ties fall back to comparing IDs descending, keeping
// the ordering total and therefore stable across refreshes.
func compareConnectionsDesc(xKey, yKey int64, xID, yID string) int {
	if xKey < yKey {
		return 1
	} else if xKey > yKey {
		return -1
	}
	return strings.Compare(yID, xID)
}

// SortByDate sorts the filtered connections by creation time, newest first.
func (c *Connections) SortByDate() {
	slices.SortStableFunc(c.filteredConnections, func(x, y Connection) int {
		return compareConnectionsDesc(x.CreatedAt, y.CreatedAt, x.ID, y.ID)
	})
}

// SortByTraffic sorts the filtered connections by current transfer rate
// (uplink + downlink since the last snapshot), highest first.
func (c *Connections) SortByTraffic() {
	slices.SortStableFunc(c.filteredConnections, func(x, y Connection) int {
		return compareConnectionsDesc(x.Uplink+x.Downlink, y.Uplink+y.Downlink, x.ID, y.ID)
	})
}

// SortByTrafficTotal sorts the filtered connections by cumulative transferred
// bytes (uplink + downlink totals), highest first.
func (c *Connections) SortByTrafficTotal() {
	slices.SortStableFunc(c.filteredConnections, func(x, y Connection) int {
		return compareConnectionsDesc(x.UplinkTotal+x.DownlinkTotal, y.UplinkTotal+y.DownlinkTotal, x.ID, y.ID)
	})
}
// Iterator returns an iterator over pointers to the currently filtered
// connections.
func (c *Connections) Iterator() ConnectionIterator {
	return newPtrIterator(c.filteredConnections)
}
// Connection is the serializable snapshot of a single tracked connection as
// exchanged between the command server and its client. Field semantics follow
// how newConnection populates them.
type Connection struct {
	ID string
	Inbound string
	InboundType string
	IPVersion int32
	Network string
	Source string
	Destination string
	Domain string
	Protocol string
	User string
	FromOutbound string
	CreatedAt int64 // unix milliseconds
	ClosedAt int64 // unix milliseconds; 0 while the connection is active
	Uplink int64 // bytes since the previous snapshot; zeroed once closed
	Downlink int64 // bytes since the previous snapshot; zeroed once closed
	UplinkTotal int64 // cumulative bytes uploaded
	DownlinkTotal int64 // cumulative bytes downloaded
	Rule string
	Outbound string
	OutboundType string
	ChainList []string
}
// Chain returns an iterator over the connection's outbound chain tags.
func (c *Connection) Chain() StringIterator {
	return newIterator(c.ChainList)
}
// DisplayDestination returns a human-friendly destination string: when the
// recorded destination is a bare IP but a domain is known for the connection,
// the domain is substituted while the original port is kept. Otherwise the
// raw destination is returned unchanged.
func (c *Connection) DisplayDestination() string {
	destination := M.ParseSocksaddr(c.Destination)
	if !destination.IsIP() || c.Domain == "" {
		return c.Destination
	}
	display := M.Socksaddr{
		Fqdn: c.Domain,
		Port: destination.Port,
	}
	return display.String()
}
// ConnectionIterator walks a sequence of Connection pointers.
type ConnectionIterator interface {
	Next() *Connection
	HasNext() bool
}
// newConnection converts tracker metadata into a Connection snapshot. The
// connections map carries per-ID state between polling rounds so that Uplink
// and Downlink can be reported as deltas since the previous round; isClosed
// marks entries coming from the closed-connections list. The map is mutated:
// first-seen IDs are inserted, known IDs are updated in place.
func newConnection(connections map[uuid.UUID]*Connection, metadata trafficontrol.TrackerMetadata, isClosed bool) Connection {
	if oldConnection, loaded := connections[metadata.ID]; loaded {
		if isClosed {
			// First time this entry is observed as closed: freeze it —
			// rates drop to zero and the close timestamp is recorded.
			// Subsequent rounds return the frozen snapshot unchanged.
			if oldConnection.ClosedAt == 0 {
				oldConnection.Uplink = 0
				oldConnection.Downlink = 0
				oldConnection.ClosedAt = metadata.ClosedAt.UnixMilli()
			}
			return *oldConnection
		}
		// Still active: delta = current totals minus the totals captured in
		// the previous round.
		lastUplink := oldConnection.UplinkTotal
		lastDownlink := oldConnection.DownlinkTotal
		uplinkTotal := metadata.Upload.Load()
		downlinkTotal := metadata.Download.Load()
		oldConnection.Uplink = uplinkTotal - lastUplink
		oldConnection.Downlink = downlinkTotal - lastDownlink
		oldConnection.UplinkTotal = uplinkTotal
		oldConnection.DownlinkTotal = downlinkTotal
		return *oldConnection
	}
	var rule string
	if metadata.Rule != nil {
		rule = metadata.Rule.String()
	}
	uplinkTotal := metadata.Upload.Load()
	downlinkTotal := metadata.Download.Load()
	// First observation: the whole running total counts as this round's
	// delta — unless the connection is already closed, in which case rates
	// are reported as zero.
	uplink := uplinkTotal
	downlink := downlinkTotal
	var closedAt int64
	if !metadata.ClosedAt.IsZero() {
		closedAt = metadata.ClosedAt.UnixMilli()
		uplink = 0
		downlink = 0
	}
	connection := Connection{
		ID: metadata.ID.String(),
		Inbound: metadata.Metadata.Inbound,
		InboundType: metadata.Metadata.InboundType,
		IPVersion: int32(metadata.Metadata.IPVersion),
		Network: metadata.Metadata.Network,
		Source: metadata.Metadata.Source.String(),
		Destination: metadata.Metadata.Destination.String(),
		Domain: metadata.Metadata.Domain,
		Protocol: metadata.Metadata.Protocol,
		User: metadata.Metadata.User,
		FromOutbound: metadata.Metadata.Outbound,
		CreatedAt: metadata.CreatedAt.UnixMilli(),
		ClosedAt: closedAt,
		Uplink: uplink,
		Downlink: downlink,
		UplinkTotal: uplinkTotal,
		DownlinkTotal: downlinkTotal,
		Rule: rule,
		Outbound: metadata.Outbound,
		OutboundType: metadata.OutboundType,
		ChainList: metadata.Chain,
	}
	connections[metadata.ID] = &connection
	return connection
}

View File

@@ -14,36 +14,6 @@ import (
"github.com/sagernet/sing/service"
)
type OutboundGroup struct {
Tag string
Type string
Selectable bool
Selected string
IsExpand bool
items []*OutboundGroupItem
}
func (g *OutboundGroup) GetItems() OutboundGroupItemIterator {
return newIterator(g.items)
}
type OutboundGroupIterator interface {
Next() *OutboundGroup
HasNext() bool
}
type OutboundGroupItem struct {
Tag string
Type string
URLTestTime int64
URLTestDelay int32
}
type OutboundGroupItemIterator interface {
Next() *OutboundGroupItem
HasNext() bool
}
func (c *CommandClient) handleGroupConn(conn net.Conn) {
defer conn.Close()
@@ -92,6 +62,36 @@ func (s *CommandServer) handleGroupConn(conn net.Conn) error {
}
}
type OutboundGroup struct {
Tag string
Type string
Selectable bool
Selected string
IsExpand bool
items []*OutboundGroupItem
}
func (g *OutboundGroup) GetItems() OutboundGroupItemIterator {
return newIterator(g.items)
}
type OutboundGroupIterator interface {
Next() *OutboundGroup
HasNext() bool
}
type OutboundGroupItem struct {
Tag string
Type string
URLTestTime int64
URLTestDelay int32
}
type OutboundGroupItemIterator interface {
Next() *OutboundGroupItem
HasNext() bool
}
func readGroups(reader io.Reader) (OutboundGroupIterator, error) {
var groupLength uint16
err := binary.Read(reader, binary.BigEndian, &groupLength)

View File

@@ -33,6 +33,8 @@ type CommandServer struct {
urlTestUpdate chan struct{}
modeUpdate chan struct{}
logReset chan struct{}
closedConnections []Connection
}
type CommandServerHandler interface {
@@ -176,6 +178,10 @@ func (s *CommandServer) handleConnection(conn net.Conn) error {
return s.handleGetSystemProxyStatus(conn)
case CommandSetSystemProxyEnabled:
return s.handleSetSystemProxyEnabled(conn)
case CommandConnections:
return s.handleConnectionsConn(conn)
case CommandCloseConnection:
return s.handleCloseConnection(conn)
default:
return E.New("unknown command: ", command)
}

View File

@@ -36,7 +36,7 @@ func (s *CommandServer) readStatus() StatusMessage {
trafficManager := clashServer.(*clashapi.Server).TrafficManager()
message.Uplink, message.Downlink = trafficManager.Now()
message.UplinkTotal, message.DownlinkTotal = trafficManager.Total()
message.ConnectionsIn = int32(trafficManager.Connections())
message.ConnectionsIn = int32(trafficManager.ConnectionsLen())
}
}

View File

@@ -17,6 +17,10 @@ func newIterator[T any](values []T) *iterator[T] {
return &iterator[T]{values}
}
func newPtrIterator[T any](values []T) *iterator[*T] {
return &iterator[*T]{common.Map(values, func(value T) *T { return &value })}
}
func (i *iterator[T]) Next() T {
if len(i.values) == 0 {
return common.DefaultValue[T]()

View File

@@ -149,33 +149,6 @@ func (w *platformInterfaceWrapper) OpenTun(options *tun.Options, platformOptions
return tun.New(*options)
}
func (w *platformInterfaceWrapper) FindProcessInfo(ctx context.Context, network string, source netip.AddrPort, destination netip.AddrPort) (*process.Info, error) {
var uid int32
if w.useProcFS {
uid = procfs.ResolveSocketByProcSearch(network, source, destination)
if uid == -1 {
return nil, E.New("procfs: not found")
}
} else {
var ipProtocol int32
switch N.NetworkName(network) {
case N.NetworkTCP:
ipProtocol = syscall.IPPROTO_TCP
case N.NetworkUDP:
ipProtocol = syscall.IPPROTO_UDP
default:
return nil, E.New("unknown network: ", network)
}
var err error
uid, err = w.iif.FindConnectionOwner(ipProtocol, source.Addr().String(), int32(source.Port()), destination.Addr().String(), int32(destination.Port()))
if err != nil {
return nil, err
}
}
packageName, _ := w.iif.PackageNameByUid(uid)
return &process.Info{UserId: uid, PackageName: packageName}, nil
}
func (w *platformInterfaceWrapper) UsePlatformDefaultInterfaceMonitor() bool {
return w.iif.UsePlatformDefaultInterfaceMonitor()
}
@@ -229,6 +202,33 @@ func (w *platformInterfaceWrapper) ReadWIFIState() adapter.WIFIState {
return (adapter.WIFIState)(*wifiState)
}
func (w *platformInterfaceWrapper) FindProcessInfo(ctx context.Context, network string, source netip.AddrPort, destination netip.AddrPort) (*process.Info, error) {
var uid int32
if w.useProcFS {
uid = procfs.ResolveSocketByProcSearch(network, source, destination)
if uid == -1 {
return nil, E.New("procfs: not found")
}
} else {
var ipProtocol int32
switch N.NetworkName(network) {
case N.NetworkTCP:
ipProtocol = syscall.IPPROTO_TCP
case N.NetworkUDP:
ipProtocol = syscall.IPPROTO_UDP
default:
return nil, E.New("unknown network: ", network)
}
var err error
uid, err = w.iif.FindConnectionOwner(ipProtocol, source.Addr().String(), int32(source.Port()), destination.Addr().String(), int32(destination.Port()))
if err != nil {
return nil, err
}
}
packageName, _ := w.iif.PackageNameByUid(uid)
return &process.Info{UserId: uid, PackageName: packageName}, nil
}
func (w *platformInterfaceWrapper) DisableColors() bool {
return runtime.GOOS != "android"
}

View File

@@ -4,10 +4,12 @@ import (
"os"
"os/user"
"strconv"
"time"
"github.com/sagernet/sing-box/common/humanize"
C "github.com/sagernet/sing-box/constant"
_ "github.com/sagernet/sing-box/include"
"github.com/sagernet/sing-box/log"
)
var (
@@ -59,6 +61,10 @@ func FormatMemoryBytes(length int64) string {
return humanize.MemoryBytes(uint64(length))
}
func FormatDuration(duration int64) string {
return log.FormatDuration(time.Duration(duration) * time.Millisecond)
}
func ProxyDisplayType(proxyType string) string {
return C.ProxyDisplayName(proxyType)
}

View File

@@ -26,14 +26,14 @@ require (
github.com/sagernet/gvisor v0.0.0-20240428053021-e691de28565f
github.com/sagernet/quic-go v0.45.0-beta.2
github.com/sagernet/reality v0.0.0-20230406110435-ee17307e7691
github.com/sagernet/sing v0.5.0-alpha.9
github.com/sagernet/sing v0.5.0-alpha.10
github.com/sagernet/sing-dns v0.3.0-beta.5
github.com/sagernet/sing-mux v0.2.0
github.com/sagernet/sing-quic v0.2.0-beta.9
github.com/sagernet/sing-shadowsocks v0.2.6
github.com/sagernet/sing-shadowsocks2 v0.2.0
github.com/sagernet/sing-shadowtls v0.1.4
github.com/sagernet/sing-tun v0.4.0-beta.8
github.com/sagernet/sing-tun v0.4.0-beta.9
github.com/sagernet/sing-vmess v0.1.8
github.com/sagernet/smux v0.0.0-20231208180855-7041f6ea79e7
github.com/sagernet/tfo-go v0.0.0-20231209031829-7b5343ac1dc6

View File

@@ -113,8 +113,8 @@ github.com/sagernet/quic-go v0.45.0-beta.2/go.mod h1:rs3XCo3SQ2sB96NtaKnEyq+Zkya
github.com/sagernet/reality v0.0.0-20230406110435-ee17307e7691 h1:5Th31OC6yj8byLGkEnIYp6grlXfo1QYUfiYFGjewIdc=
github.com/sagernet/reality v0.0.0-20230406110435-ee17307e7691/go.mod h1:B8lp4WkQ1PwNnrVMM6KyuFR20pU8jYBD+A4EhJovEXU=
github.com/sagernet/sing v0.2.18/go.mod h1:OL6k2F0vHmEzXz2KW19qQzu172FDgSbUSODylighuVo=
github.com/sagernet/sing v0.5.0-alpha.9 h1:Mmg+LCbaKXBeQD/ttzi0/MQa3NcUyfadIgkGzhQW7o0=
github.com/sagernet/sing v0.5.0-alpha.9/go.mod h1:ARkL0gM13/Iv5VCZmci/NuoOlePoIsW0m7BWfln/Hak=
github.com/sagernet/sing v0.5.0-alpha.10 h1:kuHl10gpjbKQAdQfyogQU3u0CVnpqC3wrAHe/+BFaXc=
github.com/sagernet/sing v0.5.0-alpha.10/go.mod h1:ARkL0gM13/Iv5VCZmci/NuoOlePoIsW0m7BWfln/Hak=
github.com/sagernet/sing-dns v0.3.0-beta.5 h1:lX+wfnBVaOlSd7+GBgb431Tt/gmYwJXSHvS1HutfnD4=
github.com/sagernet/sing-dns v0.3.0-beta.5/go.mod h1:qeO/lOUK/c3Zczp5a1VO13fbmolaM8xGKCUXtaX0/NQ=
github.com/sagernet/sing-mux v0.2.0 h1:4C+vd8HztJCWNYfufvgL49xaOoOHXty2+EAjnzN3IYo=
@@ -127,8 +127,8 @@ github.com/sagernet/sing-shadowsocks2 v0.2.0 h1:wpZNs6wKnR7mh1wV9OHwOyUr21VkS3wK
github.com/sagernet/sing-shadowsocks2 v0.2.0/go.mod h1:RnXS0lExcDAovvDeniJ4IKa2IuChrdipolPYWBv9hWQ=
github.com/sagernet/sing-shadowtls v0.1.4 h1:aTgBSJEgnumzFenPvc+kbD9/W0PywzWevnVpEx6Tw3k=
github.com/sagernet/sing-shadowtls v0.1.4/go.mod h1:F8NBgsY5YN2beQavdgdm1DPlhaKQlaL6lpDdcBglGK4=
github.com/sagernet/sing-tun v0.4.0-beta.8 h1:3FM7KpE3kmTj7aA9LYtn82pBAFHIrk2O1b84lpx/5ns=
github.com/sagernet/sing-tun v0.4.0-beta.8/go.mod h1:uoRiCzWHzHLw/angVqXDzUNiQcMRl/ZrElJryQLJFhY=
github.com/sagernet/sing-tun v0.4.0-beta.9 h1:/5hXQ0u7tHtngfXozRc+o/gt6zfHBHMOwSIHXF0+S3I=
github.com/sagernet/sing-tun v0.4.0-beta.9/go.mod h1:uoRiCzWHzHLw/angVqXDzUNiQcMRl/ZrElJryQLJFhY=
github.com/sagernet/sing-vmess v0.1.8 h1:XVWad1RpTy9b5tPxdm5MCU8cGfrTGdR8qCq6HV2aCNc=
github.com/sagernet/sing-vmess v0.1.8/go.mod h1:vhx32UNzTDUkNwOyIjcZQohre1CaytquC5mPplId8uA=
github.com/sagernet/smux v0.0.0-20231208180855-7041f6ea79e7 h1:DImB4lELfQhplLTxeq2z31Fpv8CQqqrUwTbrIRumZqQ=

View File

@@ -11,43 +11,43 @@ import (
E "github.com/sagernet/sing/common/exceptions"
)
func New(ctx context.Context, router adapter.Router, logger log.ContextLogger, options option.Inbound, platformInterface platform.Interface) (adapter.Inbound, error) {
func New(ctx context.Context, router adapter.Router, logger log.ContextLogger, tag string, options option.Inbound, platformInterface platform.Interface) (adapter.Inbound, error) {
if options.Type == "" {
return nil, E.New("missing inbound type")
}
switch options.Type {
case C.TypeTun:
return NewTun(ctx, router, logger, options.Tag, options.TunOptions, platformInterface)
return NewTun(ctx, router, logger, tag, options.TunOptions, platformInterface)
case C.TypeRedirect:
return NewRedirect(ctx, router, logger, options.Tag, options.RedirectOptions), nil
return NewRedirect(ctx, router, logger, tag, options.RedirectOptions), nil
case C.TypeTProxy:
return NewTProxy(ctx, router, logger, options.Tag, options.TProxyOptions), nil
return NewTProxy(ctx, router, logger, tag, options.TProxyOptions), nil
case C.TypeDirect:
return NewDirect(ctx, router, logger, options.Tag, options.DirectOptions), nil
return NewDirect(ctx, router, logger, tag, options.DirectOptions), nil
case C.TypeSOCKS:
return NewSocks(ctx, router, logger, options.Tag, options.SocksOptions), nil
return NewSocks(ctx, router, logger, tag, options.SocksOptions), nil
case C.TypeHTTP:
return NewHTTP(ctx, router, logger, options.Tag, options.HTTPOptions)
return NewHTTP(ctx, router, logger, tag, options.HTTPOptions)
case C.TypeMixed:
return NewMixed(ctx, router, logger, options.Tag, options.MixedOptions), nil
return NewMixed(ctx, router, logger, tag, options.MixedOptions), nil
case C.TypeShadowsocks:
return NewShadowsocks(ctx, router, logger, options.Tag, options.ShadowsocksOptions)
return NewShadowsocks(ctx, router, logger, tag, options.ShadowsocksOptions)
case C.TypeVMess:
return NewVMess(ctx, router, logger, options.Tag, options.VMessOptions)
return NewVMess(ctx, router, logger, tag, options.VMessOptions)
case C.TypeTrojan:
return NewTrojan(ctx, router, logger, options.Tag, options.TrojanOptions)
return NewTrojan(ctx, router, logger, tag, options.TrojanOptions)
case C.TypeNaive:
return NewNaive(ctx, router, logger, options.Tag, options.NaiveOptions)
return NewNaive(ctx, router, logger, tag, options.NaiveOptions)
case C.TypeHysteria:
return NewHysteria(ctx, router, logger, options.Tag, options.HysteriaOptions)
return NewHysteria(ctx, router, logger, tag, options.HysteriaOptions)
case C.TypeShadowTLS:
return NewShadowTLS(ctx, router, logger, options.Tag, options.ShadowTLSOptions)
return NewShadowTLS(ctx, router, logger, tag, options.ShadowTLSOptions)
case C.TypeVLESS:
return NewVLESS(ctx, router, logger, options.Tag, options.VLESSOptions)
return NewVLESS(ctx, router, logger, tag, options.VLESSOptions)
case C.TypeTUIC:
return NewTUIC(ctx, router, logger, options.Tag, options.TUICOptions)
return NewTUIC(ctx, router, logger, tag, options.TUICOptions)
case C.TypeHysteria2:
return NewHysteria2(ctx, router, logger, options.Tag, options.Hysteria2Options)
return NewHysteria2(ctx, router, logger, tag, options.Hysteria2Options)
default:
return nil, E.New("unknown inbound type: ", options.Type)
}

View File

@@ -43,7 +43,7 @@ func (f Formatter) Format(ctx context.Context, level Level, tag string, message
id, hasId = IDFromContext(ctx)
}
if hasId {
activeDuration := formatDuration(time.Since(id.CreatedAt))
activeDuration := FormatDuration(time.Since(id.CreatedAt))
if !f.DisableColors {
var color aurora.Color
color = aurora.Color(uint8(id.ID))
@@ -113,7 +113,7 @@ func (f Formatter) FormatWithSimple(ctx context.Context, level Level, tag string
id, hasId = IDFromContext(ctx)
}
if hasId {
activeDuration := formatDuration(time.Since(id.CreatedAt))
activeDuration := FormatDuration(time.Since(id.CreatedAt))
if !f.DisableColors {
var color aurora.Color
color = aurora.Color(uint8(id.ID))
@@ -163,7 +163,7 @@ func xd(value int, x int) string {
return message
}
func formatDuration(duration time.Duration) string {
func FormatDuration(duration time.Duration) string {
if duration < time.Second {
return F.ToString(duration.Milliseconds(), "ms")
} else if duration < time.Minute {

View File

@@ -531,7 +531,7 @@ func (r *Router) Start() error {
r.dnsClient.Start()
monitor.Finish()
if C.IsAndroid && r.platformInterface == nil {
if r.needPackageManager && r.platformInterface == nil {
monitor.Start("initialize package manager")
packageManager, err := tun.NewPackageManager(r)
monitor.Finish()

View File

@@ -5,7 +5,7 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=naiveproxy
PKG_VERSION:=125.0.6422.35-1
PKG_VERSION:=126.0.6478.40-1
PKG_RELEASE:=1
# intel 80386 & riscv64 & cortex-a76
@@ -20,47 +20,47 @@ else ifeq ($(ARCH_PREBUILT),riscv64_riscv64)
endif
ifeq ($(ARCH_PACKAGES),aarch64_cortex-a53)
PKG_HASH:=829e033c930645730e39529a7be54bde571d43d3ca01ad5f8a6c58749db34308
PKG_HASH:=d805374e3c84c199679d120bf38b427f9a2a2572310ba2209ea3f54897021dfa
else ifeq ($(ARCH_PACKAGES),aarch64_cortex-a72)
PKG_HASH:=f32ce43b362aa6ceb2f24a5a8ec5c6190722b06b5a2c97ca6fddc6cfa76202cd
PKG_HASH:=2c29345f266d3b7b617756c1bee1d9c9d0c18cd2df5f91778fa3c9ee78cbb6e0
else ifeq ($(ARCH_PACKAGES),aarch64_generic)
PKG_HASH:=702dc4bb621d7a8189482f49d12d8d4943dbdfffe2c7c197bbe32f8f9c0f4ee0
PKG_HASH:=712be9c6c31f92737e3a9aa6345d1797bb76111ba139c95e32f81ca92d9f94d8
else ifeq ($(ARCH_PACKAGES),arm_arm1176jzf-s_vfp)
PKG_HASH:=d14931ec0a312f8fd996c14ae1ec012a5f9e03d92139509f5edd3f19feba40d4
PKG_HASH:=9eb54e9e5aaa46c6555275ab1760c5597ff882fd44d3c7b861f74bdb1f11b7ee
else ifeq ($(ARCH_PACKAGES),arm_arm926ej-s)
PKG_HASH:=bd8f45efa94ab89a8af08b48523a9a4a21cf954261123d5c73ea57246c29d611
PKG_HASH:=165d08dce7efccc0dd09aed9d09745e3932a9f9090d2aece21fc3ba9cf7c8d7f
else ifeq ($(ARCH_PACKAGES),arm_cortex-a15_neon-vfpv4)
PKG_HASH:=790e264c4798031781eea07360906e86494a95f4bf7466e5d34735adc5b3842a
PKG_HASH:=2814ee81d18154af761dde96449108e12c7a076e839fb6adc90914602118afb5
else ifeq ($(ARCH_PACKAGES),arm_cortex-a5_vfpv4)
PKG_HASH:=11f585c0896236343f400f1d31e2aec7a91948f0d0f55c095264a05bbb93771a
PKG_HASH:=0a9d2f15e85b6a93580173b8a5a2527d40926fce70272570e2101f82c5bb96df
else ifeq ($(ARCH_PACKAGES),arm_cortex-a7)
PKG_HASH:=dc40da97ca7d7a24aa45e8520611c6f0dcb324b4326bc03051db12d50dcb4c35
PKG_HASH:=f62ea0698f39f30d0845c3f81389d1fc929fccacf6bd92794803dc9c40397228
else ifeq ($(ARCH_PACKAGES),arm_cortex-a7_neon-vfpv4)
PKG_HASH:=282ca2cd3dc567a99da67bca9927180ef7147cc89249f43f0359c030fbcdcc55
PKG_HASH:=55d7c177689f4d598ee45dcf4f8f837e62accdec99c3939ed351bad6abe92f46
else ifeq ($(ARCH_PACKAGES),arm_cortex-a7_vfpv4)
PKG_HASH:=c0987d38af34aae4687868625591a71d59b36906f2e453e7db458addebc594f1
PKG_HASH:=f9c00185b42913cf5623f4caa3eb9ba7883da6c0f304506889cc798e0c987a11
else ifeq ($(ARCH_PACKAGES),arm_cortex-a8_vfpv3)
PKG_HASH:=ce7a23164af8720d71fff998ead2f0279792ede5fdb9b0cc54af05d77215af43
PKG_HASH:=54285cd36969fb7a90624b569fd1c0dcbded72a992597793936f5efb7789f0c9
else ifeq ($(ARCH_PACKAGES),arm_cortex-a9)
PKG_HASH:=95af8607ce6302f62ff8c8b5ccf37c89d6c9b6a588249fb14a3120d1aab5c97e
PKG_HASH:=016895a8fa4a6ec36efa4a4890566bf33ea888526a8902da1b915573006d8dab
else ifeq ($(ARCH_PACKAGES),arm_cortex-a9_neon)
PKG_HASH:=9875ca3884cbcf1704ea44900fc5c89f62502ed76a7c79137d1ff3c32e912988
PKG_HASH:=d2508c999796c4e65a93044faa243a3640dfd9be36cf758535b7a801e61149a5
else ifeq ($(ARCH_PACKAGES),arm_cortex-a9_vfpv3-d16)
PKG_HASH:=072c9ebbcbaeedd8f7fa5d3da5733460bbb7047895f5f087356de34dd5014d7a
PKG_HASH:=0687360a7488b534818df5db071ff0feae8a0a8e6c0464fe0f64533d63682841
else ifeq ($(ARCH_PACKAGES),arm_mpcore)
PKG_HASH:=7cebee26ac672b12f4b6f7d8fd06d251c52ed75ead434f0a54c377eca4f2797d
PKG_HASH:=13cdb19c23add28f8cc02b9d0234db5836e851ef3ff4968363da27f6045b94ae
else ifeq ($(ARCH_PACKAGES),arm_xscale)
PKG_HASH:=92237ec96e598c2b508b8793cf1574172a4362b64b8fd9ad505bd3c3e86b8bb6
PKG_HASH:=a9d4e1825a391ef9294b58d765cc6425322848a70b29f64955c5933121990288
else ifeq ($(ARCH_PACKAGES),mipsel_24kc)
PKG_HASH:=3e9cc1282a67c7487595f437a2d1a07ccf94c822ecd63086227a2d6b699a71d5
PKG_HASH:=7b358d7306f77f87bcee33beb6be1d8c1d70c2128172475616bb1531bb3aa908
else ifeq ($(ARCH_PACKAGES),mipsel_mips32)
PKG_HASH:=0aa2920f09f10c60d292b809a571e562df8cf83f8ea86281457f2d06ad466533
PKG_HASH:=1bc0af17f48b83e2439534f91d462b286d8c35888bfee87785f70337088a5d32
else ifeq ($(ARCH_PACKAGES),riscv64)
PKG_HASH:=8cae7646c9cc4e99b33b2f4de65795ebeb6eb7744e9babc39e6357180eb3bfb0
PKG_HASH:=8862ca30f93825298a00473fddbf698ffed251deef28a40958c3ccd06da91e6a
else ifeq ($(ARCH_PACKAGES),x86)
PKG_HASH:=21d83d8217ab3de9d41443530e7d2a34cc3a0b0395da881b1b210209bea601c6
PKG_HASH:=c403e1bd29d19dcf811e034bf6cc6940c6ef9425b80d87a1643000e7361016aa
else ifeq ($(ARCH_PACKAGES),x86_64)
PKG_HASH:=c39f4334f1ca292febd31fa153ed662f4cfea241183cb5ee97da2ca731d7ae9e
PKG_HASH:=d88b2cc80fb3b79f13f0f1d426d2b2dda9127b0b24f477c008b4c8cfa86d99ce
else
PKG_HASH:=dummy
endif

View File

@@ -90,7 +90,7 @@ commands:
export "CXX=$PWD/third_party/clang+llvm-17.0.6-x86_64-linux-gnu-ubuntu-22.04/bin/clang++"
mkdir build
cd build
cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DUSE_TCMALLOC=on ..
cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DUSE_LIBCXX=on -DUSE_TCMALLOC=on ..
configure-qt6:
steps:
@@ -100,7 +100,7 @@ commands:
export "CXX=$PWD/third_party/clang+llvm-17.0.6-x86_64-linux-gnu-ubuntu-22.04/bin/clang++"
mkdir build
cd build
cmake -G Ninja -DGUI=on -DUSE_QT6=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DUSE_TCMALLOC=on ..
cmake -G Ninja -DGUI=on -DUSE_QT6=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DUSE_LIBCXX=on -DUSE_TCMALLOC=on ..
configure-gcc:
steps:
@@ -130,7 +130,7 @@ commands:
export "CXX=clang++"
mkdir build
cd build
cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release ..
cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DUSE_LIBCXX=on ..
configure-asan:
steps:
@@ -140,7 +140,7 @@ commands:
export "CXX=$PWD/third_party/llvm-build/Release+Asserts/bin/clang++"
mkdir build
cd build
cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DASAN=on ..
cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DASAN=on -DUSE_LIBCXX=on ..
configure-ubsan:
steps:
@@ -160,7 +160,7 @@ commands:
export "CXX=$PWD/third_party/llvm-build/Release+Asserts/bin/clang++"
mkdir build
cd build
cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DTSAN=on ..
cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DTSAN=on -DUSE_LIBCXX=on ..
configure-msan:
steps:
@@ -170,7 +170,7 @@ commands:
export "CXX=$PWD/third_party/llvm-build/Release+Asserts/bin/clang++"
mkdir build
cd build
cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DMSAN=on -DUSE_CURL=off ..
cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DMSAN=on -DUSE_CURL=off -DUSE_LIBCXX=on ..
build:
steps:

View File

@@ -31,7 +31,7 @@ freebsd_task:
- export CXX=clang++
- mkdir build
- cd build
- cmake -G Ninja -DBUILD_TESTS=on ${configure} ..
- cmake -G Ninja -DBUILD_TESTS=on -DUSE_LIBCXX=on ${configure} ..
compile_script:
- ninja -C build yass_cli yass_server yass_test
test_script:

View File

@@ -119,7 +119,7 @@ jobs:
REM start to build with workaround
mkdir build-mingw
cd build-mingw
cmake -G Ninja -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DBUILD_BENCHMARKS=on -DBUILD_TESTS=on -DGUI=on -DMINGW_MSVCRT100=on -DMINGW_WORKAROUND=on .. ${{ env.CMAKE_OPTIONS }} -DCMAKE_C_COMPILER_TARGET=${{ matrix.arch }}-pc-windows-gnu -DCMAKE_CXX_COMPILER_TARGET=${{ matrix.arch }}-pc-windows-gnu -DCMAKE_ASM_COMPILER_TARGET=${{ matrix.arch }}-pc-windows-gnu -DCMAKE_SYSROOT="%CD%\..\third_party\${{ matrix.mingw_dir }}\${{ matrix.arch }}-w64-mingw32" -DUSE_TCMALLOC=on -DENABLE_LLD=on
cmake -G Ninja -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DBUILD_BENCHMARKS=on -DBUILD_TESTS=on -DGUI=on -DMINGW_MSVCRT100=on -DMINGW_WORKAROUND=on .. ${{ env.CMAKE_OPTIONS }} -DCMAKE_C_COMPILER_TARGET=${{ matrix.arch }}-pc-windows-gnu -DCMAKE_CXX_COMPILER_TARGET=${{ matrix.arch }}-pc-windows-gnu -DCMAKE_ASM_COMPILER_TARGET=${{ matrix.arch }}-pc-windows-gnu -DCMAKE_SYSROOT="%CD%\..\third_party\${{ matrix.mingw_dir }}\${{ matrix.arch }}-w64-mingw32" -DUSE_TCMALLOC=on -DUSE_LIBCXX=on -DENABLE_LLD=on
ninja yass yass_benchmark yass_test
- name: Packaging
shell: bash

View File

@@ -44,6 +44,11 @@ jobs:
# unshallow must come first otherwise submodule may be get unshallowed
git fetch --tags --unshallow
git submodule update --init --depth 1
- name: Patch libcxx for gcc 14 support
run: |
cd third_party/libc++/trunk
patch -p1 < ../gcc14.patch
git clean -xfd
- name: Patch libcxxabi for both of armel and armhf
run: |
cd third_party/libc++abi

View File

@@ -29,13 +29,13 @@ jobs:
build_type: [Debug, Release]
sanitizer:
- name: address
cmake_options: -DASAN=on
cmake_options: -DASAN=on -DUSE_LIBCXX=on
- name: undefined behavior
cmake_options: -DUBSAN=on -DUSE_LIBCXX=off
- name: thread
cmake_options: -DTSAN=on
cmake_options: -DTSAN=on -DUSE_LIBCXX=on
- name: memory
cmake_options: -DMSAN=on -DUSE_CURL=off
cmake_options: -DMSAN=on -DUSE_LIBCXX=on -DUSE_CURL=off
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4

View File

@@ -445,7 +445,7 @@ cmake_dependent_option(
USE_SYSTEM_MIMALLOC "Use system or vendored mimalloc" OFF
USE_MIMALLOC OFF)
option(USE_LIBCXX "Build with libc++" ON)
option(USE_LIBCXX "Build with custom libc++" OFF)
option(USE_NGHTTP2 "Build with libnghttp2" ON)
cmake_dependent_option(

View File

@@ -31,14 +31,14 @@ Post Quantum Kyber Support (not enabled by default) is added on all of supported
See [Protecting Chrome Traffic with Hybrid Kyber KEM](https://blog.chromium.org/2023/08/protecting-chrome-traffic-with-hybrid.html) for more.
### Prebuilt binaries
- Android [download apk](https://github.com/Chilledheart/yass/releases/download/1.10.4/yass-android-release-arm64-1.10.4.apk) or [download 32-bit apk](https://github.com/Chilledheart/yass/releases/download/1.10.4/yass-android-release-arm-1.10.4.apk)
- Android [download apk](https://github.com/Chilledheart/yass/releases/download/1.11.0/yass-android-release-arm64-1.11.0.apk) or [download 32-bit apk](https://github.com/Chilledheart/yass/releases/download/1.11.0/yass-android-release-arm-1.11.0.apk)
- iOS [join via TestFlight](https://testflight.apple.com/join/6AkiEq09)
- Windows [download installer](https://github.com/Chilledheart/yass/releases/download/1.10.4/yass-mingw-win7-release-x86_64-1.10.4-system-installer.exe) [(require KB2999226 below windows 10)][KB2999226] or [download 32-bit installer](https://github.com/Chilledheart/yass/releases/download/1.10.4/yass-mingw-winxp-release-i686-1.10.4-system-installer.exe) [(require vc 2010 runtime)][vs2010_x86] or [download woa arm64 installer](https://github.com/Chilledheart/yass/releases/download/1.10.4/yass-mingw-release-aarch64-1.10.4-system-installer.exe)
- macOS [download intel dmg](https://github.com/Chilledheart/yass/releases/download/1.10.4/yass-macos-release-x64-1.10.4.dmg) or [download apple silicon dmg](https://github.com/Chilledheart/yass/releases/download/1.10.4/yass-macos-release-arm64-1.10.4.dmg)
- Windows [download installer](https://github.com/Chilledheart/yass/releases/download/1.11.0/yass-mingw-win7-release-x86_64-1.11.0-system-installer.exe) [(require KB2999226 below windows 10)][KB2999226] or [download 32-bit installer](https://github.com/Chilledheart/yass/releases/download/1.11.0/yass-mingw-winxp-release-i686-1.11.0-system-installer.exe) [(require vc 2010 runtime)][vs2010_x86] or [download woa arm64 installer](https://github.com/Chilledheart/yass/releases/download/1.11.0/yass-mingw-release-aarch64-1.11.0-system-installer.exe)
- macOS [download intel dmg](https://github.com/Chilledheart/yass/releases/download/1.11.0/yass-macos-release-x64-1.11.0.dmg) or [download apple silicon dmg](https://github.com/Chilledheart/yass/releases/download/1.11.0/yass-macos-release-arm64-1.11.0.dmg)
> via homebrew: `brew install --cask yass`
- Linux [download rpm](https://github.com/Chilledheart/yass/releases/download/1.10.4/yass.el7.x86_64.1.10.4.rpm) or [download deb](https://github.com/Chilledheart/yass/releases/download/1.10.4/yass-ubuntu-16.04-xenial_amd64.1.10.4.deb)
- Linux [download rpm](https://github.com/Chilledheart/yass/releases/download/1.11.0/yass.el7.x86_64.1.11.0.rpm) or [download deb](https://github.com/Chilledheart/yass/releases/download/1.11.0/yass-ubuntu-16.04-xenial_amd64.1.11.0.deb)
View more at [Release Page](https://github.com/Chilledheart/yass/releases/tag/1.10.4)
View more at [Release Page](https://github.com/Chilledheart/yass/releases/tag/1.11.0)
### NaïveProxy-Compatible Protocol Support
Cipher http2 and https are NaïveProxy-compatible.

View File

@@ -1,3 +1,9 @@
yass (1.11.0-1) UNRELEASED; urgency=medium
* bump to chromium 127 dependents
* add gtk3/gtk4/qt6 build profile
-- Chilledheart <keeyou-cn@outlook.com> Tue, 11 Jun 2024 11:02:21 +0800
yass (1.10.5-1) UNRELEASED; urgency=medium
* miscellaneous fixes

View File

@@ -58,7 +58,7 @@ ifneq ($(filter cross,$(DEB_BUILD_PROFILES)),)
override_dh_auto_configure: PKG_CONFIG = ${DEB_HOST_GNU_TYPE}-pkg-config
endif
override_dh_auto_configure: CMAKE_OPTIONS += -DENABLE_LTO=on -DENABLE_LLD=on
override_dh_auto_configure: CMAKE_OPTIONS += -DENABLE_LTO=on -DENABLE_LLD=on -DUSE_LIBCXX=on
override_dh_auto_configure: CMAKE_OPTIONS += -DUSE_SYSTEM_ZLIB=on
override_dh_auto_configure: CMAKE_OPTIONS += -DCMAKE_SYSTEM_NAME=Linux -DCMAKE_SYSTEM_PROCESSOR=$(DEB_HOST_ARCH)
override_dh_auto_configure: CMAKE_OPTIONS += -DUSE_OLD_SYSTEMD_SERVICE=on

View File

@@ -410,7 +410,11 @@ void PrintMallocStats() {
for (auto property : properties) {
size_t size;
if (MallocExtension_GetNumericProperty(property, &size)) {
if (std::string_view(property).ends_with("_bytes")) {
LOG(ERROR) << "TCMALLOC: " << property << " = " << size << " bytes";
} else {
LOG(ERROR) << "TCMALLOC: " << property << " = " << size;
}
}
}
#elif defined(HAVE_MIMALLOC)

View File

@@ -19,7 +19,7 @@ OptionDialog::OptionDialog(QWidget* parent) : QDialog(parent) {
setWindowFlags(windowFlags() & ~Qt::WindowContextHelpButtonHint);
QGridLayout* grid = new QGridLayout;
grid->setContentsMargins(10, 0, 20, 0);
grid->setContentsMargins(20, 15, 20, 15);
auto tcp_keep_alive_label = new QLabel(tr("TCP keep alive"));
auto tcp_keep_alive_cnt_label = new QLabel(tr("The number of TCP keep-alive probes"));

View File

@@ -1,26 +1,167 @@
From 93dc957bd07760c5d810785707bf6bea2b18676e Mon Sep 17 00:00:00 2001
From: Nikolas Klauser <nikolasklauser@berlin.de>
Date: Sat, 1 Jun 2024 12:20:41 +0200
Subject: [PATCH] [libc++] Fix failures with GCC 14 (#92663)
Fixes #91831
NOKEYCHECK=True
GitOrigin-RevId: cb7a03b41fff563c0cbb5145eed09f9b17edf9e2
---
include/__string/constexpr_c_functions.h | 2 +-
include/__type_traits/remove_pointer.h | 5 +++++
include/bitset | 3 +++
.../time.zone/time.zone.leap/nonmembers/comparison.pass.cpp | 4 ++--
.../expected/expected.expected/monadic/transform.pass.cpp | 2 +-
.../expected.expected/monadic/transform_error.pass.cpp | 2 +-
.../expected/expected.void/monadic/transform_error.pass.cpp | 2 +-
.../format.formatter.spec/formatter.char_array.pass.cpp | 2 +-
.../utilities/tuple/tuple.tuple/tuple.cnstr/PR31384.pass.cpp | 3 +++
.../variant/variant.visit.member/visit_return_type.pass.cpp | 3 +++
10 files changed, 21 insertions(+), 7 deletions(-)
diff --git a/include/__string/constexpr_c_functions.h b/include/__string/constexpr_c_functions.h
index 4da8542e3..a978f816f 100644
--- a/include/__string/constexpr_c_functions.h
+++ b/include/__string/constexpr_c_functions.h
@@ -123,7 +123,7 @@ __constexpr_memcmp_equal(const _Tp* __lhs, const _Up* __rhs, __element_count __n
}
return true;
} else {
- return __builtin_memcmp(__lhs, __rhs, __count * sizeof(_Tp)) == 0;
+ return ::__builtin_memcmp(__lhs, __rhs, __count * sizeof(_Tp)) == 0;
}
}
diff --git a/include/__type_traits/remove_pointer.h b/include/__type_traits/remove_pointer.h
index 54390a193..eea523ab2 100644
index 54390a193..1048f6705 100644
--- a/include/__type_traits/remove_pointer.h
+++ b/include/__type_traits/remove_pointer.h
@@ -22,9 +22,6 @@ template <class _Tp>
struct remove_pointer {
@@ -23,8 +23,13 @@ struct remove_pointer {
using type _LIBCPP_NODEBUG = __remove_pointer(_Tp);
};
-
-template <class _Tp>
-using __remove_pointer_t = __remove_pointer(_Tp);
+# ifdef _LIBCPP_COMPILER_GCC
+template <class _Tp>
+using __remove_pointer_t = typename remove_pointer<_Tp>::type;
+# else
template <class _Tp>
using __remove_pointer_t = __remove_pointer(_Tp);
+# endif
#else
// clang-format off
template <class _Tp> struct _LIBCPP_TEMPLATE_VIS remove_pointer {typedef _LIBCPP_NODEBUG _Tp type;};
@@ -33,10 +30,10 @@ template <class _Tp> struct _LIBCPP_TEMPLATE_VIS remove_pointer<_Tp* const>
template <class _Tp> struct _LIBCPP_TEMPLATE_VIS remove_pointer<_Tp* volatile> {typedef _LIBCPP_NODEBUG _Tp type;};
template <class _Tp> struct _LIBCPP_TEMPLATE_VIS remove_pointer<_Tp* const volatile> {typedef _LIBCPP_NODEBUG _Tp type;};
// clang-format on
+#endif // !defined(_LIBCPP_WORKAROUND_OBJCXX_COMPILER_INTRINSICS) && __has_builtin(__remove_pointer)
diff --git a/include/bitset b/include/bitset
index 8818ab656..6bd7bfe58 100644
--- a/include/bitset
+++ b/include/bitset
@@ -375,8 +375,11 @@ template <size_t _N_words, size_t _Size>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unsigned long long
__bitset<_N_words, _Size>::to_ullong(true_type, true_type) const {
unsigned long long __r = __first_[0];
+ _LIBCPP_DIAGNOSTIC_PUSH
+ _LIBCPP_GCC_DIAGNOSTIC_IGNORED("-Wshift-count-overflow")
for (size_t __i = 1; __i < sizeof(unsigned long long) / sizeof(__storage_type); ++__i)
__r |= static_cast<unsigned long long>(__first_[__i]) << (sizeof(__storage_type) * CHAR_BIT);
+ _LIBCPP_DIAGNOSTIC_POP
return __r;
}
template <class _Tp>
using __remove_pointer_t = typename remove_pointer<_Tp>::type;
-#endif // !defined(_LIBCPP_WORKAROUND_OBJCXX_COMPILER_INTRINSICS) && __has_builtin(__remove_pointer)
diff --git a/test/std/time/time.zone/time.zone.leap/nonmembers/comparison.pass.cpp b/test/std/time/time.zone/time.zone.leap/nonmembers/comparison.pass.cpp
index 448cd88d1..ccff0248e 100644
--- a/test/std/time/time.zone/time.zone.leap/nonmembers/comparison.pass.cpp
+++ b/test/std/time/time.zone/time.zone.leap/nonmembers/comparison.pass.cpp
@@ -9,8 +9,8 @@
// UNSUPPORTED: c++03, c++11, c++14, c++17
// UNSUPPORTED: no-filesystem, no-localization, no-tzdb
-// TODO TZDB test whether this can be enabled with gcc 14.
-// UNSUPPORTED: gcc-13
+// TODO TZDB investigate why this fails with GCC
+// UNSUPPORTED: gcc-13, gcc-14
// XFAIL: libcpp-has-no-experimental-tzdb
// XFAIL: availability-tzdb-missing
diff --git a/test/std/utilities/expected/expected.expected/monadic/transform.pass.cpp b/test/std/utilities/expected/expected.expected/monadic/transform.pass.cpp
index d38a46f04..aa7106fb9 100644
--- a/test/std/utilities/expected/expected.expected/monadic/transform.pass.cpp
+++ b/test/std/utilities/expected/expected.expected/monadic/transform.pass.cpp
@@ -10,7 +10,7 @@
// GCC has a issue for `Guaranteed copy elision for potentially-overlapping non-static data members`,
// please refer to: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108333
-// XFAIL: gcc-13
+// XFAIL: gcc-13, gcc-14
// <expected>
diff --git a/test/std/utilities/expected/expected.expected/monadic/transform_error.pass.cpp b/test/std/utilities/expected/expected.expected/monadic/transform_error.pass.cpp
index ec55f637f..ae9feccb5 100644
--- a/test/std/utilities/expected/expected.expected/monadic/transform_error.pass.cpp
+++ b/test/std/utilities/expected/expected.expected/monadic/transform_error.pass.cpp
@@ -10,7 +10,7 @@
// GCC has a issue for `Guaranteed copy elision for potentially-overlapping non-static data members`,
// please refer to: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108333.
-// XFAIL: gcc-13
+// XFAIL: gcc-13, gcc-14
// <expected>
diff --git a/test/std/utilities/expected/expected.void/monadic/transform_error.pass.cpp b/test/std/utilities/expected/expected.void/monadic/transform_error.pass.cpp
index cd6e5a503..f70bddbed 100644
--- a/test/std/utilities/expected/expected.void/monadic/transform_error.pass.cpp
+++ b/test/std/utilities/expected/expected.void/monadic/transform_error.pass.cpp
@@ -10,7 +10,7 @@
// GCC has a issue for `Guaranteed copy elision for potentially-overlapping non-static data members`,
// please refer to: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108333
-// XFAIL: gcc-13
+// XFAIL: gcc-13, gcc-14
// <expected>
diff --git a/test/std/utilities/format/format.formatter/format.formatter.spec/formatter.char_array.pass.cpp b/test/std/utilities/format/format.formatter/format.formatter.spec/formatter.char_array.pass.cpp
index b0ee399a1..cad13c1ef 100644
--- a/test/std/utilities/format/format.formatter/format.formatter.spec/formatter.char_array.pass.cpp
+++ b/test/std/utilities/format/format.formatter/format.formatter.spec/formatter.char_array.pass.cpp
@@ -7,7 +7,7 @@
// UNSUPPORTED: c++03, c++11, c++14, c++17
// TODO FMT __builtin_memcpy isn't constexpr in GCC
-// UNSUPPORTED: gcc-13
+// UNSUPPORTED: gcc-13, gcc-14
// <format>
diff --git a/test/std/utilities/tuple/tuple.tuple/tuple.cnstr/PR31384.pass.cpp b/test/std/utilities/tuple/tuple.tuple/tuple.cnstr/PR31384.pass.cpp
index c9e7bb6a5..0b40ac9ff 100644
--- a/test/std/utilities/tuple/tuple.tuple/tuple.cnstr/PR31384.pass.cpp
+++ b/test/std/utilities/tuple/tuple.tuple/tuple.cnstr/PR31384.pass.cpp
@@ -8,6 +8,9 @@
// UNSUPPORTED: c++03
+// FIXME: Why does this start to fail with GCC 14?
+// XFAIL: gcc-14
+
// See https://llvm.org/PR31384.
#include <tuple>
diff --git a/test/std/utilities/variant/variant.visit.member/visit_return_type.pass.cpp b/test/std/utilities/variant/variant.visit.member/visit_return_type.pass.cpp
index 2c1cbb06e..7429cdf80 100644
--- a/test/std/utilities/variant/variant.visit.member/visit_return_type.pass.cpp
+++ b/test/std/utilities/variant/variant.visit.member/visit_return_type.pass.cpp
@@ -34,6 +34,9 @@ struct overloaded : Ts... {
using Ts::operator()...;
};
+template <class... Ts>
+overloaded(Ts...) -> overloaded<Ts...>;
+
void test_overload_ambiguity() {
using V = std::variant<float, long, std::string>;
using namespace std::string_literals;
--
2.45.2
#if _LIBCPP_STD_VER >= 14
template <class _Tp>

View File

@@ -11,20 +11,27 @@
// Basic test for float registers number are accepted.
#include <dlfcn.h>
#include <stdlib.h>
#include <string.h>
#include <unwind.h>
// Using __attribute__((section("main_func"))) is ELF specific, but then
// this entire test is marked as requiring Linux, so we should be good.
//
// We don't use dladdr() because on musl it's a no-op when statically linked.
extern char __start_main_func;
extern char __stop_main_func;
_Unwind_Reason_Code frame_handler(struct _Unwind_Context *ctx, void *arg) {
(void)arg;
Dl_info info = {0, 0, 0, 0};
// Unwind util the main is reached, above frames depend on the platform and
// Unwind until the main is reached, above frames depend on the platform and
// architecture.
if (dladdr(reinterpret_cast<void *>(_Unwind_GetIP(ctx)), &info) &&
info.dli_sname && !strcmp("main", info.dli_sname))
uintptr_t ip = _Unwind_GetIP(ctx);
if (ip >= (uintptr_t)&__start_main_func &&
ip < (uintptr_t)&__stop_main_func) {
_Exit(0);
}
return _URC_NO_REASON;
}
@@ -45,7 +52,7 @@ __attribute__((noinline)) void foo() {
_Unwind_Backtrace(frame_handler, NULL);
}
int main() {
__attribute__((section("main_func"))) int main() {
foo();
return -2;
}

View File

@@ -17,7 +17,6 @@
#undef NDEBUG
#include <assert.h>
#include <dlfcn.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
@@ -27,6 +26,13 @@
#include <unistd.h>
#include <unwind.h>
// Using __attribute__((section("main_func"))) is Linux specific, but then
// this entire test is marked as requiring Linux, so we should be good.
//
// We don't use dladdr() because on musl it's a no-op when statically linked.
extern char __start_main_func;
extern char __stop_main_func;
void foo();
_Unwind_Exception ex;
@@ -41,14 +47,14 @@ _Unwind_Reason_Code stop(int version, _Unwind_Action actions,
assert(exceptionObject == &ex);
assert(stop_parameter == &foo);
Dl_info info = {0, 0, 0, 0};
// Unwind util the main is reached, above frames depend on the platform and
// Unwind until the main is reached, above frames depend on the platform and
// architecture.
if (dladdr(reinterpret_cast<void *>(_Unwind_GetIP(context)), &info) &&
info.dli_sname && !strcmp("main", info.dli_sname)) {
uintptr_t ip = _Unwind_GetIP(context);
if (ip >= (uintptr_t)&__start_main_func &&
ip < (uintptr_t)&__stop_main_func) {
_Exit(0);
}
return _URC_NO_REASON;
}
@@ -66,7 +72,7 @@ __attribute__((noinline)) void foo() {
_Unwind_ForcedUnwind(e, stop, (void *)&foo);
}
int main() {
__attribute__((section("main_func"))) int main() {
foo();
return -2;
}

View File

@@ -13,9 +13,15 @@
// TODO: Figure out why this fails with Memory Sanitizer.
// XFAIL: msan
// Note: this test fails on musl because:
//
// (a) musl disables emission of unwind information for its build, and
// (b) musl's signal trampolines don't include unwind information
//
// XFAIL: target={{.*}}-musl
#undef NDEBUG
#include <assert.h>
#include <dlfcn.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
@@ -24,16 +30,24 @@
#include <unistd.h>
#include <unwind.h>
// Using __attribute__((section("main_func"))) is ELF specific, but then
// this entire test is marked as requiring Linux, so we should be good.
//
// We don't use dladdr() because on musl it's a no-op when statically linked.
extern char __start_main_func;
extern char __stop_main_func;
_Unwind_Reason_Code frame_handler(struct _Unwind_Context* ctx, void* arg) {
(void)arg;
Dl_info info = { 0, 0, 0, 0 };
// Unwind util the main is reached, above frames depend on the platform and
// Unwind until the main is reached, above frames depend on the platform and
// architecture.
if (dladdr(reinterpret_cast<void *>(_Unwind_GetIP(ctx)), &info) &&
info.dli_sname && !strcmp("main", info.dli_sname)) {
uintptr_t ip = _Unwind_GetIP(ctx);
if (ip >= (uintptr_t)&__start_main_func &&
ip < (uintptr_t)&__stop_main_func) {
_Exit(0);
}
return _URC_NO_REASON;
}
@@ -43,7 +57,7 @@ void signal_handler(int signum) {
_Exit(-1);
}
int main(int, char**) {
__attribute__((section("main_func"))) int main(int, char **) {
signal(SIGUSR1, signal_handler);
kill(getpid(), SIGUSR1);
return -2;

View File

@@ -13,9 +13,15 @@
// TODO: Figure out why this fails with Memory Sanitizer.
// XFAIL: msan
// Note: this test fails on musl because:
//
// (a) musl disables emission of unwind information for its build, and
// (b) musl's signal trampolines don't include unwind information
//
// XFAIL: target={{.*}}-musl
#undef NDEBUG
#include <assert.h>
#include <dlfcn.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
@@ -24,16 +30,24 @@
#include <unistd.h>
#include <unwind.h>
// Using __attribute__((section("main_func"))) is ELF specific, but then
// this entire test is marked as requiring Linux, so we should be good.
//
// We don't use dladdr() because on musl it's a no-op when statically linked.
extern char __start_main_func;
extern char __stop_main_func;
_Unwind_Reason_Code frame_handler(struct _Unwind_Context* ctx, void* arg) {
(void)arg;
Dl_info info = { 0, 0, 0, 0 };
// Unwind until the main is reached, above frames depend on the platform and
// architecture.
if (dladdr(reinterpret_cast<void *>(_Unwind_GetIP(ctx)), &info) &&
info.dli_sname && !strcmp("main", info.dli_sname)) {
uintptr_t ip = _Unwind_GetIP(ctx);
if (ip >= (uintptr_t)&__start_main_func &&
ip < (uintptr_t)&__stop_main_func) {
_Exit(0);
}
return _URC_NO_REASON;
}
@@ -56,7 +70,7 @@ __attribute__((noinline)) void crashing_leaf_func(int do_trap) {
__builtin_trap();
}
int main(int, char**) {
__attribute__((section("main_func"))) int main(int, char **) {
signal(SIGTRAP, signal_handler);
signal(SIGILL, signal_handler);
crashing_leaf_func(1);

View File

@@ -260,6 +260,9 @@ for embedded devices and low end boxes.
%systemd_postun_with_restart yass-redir.service
%changelog
* Tue Jun 11 2024 Chilledheart <keeyou-cn@outlook.com> - 1.11.0-1
- bump to chromium 127 dependents
- add gtk3/gtk4/qt6 build profile (source)
* Mon Jun 3 2024 Chilledheart <keeyou-cn@outlook.com> - 1.10.5-1
- miscellaneous fixes
- fix gtk3 wayland app icon issue

View File

@@ -5,9 +5,9 @@ import hashlib
import json
import os.path
import re
import types
import ssl
import sys
import types
import unittest
import youtube_dl.extractor
@@ -181,18 +181,18 @@ def expect_value(self, got, expected, field):
op, _, expected_num = expected.partition(':')
expected_num = int(expected_num)
if op == 'mincount':
assert_func = assertGreaterEqual
assert_func = self.assertGreaterEqual
msg_tmpl = 'Expected %d items in field %s, but only got %d'
elif op == 'maxcount':
assert_func = assertLessEqual
assert_func = self.assertLessEqual
msg_tmpl = 'Expected maximum %d items in field %s, but got %d'
elif op == 'count':
assert_func = assertEqual
assert_func = self.assertEqual
msg_tmpl = 'Expected exactly %d items in field %s, but got %d'
else:
assert False
assert_func(
self, len(got), expected_num,
len(got), expected_num,
msg_tmpl % (expected_num, field, len(got)))
return
self.assertEqual(
@@ -262,27 +262,6 @@ def assertRegexpMatches(self, text, regexp, msg=None):
self.assertTrue(m, msg)
def assertGreaterEqual(self, got, expected, msg=None):
if not (got >= expected):
if msg is None:
msg = '%r not greater than or equal to %r' % (got, expected)
self.assertTrue(got >= expected, msg)
def assertLessEqual(self, got, expected, msg=None):
if not (got <= expected):
if msg is None:
msg = '%r not less than or equal to %r' % (got, expected)
self.assertTrue(got <= expected, msg)
def assertEqual(self, got, expected, msg=None):
if not (got == expected):
if msg is None:
msg = '%r not equal to %r' % (got, expected)
self.assertTrue(got == expected, msg)
def expect_warnings(ydl, warnings_re):
real_warning = ydl.report_warning

View File

@@ -9,8 +9,6 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import (
assertGreaterEqual,
assertLessEqual,
expect_warnings,
get_params,
gettestcases,
@@ -36,12 +34,20 @@ from youtube_dl.utils import (
ExtractorError,
error_to_compat_str,
format_bytes,
IDENTITY,
preferredencoding,
UnavailableVideoError,
)
from youtube_dl.extractor import get_info_extractor
RETRIES = 3
# Some unittest APIs require actual str
if not isinstance('TEST', str):
_encode_str = lambda s: s.encode(preferredencoding())
else:
_encode_str = IDENTITY
class YoutubeDL(youtube_dl.YoutubeDL):
def __init__(self, *args, **kwargs):
@@ -102,7 +108,7 @@ def generator(test_case, tname):
def print_skipping(reason):
print('Skipping %s: %s' % (test_case['name'], reason))
self.skipTest(reason)
self.skipTest(_encode_str(reason))
if not ie.working():
print_skipping('IE marked as not _WORKING')
@@ -187,16 +193,14 @@ def generator(test_case, tname):
expect_info_dict(self, res_dict, test_case.get('info_dict', {}))
if 'playlist_mincount' in test_case:
assertGreaterEqual(
self,
self.assertGreaterEqual(
len(res_dict['entries']),
test_case['playlist_mincount'],
'Expected at least %d in playlist %s, but got only %d' % (
test_case['playlist_mincount'], test_case['url'],
len(res_dict['entries'])))
if 'playlist_maxcount' in test_case:
assertLessEqual(
self,
self.assertLessEqual(
len(res_dict['entries']),
test_case['playlist_maxcount'],
'Expected at most %d in playlist %s, but got %d' % (
@@ -243,8 +247,8 @@ def generator(test_case, tname):
if params.get('test'):
expected_minsize = max(expected_minsize, 10000)
got_fsize = os.path.getsize(tc_filename)
assertGreaterEqual(
self, got_fsize, expected_minsize,
self.assertGreaterEqual(
got_fsize, expected_minsize,
'Expected %s to be at least %s, but it\'s only %s ' %
(tc_filename, format_bytes(expected_minsize),
format_bytes(got_fsize)))

View File

@@ -1039,8 +1039,8 @@ class YoutubeDL(object):
elif result_type in ('playlist', 'multi_video'):
# Protect from infinite recursion due to recursively nested playlists
# (see https://github.com/ytdl-org/youtube-dl/issues/27833)
webpage_url = ie_result['webpage_url']
if webpage_url in self._playlist_urls:
webpage_url = ie_result.get('webpage_url') # not all pl/mv have this
if webpage_url and webpage_url in self._playlist_urls:
self.to_screen(
'[download] Skipping already downloaded playlist: %s'
% ie_result.get('title') or ie_result.get('id'))
@@ -1048,6 +1048,10 @@ class YoutubeDL(object):
self._playlist_level += 1
self._playlist_urls.add(webpage_url)
new_result = dict((k, v) for k, v in extra_info.items() if k not in ie_result)
if new_result:
new_result.update(ie_result)
ie_result = new_result
try:
return self.__process_playlist(ie_result, download)
finally:
@@ -1593,6 +1597,28 @@ class YoutubeDL(object):
self.cookiejar.add_cookie_header(pr)
return pr.get_header('Cookie')
def _fill_common_fields(self, info_dict, final=True):
for ts_key, date_key in (
('timestamp', 'upload_date'),
('release_timestamp', 'release_date'),
):
if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
# Working around out-of-range timestamp values (e.g. negative ones on Windows,
# see http://bugs.python.org/issue1646728)
try:
upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
info_dict[date_key] = compat_str(upload_date.strftime('%Y%m%d'))
except (ValueError, OverflowError, OSError):
pass
# Auto generate title fields corresponding to the *_number fields when missing
# in order to always have clean titles. This is very common for TV series.
if final:
for field in ('chapter', 'season', 'episode'):
if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
def process_video_result(self, info_dict, download=True):
assert info_dict.get('_type', 'video') == 'video'
@@ -1660,24 +1686,7 @@ class YoutubeDL(object):
if 'display_id' not in info_dict and 'id' in info_dict:
info_dict['display_id'] = info_dict['id']
for ts_key, date_key in (
('timestamp', 'upload_date'),
('release_timestamp', 'release_date'),
):
if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
# Working around out-of-range timestamp values (e.g. negative ones on Windows,
# see http://bugs.python.org/issue1646728)
try:
upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
info_dict[date_key] = compat_str(upload_date.strftime('%Y%m%d'))
except (ValueError, OverflowError, OSError):
pass
# Auto generate title fields corresponding to the *_number fields when missing
# in order to always have clean titles. This is very common for TV series.
for field in ('chapter', 'season', 'episode'):
if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
self._fill_common_fields(info_dict)
for cc_kind in ('subtitles', 'automatic_captions'):
cc = info_dict.get(cc_kind)

View File

@@ -898,21 +898,13 @@ from .ooyala import (
)
from .ora import OraTVIE
from .orf import (
ORFTVthekIE,
ORFFM4IE,
ORFONIE,
ORFONLiveIE,
ORFFM4StoryIE,
ORFOE1IE,
ORFOE3IE,
ORFNOEIE,
ORFWIEIE,
ORFBGLIE,
ORFOOEIE,
ORFSTMIE,
ORFKTNIE,
ORFSBGIE,
ORFTIRIE,
ORFVBGIE,
ORFIPTVIE,
ORFPodcastIE,
ORFRadioIE,
ORFRadioCollectionIE,
)
from .outsidetv import OutsideTVIE
from .packtpub import (

View File

@@ -1,3 +1,4 @@
# coding: utf-8
from __future__ import unicode_literals
import itertools
@@ -10,7 +11,7 @@ from ..compat import (
compat_ord,
compat_str,
compat_urllib_parse_unquote,
compat_zip
compat_zip as zip,
)
from ..utils import (
int_or_none,
@@ -24,7 +25,7 @@ class MixcloudBaseIE(InfoExtractor):
def _call_api(self, object_type, object_fields, display_id, username, slug=None):
lookup_key = object_type + 'Lookup'
return self._download_json(
'https://www.mixcloud.com/graphql', display_id, query={
'https://app.mixcloud.com/graphql', display_id, query={
'query': '''{
%s(lookup: {username: "%s"%s}) {
%s
@@ -44,7 +45,7 @@ class MixcloudIE(MixcloudBaseIE):
'ext': 'm4a',
'title': 'Cryptkeeper',
'description': 'After quite a long silence from myself, finally another Drum\'n\'Bass mix with my favourite current dance floor bangers.',
'uploader': 'Daniel Holbach',
'uploader': 'dholbach', # was: 'Daniel Holbach',
'uploader_id': 'dholbach',
'thumbnail': r're:https?://.*\.jpg',
'view_count': int,
@@ -57,7 +58,7 @@ class MixcloudIE(MixcloudBaseIE):
'id': 'gillespeterson_caribou-7-inch-vinyl-mix-chat',
'ext': 'mp3',
'title': 'Caribou 7 inch Vinyl Mix & Chat',
'description': 'md5:2b8aec6adce69f9d41724647c65875e8',
'description': r're:Last week Dan Snaith aka Caribou swung by the Brownswood.{136}',
'uploader': 'Gilles Peterson Worldwide',
'uploader_id': 'gillespeterson',
'thumbnail': 're:https?://.*',
@@ -65,6 +66,23 @@ class MixcloudIE(MixcloudBaseIE):
'timestamp': 1422987057,
'upload_date': '20150203',
},
'params': {
'skip_download': '404 not found',
},
}, {
'url': 'https://www.mixcloud.com/gillespeterson/carnival-m%C3%BAsica-popular-brasileira-mix/',
'info_dict': {
'id': 'gillespeterson_carnival-música-popular-brasileira-mix',
'ext': 'm4a',
'title': 'Carnival Música Popular Brasileira Mix',
'description': r're:Gilles was recently in Brazil to play at Boiler Room.{208}',
'timestamp': 1454347174,
'upload_date': '20160201',
'uploader': 'Gilles Peterson Worldwide',
'uploader_id': 'gillespeterson',
'thumbnail': 're:https?://.*',
'view_count': int,
},
}, {
'url': 'https://beta.mixcloud.com/RedLightRadio/nosedrip-15-red-light-radio-01-18-2016/',
'only_matching': True,
@@ -76,10 +94,10 @@ class MixcloudIE(MixcloudBaseIE):
"""Encrypt/Decrypt XOR cipher. Both ways are possible because it's XOR."""
return ''.join([
compat_chr(compat_ord(ch) ^ compat_ord(k))
for ch, k in compat_zip(ciphertext, itertools.cycle(key))])
for ch, k in zip(ciphertext, itertools.cycle(key))])
def _real_extract(self, url):
username, slug = re.match(self._VALID_URL, url).groups()
username, slug = self._match_valid_url(url).groups()
username, slug = compat_urllib_parse_unquote(username), compat_urllib_parse_unquote(slug)
track_id = '%s_%s' % (username, slug)

File diff suppressed because it is too large Load Diff

View File

@@ -4,6 +4,7 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
get_element_by_id,
@@ -11,6 +12,7 @@ from ..utils import (
strip_or_none,
unified_strdate,
urljoin,
str_to_int,
)
@@ -35,6 +37,26 @@ class VidLiiIE(InfoExtractor):
'categories': ['News & Politics'],
'tags': ['Vidlii', 'Jan', 'Videogames'],
}
}, {
# HD
'url': 'https://www.vidlii.com/watch?v=2Ng8Abj2Fkl',
'md5': '450e7da379c884788c3a4fa02a3ce1a4',
'info_dict': {
'id': '2Ng8Abj2Fkl',
'ext': 'mp4',
'title': 'test',
'description': 'md5:cc55a86032a7b6b3cbfd0f6b155b52e9',
'thumbnail': 'https://www.vidlii.com/usfi/thmp/2Ng8Abj2Fkl.jpg',
'uploader': 'VidLii',
'uploader_url': 'https://www.vidlii.com/user/VidLii',
'upload_date': '20200927',
'duration': 5,
'view_count': int,
'comment_count': int,
'average_rating': float,
'categories': ['Film & Animation'],
'tags': list,
},
}, {
'url': 'https://www.vidlii.com/embed?v=tJluaH4BJ3v&a=0',
'only_matching': True,
@@ -46,11 +68,32 @@ class VidLiiIE(InfoExtractor):
webpage = self._download_webpage(
'https://www.vidlii.com/watch?v=%s' % video_id, video_id)
video_url = self._search_regex(
r'src\s*:\s*(["\'])(?P<url>(?:https?://)?(?:(?!\1).)+)\1', webpage,
'video url', group='url')
formats = []
title = self._search_regex(
def add_format(format_url, height=None):
height = int(self._search_regex(r'(\d+)\.mp4',
format_url, 'height', default=360))
formats.append({
'url': format_url,
'format_id': '%dp' % height if height else None,
'height': height,
})
sources = re.findall(
r'src\s*:\s*(["\'])(?P<url>(?:https?://)?(?:(?!\1).)+)\1',
webpage)
formats = []
if len(sources) > 1:
add_format(sources[1][1])
self._check_formats(formats, video_id)
if len(sources) > 0:
add_format(sources[0][1])
self._sort_formats(formats)
title = self._html_search_regex(
(r'<h1>([^<]+)</h1>', r'<title>([^<]+) - VidLii<'), webpage,
'title')
@@ -82,9 +125,9 @@ class VidLiiIE(InfoExtractor):
default=None) or self._search_regex(
r'duration\s*:\s*(\d+)', webpage, 'duration', fatal=False))
view_count = int_or_none(self._search_regex(
(r'<strong>(\d+)</strong> views',
r'Views\s*:\s*<strong>(\d+)</strong>'),
view_count = str_to_int(self._html_search_regex(
(r'<strong>([\d,.]+)</strong> views',
r'Views\s*:\s*<strong>([\d,.]+)</strong>'),
webpage, 'view count', fatal=False))
comment_count = int_or_none(self._search_regex(
@@ -109,7 +152,7 @@ class VidLiiIE(InfoExtractor):
return {
'id': video_id,
'url': video_url,
'formats': formats,
'title': title,
'description': description,
'thumbnail': thumbnail,