Mirror of https://github.com/bolucat/Archive.git

Commit: Update On Tue Jun 11 20:31:39 CEST 2024

.github/update.log (vendored, 1 change)
@@ -669,3 +669,4 @@ Update On Fri Jun 7 20:34:28 CEST 2024
 Update On Sat Jun 8 20:29:12 CEST 2024
 Update On Sun Jun 9 20:29:25 CEST 2024
 Update On Mon Jun 10 20:32:26 CEST 2024
+Update On Tue Jun 11 20:31:29 CEST 2024
@@ -1,7 +1,7 @@
 {
     "version": "20240606",
-    "text": "Clean the web with Brook",
-    "link": "https://www.txthinking.com/talks/articles/brook-clean-the-web-en.article",
-    "text_zh": "使用 Brook 净化互联网",
-    "link_zh": "https://www.txthinking.com/talks/articles/brook-clean-the-web.article"
+    "text": "Refer to get Brook Plus for free",
+    "link": "https://www.txthinking.com/brook.html#referrals",
+    "text_zh": "邀请以免费获得 Brook Plus",
+    "link_zh": "https://www.txthinking.com/brook.html#referrals"
 }
@@ -6,6 +6,11 @@ modules = append(modules, {
     dnsquery: func(m) {
         text := import("text")
         l := [
+            "analytics.google.com",
+            "apple.com",
+            "comodoca.com",
+            "autonavi.com",
+            "giphy.com",
             "facebook.com",
             "fbcdn.net",
             "facebook.net",
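The five new entries extend the domain list that the dnsquery handler checks. A rough TypeScript sketch of the kind of suffix match such a list typically feeds; the helper name and exact matching rule are illustrative assumptions, not taken from the script:

    const listedDomains: string[] = [
      "analytics.google.com", "apple.com", "comodoca.com",
      "autonavi.com", "giphy.com", "facebook.com", "fbcdn.net", "facebook.net",
    ];

    // True when the queried name equals an entry or is a subdomain of it.
    const matchesList = (qname: string): boolean =>
      listedDomains.some((d) => qname === d || qname.endsWith("." + d));

    // matchesList("static.fbcdn.net") -> true; matchesList("example.com") -> false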
@@ -10,6 +10,7 @@ import (
     "github.com/metacubex/mihomo/component/loopback"
     "github.com/metacubex/mihomo/component/resolver"
     C "github.com/metacubex/mihomo/constant"
+    "github.com/metacubex/mihomo/constant/features"
 )
 
 type Direct struct {
@@ -24,9 +25,11 @@ type DirectOption struct {
 
 // DialContext implements C.ProxyAdapter
 func (d *Direct) DialContext(ctx context.Context, metadata *C.Metadata, opts ...dialer.Option) (C.Conn, error) {
+    if !features.CMFA {
     if err := d.loopBack.CheckConn(metadata); err != nil {
         return nil, err
     }
+    }
     opts = append(opts, dialer.WithResolver(resolver.DefaultResolver))
     c, err := dialer.DialContext(ctx, "tcp", metadata.RemoteAddress(), d.Base.DialOptions(opts...)...)
     if err != nil {
@@ -38,9 +41,11 @@ func (d *Direct) DialContext(ctx context.Context, metadata *C.Metadata, opts ...
 
 // ListenPacketContext implements C.ProxyAdapter
 func (d *Direct) ListenPacketContext(ctx context.Context, metadata *C.Metadata, opts ...dialer.Option) (C.PacketConn, error) {
+    if !features.CMFA {
     if err := d.loopBack.CheckPacketConn(metadata); err != nil {
         return nil, err
     }
+    }
     // net.UDPConn.WriteTo only working with *net.UDPAddr, so we need a net.UDPAddr
     if !metadata.Resolved() {
         ip, err := resolver.ResolveIPWithResolver(ctx, metadata.Host, resolver.DefaultResolver)
clash-nyanpasu/backend/Cargo.lock (generated, 8 changes)

@@ -819,9 +819,9 @@ dependencies = [
 
 [[package]]
 name = "clap"
-version = "4.5.6"
+version = "4.5.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a9689a29b593160de5bc4aacab7b5d54fb52231de70122626c178e6a368994c7"
+checksum = "5db83dced34638ad474f39f250d7fea9598bdd239eaced1bdf45d597da0f433f"
 dependencies = [
  "clap_builder",
  "clap_derive",
@@ -829,9 +829,9 @@ dependencies = [
 
 [[package]]
 name = "clap_builder"
-version = "4.5.6"
+version = "4.5.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2e5387378c84f6faa26890ebf9f0a92989f8873d4d380467bcd0d8d8620424df"
+checksum = "f7e204572485eb3fbf28f871612191521df159bc3e15a9f5064c66dba3a8c05f"
 dependencies = [
  "anstream",
  "anstyle",
@@ -50,6 +50,18 @@ export const useClashCore = () => {
     await mutate();
   };
 
+  const setGlobalProxy = async (name: string) => {
+    const group = data?.global;
+
+    if (!group) {
+      return;
+    }
+
+    await tauri.selectProxy(group?.name, name);
+
+    await mutate();
+  };
+
   const getRules = useSWR("getRules", clash.getRules);
 
   const getRulesProviders = useSWR<{ [name: string]: ProviderRules }>(
@@ -80,6 +92,7 @@ export const useClashCore = () => {
     updateGroupDelay,
     updateProxiesDelay,
     setGroupProxy,
+    setGlobalProxy,
     getRules,
     getRulesProviders,
     updateRulesProviders,
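For context, a minimal sketch of how a component might consume the new helper alongside the existing one, mirroring the mode check used later in NodeList; the hook name useSelectNode and its shape are hypothetical:

    import { useClashCore, useNyanpasu } from "@nyanpasu/interface";

    // Route the selection to the hidden GLOBAL group when global mode is
    // active, otherwise to the currently selected proxy group.
    export const useSelectNode = (groupIndex: number) => {
      const { setGroupProxy, setGlobalProxy } = useClashCore();
      const { getCurrentMode } = useNyanpasu();

      return async (node: string) => {
        if (getCurrentMode.global) {
          await setGlobalProxy(node);
        } else {
          await setGroupProxy(groupIndex, node);
        }
      };
    };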
@@ -53,9 +53,9 @@
     "@types/react": "18.3.3",
     "@types/react-dom": "18.3.0",
     "@types/react-transition-group": "4.4.10",
-    "@typescript-eslint/eslint-plugin": "7.12.0",
-    "@typescript-eslint/parser": "7.12.0",
-    "@vitejs/plugin-react": "4.3.0",
+    "@typescript-eslint/eslint-plugin": "7.13.0",
+    "@typescript-eslint/parser": "7.13.0",
+    "@vitejs/plugin-react": "4.3.1",
     "sass": "1.77.4",
     "shiki": "1.6.3",
     "vite": "5.2.13",
@@ -10,7 +10,14 @@ import { Clash, useClashCore, useNyanpasu } from "@nyanpasu/interface";
 import { useBreakpoint } from "@nyanpasu/ui";
 import { useAtom, useAtomValue } from "jotai";
 import { proxyGroupAtom, proxyGroupSortAtom } from "@/store";
-import { CSSProperties, memo, useEffect, useMemo, useState } from "react";
+import {
+  CSSProperties,
+  memo,
+  useEffect,
+  useMemo,
+  useState,
+  useTransition,
+} from "react";
 import { classNames } from "@/utils";
 import { VList } from "virtua";
 import { AnimatePresence, motion } from "framer-motion";
@@ -31,10 +38,11 @@ const getColorForDelay = (delay: number): string => {
   const { palette } = useTheme();
 
   const delayColorMapping: { [key: string]: string } = {
-    "0": palette.text.secondary,
+    "0": palette.error.main,
+    "1": palette.text.secondary,
     "100": palette.success.main,
     "500": palette.warning.main,
-    "1000": palette.error.main,
+    "10000": palette.error.main,
   };
 
   let color: string = palette.text.secondary;
@@ -102,7 +110,7 @@ const DelayChip = memo(function DelayChip({
           loading ? "opacity-0" : "opacity-1",
         )}
       >
-        {`${delay} ms`}
+        {delay ? `${delay} ms` : "timeout"}
       </span>
 
       <CircularProgress
@@ -168,7 +176,10 @@ const NodeCard = memo(function NodeCard({
 });
 
 export const NodeList = () => {
-  const { data, setGroupProxy, updateProxiesDelay } = useClashCore();
+  const { data, setGroupProxy, setGlobalProxy, updateProxiesDelay } =
+    useClashCore();
+
+  const [isPending, startTransition] = useTransition();
 
   const { getCurrentMode } = useNyanpasu();
 
@@ -194,6 +205,9 @@ export const NodeList = () => {
         if (delayA === -1 || delayA === -2) return 1;
         if (delayB === -1 || delayB === -2) return -1;
 
+        if (delayA === 0) return 1;
+        if (delayB === 0) return -1;
+
         return delayA - delayB;
       });
     } else if (proxyGroupSort === "name") {
@@ -223,7 +237,7 @@ export const NodeList = () => {
 
   const [renderList, setRenderList] = useState<RenderClashProxy[][]>([]);
 
-  useEffect(() => {
+  const updateRenderList = () => {
     if (!group?.all) return;
 
     const nodeNames: string[] = [];
@@ -257,15 +271,32 @@ export const NodeList = () => {
     );
 
     setRenderList(list);
+  };
+
+  useEffect(() => {
+    startTransition(() => {
+      updateRenderList();
+    });
   }, [group?.all, column]);
 
   const hendleClick = (node: string) => {
+    if (!getCurrentMode.global) {
       setGroupProxy(proxyGroup.selector as number, node);
+    } else {
+      setGlobalProxy(node);
+    }
   };
 
   return (
-    <AnimatePresence initial={false}>
-      <VList style={{ flex: 1 }} className="p-2">
+    <AnimatePresence initial={false} mode="sync">
+      <VList
+        style={{ flex: 1 }}
+        className={classNames(
+          "transition-opacity",
+          "p-2",
+          isPending ? "opacity-0" : "opacity-1",
+        )}
+      >
       {renderList?.map((node, index) => {
         return (
           <div
@@ -280,9 +311,9 @@ export const NodeList = () => {
               layoutId={`node-${render.renderLayoutKey}`}
               className="relative overflow-hidden"
               layout="position"
-              initial={false}
-              animate="center"
-              exit="exit"
+              initial={{ scale: 0.7, opacity: 0 }}
+              animate={{ scale: 1, opacity: 1 }}
+              exit={{ opacity: 0 }}
             >
               <NodeCard
                 node={render}
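The NodeList change wraps the render-list rebuild in useTransition so the regrouping runs as a non-urgent update and the list can fade via isPending instead of blocking the UI. A stripped-down sketch of that pattern; the hook name and string item type are placeholders, not the component's real props:

    import { useEffect, useState, useTransition } from "react";

    export const useDeferredList = (all: string[] | undefined, column: number) => {
      const [renderList, setRenderList] = useState<string[][]>([]);
      const [isPending, startTransition] = useTransition();

      useEffect(() => {
        // Mark the expensive regrouping as a transition; React keeps the
        // previous list interactive and exposes isPending for a fade while it runs.
        startTransition(() => {
          if (!all) return;
          const chunked: string[][] = [];
          for (let i = 0; i < all.length; i += column) {
            chunked.push(all.slice(i, i + column));
          }
          setRenderList(chunked);
        });
      }, [all, column]);

      return { renderList, isPending };
    };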
@@ -2,7 +2,7 @@
   "manifest_version": 1,
   "latest": {
     "mihomo": "v1.18.5",
-    "mihomo_alpha": "alpha-0d4e57c",
+    "mihomo_alpha": "alpha-cacfefa",
     "clash_rs": "v0.1.18",
     "clash_premium": "2023-09-05-gdcc8d87"
   },
@@ -36,5 +36,5 @@
       "darwin-x64": "clash-darwin-amd64-n{}.gz"
     }
   },
-  "updated_at": "2024-06-07T22:20:07.011Z"
+  "updated_at": "2024-06-10T22:20:47.739Z"
 }
@@ -95,7 +95,7 @@
     "postcss-html": "1.7.0",
     "postcss-import": "16.1.0",
     "postcss-scss": "4.0.9",
-    "prettier": "3.3.1",
+    "prettier": "3.3.2",
     "prettier-plugin-toml": "2.0.1",
     "react-devtools": "5.2.0",
     "stylelint": "16.6.1",
@@ -106,10 +106,10 @@
     "stylelint-order": "6.0.4",
     "stylelint-scss": "6.3.1",
     "tailwindcss": "3.4.4",
-    "tsx": "4.15.1",
+    "tsx": "4.15.2",
     "typescript": "5.4.5"
   },
-  "packageManager": "pnpm@9.2.0",
+  "packageManager": "pnpm@9.3.0",
   "engines": {
     "node": "22.2.0"
   },
clash-nyanpasu/pnpm-lock.yaml (generated, 150 changes)

@@ -75,7 +75,7 @@ importers:
         version: 16.6.2(eslint@8.57.0)
       eslint-plugin-prettier:
         specifier: 5.1.3
-        version: 5.1.3(eslint-config-prettier@9.1.0(eslint@8.57.0))(eslint@8.57.0)(prettier@3.3.1)
+        version: 5.1.3(eslint-config-prettier@9.1.0(eslint@8.57.0))(eslint@8.57.0)(prettier@3.3.2)
       eslint-plugin-promise:
         specifier: 6.2.0
         version: 6.2.0(eslint@8.57.0)
@@ -101,11 +101,11 @@ importers:
         specifier: 4.0.9
         version: 4.0.9(postcss@8.4.38)
       prettier:
-        specifier: 3.3.1
-        version: 3.3.1
+        specifier: 3.3.2
+        version: 3.3.2
       prettier-plugin-toml:
         specifier: 2.0.1
-        version: 2.0.1(prettier@3.3.1)
+        version: 2.0.1(prettier@3.3.2)
       react-devtools:
         specifier: 5.2.0
         version: 5.2.0(bufferutil@4.0.8)(utf-8-validate@5.0.10)
@@ -134,8 +134,8 @@ importers:
         specifier: 3.4.4
         version: 3.4.4
       tsx:
-        specifier: 4.15.1
-        version: 4.15.1
+        specifier: 4.15.2
+        version: 4.15.2
       typescript:
         specifier: 5.4.5
         version: 5.4.5
@@ -250,7 +250,7 @@ importers:
         version: 7.51.5(react@19.0.0-rc-9d4fba0788-20240530)
       react-hook-form-mui:
         specifier: 7.0.0
-        version: 7.0.0(@mui/icons-material@5.15.19(@mui/material@5.15.19(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(@emotion/styled@11.11.5(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react-dom@19.0.0-rc-9d4fba0788-20240530(react@19.0.0-rc-9d4fba0788-20240530))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(@mui/material@5.15.19(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(@emotion/styled@11.11.5(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react-dom@19.0.0-rc-9d4fba0788-20240530(react@19.0.0-rc-9d4fba0788-20240530))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react-hook-form@7.51.5(react@19.0.0-rc-9d4fba0788-20240530))(react@19.0.0-rc-9d4fba0788-20240530)
+        version: 7.0.0(szplwmyfv5kdrzoa2ayly36z7i)
       react-i18next:
         specifier: 14.1.2
         version: 14.1.2(i18next@23.11.5)(react-dom@19.0.0-rc-9d4fba0788-20240530(react@19.0.0-rc-9d4fba0788-20240530))(react@19.0.0-rc-9d4fba0788-20240530)
@@ -292,14 +292,14 @@ importers:
         specifier: 4.4.10
         version: 4.4.10
       '@typescript-eslint/eslint-plugin':
-        specifier: 7.12.0
-        version: 7.12.0(@typescript-eslint/parser@7.12.0(eslint@8.57.0)(typescript@5.4.5))(eslint@8.57.0)(typescript@5.4.5)
+        specifier: 7.13.0
+        version: 7.13.0(@typescript-eslint/parser@7.13.0(eslint@8.57.0)(typescript@5.4.5))(eslint@8.57.0)(typescript@5.4.5)
       '@typescript-eslint/parser':
-        specifier: 7.12.0
-        version: 7.12.0(eslint@8.57.0)(typescript@5.4.5)
+        specifier: 7.13.0
+        version: 7.13.0(eslint@8.57.0)(typescript@5.4.5)
       '@vitejs/plugin-react':
-        specifier: 4.3.0
-        version: 4.3.0(vite@5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0))
+        specifier: 4.3.1
+        version: 4.3.1(vite@5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0))
       sass:
         specifier: 1.77.4
         version: 1.77.4
@@ -314,7 +314,7 @@ importers:
         version: vite-plugin-monaco-editor-new@1.1.3(monaco-editor@0.49.0)
       vite-plugin-sass-dts:
         specifier: 1.3.22
-        version: 1.3.22(postcss@8.4.38)(prettier@3.3.1)(sass@1.77.4)(vite@5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0))
+        version: 1.3.22(postcss@8.4.38)(prettier@3.3.2)(sass@1.77.4)(vite@5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0))
       vite-plugin-svgr:
         specifier: 4.2.0
         version: 4.2.0(rollup@4.17.2)(typescript@5.4.5)(vite@5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0))
@@ -1175,6 +1175,7 @@ packages:
   '@humanwhocodes/config-array@0.11.14':
     resolution: {integrity: sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==}
     engines: {node: '>=10.10.0'}
+    deprecated: Use @eslint/config-array instead
 
   '@humanwhocodes/module-importer@1.0.1':
     resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==}
@@ -1182,6 +1183,7 @@ packages:
 
   '@humanwhocodes/object-schema@2.0.3':
     resolution: {integrity: sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==}
+    deprecated: Use @eslint/object-schema instead
 
   '@isaacs/cliui@8.0.2':
     resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==}
@@ -1756,8 +1758,8 @@ packages:
   '@types/yauzl@2.10.3':
     resolution: {integrity: sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==}
 
-  '@typescript-eslint/eslint-plugin@7.12.0':
-    resolution: {integrity: sha512-7F91fcbuDf/d3S8o21+r3ZncGIke/+eWk0EpO21LXhDfLahriZF9CGj4fbAetEjlaBdjdSm9a6VeXbpbT6Z40Q==}
+  '@typescript-eslint/eslint-plugin@7.13.0':
+    resolution: {integrity: sha512-FX1X6AF0w8MdVFLSdqwqN/me2hyhuQg4ykN6ZpVhh1ij/80pTvDKclX1sZB9iqex8SjQfVhwMKs3JtnnMLzG9w==}
     engines: {node: ^18.18.0 || >=20.0.0}
     peerDependencies:
       '@typescript-eslint/parser': ^7.0.0
@@ -1767,8 +1769,8 @@ packages:
       typescript:
         optional: true
 
-  '@typescript-eslint/parser@7.12.0':
-    resolution: {integrity: sha512-dm/J2UDY3oV3TKius2OUZIFHsomQmpHtsV0FTh1WO8EKgHLQ1QCADUqscPgTpU+ih1e21FQSRjXckHn3txn6kQ==}
+  '@typescript-eslint/parser@7.13.0':
+    resolution: {integrity: sha512-EjMfl69KOS9awXXe83iRN7oIEXy9yYdqWfqdrFAYAAr6syP8eLEFI7ZE4939antx2mNgPRW/o1ybm2SFYkbTVA==}
     engines: {node: ^18.18.0 || >=20.0.0}
     peerDependencies:
       eslint: ^8.56.0
@@ -1777,12 +1779,12 @@ packages:
       typescript:
         optional: true
 
-  '@typescript-eslint/scope-manager@7.12.0':
-    resolution: {integrity: sha512-itF1pTnN6F3unPak+kutH9raIkL3lhH1YRPGgt7QQOh43DQKVJXmWkpb+vpc/TiDHs6RSd9CTbDsc/Y+Ygq7kg==}
+  '@typescript-eslint/scope-manager@7.13.0':
+    resolution: {integrity: sha512-ZrMCe1R6a01T94ilV13egvcnvVJ1pxShkE0+NDjDzH4nvG1wXpwsVI5bZCvE7AEDH1mXEx5tJSVR68bLgG7Dng==}
     engines: {node: ^18.18.0 || >=20.0.0}
 
-  '@typescript-eslint/type-utils@7.12.0':
-    resolution: {integrity: sha512-lib96tyRtMhLxwauDWUp/uW3FMhLA6D0rJ8T7HmH7x23Gk1Gwwu8UZ94NMXBvOELn6flSPiBrCKlehkiXyaqwA==}
+  '@typescript-eslint/type-utils@7.13.0':
+    resolution: {integrity: sha512-xMEtMzxq9eRkZy48XuxlBFzpVMDurUAfDu5Rz16GouAtXm0TaAoTFzqWUFPPuQYXI/CDaH/Bgx/fk/84t/Bc9A==}
     engines: {node: ^18.18.0 || >=20.0.0}
     peerDependencies:
       eslint: ^8.56.0
@@ -1791,12 +1793,12 @@ packages:
       typescript:
         optional: true
 
-  '@typescript-eslint/types@7.12.0':
-    resolution: {integrity: sha512-o+0Te6eWp2ppKY3mLCU+YA9pVJxhUJE15FV7kxuD9jgwIAa+w/ycGJBMrYDTpVGUM/tgpa9SeMOugSabWFq7bg==}
+  '@typescript-eslint/types@7.13.0':
+    resolution: {integrity: sha512-QWuwm9wcGMAuTsxP+qz6LBBd3Uq8I5Nv8xb0mk54jmNoCyDspnMvVsOxI6IsMmway5d1S9Su2+sCKv1st2l6eA==}
     engines: {node: ^18.18.0 || >=20.0.0}
 
-  '@typescript-eslint/typescript-estree@7.12.0':
-    resolution: {integrity: sha512-5bwqLsWBULv1h6pn7cMW5dXX/Y2amRqLaKqsASVwbBHMZSnHqE/HN4vT4fE0aFsiwxYvr98kqOWh1a8ZKXalCQ==}
+  '@typescript-eslint/typescript-estree@7.13.0':
+    resolution: {integrity: sha512-cAvBvUoobaoIcoqox1YatXOnSl3gx92rCZoMRPzMNisDiM12siGilSM4+dJAekuuHTibI2hVC2fYK79iSFvWjw==}
     engines: {node: ^18.18.0 || >=20.0.0}
     peerDependencies:
       typescript: '*'
@@ -1804,21 +1806,21 @@ packages:
       typescript:
         optional: true
 
-  '@typescript-eslint/utils@7.12.0':
-    resolution: {integrity: sha512-Y6hhwxwDx41HNpjuYswYp6gDbkiZ8Hin9Bf5aJQn1bpTs3afYY4GX+MPYxma8jtoIV2GRwTM/UJm/2uGCVv+DQ==}
+  '@typescript-eslint/utils@7.13.0':
+    resolution: {integrity: sha512-jceD8RgdKORVnB4Y6BqasfIkFhl4pajB1wVxrF4akxD2QPM8GNYjgGwEzYS+437ewlqqrg7Dw+6dhdpjMpeBFQ==}
     engines: {node: ^18.18.0 || >=20.0.0}
     peerDependencies:
       eslint: ^8.56.0
 
-  '@typescript-eslint/visitor-keys@7.12.0':
-    resolution: {integrity: sha512-uZk7DevrQLL3vSnfFl5bj4sL75qC9D6EdjemIdbtkuUmIheWpuiiylSY01JxJE7+zGrOWDZrp1WxOuDntvKrHQ==}
+  '@typescript-eslint/visitor-keys@7.13.0':
+    resolution: {integrity: sha512-nxn+dozQx+MK61nn/JP+M4eCkHDSxSLDpgE3WcQo0+fkjEolnaB5jswvIKC4K56By8MMgIho7f1PVxERHEo8rw==}
     engines: {node: ^18.18.0 || >=20.0.0}
 
   '@ungap/structured-clone@1.2.0':
     resolution: {integrity: sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==}
 
-  '@vitejs/plugin-react@4.3.0':
-    resolution: {integrity: sha512-KcEbMsn4Dpk+LIbHMj7gDPRKaTMStxxWRkRmxsg/jVdFdJCZWt1SchZcf0M4t8lIKdwwMsEyzhrcOXRrDPtOBw==}
+  '@vitejs/plugin-react@4.3.1':
+    resolution: {integrity: sha512-m/V2syj5CuVnaxcUJOQRel/Wr31FFXRFlnOoq1TVtkCxsY5veGMTEmpWHndrhB2U8ScHtCQB1e+4hWYExQc6Lg==}
     engines: {node: ^14.18.0 || >=16.0.0}
     peerDependencies:
       vite: ^4.2.0 || ^5.0.0
@@ -4160,8 +4162,8 @@ packages:
     peerDependencies:
      prettier: ^3.0.3
 
-  prettier@3.3.1:
-    resolution: {integrity: sha512-7CAwy5dRsxs8PHXT3twixW9/OEll8MLE0VRPCJyl7CkS6VHGPSlsVaWTiASPTyGyYRyApxlaWTzwUxVNrhcwDg==}
+  prettier@3.3.2:
+    resolution: {integrity: sha512-rAVeHYMcv8ATV5d508CFdn+8/pHPpXeIid1DdrPwXnaAdH7cqjVbpJaT5eq4yRAFU/lsbwYwSF/n5iNrdJHPQA==}
     engines: {node: '>=14'}
     hasBin: true
 
@@ -4819,8 +4821,8 @@ packages:
   tslib@2.6.2:
     resolution: {integrity: sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==}
 
-  tsx@4.15.1:
-    resolution: {integrity: sha512-k/6h17jA1KfUR7SpcteOa880zGmF56s8gMIcSqUR5avyNFi9nlCEKpMiHLrzrqyARGr52A/JablmGey1DEWbCA==}
+  tsx@4.15.2:
+    resolution: {integrity: sha512-kIZTOCmR37nEw0qxQks2dR+eZWSXydhTGmz7yx94vEiJtJGBTkUl0D/jt/5fey+CNdm6i3Cp+29WKRay9ScQUw==}
     engines: {node: '>=18.0.0'}
     hasBin: true
 
@@ -6411,14 +6413,14 @@ snapshots:
       '@types/node': 20.14.2
     optional: true
 
-  '@typescript-eslint/eslint-plugin@7.12.0(@typescript-eslint/parser@7.12.0(eslint@8.57.0)(typescript@5.4.5))(eslint@8.57.0)(typescript@5.4.5)':
+  '@typescript-eslint/eslint-plugin@7.13.0(@typescript-eslint/parser@7.13.0(eslint@8.57.0)(typescript@5.4.5))(eslint@8.57.0)(typescript@5.4.5)':
    dependencies:
      '@eslint-community/regexpp': 4.10.0
-      '@typescript-eslint/parser': 7.12.0(eslint@8.57.0)(typescript@5.4.5)
-      '@typescript-eslint/scope-manager': 7.12.0
-      '@typescript-eslint/type-utils': 7.12.0(eslint@8.57.0)(typescript@5.4.5)
-      '@typescript-eslint/utils': 7.12.0(eslint@8.57.0)(typescript@5.4.5)
-      '@typescript-eslint/visitor-keys': 7.12.0
+      '@typescript-eslint/parser': 7.13.0(eslint@8.57.0)(typescript@5.4.5)
+      '@typescript-eslint/scope-manager': 7.13.0
+      '@typescript-eslint/type-utils': 7.13.0(eslint@8.57.0)(typescript@5.4.5)
+      '@typescript-eslint/utils': 7.13.0(eslint@8.57.0)(typescript@5.4.5)
+      '@typescript-eslint/visitor-keys': 7.13.0
      eslint: 8.57.0
      graphemer: 1.4.0
      ignore: 5.3.1
@@ -6429,12 +6431,12 @@ snapshots:
    transitivePeerDependencies:
      - supports-color
 
-  '@typescript-eslint/parser@7.12.0(eslint@8.57.0)(typescript@5.4.5)':
+  '@typescript-eslint/parser@7.13.0(eslint@8.57.0)(typescript@5.4.5)':
    dependencies:
-      '@typescript-eslint/scope-manager': 7.12.0
-      '@typescript-eslint/types': 7.12.0
-      '@typescript-eslint/typescript-estree': 7.12.0(typescript@5.4.5)
-      '@typescript-eslint/visitor-keys': 7.12.0
+      '@typescript-eslint/scope-manager': 7.13.0
+      '@typescript-eslint/types': 7.13.0
+      '@typescript-eslint/typescript-estree': 7.13.0(typescript@5.4.5)
+      '@typescript-eslint/visitor-keys': 7.13.0
      debug: 4.3.4
      eslint: 8.57.0
    optionalDependencies:
@@ -6442,15 +6444,15 @@ snapshots:
    transitivePeerDependencies:
      - supports-color
 
-  '@typescript-eslint/scope-manager@7.12.0':
+  '@typescript-eslint/scope-manager@7.13.0':
    dependencies:
-      '@typescript-eslint/types': 7.12.0
-      '@typescript-eslint/visitor-keys': 7.12.0
+      '@typescript-eslint/types': 7.13.0
+      '@typescript-eslint/visitor-keys': 7.13.0
 
-  '@typescript-eslint/type-utils@7.12.0(eslint@8.57.0)(typescript@5.4.5)':
+  '@typescript-eslint/type-utils@7.13.0(eslint@8.57.0)(typescript@5.4.5)':
    dependencies:
-      '@typescript-eslint/typescript-estree': 7.12.0(typescript@5.4.5)
-      '@typescript-eslint/utils': 7.12.0(eslint@8.57.0)(typescript@5.4.5)
+      '@typescript-eslint/typescript-estree': 7.13.0(typescript@5.4.5)
+      '@typescript-eslint/utils': 7.13.0(eslint@8.57.0)(typescript@5.4.5)
      debug: 4.3.4
      eslint: 8.57.0
      ts-api-utils: 1.3.0(typescript@5.4.5)
@@ -6459,12 +6461,12 @@ snapshots:
    transitivePeerDependencies:
      - supports-color
 
-  '@typescript-eslint/types@7.12.0': {}
+  '@typescript-eslint/types@7.13.0': {}
 
-  '@typescript-eslint/typescript-estree@7.12.0(typescript@5.4.5)':
+  '@typescript-eslint/typescript-estree@7.13.0(typescript@5.4.5)':
    dependencies:
-      '@typescript-eslint/types': 7.12.0
-      '@typescript-eslint/visitor-keys': 7.12.0
+      '@typescript-eslint/types': 7.13.0
+      '@typescript-eslint/visitor-keys': 7.13.0
      debug: 4.3.4
      globby: 11.1.0
      is-glob: 4.0.3
@@ -6476,25 +6478,25 @@ snapshots:
    transitivePeerDependencies:
      - supports-color
 
-  '@typescript-eslint/utils@7.12.0(eslint@8.57.0)(typescript@5.4.5)':
+  '@typescript-eslint/utils@7.13.0(eslint@8.57.0)(typescript@5.4.5)':
    dependencies:
      '@eslint-community/eslint-utils': 4.4.0(eslint@8.57.0)
-      '@typescript-eslint/scope-manager': 7.12.0
-      '@typescript-eslint/types': 7.12.0
-      '@typescript-eslint/typescript-estree': 7.12.0(typescript@5.4.5)
+      '@typescript-eslint/scope-manager': 7.13.0
+      '@typescript-eslint/types': 7.13.0
+      '@typescript-eslint/typescript-estree': 7.13.0(typescript@5.4.5)
      eslint: 8.57.0
    transitivePeerDependencies:
      - supports-color
      - typescript
 
-  '@typescript-eslint/visitor-keys@7.12.0':
+  '@typescript-eslint/visitor-keys@7.13.0':
    dependencies:
-      '@typescript-eslint/types': 7.12.0
+      '@typescript-eslint/types': 7.13.0
      eslint-visitor-keys: 3.4.3
 
   '@ungap/structured-clone@1.2.0': {}
 
-  '@vitejs/plugin-react@4.3.0(vite@5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0))':
+  '@vitejs/plugin-react@4.3.1(vite@5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0))':
    dependencies:
      '@babel/core': 7.24.5
      '@babel/plugin-transform-react-jsx-self': 7.24.5(@babel/core@7.24.5)
@@ -7490,10 +7492,10 @@ snapshots:
      resolve: 1.22.8
      semver: 7.6.1
 
-  eslint-plugin-prettier@5.1.3(eslint-config-prettier@9.1.0(eslint@8.57.0))(eslint@8.57.0)(prettier@3.3.1):
+  eslint-plugin-prettier@5.1.3(eslint-config-prettier@9.1.0(eslint@8.57.0))(eslint@8.57.0)(prettier@3.3.2):
    dependencies:
      eslint: 8.57.0
-      prettier: 3.3.1
+      prettier: 3.3.2
      prettier-linter-helpers: 1.0.0
      synckit: 0.8.8
    optionalDependencies:
@@ -9130,12 +9132,12 @@ snapshots:
    dependencies:
      fast-diff: 1.3.0
 
-  prettier-plugin-toml@2.0.1(prettier@3.3.1):
+  prettier-plugin-toml@2.0.1(prettier@3.3.2):
    dependencies:
      '@taplo/lib': 0.4.0-alpha.2
-      prettier: 3.3.1
+      prettier: 3.3.2
 
-  prettier@3.3.1: {}
+  prettier@3.3.2: {}
 
   progress@2.0.3: {}
 
@@ -9210,8 +9212,8 @@ snapshots:
      react: 19.0.0-rc-9d4fba0788-20240530
      react-dom: 19.0.0-rc-9d4fba0788-20240530(react@19.0.0-rc-9d4fba0788-20240530)
 
-  ? react-hook-form-mui@7.0.0(@mui/icons-material@5.15.19(@mui/material@5.15.19(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(@emotion/styled@11.11.5(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react-dom@19.0.0-rc-9d4fba0788-20240530(react@19.0.0-rc-9d4fba0788-20240530))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(@mui/material@5.15.19(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(@emotion/styled@11.11.5(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react-dom@19.0.0-rc-9d4fba0788-20240530(react@19.0.0-rc-9d4fba0788-20240530))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react-hook-form@7.51.5(react@19.0.0-rc-9d4fba0788-20240530))(react@19.0.0-rc-9d4fba0788-20240530)
-  : dependencies:
+  react-hook-form-mui@7.0.0(szplwmyfv5kdrzoa2ayly36z7i):
+    dependencies:
      '@mui/material': 5.15.19(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(@emotion/styled@11.11.5(@emotion/react@11.11.4(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0))(react-dom@19.0.0-rc-9d4fba0788-20240530(react@19.0.0-rc-9d4fba0788-20240530))(react@19.0.0-rc-9d4fba0788-20240530)(types-react@19.0.0-rc.0)
      react: 19.0.0-rc-9d4fba0788-20240530
      react-hook-form: 7.51.5(react@19.0.0-rc-9d4fba0788-20240530)
@@ -9912,7 +9914,7 @@ snapshots:
 
   tslib@2.6.2: {}
 
-  tsx@4.15.1:
+  tsx@4.15.2:
    dependencies:
      esbuild: 0.21.4
      get-tsconfig: 4.7.5
@@ -10132,11 +10134,11 @@ snapshots:
      esbuild: 0.19.12
      monaco-editor: 0.49.0
 
-  vite-plugin-sass-dts@1.3.22(postcss@8.4.38)(prettier@3.3.1)(sass@1.77.4)(vite@5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0)):
+  vite-plugin-sass-dts@1.3.22(postcss@8.4.38)(prettier@3.3.2)(sass@1.77.4)(vite@5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0)):
    dependencies:
      postcss: 8.4.38
      postcss-js: 4.0.1(postcss@8.4.38)
-      prettier: 3.3.1
+      prettier: 3.3.2
      sass: 1.77.4
      vite: 5.2.13(@types/node@20.14.2)(less@4.2.0)(sass@1.77.4)(stylus@0.62.0)
@@ -69,7 +69,7 @@ export const ProviderButton = () => {
         sx={{ textTransform: "capitalize" }}
         onClick={() => setOpen(true)}
       >
-        {t("Provider")}
+        {t("Proxy Provider")}
       </Button>
 
       <BaseDialog
@@ -171,7 +171,7 @@ export const ProviderButton = () => {
             <IconButton
               size="small"
               color="inherit"
-              title="Update Provider"
+              title={`${t("Update")}${t("Proxy Provider")}`}
               onClick={() => handleUpdate(key, index)}
               sx={{
                 animation: updating[index]
@@ -67,7 +67,7 @@ export const ProviderButton = () => {
         sx={{ textTransform: "capitalize" }}
         onClick={() => setOpen(true)}
       >
-        {t("Provider")}
+        {t("Rule Provider")}
       </Button>
 
       <BaseDialog
@@ -145,7 +145,7 @@ export const ProviderButton = () => {
             <IconButton
               size="small"
               color="inherit"
-              title="Update Provider"
+              title={`${t("Update")}${t("Rule Provider")}`}
              onClick={() => handleUpdate(key, index)}
              sx={{
                animation: updating[index]
@@ -21,6 +21,9 @@
 
   "Proxies": "Proxies",
   "Proxy Groups": "Proxy Groups",
+  "Proxy Provider": "Proxy Provider",
+  "Update All": "Update All",
+  "Update At": "Update At",
   "rule": "rule",
   "global": "global",
   "direct": "direct",
@@ -68,6 +71,8 @@
   "To End": "To End",
 
   "Connections": "Connections",
+  "Table View": "Table View",
+  "List View": "List View",
   "Close All": "Close All",
   "Default": "Default",
   "Download Speed": "Download Speed",
@@ -86,8 +91,7 @@
   "Close Connection": "Close Connection",
 
   "Rules": "Rules",
-  "Update All": "Update All",
-  "Update At": "Update At",
+  "Rule Provider": "Rule Provider",
 
   "Logs": "Logs",
   "Pause": "Pause",
@@ -21,6 +21,9 @@
 
   "Proxies": "پراکسیها",
   "Proxy Groups": "گروههای پراکسی",
+  "Proxy Provider": "تأمینکننده پروکسی",
+  "Update All": "بهروزرسانی همه",
+  "Update At": "بهروزرسانی در",
   "rule": "قانون",
   "global": "جهانی",
   "direct": "مستقیم",
@@ -68,6 +71,8 @@
   "To End": "به پایان",
 
   "Connections": "اتصالات",
+  "Table View": "نمای جدولی",
+  "List View": "نمای لیستی",
   "Close All": "بستن همه",
   "Default": "پیشفرض",
   "Download Speed": "سرعت دانلود",
@@ -86,8 +91,7 @@
   "Close Connection": "بستن اتصال",
 
   "Rules": "قوانین",
-  "Update All": "بهروزرسانی همه",
-  "Update At": "بهروزرسانی در",
+  "Rule Provider": "تأمینکننده قانون",
 
   "Logs": "لاگها",
   "Pause": "توقف",
@@ -21,6 +21,9 @@
 
   "Proxies": "Прокси",
   "Proxy Groups": "Группы прокси",
+  "Proxy Provider": "Провайдер прокси",
+  "Update All": "Обновить все",
+  "Update At": "Обновлено в",
   "rule": "правила",
   "global": "глобальный",
   "direct": "прямой",
@@ -68,6 +71,8 @@
   "To End": "Вниз",
 
   "Connections": "Соединения",
+  "Table View": "Tablichnyy vid",
+  "List View": "Spiskovyy vid",
   "Close All": "Закрыть всё",
   "Default": "По умолчанию",
   "Download Speed": "Скорость загрузки",
@@ -86,8 +91,7 @@
   "Close Connection": "Закрыть соединение",
 
   "Rules": "Правила",
-  "Update All": "Обновить все",
-  "Update At": "Обновлено в",
+  "Rule Provider": "Провайдер правило",
 
   "Logs": "Логи",
   "Pause": "Пауза",
@@ -21,6 +21,9 @@
 
   "Proxies": "代理",
   "Proxy Groups": "代理组",
+  "Proxy Provider": "代理集合",
+  "Update All": "更新全部",
+  "Update At": "更新于",
   "rule": "规则",
   "global": "全局",
   "direct": "直连",
@@ -68,6 +71,8 @@
   "To End": "移到末尾",
 
   "Connections": "连接",
+  "Table View": "表格视图",
+  "List View": "列表视图",
   "Close All": "关闭全部",
   "Default": "默认",
   "Download Speed": "下载速度",
@@ -86,8 +91,7 @@
   "Close Connection": "关闭连接",
 
   "Rules": "规则",
-  "Update All": "更新全部",
-  "Update At": "更新于",
+  "Rule Provider": "规则集合",
 
   "Logs": "日志",
   "Pause": "暂停",
@@ -143,9 +143,13 @@ const ConnectionsPage = () => {
             }
           >
             {isTableLayout ? (
-              <TableChartRounded fontSize="inherit" />
-            ) : (
-              <TableRowsRounded fontSize="inherit" />
+              <span title={t("List View")}>
+                <TableRowsRounded fontSize="inherit" />
+              </span>
+            ) : (
+              <span title={t("Table View")}>
+                <TableChartRounded fontSize="inherit" />
+              </span>
             )}
           </IconButton>
 
@@ -0,0 +1,46 @@
+From 97eb5d51b4a584a60e5d096bdb6b33edc9f50d8d Mon Sep 17 00:00:00 2001
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Mon, 15 Jan 2024 12:43:38 +0000
+Subject: [PATCH] net: sfp-bus: fix SFP mode detect from bitrate
+
+The referenced commit moved the setting of the Autoneg and pause bits
+early in sfp_parse_support(). However, we check whether the modes are
+empty before using the bitrate to set some modes. Setting these bits
+so early causes that test to always be false, preventing this working,
+and thus some modules that used to work no longer do.
+
+Move them just before the call to the quirk.
+
+Fixes: 8110633db49d ("net: sfp-bus: allow SFP quirks to override Autoneg and pause bits")
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Link: https://lore.kernel.org/r/E1rPMJW-001Ahf-L0@rmk-PC.armlinux.org.uk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+ drivers/net/phy/sfp-bus.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/phy/sfp-bus.c
++++ b/drivers/net/phy/sfp-bus.c
+@@ -151,10 +151,6 @@ void sfp_parse_support(struct sfp_bus *b
+ 	unsigned int br_min, br_nom, br_max;
+ 	__ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
+ 
+-	phylink_set(modes, Autoneg);
+-	phylink_set(modes, Pause);
+-	phylink_set(modes, Asym_Pause);
+-
+ 	/* Decode the bitrate information to MBd */
+ 	br_min = br_nom = br_max = 0;
+ 	if (id->base.br_nominal) {
+@@ -339,6 +335,10 @@ void sfp_parse_support(struct sfp_bus *b
+ 		}
+ 	}
+ 
++	phylink_set(modes, Autoneg);
++	phylink_set(modes, Pause);
++	phylink_set(modes, Asym_Pause);
++
+ 	if (bus->sfp_quirk && bus->sfp_quirk->modes)
+ 		bus->sfp_quirk->modes(id, modes, interfaces);
+
@@ -0,0 +1,61 @@
+From 629c701fc39f1ada9416e0766a86729e83bde86c Mon Sep 17 00:00:00 2001
+Message-ID: <629c701fc39f1ada9416e0766a86729e83bde86c.1694465766.git.daniel@makrotopia.org>
+From: Daniel Golle <daniel@makrotopia.org>
+Date: Mon, 11 Sep 2023 21:27:44 +0100
+Subject: [PATCH] serial: 8250_mtk: track busclk state to avoid bus error
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
+    Jiri Slaby <jirislaby@kernel.org>,
+    Matthias Brugger <matthias.bgg@gmail.com>,
+    AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>,
+    Daniel Golle <daniel@makrotopia.org>,
+    John Ogness <john.ogness@linutronix.de>,
+    Chen-Yu Tsai <wenst@chromium.org>,
+    Changqi Hu <changqi.hu@mediatek.com>,
+    linux-kernel@vger.kernel.org,
+    linux-serial@vger.kernel.org,
+    linux-arm-kernel@lists.infradead.org,
+    linux-mediatek@lists.infradead.org
+
+Commit e32a83c70cf9 ("serial: 8250-mtk: modify mtk uart power and
+clock management") introduced polling a debug register to make sure
+the UART is idle before disabling the bus clock. However, at least on
+some MediaTek SoCs access to that very debug register requires the bus
+clock being enabled. Hence calling the suspend function while already
+in suspended state results in that register access triggering a bus
+error. In order to avoid that, track the state of the bus clock and
+only poll the debug register if not already in suspended state.
+
+Fixes: e32a83c70cf9 ("serial: 8250-mtk: modify mtk uart power and clock management")
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/tty/serial/8250/8250_mtk.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/tty/serial/8250/8250_mtk.c
++++ b/drivers/tty/serial/8250/8250_mtk.c
+@@ -32,7 +32,7 @@
+ #define MTK_UART_RXTRI_AD	0x14	/* RX Trigger address */
+ #define MTK_UART_FRACDIV_L	0x15	/* Fractional divider LSB address */
+ #define MTK_UART_FRACDIV_M	0x16	/* Fractional divider MSB address */
+-#define MTK_UART_DEBUG0	0x18
++#define MTK_UART_DEBUG0	0x18
+ #define MTK_UART_IER_XOFFI	0x20	/* Enable XOFF character interrupt */
+ #define MTK_UART_IER_RTSI	0x40	/* Enable RTS Modem status interrupt */
+ #define MTK_UART_IER_CTSI	0x80	/* Enable CTS Modem status interrupt */
+@@ -418,13 +418,12 @@ static int __maybe_unused mtk8250_runtim
+ 	struct mtk8250_data *data = dev_get_drvdata(dev);
+ 	struct uart_8250_port *up = serial8250_get_port(data->line);
+ 
+-	/* wait until UART in idle status */
+-	while
+-		(serial_in(up, MTK_UART_DEBUG0));
+-
+ 	if (data->clk_count == 0U) {
+ 		dev_dbg(dev, "%s clock count is 0\n", __func__);
+ 	} else {
++		/* wait until UART in idle status */
++		while
++			(serial_in(up, MTK_UART_DEBUG0));
+ 		clk_disable_unprepare(data->bus_clk);
+ 		data->clk_count--;
+ 	}
@@ -0,0 +1,61 @@
+From 629c701fc39f1ada9416e0766a86729e83bde86c Mon Sep 17 00:00:00 2001
+Message-ID: <629c701fc39f1ada9416e0766a86729e83bde86c.1694465766.git.daniel@makrotopia.org>
+From: Daniel Golle <daniel@makrotopia.org>
+Date: Mon, 11 Sep 2023 21:27:44 +0100
+Subject: [PATCH] serial: 8250_mtk: track busclk state to avoid bus error
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
+    Jiri Slaby <jirislaby@kernel.org>,
+    Matthias Brugger <matthias.bgg@gmail.com>,
+    AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>,
+    Daniel Golle <daniel@makrotopia.org>,
+    John Ogness <john.ogness@linutronix.de>,
+    Chen-Yu Tsai <wenst@chromium.org>,
+    Changqi Hu <changqi.hu@mediatek.com>,
+    linux-kernel@vger.kernel.org,
+    linux-serial@vger.kernel.org,
+    linux-arm-kernel@lists.infradead.org,
+    linux-mediatek@lists.infradead.org
+
+Commit e32a83c70cf9 ("serial: 8250-mtk: modify mtk uart power and
+clock management") introduced polling a debug register to make sure
+the UART is idle before disabling the bus clock. However, at least on
+some MediaTek SoCs access to that very debug register requires the bus
+clock being enabled. Hence calling the suspend function while already
+in suspended state results in that register access triggering a bus
+error. In order to avoid that, track the state of the bus clock and
+only poll the debug register if not already in suspended state.
+
+Fixes: e32a83c70cf9 ("serial: 8250-mtk: modify mtk uart power and clock management")
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/tty/serial/8250/8250_mtk.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/tty/serial/8250/8250_mtk.c
++++ b/drivers/tty/serial/8250/8250_mtk.c
+@@ -32,7 +32,7 @@
+ #define MTK_UART_RXTRI_AD	0x14	/* RX Trigger address */
+ #define MTK_UART_FRACDIV_L	0x15	/* Fractional divider LSB address */
+ #define MTK_UART_FRACDIV_M	0x16	/* Fractional divider MSB address */
+-#define MTK_UART_DEBUG0	0x18
++#define MTK_UART_DEBUG0	0x18
+ #define MTK_UART_IER_XOFFI	0x20	/* Enable XOFF character interrupt */
+ #define MTK_UART_IER_RTSI	0x40	/* Enable RTS Modem status interrupt */
+ #define MTK_UART_IER_CTSI	0x80	/* Enable CTS Modem status interrupt */
+@@ -418,13 +418,12 @@ static int __maybe_unused mtk8250_runtim
+ 	struct mtk8250_data *data = dev_get_drvdata(dev);
+ 	struct uart_8250_port *up = serial8250_get_port(data->line);
+ 
+-	/* wait until UART in idle status */
+-	while
+-		(serial_in(up, MTK_UART_DEBUG0));
+-
+ 	if (data->clk_count == 0U) {
+ 		dev_dbg(dev, "%s clock count is 0\n", __func__);
+ 	} else {
++		/* wait until UART in idle status */
++		while
++			(serial_in(up, MTK_UART_DEBUG0));
+ 		clk_disable_unprepare(data->bus_clk);
+ 		data->clk_count--;
+ 	}
@@ -10,6 +10,7 @@ import (
     "github.com/metacubex/mihomo/component/loopback"
     "github.com/metacubex/mihomo/component/resolver"
     C "github.com/metacubex/mihomo/constant"
+    "github.com/metacubex/mihomo/constant/features"
 )
 
 type Direct struct {
@@ -24,9 +25,11 @@ type DirectOption struct {
 
 // DialContext implements C.ProxyAdapter
 func (d *Direct) DialContext(ctx context.Context, metadata *C.Metadata, opts ...dialer.Option) (C.Conn, error) {
+    if !features.CMFA {
     if err := d.loopBack.CheckConn(metadata); err != nil {
         return nil, err
     }
+    }
     opts = append(opts, dialer.WithResolver(resolver.DefaultResolver))
     c, err := dialer.DialContext(ctx, "tcp", metadata.RemoteAddress(), d.Base.DialOptions(opts...)...)
     if err != nil {
@@ -38,9 +41,11 @@ func (d *Direct) DialContext(ctx context.Context, metadata *C.Metadata, opts ...
 
 // ListenPacketContext implements C.ProxyAdapter
 func (d *Direct) ListenPacketContext(ctx context.Context, metadata *C.Metadata, opts ...dialer.Option) (C.PacketConn, error) {
+    if !features.CMFA {
     if err := d.loopBack.CheckPacketConn(metadata); err != nil {
         return nil, err
     }
+    }
     // net.UDPConn.WriteTo only working with *net.UDPAddr, so we need a net.UDPAddr
     if !metadata.Resolved() {
         ip, err := resolver.ResolveIPWithResolver(ctx, metadata.Host, resolver.DefaultResolver)
@@ -8,7 +8,6 @@
#

START=99
-STOP=99
USE_PROCD=1

CONFIG=koolproxy
21
shadowsocks-rust/Cargo.lock
generated
@@ -532,18 +532,18 @@ dependencies = [

[[package]]
name = "clap"
-version = "4.5.6"
+version = "4.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a9689a29b593160de5bc4aacab7b5d54fb52231de70122626c178e6a368994c7"
+checksum = "5db83dced34638ad474f39f250d7fea9598bdd239eaced1bdf45d597da0f433f"
dependencies = [
 "clap_builder",
]

[[package]]
name = "clap_builder"
-version = "4.5.6"
+version = "4.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2e5387378c84f6faa26890ebf9f0a92989f8873d4d380467bcd0d8d8620424df"
+checksum = "f7e204572485eb3fbf28f871612191521df159bc3e15a9f5064c66dba3a8c05f"
dependencies = [
 "anstream",
 "anstyle",
@@ -1397,12 +1397,12 @@ dependencies = [

[[package]]
name = "http-body-util"
-version = "0.1.1"
+version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d"
+checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f"
dependencies = [
 "bytes",
-"futures-core",
+"futures-util",
 "http 1.1.0",
 "http-body",
 "pin-project-lite",
@@ -1410,9 +1410,9 @@ dependencies = [

[[package]]
name = "httparse"
-version = "1.9.1"
+version = "1.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8720bf4c5bfb5b6c350840c4cd14b787bf00ed51c148c857fbf7a6ddb7062764"
+checksum = "9f3935c160d00ac752e09787e6e6bfc26494c2183cc922f1bc678a60d4733bc2"

[[package]]
name = "httpdate"
@@ -2653,7 +2653,6 @@ dependencies = [
 "futures-core",
 "futures-util",
 "h2 0.4.5",
-"hickory-resolver",
 "http 1.1.0",
 "http-body",
 "http-body-util",
@@ -3220,6 +3219,7 @@ dependencies = [
 "etherparse",
 "futures",
 "hickory-resolver",
+"http 1.1.0",
 "http-body-util",
 "hyper",
 "idna 1.0.0",
@@ -3229,6 +3229,7 @@ dependencies = [
 "libc",
 "log",
 "lru_time_cache",
+"mime",
 "native-tls",
 "nix",
 "once_cell",
@@ -121,7 +121,7 @@ service = ["local", "server", "manager"]
winservice = ["service", "windows-service"]

# Enables Hickory-DNS for replacing tokio's builtin DNS resolver
-hickory-dns = ["shadowsocks-service/hickory-dns", "reqwest/hickory-dns"]
+hickory-dns = ["shadowsocks-service/hickory-dns"]
# Hickory-DNS was renamed from Trust-DNS, keep compatibility.
trust-dns = ["hickory-dns"]
dns-over-tls = ["shadowsocks-service/dns-over-tls"]
@@ -161,12 +161,11 @@ local-socks4 = ["local", "shadowsocks-service/local-socks4"]
# Enable Tun interface protocol for sslocal
local-tun = ["local", "shadowsocks-service/local-tun", "ipnet"]
# Enable Fake DNS for sslocal
-local-fake-dns = ["local", "shadowsocks-service/local-fake-dns"]
+local-fake-dns = ["local", "shadowsocks-service/local-fake-dns", "ipnet"]
# sslocal support online URL (SIP008 Online Configuration Delivery)
# https://shadowsocks.org/doc/sip008.html
local-online-config = [
    "local",
-    "reqwest",
    "mime",
    "shadowsocks-service/local-online-config",
]
@@ -98,7 +98,7 @@ local-tun = ["local", "etherparse", "tun2", "smoltcp"]
local-fake-dns = ["local", "trust-dns", "sled", "bson"]
# sslocal support online URL (SIP008 Online Configuration Delivery)
# https://shadowsocks.org/doc/sip008.html
-local-online-config = ["local"]
+local-online-config = ["local", "local-http", "mime", "http"]

# Enable Stream Cipher Protocol
# WARN: Stream Cipher Protocol is proved to be insecure
@@ -157,6 +157,7 @@ libc = "0.2.141"

hyper = { version = "1.3", optional = true, features = ["full"] }
http-body-util = { version = "0.1", optional = true }
+http = { version = "1.1", optional = true }

hickory-resolver = { version = "0.24", optional = true, features = [
    "serde-config",
@@ -166,6 +167,7 @@ idna = "1.0"
ipnet = "2.9"
iprange = "0.6"
regex = "1.4"
+mime = { version = "0.3", optional = true }

tun2 = { version = "1", optional = true, features = ["async"] }
etherparse = { version = "0.15", optional = true }
@@ -70,14 +70,7 @@ use serde::{Deserialize, Serialize};
use shadowsocks::relay::socks5::Address;
use shadowsocks::{
    config::{
-        ManagerAddr,
-        Mode,
-        ReplayAttackPolicy,
-        ServerAddr,
-        ServerConfig,
-        ServerSource,
-        ServerUser,
-        ServerUserManager,
+        ManagerAddr, Mode, ReplayAttackPolicy, ServerAddr, ServerConfig, ServerSource, ServerUser, ServerUserManager,
        ServerWeight,
    },
    crypto::CipherKind,
@@ -234,6 +227,10 @@ struct SSConfig {
    #[cfg(feature = "local-online-config")]
    #[serde(skip_serializing_if = "Option::is_none")]
    version: Option<u32>,
+
+    #[cfg(feature = "local-online-config")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    online_config: Option<SSOnlineConfig>,
}

#[derive(Serialize, Deserialize, Debug, Default)]
@@ -406,6 +403,13 @@ struct SSServerExtConfig {
    outbound_bind_interface: Option<String>,
}

+#[cfg(feature = "local-online-config")]
+#[derive(Serialize, Deserialize, Debug, Default)]
+struct SSOnlineConfig {
+    config_url: String,
+    update_interval: Option<u64>,
+}
+
/// Server config type
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ConfigType {
@@ -1237,6 +1241,17 @@ impl LocalInstanceConfig {
    }
}

+/// OnlineConfiguration (SIP008)
+/// https://shadowsocks.org/doc/sip008.html
+#[cfg(feature = "local-online-config")]
+#[derive(Debug, Clone)]
+pub struct OnlineConfig {
+    /// SIP008 URL
+    pub config_url: String,
+    /// Update interval, 3600s by default
+    pub update_interval: Option<Duration>,
+}
+
/// Configuration
#[derive(Clone, Debug)]
pub struct Config {
@@ -1341,6 +1356,11 @@ pub struct Config {
    /// Workers in runtime
    /// It should be replaced with metrics APIs: https://github.com/tokio-rs/tokio/issues/4073
    pub worker_count: usize,
+
+    /// OnlineConfiguration (SIP008)
+    /// https://shadowsocks.org/doc/sip008.html
+    #[cfg(feature = "local-online-config")]
+    pub online_config: Option<OnlineConfig>,
}

/// Configuration parsing error kind
@@ -1462,6 +1482,9 @@ impl Config {
            config_path: None,

            worker_count: 1,
+
+            #[cfg(feature = "local-online-config")]
+            online_config: None,
        }
    }

@@ -2352,6 +2375,14 @@ impl Config {
            nconfig.acl = Some(acl);
        }

+        #[cfg(feature = "local-online-config")]
+        if let Some(online_config) = config.online_config {
+            nconfig.online_config = Some(OnlineConfig {
+                config_url: online_config.config_url,
+                update_interval: online_config.update_interval.map(Duration::from_secs),
+            });
+        }
+
        Ok(nconfig)
    }

@@ -3090,6 +3121,15 @@ impl fmt::Display for Config {
            jconf.acl = Some(acl.file_path().to_str().unwrap().to_owned());
        }

+        // OnlineConfig
+        #[cfg(feature = "local-online-config")]
+        if let Some(ref online_config) = self.online_config {
+            jconf.online_config = Some(SSOnlineConfig {
+                config_url: online_config.config_url.clone(),
+                update_interval: online_config.update_interval.as_ref().map(Duration::as_secs),
+            });
+        }
+
        write!(f, "{}", json5::to_string(&jconf).unwrap())
    }
}
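The hunks above add an `online_config` section to the JSON configuration and expose it as `Config::online_config`. A minimal parsing sketch follows; it assumes only the API visible in this diff (Config::load_from_str, ConfigType::Local, the local-online-config feature), the URL is a placeholder, and the fields outside `online_config` are illustrative.

use shadowsocks_service::config::{Config, ConfigType};

fn parse_online_config_example() {
    // Hypothetical local configuration carrying a SIP008 delivery URL.
    let json = r#"{
        "local_address": "127.0.0.1",
        "local_port": 1080,
        "online_config": {
            "config_url": "https://example.com/sip008.json",
            "update_interval": 3600
        }
    }"#;

    let config = Config::load_from_str(json, ConfigType::Local).expect("parse config");

    // Only present when built with the local-online-config feature;
    // update_interval is parsed as seconds and converted with Duration::from_secs.
    #[cfg(feature = "local-online-config")]
    if let Some(oc) = &config.online_config {
        println!("SIP008 url: {}, interval: {:?}", oc.config_url, oc.update_interval);
    }
}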
@@ -2,20 +2,26 @@

use std::{
    collections::VecDeque,
+    fmt::Debug,
+    future::Future,
    io::{self, ErrorKind},
+    pin::Pin,
    sync::Arc,
+    task::{Context, Poll},
    time::{Duration, Instant},
};

use hyper::{
-    body,
+    body::{self, Body},
    client::conn::{http1, http2},
    http::uri::Scheme,
+    rt::{Sleep, Timer},
    Request,
    Response,
};
use log::{error, trace};
use lru_time_cache::LruCache;
+use pin_project::pin_project;
use shadowsocks::relay::Address;
use tokio::sync::Mutex;

@@ -29,33 +35,96 @@ use super::{

const CONNECTION_EXPIRE_DURATION: Duration = Duration::from_secs(20);

+/// HTTPClient API request errors
#[derive(thiserror::Error, Debug)]
pub enum HttpClientError {
+    /// Errors from hyper
    #[error("{0}")]
    Hyper(#[from] hyper::Error),
+    /// std::io::Error
    #[error("{0}")]
    Io(#[from] io::Error),
}

-#[derive(Clone)]
-pub struct HttpClient {
-    #[allow(clippy::type_complexity)]
-    cache_conn: Arc<Mutex<LruCache<Address, VecDeque<(HttpConnection, Instant)>>>>,
-}
-
-impl HttpClient {
-    pub fn new() -> HttpClient {
+#[derive(Clone, Debug)]
+pub struct TokioTimer;
+
+impl Timer for TokioTimer {
+    fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>> {
+        Box::pin(TokioSleep {
+            inner: tokio::time::sleep(duration),
+        })
+    }
+
+    fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>> {
+        Box::pin(TokioSleep {
+            inner: tokio::time::sleep_until(deadline.into()),
+        })
+    }
+
+    fn reset(&self, sleep: &mut Pin<Box<dyn Sleep>>, new_deadline: Instant) {
+        if let Some(sleep) = sleep.as_mut().downcast_mut_pin::<TokioSleep>() {
+            sleep.reset(new_deadline.into())
+        }
+    }
+}
+
+#[pin_project]
+pub(crate) struct TokioSleep {
+    #[pin]
+    pub(crate) inner: tokio::time::Sleep,
+}
+
+impl Future for TokioSleep {
+    type Output = ();
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        self.project().inner.poll(cx)
+    }
+}
+
+impl Sleep for TokioSleep {}
+
+impl TokioSleep {
+    pub fn reset(self: Pin<&mut Self>, deadline: Instant) {
+        self.project().inner.as_mut().reset(deadline.into());
+    }
+}
+
+/// HTTPClient, supporting HTTP/1.1 and H2, HTTPS.
+pub struct HttpClient<B> {
+    #[allow(clippy::type_complexity)]
+    cache_conn: Arc<Mutex<LruCache<Address, VecDeque<(HttpConnection<B>, Instant)>>>>,
+}
+
+impl<B> Clone for HttpClient<B> {
+    fn clone(&self) -> Self {
+        HttpClient {
+            cache_conn: self.cache_conn.clone(),
+        }
+    }
+}
+
+impl<B> HttpClient<B>
+where
+    B: Body + Send + Unpin + Debug + 'static,
+    B::Data: Send,
+    B::Error: Into<Box<dyn ::std::error::Error + Send + Sync>>,
+{
+    /// Create a new HttpClient
+    pub fn new() -> HttpClient<B> {
        HttpClient {
            cache_conn: Arc::new(Mutex::new(LruCache::with_expiry_duration(CONNECTION_EXPIRE_DURATION))),
        }
    }

+    /// Make HTTP requests
    #[inline]
    pub async fn send_request(
        &self,
        context: Arc<ServiceContext>,
-        req: Request<body::Incoming>,
-        balancer: &PingBalancer,
+        req: Request<B>,
+        balancer: Option<&PingBalancer>,
    ) -> Result<Response<body::Incoming>, HttpClientError> {
        let host = match host_addr(req.uri()) {
            Some(h) => h,
@@ -96,7 +165,7 @@ impl HttpClient {
        self.send_request_conn(host, c, req).await.map_err(Into::into)
    }

-    async fn get_cached_connection(&self, host: &Address) -> Option<HttpConnection> {
+    async fn get_cached_connection(&self, host: &Address) -> Option<HttpConnection<B>> {
        if let Some(q) = self.cache_conn.lock().await.get_mut(host) {
            while let Some((c, inst)) = q.pop_front() {
                let now = Instant::now();
@@ -115,8 +184,8 @@ impl HttpClient {
    async fn send_request_conn(
        &self,
        host: Address,
-        mut c: HttpConnection,
-        req: Request<body::Incoming>,
+        mut c: HttpConnection<B>,
+        req: Request<B>,
    ) -> hyper::Result<Response<body::Incoming>> {
        trace!("HTTP making request to host: {}, request: {:?}", host, req);
        let response = c.send_request(req).await?;
@@ -141,19 +210,24 @@ impl HttpClient {
        }
    }

-enum HttpConnection {
-    Http1(http1::SendRequest<body::Incoming>),
-    Http2(http2::SendRequest<body::Incoming>),
+enum HttpConnection<B> {
+    Http1(http1::SendRequest<B>),
+    Http2(http2::SendRequest<B>),
}

-impl HttpConnection {
+impl<B> HttpConnection<B>
+where
+    B: Body + Send + Unpin + 'static,
+    B::Data: Send,
+    B::Error: Into<Box<dyn ::std::error::Error + Send + Sync>>,
+{
    async fn connect(
        context: Arc<ServiceContext>,
        scheme: &Scheme,
        host: Address,
        domain: &str,
-        balancer: &PingBalancer,
-    ) -> io::Result<HttpConnection> {
+        balancer: Option<&PingBalancer>,
+    ) -> io::Result<HttpConnection<B>> {
        if *scheme != Scheme::HTTP && *scheme != Scheme::HTTPS {
            return Err(io::Error::new(ErrorKind::InvalidInput, "invalid scheme"));
        }
@@ -173,7 +247,7 @@ impl HttpConnection {
        scheme: &Scheme,
        host: Address,
        stream: AutoProxyClientStream,
-    ) -> io::Result<HttpConnection> {
+    ) -> io::Result<HttpConnection<B>> {
        trace!(
            "HTTP making new HTTP/1.1 connection to host: {}, scheme: {}",
            host,
@@ -207,7 +281,7 @@ impl HttpConnection {
        host: Address,
        domain: &str,
        stream: AutoProxyClientStream,
-    ) -> io::Result<HttpConnection> {
+    ) -> io::Result<HttpConnection<B>> {
        trace!("HTTP making new TLS connection to host: {}, scheme: {}", host, scheme);

        // TLS handshake, check alpn for h2 support.
@@ -216,6 +290,7 @@ impl HttpConnection {
        if stream.negotiated_http2() {
            // H2 connnection
            let (send_request, connection) = match http2::Builder::new(TokioExecutor)
+                .timer(TokioTimer)
                .keep_alive_interval(Duration::from_secs(15))
                .handshake(TokioIo::new(stream))
                .await
@@ -254,7 +329,7 @@ impl HttpConnection {
    }

    #[inline]
-    pub async fn send_request(&mut self, req: Request<body::Incoming>) -> hyper::Result<Response<body::Incoming>> {
+    pub async fn send_request(&mut self, req: Request<B>) -> hyper::Result<Response<body::Incoming>> {
        match self {
            HttpConnection::Http1(r) => r.send_request(req).await,
            HttpConnection::Http2(r) => r.send_request(req).await,
@@ -35,7 +35,7 @@ use super::{
pub struct HttpService {
    context: Arc<ServiceContext>,
    peer_addr: SocketAddr,
-    http_client: HttpClient,
+    http_client: HttpClient<body::Incoming>,
    balancer: PingBalancer,
}

@@ -43,7 +43,7 @@ impl HttpService {
    pub fn new(
        context: Arc<ServiceContext>,
        peer_addr: SocketAddr,
-        http_client: HttpClient,
+        http_client: HttpClient<body::Incoming>,
        balancer: PingBalancer,
    ) -> HttpService {
        HttpService {
@@ -90,7 +90,7 @@ impl HttpService {
            // Connect to Shadowsocks' remote
            //
            // FIXME: What STATUS should I return for connection error?
-            let (mut stream, server_opt) = match connect_host(self.context, &host, &self.balancer).await {
+            let (mut stream, server_opt) = match connect_host(self.context, &host, Some(&self.balancer)).await {
                Ok(s) => s,
                Err(err) => {
                    error!("failed to CONNECT host: {}, error: {}", host, err);
@@ -153,7 +153,11 @@ impl HttpService {
        // Set keep-alive for connection with remote
        set_conn_keep_alive(version, req.headers_mut(), conn_keep_alive);

-        let mut res = match self.http_client.send_request(self.context, req, &self.balancer).await {
+        let mut res = match self
+            .http_client
+            .send_request(self.context, req, Some(&self.balancer))
+            .await
+        {
            Ok(resp) => resp,
            Err(HttpClientError::Hyper(e)) => return Err(e),
            Err(HttpClientError::Io(err)) => {
@@ -70,26 +70,20 @@ impl ProxyHttpStream {

static TLS_CONFIG: Lazy<Arc<ClientConfig>> = Lazy::new(|| {
    let mut config = ClientConfig::builder()
-        .with_root_certificates(match rustls_native_certs::load_native_certs() {
-            Ok(certs) => {
-                let mut store = RootCertStore::empty();
-                for cert in certs {
-                    if let Err(err) = store.add(cert) {
-                        warn!("failed to add cert (native), error: {}", err);
-                    }
-                }
-                store
-            }
-            Err(err) => {
-                warn!("failed to load native certs, {}, going to load from webpki-roots", err);
-
-                let mut store = RootCertStore::empty();
-                store.extend(webpki_roots::TLS_SERVER_ROOTS.iter().cloned());
-
-                store
-            }
+        .with_root_certificates({
+            // Load WebPKI roots (Mozilla's root certificates)
+            let mut store = RootCertStore::empty();
+            store.extend(webpki_roots::TLS_SERVER_ROOTS.iter().cloned());
+
+            if let Ok(certs) = rustls_native_certs::load_native_certs() {
+                for cert in certs {
+                    if let Err(err) = store.add(cert) {
+                        warn!("failed to add cert (native), error: {}", err);
+                    }
+                }
+            }
+
+            store
        })
        .with_no_client_auth();

@@ -2,7 +2,10 @@
//!
//! https://www.ietf.org/rfc/rfc2068.txt

-pub use self::server::{Http, HttpBuilder, HttpConnectionHandler};
+pub use self::{
+    http_client::{HttpClient, HttpClientError},
+    server::{Http, HttpBuilder, HttpConnectionHandler},
+};

mod http_client;
mod http_service;
@@ -4,7 +4,7 @@

use std::{io, net::SocketAddr, sync::Arc, time::Duration};

-use hyper::{server::conn::http1, service};
+use hyper::{body, server::conn::http1, service};
use log::{error, info, trace};
use shadowsocks::{config::ServerAddr, net::TcpListener};
use tokio::{
@@ -138,7 +138,7 @@ impl Http {
pub struct HttpConnectionHandler {
    context: Arc<ServiceContext>,
    balancer: PingBalancer,
-    http_client: HttpClient,
+    http_client: HttpClient<body::Incoming>,
}

impl HttpConnectionHandler {
@@ -118,20 +118,28 @@ pub fn check_keep_alive(version: Version, headers: &HeaderMap<HeaderValue>, chec
pub async fn connect_host(
    context: Arc<ServiceContext>,
    host: &Address,
-    balancer: &PingBalancer,
+    balancer: Option<&PingBalancer>,
) -> io::Result<(AutoProxyClientStream, Option<Arc<ServerIdent>>)> {
-    if balancer.is_empty() {
-        match AutoProxyClientStream::connect_bypassed(context, host).await {
-            Ok(s) => Ok((s, None)),
-            Err(err) => {
-                error!("failed to connect host {} bypassed, err: {}", host, err);
-                Err(err)
-            }
-        }
-    } else {
+    match balancer {
+        None => match AutoProxyClientStream::connect_bypassed(context, host).await {
+            Ok(s) => Ok((s, None)),
+            Err(err) => {
+                error!("failed to connect host {} bypassed, err: {}", host, err);
+                Err(err)
+            }
+        },
+        Some(balancer) if balancer.is_empty() => match AutoProxyClientStream::connect_bypassed(context, host).await {
+            Ok(s) => Ok((s, None)),
+            Err(err) => {
+                error!("failed to connect host {} bypassed, err: {}", host, err);
+                Err(err)
+            }
+        },
+        Some(balancer) => {
            let server = balancer.best_tcp_server();

-            match AutoProxyClientStream::connect_with_opts(context, server.as_ref(), host, server.connect_opts_ref()).await
+            match AutoProxyClientStream::connect_with_opts(context, server.as_ref(), host, server.connect_opts_ref())
+                .await
            {
                Ok(s) => Ok((s, Some(server))),
                Err(err) => {
@@ -146,3 +154,4 @@ pub async fn connect_host(
        }
    }
}
+}
@@ -18,7 +18,7 @@ use byte_string::ByteStr;
use futures::future;
use log::{debug, error, info, trace, warn};
use shadowsocks::{
-    config::Mode,
+    config::{Mode, ServerSource},
    plugin::{Plugin, PluginMode},
    relay::{
        socks5::Address,
@@ -721,10 +721,27 @@ impl PingBalancer {
    }

    /// Reset servers in load balancer. Designed for auto-reloading configuration file.
-    pub async fn reset_servers(&self, servers: Vec<ServerInstanceConfig>) -> io::Result<()> {
+    pub async fn reset_servers(
+        &self,
+        servers: Vec<ServerInstanceConfig>,
+        replace_server_sources: &[ServerSource],
+    ) -> io::Result<()> {
        let old_context = self.inner.context.load();

-        let servers = servers
+        let mut old_servers = old_context.servers.clone();
+        let mut idx = 0;
+        while idx < old_servers.len() {
+            let source_match = replace_server_sources
+                .iter()
+                .any(|src| *src == old_servers[idx].server_config().source());
+            if source_match {
+                old_servers.swap_remove(idx);
+            } else {
+                idx += 1;
+            }
+        }
+
+        let mut servers = servers
            .into_iter()
            .map(|s| {
                Arc::new(ServerIdent::new(
@@ -736,6 +753,16 @@ impl PingBalancer {
            })
            .collect::<Vec<Arc<ServerIdent>>>();

+        // Recreate a new instance for old servers (old server instance may still being held by clients)
+        for old_server in old_servers {
+            servers.push(Arc::new(ServerIdent::new(
+                old_context.context.clone(),
+                old_server.server_instance_config().clone(),
+                old_context.max_server_rtt,
+                old_context.check_interval * EXPECTED_CHECK_POINTS_IN_CHECK_WINDOW,
+            )));
+        }
+
        let (shared_context, task_abortable) = PingBalancerContext::new(
            servers,
            old_context.context.clone(),
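In the reworked reset_servers above, existing servers whose ServerSource matches one of replace_server_sources are dropped and replaced by the freshly loaded list, while servers from other sources are re-wrapped and kept. A hedged call-site sketch (the balancer value and the new server list are placeholders obtained elsewhere; module paths are assumed from the imports shown in this diff):

use shadowsocks::config::ServerSource;
use shadowsocks_service::{config::ServerInstanceConfig, local::loadbalancing::PingBalancer};

async fn refresh_online_servers(balancer: &PingBalancer, new_servers: Vec<ServerInstanceConfig>) -> std::io::Result<()> {
    // Replace only servers that previously came from SIP008 online config;
    // servers loaded from the config file or command line stay untouched.
    balancer.reset_servers(new_servers, &[ServerSource::OnlineConfig]).await
}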
@@ -114,6 +114,10 @@ impl ServerIdent
        &mut self.svr_cfg.config
    }

+    pub fn server_instance_config(&self) -> &ServerInstanceConfig {
+        &self.svr_cfg
+    }
+
    pub fn tcp_score(&self) -> &ServerScore {
        &self.tcp_score
    }
@@ -35,6 +35,8 @@ use self::dns::{Dns, DnsBuilder};
use self::fake_dns::{FakeDns, FakeDnsBuilder};
#[cfg(feature = "local-http")]
use self::http::{Http, HttpBuilder};
+#[cfg(feature = "local-online-config")]
+use self::online_config::{OnlineConfigService, OnlineConfigServiceBuilder};
#[cfg(feature = "local-redir")]
use self::redir::{Redir, RedirBuilder};
use self::socks::{Socks, SocksBuilder};
@@ -52,6 +54,8 @@ pub mod fake_dns;
pub mod http;
pub mod loadbalancing;
pub mod net;
+#[cfg(feature = "local-online-config")]
+pub mod online_config;
#[cfg(feature = "local-redir")]
pub mod redir;
pub mod socks;
@@ -107,6 +111,8 @@ pub struct Server {
    local_stat_addr: Option<LocalFlowStatAddress>,
    #[cfg(feature = "local-flow-stat")]
    flow_stat: Arc<FlowStat>,
+    #[cfg(feature = "local-online-config")]
+    online_config: Option<OnlineConfigService>,
}

impl Server {
@@ -117,6 +123,7 @@ impl Server {
        trace!("{:?}", config);

        // Warning for Stream Ciphers
+        // NOTE: This will only check servers in config.
        #[cfg(feature = "stream-cipher")]
        for inst in config.server.iter() {
            let server = &inst.config;
@@ -225,8 +232,8 @@ impl Server {
                balancer_builder.check_best_interval(intv);
            }

-            for server in config.server {
-                balancer_builder.add_server(server);
+            for server in &config.server {
+                balancer_builder.add_server(server.clone());
            }

            balancer_builder.build().await?
@@ -251,6 +258,21 @@ impl Server {
            local_stat_addr: config.local_stat_addr,
            #[cfg(feature = "local-flow-stat")]
            flow_stat: context.flow_stat(),
+            #[cfg(feature = "local-online-config")]
+            online_config: match config.online_config {
+                None => None,
+                Some(online_config) => {
+                    let mut builder = OnlineConfigServiceBuilder::new(
+                        Arc::new(context.clone()),
+                        online_config.config_url,
+                        balancer.clone(),
+                    );
+                    if let Some(update_interval) = online_config.update_interval {
+                        builder.set_update_interval(update_interval);
+                    }
+                    Some(builder.build().await?)
+                }
+            },
        };

        for local_instance in config.local {
@@ -567,6 +589,11 @@ impl Server {
            vfut.push(ServerHandle(tokio::spawn(report_fut)));
        }

+        #[cfg(feature = "local-online-config")]
+        if let Some(online_config) = self.online_config {
+            vfut.push(ServerHandle(tokio::spawn(online_config.run())));
+        }
+
        let (res, ..) = future::select_all(vfut).await;
        res
    }
@@ -0,0 +1,233 @@
+//! Online Config (SIP008)
+//!
+//! Online Configuration Delivery URL (https://shadowsocks.org/doc/sip008.html)
+
+use std::{
+    io,
+    sync::Arc,
+    time::{Duration, Instant},
+};
+
+use crate::{
+    config::{Config, ConfigType},
+    local::{context::ServiceContext, http::HttpClient, loadbalancing::PingBalancer},
+};
+
+use futures::StreamExt;
+use http_body_util::BodyExt;
+use log::{debug, error, trace, warn};
+use mime::Mime;
+use shadowsocks::config::ServerSource;
+use tokio::time;
+
+/// OnlineConfigService builder pattern
+pub struct OnlineConfigServiceBuilder {
+    context: Arc<ServiceContext>,
+    config_url: String,
+    balancer: PingBalancer,
+    config_update_interval: Duration,
+}
+
+impl OnlineConfigServiceBuilder {
+    /// Create a Builder
+    pub fn new(context: Arc<ServiceContext>, config_url: String, balancer: PingBalancer) -> OnlineConfigServiceBuilder {
+        OnlineConfigServiceBuilder {
+            context,
+            config_url,
+            balancer,
+            config_update_interval: Duration::from_secs(3600),
+        }
+    }
+
+    /// Set update interval. Default is 3600s
+    pub fn set_update_interval(&mut self, update_interval: Duration) {
+        self.config_update_interval = update_interval;
+    }
+
+    /// Build OnlineConfigService
+    pub async fn build(self) -> io::Result<OnlineConfigService> {
+        let mut service = OnlineConfigService {
+            context: self.context,
+            http_client: HttpClient::new(),
+            config_url: self.config_url,
+            config_update_interval: self.config_update_interval,
+            balancer: self.balancer,
+        };
+
+        // Run once after creation.
+        service.run_once().await?;
+
+        Ok(service)
+    }
+}
+
+pub struct OnlineConfigService {
+    context: Arc<ServiceContext>,
+    http_client: HttpClient<String>,
+    config_url: String,
+    config_update_interval: Duration,
+    balancer: PingBalancer,
+}
+
+impl OnlineConfigService {
+    async fn run_once(&mut self) -> io::Result<()> {
+        match time::timeout(Duration::from_secs(30), self.run_once_impl()).await {
+            Ok(o) => o,
+            Err(..) => {
+                error!("server-loader task timeout, url: {}", self.config_url);
+                Err(io::ErrorKind::TimedOut.into())
+            }
+        }
+    }
+
+    async fn run_once_impl(&mut self) -> io::Result<()> {
+        static SHADOWSOCKS_USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"));
+
+        let start_time = Instant::now();
+
+        let req = match hyper::Request::builder()
+            .header("User-Agent", SHADOWSOCKS_USER_AGENT)
+            .method("GET")
+            .uri(&self.config_url)
+            .body(String::new())
+        {
+            Ok(r) => r,
+            Err(err) => {
+                error!("server-loader task failed to make hyper::Request, error: {}", err);
+                return Err(io::Error::new(io::ErrorKind::Other, err));
+            }
+        };
+
+        let rsp = match self.http_client.send_request(self.context.clone(), req, None).await {
+            Ok(r) => r,
+            Err(err) => {
+                error!("server-loader task failed to get {}, error: {}", self.config_url, err);
+                return Err(io::Error::new(io::ErrorKind::Other, err));
+            }
+        };
+
+        let fetch_time = Instant::now();
+
+        // Content-Type: application/json; charset=utf-8
+        // mandatory in standard SIP008
+        match rsp.headers().get("Content-Type") {
+            Some(h) => match h.to_str() {
+                Ok(hstr) => match hstr.parse::<Mime>() {
+                    Ok(content_type) => {
+                        if content_type.type_() == mime::APPLICATION
+                            && content_type.subtype() == mime::JSON
+                            && content_type.get_param(mime::CHARSET) == Some(mime::UTF_8)
+                        {
+                            trace!("checked Content-Type: {:?}", h);
+                        } else {
+                            warn!(
+                                "Content-Type is not \"application/json; charset=utf-8\", which is mandatory in standard SIP008. found {:?}",
+                                h
+                            );
+                        }
+                    }
+                    Err(err) => {
+                        warn!("Content-Type parse failed, value: {:?}, error: {}", h, err);
+                    }
+                },
+                Err(..) => {
+                    warn!("Content-Type is not a UTF-8 string: {:?}", h);
+                }
+            },
+            None => {
+                warn!("missing Content-Type in SIP008 response from {}", self.config_url);
+            }
+        }
+
+        let mut collected_body = Vec::new();
+        if let Some(content_length) = rsp.headers().get(http::header::CONTENT_LENGTH) {
+            if let Ok(content_length) = content_length.to_str() {
+                if let Ok(content_length) = content_length.parse::<usize>() {
+                    collected_body.reserve(content_length);
+                }
+            }
+        };
+
+        let mut body = rsp.into_data_stream();
+        while let Some(data) = body.next().await {
+            match data {
+                Ok(data) => collected_body.extend_from_slice(&data),
+                Err(err) => {
+                    error!(
+                        "server-loader task failed to read body, url: {}, error: {}",
+                        self.config_url, err
+                    );
+                    return Err(io::Error::new(io::ErrorKind::Other, err));
+                }
+            }
+        }
+
+        let parsed_body = match String::from_utf8(collected_body) {
+            Ok(b) => b,
+            Err(..) => return Err(io::Error::new(io::ErrorKind::Other, "body contains non-utf8 bytes").into()),
+        };
+
+        let online_config = match Config::load_from_str(&parsed_body, ConfigType::OnlineConfig) {
+            Ok(c) => c,
+            Err(err) => {
+                error!(
+                    "server-loader task failed to load from url: {}, error: {}",
+                    self.config_url, err
+                );
+                return Err(io::Error::new(io::ErrorKind::Other, err).into());
+            }
+        };
+
+        if let Err(err) = online_config.check_integrity() {
+            error!(
+                "server-loader task failed to load from url: {}, error: {}",
+                self.config_url, err
+            );
+            return Err(io::Error::new(io::ErrorKind::Other, err).into());
+        }
+
+        let after_read_time = Instant::now();
+
+        // Merge with static servers
+        let server_len = online_config.server.len();
+
+        // Update into ping balancers
+        if let Err(err) = self
+            .balancer
+            .reset_servers(online_config.server, &[ServerSource::OnlineConfig])
+            .await
+        {
+            error!(
+                "server-loader task failed to reset balancer, url: {}, error: {}",
+                self.config_url, err
+            );
+            return Err(err);
+        };
+
+        let finish_time = Instant::now();
+
+        debug!("server-loader task finished loading {} servers from url: {}, fetch time: {:?}, read time: {:?}, load time: {:?}, total time: {:?}",
+            server_len,
+            self.config_url,
+            fetch_time - start_time,
+            after_read_time - fetch_time,
+            finish_time - after_read_time,
+            finish_time - start_time,
+        );
+
+        Ok(())
+    }
+
+    /// Start service loop
+    pub async fn run(mut self) -> io::Result<()> {
+        debug!(
+            "server-loader task started, url: {}, update interval: {:?}",
+            self.config_url, self.config_update_interval
+        );
+
+        loop {
+            time::sleep(self.config_update_interval).await;
+            let _ = self.run_once().await;
+        }
+    }
+}
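For reference, a hedged sketch of how the service added above is wired up. It mirrors the Server::new change earlier in this commit; the module paths are assumed from the imports shown in these diffs, the SIP008 URL is a placeholder, and obtaining a ServiceContext and PingBalancer is left to the caller.

use std::{sync::Arc, time::Duration};
use shadowsocks_service::local::{
    context::ServiceContext, loadbalancing::PingBalancer, online_config::OnlineConfigServiceBuilder,
};

async fn spawn_online_config(context: Arc<ServiceContext>, balancer: PingBalancer) -> std::io::Result<()> {
    let mut builder = OnlineConfigServiceBuilder::new(
        context,
        "https://example.com/sip008.json".to_owned(), // placeholder SIP008 URL
        balancer,
    );
    builder.set_update_interval(Duration::from_secs(1800)); // default is 3600s
    let service = builder.build().await?; // build() performs the first fetch immediately
    tokio::spawn(service.run()); // then refreshes on the configured interval
    Ok(())
}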
@@ -1,7 +1,5 @@
//! Common configuration utilities

-#[cfg(feature = "local-online-config")]
-use std::time::Duration;
use std::{
    env,
    fs::OpenOptions,
@@ -104,10 +102,6 @@ pub struct Config {

    /// Runtime configuration
    pub runtime: RuntimeConfig,
-
-    /// Online Configuration Delivery (SIP008)
-    #[cfg(feature = "local-online-config")]
-    pub online_config: Option<OnlineConfig>,
}

impl Config {
@@ -171,14 +165,6 @@ impl Config {
            config.runtime = nruntime;
        }

-        #[cfg(feature = "local-online-config")]
-        if let Some(online_config) = ssconfig.online_config {
-            config.online_config = Some(OnlineConfig {
-                config_url: online_config.config_url,
-                update_interval: online_config.update_interval.map(Duration::from_secs),
-            });
-        }
-
        Ok(config)
    }

@@ -272,24 +258,11 @@ pub struct RuntimeConfig {
    pub mode: RuntimeMode,
}

-/// OnlineConfiguration (SIP008)
-/// https://shadowsocks.org/doc/sip008.html
-#[cfg(feature = "local-online-config")]
-#[derive(Debug, Clone)]
-pub struct OnlineConfig {
-    /// SIP008 URL
-    pub config_url: String,
-    /// Update interval, 3600s by default
-    pub update_interval: Option<Duration>,
-}
-
#[derive(Deserialize)]
struct SSConfig {
    #[cfg(feature = "logging")]
    log: Option<SSLogConfig>,
    runtime: Option<SSRuntimeConfig>,
-    #[cfg(feature = "local-online-config")]
-    online_config: Option<SSOnlineConfig>,
}

#[cfg(feature = "logging")]
@@ -312,10 +285,3 @@ struct SSRuntimeConfig {
    worker_count: Option<usize>,
    mode: Option<String>,
}
-
-#[cfg(feature = "local-online-config")]
-#[derive(Deserialize, Debug, Default)]
-struct SSOnlineConfig {
-    config_url: String,
-    update_interval: Option<u64>,
-}
@@ -1,7 +1,6 @@
|
|||||||
//! Local server launchers
|
//! Local server launchers
|
||||||
|
|
||||||
use std::{
|
use std::{
|
||||||
fmt::{self, Display},
|
|
||||||
future::Future,
|
future::Future,
|
||||||
net::IpAddr,
|
net::IpAddr,
|
||||||
path::PathBuf,
|
path::PathBuf,
|
||||||
@@ -11,7 +10,7 @@ use std::{
|
|||||||
};
|
};
|
||||||
|
|
||||||
use clap::{builder::PossibleValuesParser, Arg, ArgAction, ArgGroup, ArgMatches, Command, ValueHint};
|
use clap::{builder::PossibleValuesParser, Arg, ArgAction, ArgGroup, ArgMatches, Command, ValueHint};
|
||||||
use futures::future::{self, BoxFuture, FutureExt};
|
use futures::future::{self, FutureExt};
|
||||||
use log::{error, info, trace};
|
use log::{error, info, trace};
|
||||||
use tokio::{
|
use tokio::{
|
||||||
self,
|
self,
|
||||||
@@ -25,12 +24,7 @@ use shadowsocks_service::shadowsocks::relay::socks5::Address;
|
|||||||
use shadowsocks_service::{
|
use shadowsocks_service::{
|
||||||
acl::AccessControl,
|
acl::AccessControl,
|
||||||
config::{
|
config::{
|
||||||
read_variable_field_value,
|
read_variable_field_value, Config, ConfigType, LocalConfig, LocalInstanceConfig, ProtocolType,
|
||||||
Config,
|
|
||||||
ConfigType,
|
|
||||||
LocalConfig,
|
|
||||||
LocalInstanceConfig,
|
|
||||||
ProtocolType,
|
|
||||||
ServerInstanceConfig,
|
ServerInstanceConfig,
|
||||||
},
|
},
|
||||||
local::{loadbalancing::PingBalancer, Server},
|
local::{loadbalancing::PingBalancer, Server},
|
||||||
@@ -45,8 +39,7 @@ use shadowsocks_service::{
|
|||||||
use crate::logging;
|
use crate::logging;
|
||||||
use crate::{
|
use crate::{
|
||||||
config::{Config as ServiceConfig, RuntimeMode},
|
config::{Config as ServiceConfig, RuntimeMode},
|
||||||
monitor,
|
monitor, vparser,
|
||||||
vparser,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#[cfg(feature = "local-dns")]
|
#[cfg(feature = "local-dns")]
|
||||||
@@ -576,7 +569,7 @@ pub fn define_command_line_options(mut app: Command) -> Command {
|
|||||||
/// Create `Runtime` and `main` entry
|
/// Create `Runtime` and `main` entry
|
||||||
pub fn create(matches: &ArgMatches) -> Result<(Runtime, impl Future<Output = ExitCode>), ExitCode> {
|
pub fn create(matches: &ArgMatches) -> Result<(Runtime, impl Future<Output = ExitCode>), ExitCode> {
|
||||||
#[cfg_attr(not(feature = "local-online-config"), allow(unused_mut))]
|
#[cfg_attr(not(feature = "local-online-config"), allow(unused_mut))]
|
||||||
let (mut config, service_config, runtime) = {
|
let (config, _, runtime) = {
|
||||||
let config_path_opt = matches.get_one::<PathBuf>("CONFIG").cloned().or_else(|| {
|
let config_path_opt = matches.get_one::<PathBuf>("CONFIG").cloned().or_else(|| {
|
||||||
if !matches.contains_id("SERVER_CONFIG") {
|
if !matches.contains_id("SERVER_CONFIG") {
|
||||||
match crate::config::get_default_config_path("local.json") {
|
match crate::config::get_default_config_path("local.json") {
|
||||||
@@ -930,10 +923,10 @@ pub fn create(matches: &ArgMatches) -> Result<(Runtime, impl Future<Output = Exi
|
|||||||
|
|
||||||
#[cfg(feature = "local-online-config")]
|
#[cfg(feature = "local-online-config")]
|
||||||
if let Some(online_config_url) = matches.get_one::<String>("ONLINE_CONFIG_URL") {
|
if let Some(online_config_url) = matches.get_one::<String>("ONLINE_CONFIG_URL") {
|
||||||
use crate::config::OnlineConfig;
|
use shadowsocks_service::config::OnlineConfig;
|
||||||
|
|
||||||
let online_config_update_interval = matches.get_one::<u64>("ONLINE_CONFIG_UPDATE_INTERVAL").cloned();
|
let online_config_update_interval = matches.get_one::<u64>("ONLINE_CONFIG_UPDATE_INTERVAL").cloned();
|
||||||
service_config.online_config = Some(OnlineConfig {
|
config.online_config = Some(OnlineConfig {
|
||||||
config_url: online_config_url.clone(),
|
config_url: online_config_url.clone(),
|
||||||
update_interval: online_config_update_interval.map(Duration::from_secs),
|
update_interval: online_config_update_interval.map(Duration::from_secs),
|
||||||
});
|
});
|
||||||
@@ -991,45 +984,17 @@ pub fn create(matches: &ArgMatches) -> Result<(Runtime, impl Future<Output = Exi
|
|||||||
let main_fut = async move {
|
let main_fut = async move {
|
||||||
let config_path = config.config_path.clone();
|
let config_path = config.config_path.clone();
|
||||||
|
|
||||||
let mut static_servers = Vec::new();
|
|
||||||
for server in config.server.iter() {
|
|
||||||
match server.config.source() {
|
|
||||||
ServerSource::Default | ServerSource::CommandLine => {
|
|
||||||
static_servers.push(server.clone());
|
|
||||||
}
|
|
||||||
_ => {}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(not(feature = "local-online-config"))]
|
|
||||||
let _ = service_config;
|
|
||||||
|
|
||||||
// Fetch servers from remote for the first time
|
|
||||||
#[cfg(feature = "local-online-config")]
|
|
||||||
if let Some(ref online_config) = service_config.online_config {
|
|
||||||
if let Ok(mut servers) = get_online_config_servers(&online_config.config_url).await {
|
|
||||||
config.server.append(&mut servers);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Double check
|
|
||||||
if config.server.is_empty() {
|
|
||||||
eprintln!("local server cannot run without any valid servers");
|
|
||||||
return crate::EXIT_CODE_LOAD_CONFIG_FAILURE.into();
|
|
||||||
}
|
|
||||||
|
|
||||||
let instance = Server::new(config).await.expect("create local");
|
let instance = Server::new(config).await.expect("create local");
|
||||||
|
|
||||||
let reload_task = ServerReloader {
|
let reload_task = match config_path {
|
||||||
config_path,
|
Some(config_path) => ServerReloader {
|
||||||
|
config_path: config_path.clone(),
|
||||||
balancer: instance.server_balancer().clone(),
|
balancer: instance.server_balancer().clone(),
|
||||||
static_servers,
|
|
||||||
#[cfg(feature = "local-online-config")]
|
|
||||||
online_config_url: service_config.online_config.as_ref().map(|c| c.config_url.clone()),
|
|
||||||
#[cfg(feature = "local-online-config")]
|
|
||||||
online_config_update_interval: service_config.online_config.as_ref().and_then(|c| c.update_interval),
|
|
||||||
}
|
}
|
||||||
.launch_reload_server_task();
|
.launch_reload_server_task()
|
||||||
|
.boxed(),
|
||||||
|
None => future::pending().boxed(),
|
||||||
|
};
|
||||||
|
|
||||||
let abort_signal = monitor::create_signal_monitor();
|
let abort_signal = monitor::create_signal_monitor();
|
||||||
let server = instance.run();
|
let server = instance.run();
|
||||||
@@ -1083,110 +1048,8 @@ pub fn main(matches: &ArgMatches) -> ExitCode {
 }

 struct ServerReloader {
-    config_path: Option<PathBuf>,
-    static_servers: Vec<ServerInstanceConfig>,
+    config_path: PathBuf,
     balancer: PingBalancer,
-    #[cfg(feature = "local-online-config")]
-    online_config_url: Option<String>,
-    #[cfg(feature = "local-online-config")]
-    online_config_update_interval: Option<Duration>,
-}
-
-#[cfg(feature = "local-online-config")]
-async fn get_online_config_servers(
-    online_config_url: &str,
-) -> Result<Vec<ServerInstanceConfig>, Box<dyn std::error::Error>> {
-    use log::warn;
-    use mime::Mime;
-    use reqwest::{redirect::Policy, Client};
-
-    #[inline]
-    async fn get_online_config(online_config_url: &str) -> reqwest::Result<String> {
-        static SHADOWSOCKS_USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"));
-
-        let client = Client::builder()
-            .user_agent(SHADOWSOCKS_USER_AGENT)
-            .deflate(true)
-            .gzip(true)
-            .brotli(true)
-            .zstd(true)
-            .redirect(Policy::limited(3))
-            .timeout(Duration::from_secs(30))
-            .build()?;
-
-        let response = client.get(online_config_url).send().await?;
-        if response.url().scheme() != "https" {
-            warn!(
-                "SIP008 suggests configuration URL should use https, but current URL is {}",
-                response.url().scheme()
-            );
-        }
-
-        // Content-Type: application/json; charset=utf-8
-        // mandatory in standard SIP008
-        match response.headers().get("Content-Type") {
-            Some(h) => match h.to_str() {
-                Ok(hstr) => match hstr.parse::<Mime>() {
-                    Ok(content_type) => {
-                        if content_type.type_() == mime::APPLICATION
-                            && content_type.subtype() == mime::JSON
-                            && content_type.get_param(mime::CHARSET) == Some(mime::UTF_8)
-                        {
-                            trace!("checked Content-Type: {:?}", h);
-                        } else {
-                            warn!(
-                                "Content-Type is not \"application/json; charset=utf-8\", which is mandatory in standard SIP008. found {:?}",
-                                h
-                            );
-                        }
-                    }
-                    Err(err) => {
-                        warn!("Content-Type parse failed, value: {:?}, error: {}", h, err);
-                    }
-                },
-                Err(..) => {
-                    warn!("Content-Type is not a UTF-8 string: {:?}", h);
-                }
-            },
-            None => {
-                warn!("missing Content-Type in SIP008 response from {}", online_config_url);
-            }
-        }
-
-        response.text().await
-    }
-
-    let body = match get_online_config(online_config_url).await {
-        Ok(b) => b,
-        Err(err) => {
-            error!(
-                "server-loader task failed to load from url: {}, error: {:?}",
-                online_config_url, err
-            );
-            return Err(Box::new(err));
-        }
-    };
-
-    let online_config = match Config::load_from_str(&body, ConfigType::OnlineConfig) {
-        Ok(c) => c,
-        Err(err) => {
-            error!(
-                "server-loader task failed to load from url: {}, error: {}",
-                online_config_url, err
-            );
-            return Err(Box::new(err));
-        }
-    };
-
-    if let Err(err) = online_config.check_integrity() {
-        error!(
-            "server-loader task failed to load from url: {}, error: {}",
-            online_config_url, err
-        );
-        return Err(Box::new(err));
-    }
-
-    Ok(online_config.server)
 }

 impl ServerReloader {
@@ -1194,59 +1057,28 @@ impl ServerReloader {
     async fn run_once(&self) -> Result<(), Box<dyn std::error::Error>> {
         let start_time = Instant::now();

-        let mut servers = self.static_servers.clone();
-
         // Load servers from source
-        if let Some(ref config_path) = self.config_path {
-            let mut source_config = match Config::load_from_file(config_path, ConfigType::Local) {
+        let source_config = match Config::load_from_file(&self.config_path, ConfigType::Local) {
             Ok(c) => c,
             Err(err) => {
                 error!(
                     "server-loader task failed to load from file: {}, error: {}",
-                    config_path.display(),
+                    self.config_path.display(),
                     err
                 );
                 return Err(Box::new(err));
             }
         };
-            servers.append(&mut source_config.server);
-        }
-
-        // Load servers from online-config (SIP008)
-        #[cfg(feature = "local-online-config")]
-        if let Some(ref online_config_url) = self.online_config_url {
-            let mut online_servers = get_online_config_servers(online_config_url).await?;
-            servers.append(&mut online_servers);
-        }
-
-        let server_len = servers.len();
-
-        struct ConfigDisplay<'a>(&'a ServerReloader);
-        impl Display for ConfigDisplay<'_> {
-            #[cfg_attr(not(feature = "local-online-config"), allow(unused_assignments, unused_variables))]
-            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-                let mut is_first = true;
-
-                if let Some(ref config_path) = self.0.config_path {
-                    config_path.display().fmt(f)?;
-                    is_first = false;
-                }
-
-                #[cfg(feature = "local-online-config")]
-                if let Some(ref online_config_url) = self.0.online_config_url {
-                    if !is_first {
-                        f.write_str(", ")?;
-                        f.write_str(online_config_url)?;
-                    }
-                }
-
-                Ok(())
-            }
-        }
+        let server_len = source_config.server.len();

         let fetch_end_time = Instant::now();

-        if let Err(err) = self.balancer.reset_servers(servers).await {
+        if let Err(err) = self
+            .balancer
+            .reset_servers(source_config.server, &[ServerSource::Configuration])
+            .await
+        {
             error!("server-loader task {} servers but found error: {}", server_len, err);
             return Err(Box::new(err));
         }
@@ -1255,7 +1087,7 @@ impl ServerReloader {
         info!(
             "server-loader task load from {} with {} servers, fetch costs: {:?}, total costs: {:?}",
-            ConfigDisplay(self),
+            self.config_path.display(),
             server_len,
             fetch_end_time - start_time,
             total_end_time - start_time,
@@ -1278,53 +1110,16 @@ impl ServerReloader {
         }
     }

-    #[cfg(feature = "local-online-config")]
-    async fn launch_online_reload_server_task(self: Arc<Self>) {
-        use log::debug;
-        use tokio::time;
-
-        let update_interval = self
-            .online_config_update_interval
-            .unwrap_or(Duration::from_secs(60 * 60));
-
-        debug!("server-loader task updating in interval {:?}", update_interval);
-
-        loop {
-            time::sleep(update_interval).await;
-            let _ = self.run_once().await;
-        }
-    }
-
+    #[cfg(unix)]
     async fn launch_reload_server_task(self) {
         let arc_self = Arc::new(self);
-        #[allow(unused_mut)]
-        let mut futs: Vec<BoxFuture<()>> = Vec::new();
-
-        #[cfg(unix)]
-        {
-            #[cfg_attr(not(feature = "local-online-config"), allow(unused_mut))]
-            let mut has_things_to_do = arc_self.config_path.is_some();
-            #[cfg(feature = "local-online-config")]
-            {
-                has_things_to_do = has_things_to_do || arc_self.online_config_url.is_some();
-            }
-
-            if has_things_to_do {
-                futs.push(arc_self.clone().launch_signal_reload_server_task().boxed());
-            }
-        }
-
-        #[cfg(feature = "local-online-config")]
-        if arc_self.online_config_url.is_some() {
-            futs.push(arc_self.clone().launch_online_reload_server_task().boxed());
-        }
-
-        if !futs.is_empty() {
-            future::join_all(futs.into_iter()).await;
-        }
-
-        drop(arc_self);
+        arc_self.launch_signal_reload_server_task().await
+    }
+
+    #[cfg(windows)]
+    async fn launch_reload_server_task(self) {
+        let _ = self.config_path;
+        let _ = self.balancer;
     }
 }
@@ -4,7 +4,7 @@
 use std::net::{IpAddr, SocketAddr};

-#[cfg(feature = "local-tun")]
+#[cfg(any(feature = "local-tun", feature = "local-fake-dns"))]
 use ipnet::IpNet;
 #[cfg(feature = "local-redir")]
 use shadowsocks_service::config::RedirType;
@@ -56,7 +56,7 @@ pub fn parse_server_url(v: &str) -> Result<ServerConfig, String> {
     }
 }

-#[cfg(feature = "local-tun")]
+#[cfg(any(feature = "local-tun", feature = "local-fake-dns"))]
 pub fn parse_ipnet(v: &str) -> Result<IpNet, String> {
     match v.parse::<IpNet>() {
         Err(..) => Err("should be a CIDR address like 10.1.2.3/24".to_owned()),
@@ -111,6 +111,7 @@ func New(options Options) (*Box, error) {
            ctx,
            router,
            logFactory.NewLogger(F.ToString("inbound/", inboundOptions.Type, "[", tag, "]")),
+           tag,
            inboundOptions,
            options.PlatformInterface,
        )
@@ -32,6 +32,12 @@ const (
 func ProxyDisplayName(proxyType string) string {
    switch proxyType {
+   case TypeTun:
+       return "TUN"
+   case TypeRedirect:
+       return "Redirect"
+   case TypeTProxy:
+       return "TProxy"
    case TypeDirect:
        return "Direct"
    case TypeBlock:
@@ -42,6 +48,8 @@ func ProxyDisplayName(proxyType string) string {
        return "SOCKS"
    case TypeHTTP:
        return "HTTP"
+   case TypeMixed:
+       return "Mixed"
    case TypeShadowsocks:
        return "Shadowsocks"
    case TypeVMess:
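For reference, the name mapping extended above can be exercised directly from the constant package. This is a minimal sketch assuming only the exported ProxyDisplayName function and the Type* string constants that appear in this diff:

package main

import (
    "fmt"

    C "github.com/sagernet/sing-box/constant"
)

func main() {
    // The newly handled inbound types now render human-readable labels.
    for _, proxyType := range []string{C.TypeTun, C.TypeRedirect, C.TypeTProxy, C.TypeMixed} {
        fmt.Println(proxyType, "->", C.ProxyDisplayName(proxyType))
    }
}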
@@ -2,8 +2,9 @@
 icon: material/alert-decagram
 ---

-#### 1.10.0-alpha.11
+#### 1.10.0-alpha.12

+* Fix auto-redirect not configuring nftables forward chain correctly
 * Fixes and improvements

 ### 1.9.3
@@ -14,6 +14,7 @@ import (
    "github.com/go-chi/chi/v5"
    "github.com/go-chi/render"
+   "github.com/gofrs/uuid/v5"
 )

 func connectionRouter(router adapter.Router, trafficManager *trafficontrol.Manager) http.Handler {
@@ -76,10 +77,10 @@ func getConnections(trafficManager *trafficontrol.Manager) func(w http.ResponseW
 func closeConnection(trafficManager *trafficontrol.Manager) func(w http.ResponseWriter, r *http.Request) {
    return func(w http.ResponseWriter, r *http.Request) {
-       id := chi.URLParam(r, "id")
+       id := uuid.FromStringOrNil(chi.URLParam(r, "id"))
        snapshot := trafficManager.Snapshot()
        for _, c := range snapshot.Connections {
-           if id == c.ID() {
+           if id == c.Metadata().ID {
                c.Close()
                break
            }
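The handler above now compares UUID values rather than raw strings. uuid.FromStringOrNil, from the github.com/gofrs/uuid/v5 module imported in the hunk, returns uuid.Nil instead of an error on malformed input, so an invalid id path parameter simply matches no connection. A small standalone sketch of that behavior:

package main

import (
    "fmt"

    "github.com/gofrs/uuid/v5"
)

func main() {
    valid := uuid.FromStringOrNil("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
    invalid := uuid.FromStringOrNil("not-a-uuid") // parse failure yields uuid.Nil, not an error
    fmt.Println(valid)
    fmt.Println(invalid == uuid.Nil) // true
}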
@@ -19,7 +19,6 @@ import (
    "github.com/sagernet/sing-box/option"
    "github.com/sagernet/sing/common"
    E "github.com/sagernet/sing/common/exceptions"
-   F "github.com/sagernet/sing/common/format"
    "github.com/sagernet/sing/common/json"
    N "github.com/sagernet/sing/common/network"
    "github.com/sagernet/sing/service"
@@ -218,58 +217,15 @@ func (s *Server) TrafficManager() *trafficontrol.Manager {
 }

 func (s *Server) RoutedConnection(ctx context.Context, conn net.Conn, metadata adapter.InboundContext, matchedRule adapter.Rule) (net.Conn, adapter.Tracker) {
-   tracker := trafficontrol.NewTCPTracker(conn, s.trafficManager, castMetadata(metadata), s.router, matchedRule)
+   tracker := trafficontrol.NewTCPTracker(conn, s.trafficManager, metadata, s.router, matchedRule)
    return tracker, tracker
 }

 func (s *Server) RoutedPacketConnection(ctx context.Context, conn N.PacketConn, metadata adapter.InboundContext, matchedRule adapter.Rule) (N.PacketConn, adapter.Tracker) {
-   tracker := trafficontrol.NewUDPTracker(conn, s.trafficManager, castMetadata(metadata), s.router, matchedRule)
+   tracker := trafficontrol.NewUDPTracker(conn, s.trafficManager, metadata, s.router, matchedRule)
    return tracker, tracker
 }

-func castMetadata(metadata adapter.InboundContext) trafficontrol.Metadata {
-   var inbound string
-   if metadata.Inbound != "" {
-       inbound = metadata.InboundType + "/" + metadata.Inbound
-   } else {
-       inbound = metadata.InboundType
-   }
-   var domain string
-   if metadata.Domain != "" {
-       domain = metadata.Domain
-   } else {
-       domain = metadata.Destination.Fqdn
-   }
-   var processPath string
-   if metadata.ProcessInfo != nil {
-       if metadata.ProcessInfo.ProcessPath != "" {
-           processPath = metadata.ProcessInfo.ProcessPath
-       } else if metadata.ProcessInfo.PackageName != "" {
-           processPath = metadata.ProcessInfo.PackageName
-       }
-       if processPath == "" {
-           if metadata.ProcessInfo.UserId != -1 {
-               processPath = F.ToString(metadata.ProcessInfo.UserId)
-           }
-       } else if metadata.ProcessInfo.User != "" {
-           processPath = F.ToString(processPath, " (", metadata.ProcessInfo.User, ")")
-       } else if metadata.ProcessInfo.UserId != -1 {
-           processPath = F.ToString(processPath, " (", metadata.ProcessInfo.UserId, ")")
-       }
-   }
-   return trafficontrol.Metadata{
-       NetWork:     metadata.Network,
-       Type:        inbound,
-       SrcIP:       metadata.Source.Addr,
-       DstIP:       metadata.Destination.Addr,
-       SrcPort:     F.ToString(metadata.Source.Port),
-       DstPort:     F.ToString(metadata.Destination.Port),
-       Host:        domain,
-       DNSMode:     "normal",
-       ProcessPath: processPath,
-   }
-}
-
 func authentication(serverSecret string) func(next http.Handler) http.Handler {
    return func(next http.Handler) http.Handler {
        fn := func(w http.ResponseWriter, r *http.Request) {
@@ -2,10 +2,17 @@ package trafficontrol
 import (
    "runtime"
+   "sync"
    "time"

+   C "github.com/sagernet/sing-box/constant"
    "github.com/sagernet/sing-box/experimental/clashapi/compatible"
+   "github.com/sagernet/sing/common"
    "github.com/sagernet/sing/common/atomic"
+   "github.com/sagernet/sing/common/json"
+   "github.com/sagernet/sing/common/x/list"
+
+   "github.com/gofrs/uuid/v5"
 )

 type Manager struct {
@@ -16,7 +23,9 @@ type Manager struct {
    uploadTotal   atomic.Int64
    downloadTotal atomic.Int64

-   connections compatible.Map[string, tracker]
+   connections             compatible.Map[uuid.UUID, Tracker]
+   closedConnectionsAccess sync.Mutex
+   closedConnections       list.List[TrackerMetadata]
    ticker *time.Ticker
    done   chan struct{}
    // process *process.Process
@@ -33,12 +42,22 @@ func NewManager() *Manager {
    return manager
 }

-func (m *Manager) Join(c tracker) {
-   m.connections.Store(c.ID(), c)
+func (m *Manager) Join(c Tracker) {
+   m.connections.Store(c.Metadata().ID, c)
 }

-func (m *Manager) Leave(c tracker) {
-   m.connections.Delete(c.ID())
+func (m *Manager) Leave(c Tracker) {
+   metadata := c.Metadata()
+   _, loaded := m.connections.LoadAndDelete(metadata.ID)
+   if loaded {
+       metadata.ClosedAt = time.Now()
+       m.closedConnectionsAccess.Lock()
+       defer m.closedConnectionsAccess.Unlock()
+       if m.closedConnections.Len() >= 1000 {
+           m.closedConnections.PopFront()
+       }
+       m.closedConnections.PushBack(metadata)
+   }
 }

 func (m *Manager) PushUploaded(size int64) {
@@ -59,14 +78,39 @@ func (m *Manager) Total() (up int64, down int64) {
    return m.uploadTotal.Load(), m.downloadTotal.Load()
 }

-func (m *Manager) Connections() int {
+func (m *Manager) ConnectionsLen() int {
    return m.connections.Len()
 }

+func (m *Manager) Connections() []TrackerMetadata {
+   var connections []TrackerMetadata
+   m.connections.Range(func(_ uuid.UUID, value Tracker) bool {
+       connections = append(connections, value.Metadata())
+       return true
+   })
+   return connections
+}
+
+func (m *Manager) ClosedConnections() []TrackerMetadata {
+   m.closedConnectionsAccess.Lock()
+   defer m.closedConnectionsAccess.Unlock()
+   return m.closedConnections.Array()
+}
+
+func (m *Manager) Connection(id uuid.UUID) Tracker {
+   connection, loaded := m.connections.Load(id)
+   if !loaded {
+       return nil
+   }
+   return connection
+}
+
 func (m *Manager) Snapshot() *Snapshot {
-   var connections []tracker
-   m.connections.Range(func(_ string, value tracker) bool {
-       connections = append(connections, value)
+   var connections []Tracker
+   m.connections.Range(func(_ uuid.UUID, value Tracker) bool {
+       if value.Metadata().OutboundType != C.TypeDNS {
+           connections = append(connections, value)
+       }
        return true
    })
@@ -75,8 +119,8 @@ func (m *Manager) Snapshot() *Snapshot {
    m.memory = memStats.StackInuse + memStats.HeapInuse + memStats.HeapIdle - memStats.HeapReleased

    return &Snapshot{
-       UploadTotal:   m.uploadTotal.Load(),
-       DownloadTotal: m.downloadTotal.Load(),
+       Upload:      m.uploadTotal.Load(),
+       Download:    m.downloadTotal.Load(),
        Connections: connections,
        Memory:      m.memory,
    }
@@ -114,8 +158,17 @@ func (m *Manager) Close() error {
 }

 type Snapshot struct {
-   DownloadTotal int64     `json:"downloadTotal"`
-   UploadTotal   int64     `json:"uploadTotal"`
-   Connections   []tracker `json:"connections"`
-   Memory        uint64    `json:"memory"`
+   Download    int64
+   Upload      int64
+   Connections []Tracker
+   Memory      uint64
+}
+
+func (s *Snapshot) MarshalJSON() ([]byte, error) {
+   return json.Marshal(map[string]any{
+       "downloadTotal": s.Download,
+       "uploadTotal":   s.Upload,
+       "connections":   common.Map(s.Connections, func(t Tracker) TrackerMetadata { return t.Metadata() }),
+       "memory":        s.Memory,
+   })
 }
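Taken together, the Manager now tracks live connections by UUID, keeps a bounded history of closed ones, and supports lookup by id. The sketch below shows how that surface could be consumed; it is written against the methods introduced in this hunk, and the package and helper name are hypothetical, not code from the commit:

package sketch

import (
    "log"

    "github.com/gofrs/uuid/v5"
    "github.com/sagernet/sing-box/experimental/clashapi/trafficontrol"
)

// dumpAndClose is a hypothetical helper: list current and recently closed
// connections, then close one connection selected by id.
func dumpAndClose(manager *trafficontrol.Manager, id uuid.UUID) {
    for _, metadata := range manager.Connections() { // active connections as TrackerMetadata
        log.Println("open:", metadata.ID, metadata.Outbound, metadata.Upload.Load(), metadata.Download.Load())
    }
    for _, metadata := range manager.ClosedConnections() { // bounded to the most recent 1000
        log.Println("closed:", metadata.ID, "at", metadata.ClosedAt)
    }
    if tracker := manager.Connection(id); tracker != nil {
        tracker.Close() // Leave() moves its metadata into the closed list
    }
}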
@@ -2,97 +2,135 @@ package trafficontrol
 import (
    "net"
-   "net/netip"
    "time"

    "github.com/sagernet/sing-box/adapter"
    "github.com/sagernet/sing/common"
    "github.com/sagernet/sing/common/atomic"
    "github.com/sagernet/sing/common/bufio"
+   F "github.com/sagernet/sing/common/format"
    "github.com/sagernet/sing/common/json"
    N "github.com/sagernet/sing/common/network"

    "github.com/gofrs/uuid/v5"
 )

-type Metadata struct {
-   NetWork     string     `json:"network"`
-   Type        string     `json:"type"`
-   SrcIP       netip.Addr `json:"sourceIP"`
-   DstIP       netip.Addr `json:"destinationIP"`
-   SrcPort     string     `json:"sourcePort"`
-   DstPort     string     `json:"destinationPort"`
-   Host        string     `json:"host"`
-   DNSMode     string     `json:"dnsMode"`
-   ProcessPath string     `json:"processPath"`
+type TrackerMetadata struct {
+   ID           uuid.UUID
+   Metadata     adapter.InboundContext
+   CreatedAt    time.Time
+   ClosedAt     time.Time
+   Upload       *atomic.Int64
+   Download     *atomic.Int64
+   Chain        []string
+   Rule         adapter.Rule
+   Outbound     string
+   OutboundType string
 }

-type tracker interface {
-   ID() string
-   Close() error
-   Leave()
-}
-
-type trackerInfo struct {
-   UUID          uuid.UUID     `json:"id"`
-   Metadata      Metadata      `json:"metadata"`
-   UploadTotal   *atomic.Int64 `json:"upload"`
-   DownloadTotal *atomic.Int64 `json:"download"`
-   Start         time.Time     `json:"start"`
-   Chain         []string      `json:"chains"`
-   Rule          string        `json:"rule"`
-   RulePayload   string        `json:"rulePayload"`
-}
-
-func (t trackerInfo) MarshalJSON() ([]byte, error) {
+func (t TrackerMetadata) MarshalJSON() ([]byte, error) {
+   var inbound string
+   if t.Metadata.Inbound != "" {
+       inbound = t.Metadata.InboundType + "/" + t.Metadata.Inbound
+   } else {
+       inbound = t.Metadata.InboundType
+   }
+   var domain string
+   if t.Metadata.Domain != "" {
+       domain = t.Metadata.Domain
+   } else {
+       domain = t.Metadata.Destination.Fqdn
+   }
+   var processPath string
+   if t.Metadata.ProcessInfo != nil {
+       if t.Metadata.ProcessInfo.ProcessPath != "" {
+           processPath = t.Metadata.ProcessInfo.ProcessPath
+       } else if t.Metadata.ProcessInfo.PackageName != "" {
+           processPath = t.Metadata.ProcessInfo.PackageName
+       }
+       if processPath == "" {
+           if t.Metadata.ProcessInfo.UserId != -1 {
+               processPath = F.ToString(t.Metadata.ProcessInfo.UserId)
+           }
+       } else if t.Metadata.ProcessInfo.User != "" {
+           processPath = F.ToString(processPath, " (", t.Metadata.ProcessInfo.User, ")")
+       } else if t.Metadata.ProcessInfo.UserId != -1 {
+           processPath = F.ToString(processPath, " (", t.Metadata.ProcessInfo.UserId, ")")
+       }
+   }
+   var rule string
+   if t.Rule != nil {
+       rule = F.ToString(t.Rule, " => ", t.Rule.Outbound())
+   } else {
+       rule = "final"
+   }
    return json.Marshal(map[string]any{
-       "id":          t.UUID.String(),
-       "metadata":    t.Metadata,
-       "upload":      t.UploadTotal.Load(),
-       "download":    t.DownloadTotal.Load(),
-       "start":       t.Start,
+       "id": t.ID,
+       "metadata": map[string]any{
+           "network":         t.Metadata.Network,
+           "type":            inbound,
+           "sourceIP":        t.Metadata.Source.Addr,
+           "destinationIP":   t.Metadata.Destination.Addr,
+           "sourcePort":      t.Metadata.Source.Port,
+           "destinationPort": t.Metadata.Destination.Port,
+           "host":            domain,
+           "dnsMode":         "normal",
+           "processPath":     processPath,
+       },
+       "upload":      t.Upload.Load(),
+       "download":    t.Download.Load(),
+       "start":       t.CreatedAt,
        "chains":      t.Chain,
-       "rule":        t.Rule,
-       "rulePayload": t.RulePayload,
+       "rule":        rule,
+       "rulePayload": "",
    })
 }

-type tcpTracker struct {
-   N.ExtendedConn `json:"-"`
-   *trackerInfo
+type Tracker interface {
+   adapter.Tracker
+   Metadata() TrackerMetadata
+   Close() error
+}
+
+type TCPConn struct {
+   N.ExtendedConn
+   metadata TrackerMetadata
    manager *Manager
 }

-func (tt *tcpTracker) ID() string {
-   return tt.UUID.String()
+func (tt *TCPConn) Metadata() TrackerMetadata {
+   return tt.metadata
 }

-func (tt *tcpTracker) Close() error {
+func (tt *TCPConn) Close() error {
    tt.manager.Leave(tt)
    return tt.ExtendedConn.Close()
 }

-func (tt *tcpTracker) Leave() {
+func (tt *TCPConn) Leave() {
    tt.manager.Leave(tt)
 }

-func (tt *tcpTracker) Upstream() any {
+func (tt *TCPConn) Upstream() any {
    return tt.ExtendedConn
 }

-func (tt *tcpTracker) ReaderReplaceable() bool {
+func (tt *TCPConn) ReaderReplaceable() bool {
    return true
 }

-func (tt *tcpTracker) WriterReplaceable() bool {
+func (tt *TCPConn) WriterReplaceable() bool {
    return true
 }

-func NewTCPTracker(conn net.Conn, manager *Manager, metadata Metadata, router adapter.Router, rule adapter.Rule) *tcpTracker {
-   uuid, _ := uuid.NewV4()
-   var chain []string
-   var next string
+func NewTCPTracker(conn net.Conn, manager *Manager, metadata adapter.InboundContext, router adapter.Router, rule adapter.Rule) *TCPConn {
+   id, _ := uuid.NewV4()
+   var (
+       chain        []string
+       next         string
+       outbound     string
+       outboundType string
+   )
    if rule == nil {
        if defaultOutbound, err := router.DefaultOutbound(N.NetworkTCP); err == nil {
            next = defaultOutbound.Tag()
@@ -106,17 +144,17 @@ func NewTCPTracker(conn net.Conn, manager *Manager, metadata Metadata, router ad
        if !loaded {
            break
        }
+       outbound = detour.Tag()
+       outboundType = detour.Type()
        group, isGroup := detour.(adapter.OutboundGroup)
        if !isGroup {
            break
        }
        next = group.Now()
    }

    upload := new(atomic.Int64)
    download := new(atomic.Int64)
-   t := &tcpTracker{
+   tracker := &TCPConn{
        ExtendedConn: bufio.NewCounterConn(conn, []N.CountFunc{func(n int64) {
            upload.Add(n)
            manager.PushUploaded(n)
@@ -124,64 +162,62 @@ func NewTCPTracker(conn net.Conn, manager *Manager, metadata Metadata, router ad
            download.Add(n)
            manager.PushDownloaded(n)
        }}),
-       manager: manager,
-       trackerInfo: &trackerInfo{
-           UUID:          uuid,
-           Start:         time.Now(),
-           Metadata:      metadata,
-           Chain:         common.Reverse(chain),
-           Rule:          "",
-           UploadTotal:   upload,
-           DownloadTotal: download,
+       metadata: TrackerMetadata{
+           ID:           id,
+           Metadata:     metadata,
+           CreatedAt:    time.Now(),
+           Upload:       upload,
+           Download:     download,
+           Chain:        common.Reverse(chain),
+           Rule:         rule,
+           Outbound:     outbound,
+           OutboundType: outboundType,
        },
+       manager: manager,
    }
+   manager.Join(tracker)
+   return tracker
+}

-   if rule != nil {
-       t.trackerInfo.Rule = rule.String() + " => " + rule.Outbound()
-   } else {
-       t.trackerInfo.Rule = "final"
-   }
-
-   manager.Join(t)
-   return t
-}
-
-type udpTracker struct {
+type UDPConn struct {
    N.PacketConn `json:"-"`
-   *trackerInfo
+   metadata TrackerMetadata
    manager *Manager
 }

-func (ut *udpTracker) ID() string {
-   return ut.UUID.String()
+func (ut *UDPConn) Metadata() TrackerMetadata {
+   return ut.metadata
 }

-func (ut *udpTracker) Close() error {
+func (ut *UDPConn) Close() error {
    ut.manager.Leave(ut)
    return ut.PacketConn.Close()
 }

-func (ut *udpTracker) Leave() {
+func (ut *UDPConn) Leave() {
    ut.manager.Leave(ut)
 }

-func (ut *udpTracker) Upstream() any {
+func (ut *UDPConn) Upstream() any {
    return ut.PacketConn
 }

-func (ut *udpTracker) ReaderReplaceable() bool {
+func (ut *UDPConn) ReaderReplaceable() bool {
    return true
 }

-func (ut *udpTracker) WriterReplaceable() bool {
+func (ut *UDPConn) WriterReplaceable() bool {
    return true
 }

-func NewUDPTracker(conn N.PacketConn, manager *Manager, metadata Metadata, router adapter.Router, rule adapter.Rule) *udpTracker {
-   uuid, _ := uuid.NewV4()
-   var chain []string
-   var next string
+func NewUDPTracker(conn N.PacketConn, manager *Manager, metadata adapter.InboundContext, router adapter.Router, rule adapter.Rule) *UDPConn {
+   id, _ := uuid.NewV4()
+   var (
+       chain        []string
+       next         string
+       outbound     string
+       outboundType string
+   )
    if rule == nil {
        if defaultOutbound, err := router.DefaultOutbound(N.NetworkUDP); err == nil {
            next = defaultOutbound.Tag()
@@ -195,17 +231,17 @@ func NewUDPTracker(conn N.PacketConn, manager *Manager, metadata Metadata, route
        if !loaded {
            break
        }
+       outbound = detour.Tag()
+       outboundType = detour.Type()
        group, isGroup := detour.(adapter.OutboundGroup)
        if !isGroup {
            break
        }
        next = group.Now()
    }

    upload := new(atomic.Int64)
    download := new(atomic.Int64)
-   ut := &udpTracker{
+   trackerConn := &UDPConn{
        PacketConn: bufio.NewCounterPacketConn(conn, []N.CountFunc{func(n int64) {
            upload.Add(n)
            manager.PushUploaded(n)
@@ -213,24 +249,19 @@ func NewUDPTracker(conn N.PacketConn, manager *Manager, metadata Metadata, route
            download.Add(n)
            manager.PushDownloaded(n)
        }}),
-       manager: manager,
-       trackerInfo: &trackerInfo{
-           UUID:          uuid,
-           Start:         time.Now(),
-           Metadata:      metadata,
-           Chain:         common.Reverse(chain),
-           Rule:          "",
-           UploadTotal:   upload,
-           DownloadTotal: download,
+       metadata: TrackerMetadata{
+           ID:           id,
+           Metadata:     metadata,
+           CreatedAt:    time.Now(),
+           Upload:       upload,
+           Download:     download,
+           Chain:        common.Reverse(chain),
+           Rule:         rule,
+           Outbound:     outbound,
+           OutboundType: outboundType,
        },
+       manager: manager,
    }
-
-   if rule != nil {
-       ut.trackerInfo.Rule = rule.String() + " => " + rule.Outbound()
-   } else {
-       ut.trackerInfo.Rule = "final"
-   }
-
-   manager.Join(ut)
-   return ut
+   manager.Join(trackerConn)
+   return trackerConn
 }
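Both trackers count traffic by wrapping the underlying connection with sing's bufio.NewCounterConn / NewCounterPacketConn callbacks, which feed the per-connection Upload/Download counters and the Manager totals. The following is a rough standalone illustration of the same byte-counting idea using only the standard library; it is not the sing-box implementation:

package countingconn

import (
    "net"
    "sync/atomic"
)

// CountingConn wraps a net.Conn and accumulates transferred bytes,
// analogous in spirit to the counter callbacks used by the trackers above.
type CountingConn struct {
    net.Conn
    Upload   atomic.Int64 // bytes written through this connection
    Download atomic.Int64 // bytes read from this connection
}

func (c *CountingConn) Read(p []byte) (int, error) {
    n, err := c.Conn.Read(p)
    c.Download.Add(int64(n))
    return n, err
}

func (c *CountingConn) Write(p []byte) (int, error) {
    n, err := c.Conn.Write(p)
    c.Upload.Add(int64(n))
    return n, err
}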
@@ -14,4 +14,6 @@ const (
    CommandSetClashMode
    CommandGetSystemProxyStatus
    CommandSetSystemProxyEnabled
+   CommandConnections
+   CommandCloseConnection
 )
@@ -31,6 +31,7 @@ type CommandClientHandler interface {
    WriteGroups(message OutboundGroupIterator)
    InitializeClashMode(modeList StringIterator, currentMode string)
    UpdateClashMode(newMode string)
+   WriteConnections(message *Connections)
 }

 func NewStandaloneCommandClient() *CommandClient {
@@ -116,6 +117,13 @@ func (c *CommandClient) Connect() error {
            return nil
        }
        go c.handleModeConn(conn)
+   case CommandConnections:
+       err = binary.Write(conn, binary.BigEndian, c.options.StatusInterval)
+       if err != nil {
+           return E.Cause(err, "write interval")
+       }
+       c.handler.Connected()
+       go c.handleConnectionsConn(conn)
    }
    return nil
 }
sing-box/experimental/libbox/command_close_connection.go (new file, 53 lines)
@@ -0,0 +1,53 @@
package libbox

import (
    "bufio"
    "net"

    "github.com/sagernet/sing-box/experimental/clashapi"
    "github.com/sagernet/sing/common/binary"
    E "github.com/sagernet/sing/common/exceptions"

    "github.com/gofrs/uuid/v5"
)

func (c *CommandClient) CloseConnection(connId string) error {
    conn, err := c.directConnect()
    if err != nil {
        return err
    }
    defer conn.Close()
    writer := bufio.NewWriter(conn)
    err = binary.WriteData(writer, binary.BigEndian, connId)
    if err != nil {
        return err
    }
    err = writer.Flush()
    if err != nil {
        return err
    }
    return readError(conn)
}

func (s *CommandServer) handleCloseConnection(conn net.Conn) error {
    reader := bufio.NewReader(conn)
    var connId string
    err := binary.ReadData(reader, binary.BigEndian, &connId)
    if err != nil {
        return E.Cause(err, "read connection id")
    }
    service := s.service
    if service == nil {
        return writeError(conn, E.New("service not ready"))
    }
    clashServer := service.instance.Router().ClashServer()
    if clashServer == nil {
        return writeError(conn, E.New("Clash API disabled"))
    }
    targetConn := clashServer.(*clashapi.Server).TrafficManager().Connection(uuid.FromStringOrNil(connId))
    if targetConn == nil {
        return writeError(conn, E.New("connection already closed"))
    }
    targetConn.Close()
    return writeError(conn, nil)
}
sing-box/experimental/libbox/command_connections.go (new file, 268 lines)
@@ -0,0 +1,268 @@
package libbox

import (
    "bufio"
    "net"
    "slices"
    "strings"
    "time"

    "github.com/sagernet/sing-box/experimental/clashapi"
    "github.com/sagernet/sing-box/experimental/clashapi/trafficontrol"
    "github.com/sagernet/sing/common/binary"
    E "github.com/sagernet/sing/common/exceptions"
    M "github.com/sagernet/sing/common/metadata"

    "github.com/gofrs/uuid/v5"
)

func (c *CommandClient) handleConnectionsConn(conn net.Conn) {
    defer conn.Close()
    reader := bufio.NewReader(conn)
    var connections Connections
    for {
        err := binary.ReadData(reader, binary.BigEndian, &connections.connections)
        if err != nil {
            c.handler.Disconnected(err.Error())
            return
        }
        c.handler.WriteConnections(&connections)
    }
}

func (s *CommandServer) handleConnectionsConn(conn net.Conn) error {
    var interval int64
    err := binary.Read(conn, binary.BigEndian, &interval)
    if err != nil {
        return E.Cause(err, "read interval")
    }
    ticker := time.NewTicker(time.Duration(interval))
    defer ticker.Stop()
    ctx := connKeepAlive(conn)
    var trafficManager *trafficontrol.Manager
    for {
        service := s.service
        if service != nil {
            clashServer := service.instance.Router().ClashServer()
            if clashServer == nil {
                return E.New("Clash API disabled")
            }
            trafficManager = clashServer.(*clashapi.Server).TrafficManager()
            break
        }
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-ticker.C:
        }
    }
    var (
        connections    = make(map[uuid.UUID]*Connection)
        outConnections []Connection
    )
    writer := bufio.NewWriter(conn)
    for {
        outConnections = outConnections[:0]
        for _, connection := range trafficManager.Connections() {
            outConnections = append(outConnections, newConnection(connections, connection, false))
        }
        for _, connection := range trafficManager.ClosedConnections() {
            outConnections = append(outConnections, newConnection(connections, connection, true))
        }
        err = binary.WriteData(writer, binary.BigEndian, outConnections)
        if err != nil {
            return err
        }
        err = writer.Flush()
        if err != nil {
            return err
        }
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-ticker.C:
        }
    }
}

const (
    ConnectionStateAll = iota
    ConnectionStateActive
    ConnectionStateClosed
)

type Connections struct {
    connections         []Connection
    filteredConnections []Connection
    outConnections      *[]Connection
}

func (c *Connections) FilterState(state int32) {
    c.filteredConnections = c.filteredConnections[:0]
    switch state {
    case ConnectionStateAll:
        c.filteredConnections = append(c.filteredConnections, c.connections...)
    case ConnectionStateActive:
        for _, connection := range c.connections {
            if connection.ClosedAt == 0 {
                c.filteredConnections = append(c.filteredConnections, connection)
            }
        }
    case ConnectionStateClosed:
        for _, connection := range c.connections {
            if connection.ClosedAt != 0 {
                c.filteredConnections = append(c.filteredConnections, connection)
            }
        }
    }
}

func (c *Connections) SortByDate() {
    slices.SortStableFunc(c.filteredConnections, func(x, y Connection) int {
        if x.CreatedAt < y.CreatedAt {
            return 1
        } else if x.CreatedAt > y.CreatedAt {
            return -1
        } else {
            return strings.Compare(y.ID, x.ID)
        }
    })
}

func (c *Connections) SortByTraffic() {
    slices.SortStableFunc(c.filteredConnections, func(x, y Connection) int {
        xTraffic := x.Uplink + x.Downlink
        yTraffic := y.Uplink + y.Downlink
        if xTraffic < yTraffic {
            return 1
        } else if xTraffic > yTraffic {
            return -1
        } else {
            return strings.Compare(y.ID, x.ID)
        }
    })
}

func (c *Connections) SortByTrafficTotal() {
    slices.SortStableFunc(c.filteredConnections, func(x, y Connection) int {
        xTraffic := x.UplinkTotal + x.DownlinkTotal
        yTraffic := y.UplinkTotal + y.DownlinkTotal
        if xTraffic < yTraffic {
            return 1
        } else if xTraffic > yTraffic {
            return -1
        } else {
            return strings.Compare(y.ID, x.ID)
        }
    })
}

func (c *Connections) Iterator() ConnectionIterator {
    return newPtrIterator(c.filteredConnections)
}

type Connection struct {
    ID            string
    Inbound       string
    InboundType   string
    IPVersion     int32
    Network       string
    Source        string
    Destination   string
    Domain        string
    Protocol      string
    User          string
    FromOutbound  string
    CreatedAt     int64
    ClosedAt      int64
    Uplink        int64
    Downlink      int64
    UplinkTotal   int64
    DownlinkTotal int64
    Rule          string
    Outbound      string
    OutboundType  string
    ChainList     []string
}

func (c *Connection) Chain() StringIterator {
    return newIterator(c.ChainList)
}

func (c *Connection) DisplayDestination() string {
    destination := M.ParseSocksaddr(c.Destination)
    if destination.IsIP() && c.Domain != "" {
        destination = M.Socksaddr{
            Fqdn: c.Domain,
            Port: destination.Port,
        }
        return destination.String()
    }
    return c.Destination
}

type ConnectionIterator interface {
    Next() *Connection
    HasNext() bool
}

func newConnection(connections map[uuid.UUID]*Connection, metadata trafficontrol.TrackerMetadata, isClosed bool) Connection {
    if oldConnection, loaded := connections[metadata.ID]; loaded {
        if isClosed {
            if oldConnection.ClosedAt == 0 {
                oldConnection.Uplink = 0
                oldConnection.Downlink = 0
                oldConnection.ClosedAt = metadata.ClosedAt.UnixMilli()
            }
            return *oldConnection
        }
        lastUplink := oldConnection.UplinkTotal
        lastDownlink := oldConnection.DownlinkTotal
        uplinkTotal := metadata.Upload.Load()
        downlinkTotal := metadata.Download.Load()
        oldConnection.Uplink = uplinkTotal - lastUplink
        oldConnection.Downlink = downlinkTotal - lastDownlink
        oldConnection.UplinkTotal = uplinkTotal
        oldConnection.DownlinkTotal = downlinkTotal
        return *oldConnection
    }
    var rule string
    if metadata.Rule != nil {
        rule = metadata.Rule.String()
    }
    uplinkTotal := metadata.Upload.Load()
    downlinkTotal := metadata.Download.Load()
    uplink := uplinkTotal
    downlink := downlinkTotal
    var closedAt int64
    if !metadata.ClosedAt.IsZero() {
        closedAt = metadata.ClosedAt.UnixMilli()
        uplink = 0
        downlink = 0
    }
    connection := Connection{
        ID:            metadata.ID.String(),
        Inbound:       metadata.Metadata.Inbound,
        InboundType:   metadata.Metadata.InboundType,
        IPVersion:     int32(metadata.Metadata.IPVersion),
        Network:       metadata.Metadata.Network,
        Source:        metadata.Metadata.Source.String(),
        Destination:   metadata.Metadata.Destination.String(),
        Domain:        metadata.Metadata.Domain,
        Protocol:      metadata.Metadata.Protocol,
        User:          metadata.Metadata.User,
        FromOutbound:  metadata.Metadata.Outbound,
        CreatedAt:     metadata.CreatedAt.UnixMilli(),
        ClosedAt:      closedAt,
        Uplink:        uplink,
        Downlink:      downlink,
        UplinkTotal:   uplinkTotal,
        DownlinkTotal: downlinkTotal,
        Rule:          rule,
        Outbound:      metadata.Outbound,
        OutboundType:  metadata.OutboundType,
        ChainList:     metadata.Chain,
    }
    connections[metadata.ID] = &connection
    return connection
}
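The sort helpers above use Go 1.21's slices.SortStableFunc, whose comparator returns a negative, zero, or positive int; returning 1 when x should sort after y produces the descending (newest or busiest first) order used here. A minimal sketch of the same comparator pattern with made-up data:

package main

import (
    "fmt"
    "slices"
)

type record struct {
    id      string
    created int64
}

func main() {
    records := []record{{"a", 10}, {"b", 30}, {"c", 20}}
    // Descending by creation time, mirroring SortByDate above.
    slices.SortStableFunc(records, func(x, y record) int {
        switch {
        case x.created < y.created:
            return 1
        case x.created > y.created:
            return -1
        default:
            return 0
        }
    })
    fmt.Println(records) // [{b 30} {c 20} {a 10}]
}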
@@ -14,36 +14,6 @@ import (
    "github.com/sagernet/sing/service"
 )

-type OutboundGroup struct {
-   Tag        string
-   Type       string
-   Selectable bool
-   Selected   string
-   IsExpand   bool
-   items      []*OutboundGroupItem
-}
-
-func (g *OutboundGroup) GetItems() OutboundGroupItemIterator {
-   return newIterator(g.items)
-}
-
-type OutboundGroupIterator interface {
-   Next() *OutboundGroup
-   HasNext() bool
-}
-
-type OutboundGroupItem struct {
-   Tag          string
-   Type         string
-   URLTestTime  int64
-   URLTestDelay int32
-}
-
-type OutboundGroupItemIterator interface {
-   Next() *OutboundGroupItem
-   HasNext() bool
-}
-
 func (c *CommandClient) handleGroupConn(conn net.Conn) {
    defer conn.Close()

@@ -92,6 +62,36 @@ func (s *CommandServer) handleGroupConn(conn net.Conn) error {
    }
 }

+type OutboundGroup struct {
+   Tag        string
+   Type       string
+   Selectable bool
+   Selected   string
+   IsExpand   bool
+   items      []*OutboundGroupItem
+}
+
+func (g *OutboundGroup) GetItems() OutboundGroupItemIterator {
+   return newIterator(g.items)
+}
+
+type OutboundGroupIterator interface {
+   Next() *OutboundGroup
+   HasNext() bool
+}
+
+type OutboundGroupItem struct {
+   Tag          string
+   Type         string
+   URLTestTime  int64
+   URLTestDelay int32
+}
+
+type OutboundGroupItemIterator interface {
+   Next() *OutboundGroupItem
+   HasNext() bool
+}
+
 func readGroups(reader io.Reader) (OutboundGroupIterator, error) {
    var groupLength uint16
    err := binary.Read(reader, binary.BigEndian, &groupLength)
@@ -33,6 +33,8 @@ type CommandServer struct {
    urlTestUpdate chan struct{}
    modeUpdate    chan struct{}
    logReset      chan struct{}
+
+   closedConnections []Connection
 }

 type CommandServerHandler interface {
@@ -176,6 +178,10 @@ func (s *CommandServer) handleConnection(conn net.Conn) error {
        return s.handleGetSystemProxyStatus(conn)
    case CommandSetSystemProxyEnabled:
        return s.handleSetSystemProxyEnabled(conn)
+   case CommandConnections:
+       return s.handleConnectionsConn(conn)
+   case CommandCloseConnection:
+       return s.handleCloseConnection(conn)
    default:
        return E.New("unknown command: ", command)
    }
@@ -36,7 +36,7 @@ func (s *CommandServer) readStatus() StatusMessage {
        trafficManager := clashServer.(*clashapi.Server).TrafficManager()
        message.Uplink, message.Downlink = trafficManager.Now()
        message.UplinkTotal, message.DownlinkTotal = trafficManager.Total()
-       message.ConnectionsIn = int32(trafficManager.Connections())
+       message.ConnectionsIn = int32(trafficManager.ConnectionsLen())
    }
 }
@@ -17,6 +17,10 @@ func newIterator[T any](values []T) *iterator[T] {
    return &iterator[T]{values}
 }

+func newPtrIterator[T any](values []T) *iterator[*T] {
+   return &iterator[*T]{common.Map(values, func(value T) *T { return &value })}
+}
+
 func (i *iterator[T]) Next() T {
    if len(i.values) == 0 {
        return common.DefaultValue[T]()
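newPtrIterator adapts a slice of values into an iterator of pointers, which appears to exist so the mobile bindings can hand out reference types. Below is a self-contained sketch of the same generic pattern; the package, type, and function names are hypothetical and this is not the libbox implementation (which builds the pointer slice with sing's common.Map):

package itersketch

type iterator[T any] struct {
    values []T
}

func (i *iterator[T]) HasNext() bool {
    return len(i.values) > 0
}

func (i *iterator[T]) Next() T {
    var zero T
    if len(i.values) == 0 {
        return zero
    }
    next := i.values[0]
    i.values = i.values[1:]
    return next
}

// newPtrIterator exposes each element by pointer instead of by value.
func newPtrIterator[T any](values []T) *iterator[*T] {
    pointers := make([]*T, 0, len(values))
    for index := range values {
        pointers = append(pointers, &values[index])
    }
    return &iterator[*T]{pointers}
}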
@@ -149,33 +149,6 @@ func (w *platformInterfaceWrapper) OpenTun(options *tun.Options, platformOptions
    return tun.New(*options)
 }

-func (w *platformInterfaceWrapper) FindProcessInfo(ctx context.Context, network string, source netip.AddrPort, destination netip.AddrPort) (*process.Info, error) {
-   var uid int32
-   if w.useProcFS {
-       uid = procfs.ResolveSocketByProcSearch(network, source, destination)
-       if uid == -1 {
-           return nil, E.New("procfs: not found")
-       }
-   } else {
-       var ipProtocol int32
-       switch N.NetworkName(network) {
-       case N.NetworkTCP:
-           ipProtocol = syscall.IPPROTO_TCP
-       case N.NetworkUDP:
-           ipProtocol = syscall.IPPROTO_UDP
-       default:
-           return nil, E.New("unknown network: ", network)
-       }
-       var err error
-       uid, err = w.iif.FindConnectionOwner(ipProtocol, source.Addr().String(), int32(source.Port()), destination.Addr().String(), int32(destination.Port()))
-       if err != nil {
-           return nil, err
-       }
-   }
-   packageName, _ := w.iif.PackageNameByUid(uid)
-   return &process.Info{UserId: uid, PackageName: packageName}, nil
-}
-
 func (w *platformInterfaceWrapper) UsePlatformDefaultInterfaceMonitor() bool {
    return w.iif.UsePlatformDefaultInterfaceMonitor()
 }
@@ -229,6 +202,33 @@ func (w *platformInterfaceWrapper) ReadWIFIState() adapter.WIFIState {
    return (adapter.WIFIState)(*wifiState)
 }

+func (w *platformInterfaceWrapper) FindProcessInfo(ctx context.Context, network string, source netip.AddrPort, destination netip.AddrPort) (*process.Info, error) {
+   var uid int32
+   if w.useProcFS {
+       uid = procfs.ResolveSocketByProcSearch(network, source, destination)
+       if uid == -1 {
+           return nil, E.New("procfs: not found")
+       }
+   } else {
+       var ipProtocol int32
+       switch N.NetworkName(network) {
+       case N.NetworkTCP:
+           ipProtocol = syscall.IPPROTO_TCP
+       case N.NetworkUDP:
+           ipProtocol = syscall.IPPROTO_UDP
+       default:
+           return nil, E.New("unknown network: ", network)
+       }
+       var err error
+       uid, err = w.iif.FindConnectionOwner(ipProtocol, source.Addr().String(), int32(source.Port()), destination.Addr().String(), int32(destination.Port()))
+       if err != nil {
+           return nil, err
+       }
+   }
+   packageName, _ := w.iif.PackageNameByUid(uid)
+   return &process.Info{UserId: uid, PackageName: packageName}, nil
+}
+
 func (w *platformInterfaceWrapper) DisableColors() bool {
    return runtime.GOOS != "android"
 }
@@ -4,10 +4,12 @@ import (
    "os"
    "os/user"
    "strconv"
+   "time"

    "github.com/sagernet/sing-box/common/humanize"
    C "github.com/sagernet/sing-box/constant"
    _ "github.com/sagernet/sing-box/include"
+   "github.com/sagernet/sing-box/log"
 )

 var (
@@ -59,6 +61,10 @@ func FormatMemoryBytes(length int64) string {
    return humanize.MemoryBytes(uint64(length))
 }

+func FormatDuration(duration int64) string {
+   return log.FormatDuration(time.Duration(duration) * time.Millisecond)
+}
+
 func ProxyDisplayType(proxyType string) string {
    return C.ProxyDisplayName(proxyType)
 }
@@ -26,14 +26,14 @@ require (
 	github.com/sagernet/gvisor v0.0.0-20240428053021-e691de28565f
 	github.com/sagernet/quic-go v0.45.0-beta.2
 	github.com/sagernet/reality v0.0.0-20230406110435-ee17307e7691
-	github.com/sagernet/sing v0.5.0-alpha.9
+	github.com/sagernet/sing v0.5.0-alpha.10
 	github.com/sagernet/sing-dns v0.3.0-beta.5
 	github.com/sagernet/sing-mux v0.2.0
 	github.com/sagernet/sing-quic v0.2.0-beta.9
 	github.com/sagernet/sing-shadowsocks v0.2.6
 	github.com/sagernet/sing-shadowsocks2 v0.2.0
 	github.com/sagernet/sing-shadowtls v0.1.4
-	github.com/sagernet/sing-tun v0.4.0-beta.8
+	github.com/sagernet/sing-tun v0.4.0-beta.9
 	github.com/sagernet/sing-vmess v0.1.8
 	github.com/sagernet/smux v0.0.0-20231208180855-7041f6ea79e7
 	github.com/sagernet/tfo-go v0.0.0-20231209031829-7b5343ac1dc6
@@ -113,8 +113,8 @@ github.com/sagernet/quic-go v0.45.0-beta.2/go.mod h1:rs3XCo3SQ2sB96NtaKnEyq+Zkya
 github.com/sagernet/reality v0.0.0-20230406110435-ee17307e7691 h1:5Th31OC6yj8byLGkEnIYp6grlXfo1QYUfiYFGjewIdc=
 github.com/sagernet/reality v0.0.0-20230406110435-ee17307e7691/go.mod h1:B8lp4WkQ1PwNnrVMM6KyuFR20pU8jYBD+A4EhJovEXU=
 github.com/sagernet/sing v0.2.18/go.mod h1:OL6k2F0vHmEzXz2KW19qQzu172FDgSbUSODylighuVo=
-github.com/sagernet/sing v0.5.0-alpha.9 h1:Mmg+LCbaKXBeQD/ttzi0/MQa3NcUyfadIgkGzhQW7o0=
-github.com/sagernet/sing v0.5.0-alpha.9/go.mod h1:ARkL0gM13/Iv5VCZmci/NuoOlePoIsW0m7BWfln/Hak=
+github.com/sagernet/sing v0.5.0-alpha.10 h1:kuHl10gpjbKQAdQfyogQU3u0CVnpqC3wrAHe/+BFaXc=
+github.com/sagernet/sing v0.5.0-alpha.10/go.mod h1:ARkL0gM13/Iv5VCZmci/NuoOlePoIsW0m7BWfln/Hak=
 github.com/sagernet/sing-dns v0.3.0-beta.5 h1:lX+wfnBVaOlSd7+GBgb431Tt/gmYwJXSHvS1HutfnD4=
 github.com/sagernet/sing-dns v0.3.0-beta.5/go.mod h1:qeO/lOUK/c3Zczp5a1VO13fbmolaM8xGKCUXtaX0/NQ=
 github.com/sagernet/sing-mux v0.2.0 h1:4C+vd8HztJCWNYfufvgL49xaOoOHXty2+EAjnzN3IYo=
@@ -127,8 +127,8 @@ github.com/sagernet/sing-shadowsocks2 v0.2.0 h1:wpZNs6wKnR7mh1wV9OHwOyUr21VkS3wK
 github.com/sagernet/sing-shadowsocks2 v0.2.0/go.mod h1:RnXS0lExcDAovvDeniJ4IKa2IuChrdipolPYWBv9hWQ=
 github.com/sagernet/sing-shadowtls v0.1.4 h1:aTgBSJEgnumzFenPvc+kbD9/W0PywzWevnVpEx6Tw3k=
 github.com/sagernet/sing-shadowtls v0.1.4/go.mod h1:F8NBgsY5YN2beQavdgdm1DPlhaKQlaL6lpDdcBglGK4=
-github.com/sagernet/sing-tun v0.4.0-beta.8 h1:3FM7KpE3kmTj7aA9LYtn82pBAFHIrk2O1b84lpx/5ns=
-github.com/sagernet/sing-tun v0.4.0-beta.8/go.mod h1:uoRiCzWHzHLw/angVqXDzUNiQcMRl/ZrElJryQLJFhY=
+github.com/sagernet/sing-tun v0.4.0-beta.9 h1:/5hXQ0u7tHtngfXozRc+o/gt6zfHBHMOwSIHXF0+S3I=
+github.com/sagernet/sing-tun v0.4.0-beta.9/go.mod h1:uoRiCzWHzHLw/angVqXDzUNiQcMRl/ZrElJryQLJFhY=
 github.com/sagernet/sing-vmess v0.1.8 h1:XVWad1RpTy9b5tPxdm5MCU8cGfrTGdR8qCq6HV2aCNc=
 github.com/sagernet/sing-vmess v0.1.8/go.mod h1:vhx32UNzTDUkNwOyIjcZQohre1CaytquC5mPplId8uA=
 github.com/sagernet/smux v0.0.0-20231208180855-7041f6ea79e7 h1:DImB4lELfQhplLTxeq2z31Fpv8CQqqrUwTbrIRumZqQ=
@@ -11,43 +11,43 @@ import (
 	E "github.com/sagernet/sing/common/exceptions"
 )
 
-func New(ctx context.Context, router adapter.Router, logger log.ContextLogger, options option.Inbound, platformInterface platform.Interface) (adapter.Inbound, error) {
+func New(ctx context.Context, router adapter.Router, logger log.ContextLogger, tag string, options option.Inbound, platformInterface platform.Interface) (adapter.Inbound, error) {
 	if options.Type == "" {
 		return nil, E.New("missing inbound type")
 	}
 	switch options.Type {
 	case C.TypeTun:
-		return NewTun(ctx, router, logger, options.Tag, options.TunOptions, platformInterface)
+		return NewTun(ctx, router, logger, tag, options.TunOptions, platformInterface)
 	case C.TypeRedirect:
-		return NewRedirect(ctx, router, logger, options.Tag, options.RedirectOptions), nil
+		return NewRedirect(ctx, router, logger, tag, options.RedirectOptions), nil
 	case C.TypeTProxy:
-		return NewTProxy(ctx, router, logger, options.Tag, options.TProxyOptions), nil
+		return NewTProxy(ctx, router, logger, tag, options.TProxyOptions), nil
 	case C.TypeDirect:
-		return NewDirect(ctx, router, logger, options.Tag, options.DirectOptions), nil
+		return NewDirect(ctx, router, logger, tag, options.DirectOptions), nil
 	case C.TypeSOCKS:
-		return NewSocks(ctx, router, logger, options.Tag, options.SocksOptions), nil
+		return NewSocks(ctx, router, logger, tag, options.SocksOptions), nil
 	case C.TypeHTTP:
-		return NewHTTP(ctx, router, logger, options.Tag, options.HTTPOptions)
+		return NewHTTP(ctx, router, logger, tag, options.HTTPOptions)
 	case C.TypeMixed:
-		return NewMixed(ctx, router, logger, options.Tag, options.MixedOptions), nil
+		return NewMixed(ctx, router, logger, tag, options.MixedOptions), nil
 	case C.TypeShadowsocks:
-		return NewShadowsocks(ctx, router, logger, options.Tag, options.ShadowsocksOptions)
+		return NewShadowsocks(ctx, router, logger, tag, options.ShadowsocksOptions)
 	case C.TypeVMess:
-		return NewVMess(ctx, router, logger, options.Tag, options.VMessOptions)
+		return NewVMess(ctx, router, logger, tag, options.VMessOptions)
 	case C.TypeTrojan:
-		return NewTrojan(ctx, router, logger, options.Tag, options.TrojanOptions)
+		return NewTrojan(ctx, router, logger, tag, options.TrojanOptions)
 	case C.TypeNaive:
-		return NewNaive(ctx, router, logger, options.Tag, options.NaiveOptions)
+		return NewNaive(ctx, router, logger, tag, options.NaiveOptions)
 	case C.TypeHysteria:
-		return NewHysteria(ctx, router, logger, options.Tag, options.HysteriaOptions)
+		return NewHysteria(ctx, router, logger, tag, options.HysteriaOptions)
 	case C.TypeShadowTLS:
-		return NewShadowTLS(ctx, router, logger, options.Tag, options.ShadowTLSOptions)
+		return NewShadowTLS(ctx, router, logger, tag, options.ShadowTLSOptions)
 	case C.TypeVLESS:
-		return NewVLESS(ctx, router, logger, options.Tag, options.VLESSOptions)
+		return NewVLESS(ctx, router, logger, tag, options.VLESSOptions)
 	case C.TypeTUIC:
-		return NewTUIC(ctx, router, logger, options.Tag, options.TUICOptions)
+		return NewTUIC(ctx, router, logger, tag, options.TUICOptions)
 	case C.TypeHysteria2:
-		return NewHysteria2(ctx, router, logger, options.Tag, options.Hysteria2Options)
+		return NewHysteria2(ctx, router, logger, tag, options.Hysteria2Options)
 	default:
 		return nil, E.New("unknown inbound type: ", options.Type)
 	}
@@ -43,7 +43,7 @@ func (f Formatter) Format(ctx context.Context, level Level, tag string, message
 		id, hasId = IDFromContext(ctx)
 	}
 	if hasId {
-		activeDuration := formatDuration(time.Since(id.CreatedAt))
+		activeDuration := FormatDuration(time.Since(id.CreatedAt))
 		if !f.DisableColors {
 			var color aurora.Color
 			color = aurora.Color(uint8(id.ID))
@@ -113,7 +113,7 @@ func (f Formatter) FormatWithSimple(ctx context.Context, level Level, tag string
 		id, hasId = IDFromContext(ctx)
 	}
 	if hasId {
-		activeDuration := formatDuration(time.Since(id.CreatedAt))
+		activeDuration := FormatDuration(time.Since(id.CreatedAt))
 		if !f.DisableColors {
 			var color aurora.Color
 			color = aurora.Color(uint8(id.ID))
@@ -163,7 +163,7 @@ func xd(value int, x int) string {
 	return message
 }
 
-func formatDuration(duration time.Duration) string {
+func FormatDuration(duration time.Duration) string {
 	if duration < time.Second {
 		return F.ToString(duration.Milliseconds(), "ms")
 	} else if duration < time.Minute {
@@ -531,7 +531,7 @@ func (r *Router) Start() error {
 	r.dnsClient.Start()
 	monitor.Finish()
 
-	if C.IsAndroid && r.platformInterface == nil {
+	if r.needPackageManager && r.platformInterface == nil {
 		monitor.Start("initialize package manager")
 		packageManager, err := tun.NewPackageManager(r)
 		monitor.Finish()
@@ -5,7 +5,7 @@
 include $(TOPDIR)/rules.mk
 
 PKG_NAME:=naiveproxy
-PKG_VERSION:=125.0.6422.35-1
+PKG_VERSION:=126.0.6478.40-1
 PKG_RELEASE:=1
 
 # intel 80386 & riscv64 & cortex-a76
@@ -20,47 +20,47 @@ else ifeq ($(ARCH_PREBUILT),riscv64_riscv64)
 endif
 
 ifeq ($(ARCH_PACKAGES),aarch64_cortex-a53)
-PKG_HASH:=829e033c930645730e39529a7be54bde571d43d3ca01ad5f8a6c58749db34308
+PKG_HASH:=d805374e3c84c199679d120bf38b427f9a2a2572310ba2209ea3f54897021dfa
 else ifeq ($(ARCH_PACKAGES),aarch64_cortex-a72)
-PKG_HASH:=f32ce43b362aa6ceb2f24a5a8ec5c6190722b06b5a2c97ca6fddc6cfa76202cd
+PKG_HASH:=2c29345f266d3b7b617756c1bee1d9c9d0c18cd2df5f91778fa3c9ee78cbb6e0
 else ifeq ($(ARCH_PACKAGES),aarch64_generic)
-PKG_HASH:=702dc4bb621d7a8189482f49d12d8d4943dbdfffe2c7c197bbe32f8f9c0f4ee0
+PKG_HASH:=712be9c6c31f92737e3a9aa6345d1797bb76111ba139c95e32f81ca92d9f94d8
 else ifeq ($(ARCH_PACKAGES),arm_arm1176jzf-s_vfp)
-PKG_HASH:=d14931ec0a312f8fd996c14ae1ec012a5f9e03d92139509f5edd3f19feba40d4
+PKG_HASH:=9eb54e9e5aaa46c6555275ab1760c5597ff882fd44d3c7b861f74bdb1f11b7ee
 else ifeq ($(ARCH_PACKAGES),arm_arm926ej-s)
-PKG_HASH:=bd8f45efa94ab89a8af08b48523a9a4a21cf954261123d5c73ea57246c29d611
+PKG_HASH:=165d08dce7efccc0dd09aed9d09745e3932a9f9090d2aece21fc3ba9cf7c8d7f
 else ifeq ($(ARCH_PACKAGES),arm_cortex-a15_neon-vfpv4)
-PKG_HASH:=790e264c4798031781eea07360906e86494a95f4bf7466e5d34735adc5b3842a
+PKG_HASH:=2814ee81d18154af761dde96449108e12c7a076e839fb6adc90914602118afb5
 else ifeq ($(ARCH_PACKAGES),arm_cortex-a5_vfpv4)
-PKG_HASH:=11f585c0896236343f400f1d31e2aec7a91948f0d0f55c095264a05bbb93771a
+PKG_HASH:=0a9d2f15e85b6a93580173b8a5a2527d40926fce70272570e2101f82c5bb96df
 else ifeq ($(ARCH_PACKAGES),arm_cortex-a7)
-PKG_HASH:=dc40da97ca7d7a24aa45e8520611c6f0dcb324b4326bc03051db12d50dcb4c35
+PKG_HASH:=f62ea0698f39f30d0845c3f81389d1fc929fccacf6bd92794803dc9c40397228
 else ifeq ($(ARCH_PACKAGES),arm_cortex-a7_neon-vfpv4)
-PKG_HASH:=282ca2cd3dc567a99da67bca9927180ef7147cc89249f43f0359c030fbcdcc55
+PKG_HASH:=55d7c177689f4d598ee45dcf4f8f837e62accdec99c3939ed351bad6abe92f46
 else ifeq ($(ARCH_PACKAGES),arm_cortex-a7_vfpv4)
-PKG_HASH:=c0987d38af34aae4687868625591a71d59b36906f2e453e7db458addebc594f1
+PKG_HASH:=f9c00185b42913cf5623f4caa3eb9ba7883da6c0f304506889cc798e0c987a11
 else ifeq ($(ARCH_PACKAGES),arm_cortex-a8_vfpv3)
-PKG_HASH:=ce7a23164af8720d71fff998ead2f0279792ede5fdb9b0cc54af05d77215af43
+PKG_HASH:=54285cd36969fb7a90624b569fd1c0dcbded72a992597793936f5efb7789f0c9
 else ifeq ($(ARCH_PACKAGES),arm_cortex-a9)
-PKG_HASH:=95af8607ce6302f62ff8c8b5ccf37c89d6c9b6a588249fb14a3120d1aab5c97e
+PKG_HASH:=016895a8fa4a6ec36efa4a4890566bf33ea888526a8902da1b915573006d8dab
 else ifeq ($(ARCH_PACKAGES),arm_cortex-a9_neon)
-PKG_HASH:=9875ca3884cbcf1704ea44900fc5c89f62502ed76a7c79137d1ff3c32e912988
+PKG_HASH:=d2508c999796c4e65a93044faa243a3640dfd9be36cf758535b7a801e61149a5
 else ifeq ($(ARCH_PACKAGES),arm_cortex-a9_vfpv3-d16)
-PKG_HASH:=072c9ebbcbaeedd8f7fa5d3da5733460bbb7047895f5f087356de34dd5014d7a
+PKG_HASH:=0687360a7488b534818df5db071ff0feae8a0a8e6c0464fe0f64533d63682841
 else ifeq ($(ARCH_PACKAGES),arm_mpcore)
-PKG_HASH:=7cebee26ac672b12f4b6f7d8fd06d251c52ed75ead434f0a54c377eca4f2797d
+PKG_HASH:=13cdb19c23add28f8cc02b9d0234db5836e851ef3ff4968363da27f6045b94ae
 else ifeq ($(ARCH_PACKAGES),arm_xscale)
-PKG_HASH:=92237ec96e598c2b508b8793cf1574172a4362b64b8fd9ad505bd3c3e86b8bb6
+PKG_HASH:=a9d4e1825a391ef9294b58d765cc6425322848a70b29f64955c5933121990288
 else ifeq ($(ARCH_PACKAGES),mipsel_24kc)
-PKG_HASH:=3e9cc1282a67c7487595f437a2d1a07ccf94c822ecd63086227a2d6b699a71d5
+PKG_HASH:=7b358d7306f77f87bcee33beb6be1d8c1d70c2128172475616bb1531bb3aa908
 else ifeq ($(ARCH_PACKAGES),mipsel_mips32)
-PKG_HASH:=0aa2920f09f10c60d292b809a571e562df8cf83f8ea86281457f2d06ad466533
+PKG_HASH:=1bc0af17f48b83e2439534f91d462b286d8c35888bfee87785f70337088a5d32
 else ifeq ($(ARCH_PACKAGES),riscv64)
-PKG_HASH:=8cae7646c9cc4e99b33b2f4de65795ebeb6eb7744e9babc39e6357180eb3bfb0
+PKG_HASH:=8862ca30f93825298a00473fddbf698ffed251deef28a40958c3ccd06da91e6a
 else ifeq ($(ARCH_PACKAGES),x86)
-PKG_HASH:=21d83d8217ab3de9d41443530e7d2a34cc3a0b0395da881b1b210209bea601c6
+PKG_HASH:=c403e1bd29d19dcf811e034bf6cc6940c6ef9425b80d87a1643000e7361016aa
 else ifeq ($(ARCH_PACKAGES),x86_64)
-PKG_HASH:=c39f4334f1ca292febd31fa153ed662f4cfea241183cb5ee97da2ca731d7ae9e
+PKG_HASH:=d88b2cc80fb3b79f13f0f1d426d2b2dda9127b0b24f477c008b4c8cfa86d99ce
 else
 PKG_HASH:=dummy
 endif
@@ -90,7 +90,7 @@ commands:
 export "CXX=$PWD/third_party/clang+llvm-17.0.6-x86_64-linux-gnu-ubuntu-22.04/bin/clang++"
 mkdir build
 cd build
-cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DUSE_TCMALLOC=on ..
+cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DUSE_LIBCXX=on -DUSE_TCMALLOC=on ..
 
 configure-qt6:
 steps:
@@ -100,7 +100,7 @@ commands:
 export "CXX=$PWD/third_party/clang+llvm-17.0.6-x86_64-linux-gnu-ubuntu-22.04/bin/clang++"
 mkdir build
 cd build
-cmake -G Ninja -DGUI=on -DUSE_QT6=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DUSE_TCMALLOC=on ..
+cmake -G Ninja -DGUI=on -DUSE_QT6=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DUSE_LIBCXX=on -DUSE_TCMALLOC=on ..
 
 configure-gcc:
 steps:
@@ -130,7 +130,7 @@ commands:
 export "CXX=clang++"
 mkdir build
 cd build
-cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release ..
+cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DUSE_LIBCXX=on ..
 
 configure-asan:
 steps:
@@ -140,7 +140,7 @@ commands:
 export "CXX=$PWD/third_party/llvm-build/Release+Asserts/bin/clang++"
 mkdir build
 cd build
-cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DASAN=on ..
+cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DASAN=on -DUSE_LIBCXX=on ..
 
 configure-ubsan:
 steps:
@@ -160,7 +160,7 @@ commands:
 export "CXX=$PWD/third_party/llvm-build/Release+Asserts/bin/clang++"
 mkdir build
 cd build
-cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DTSAN=on ..
+cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DTSAN=on -DUSE_LIBCXX=on ..
 
 configure-msan:
 steps:
@@ -170,7 +170,7 @@ commands:
 export "CXX=$PWD/third_party/llvm-build/Release+Asserts/bin/clang++"
 mkdir build
 cd build
-cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DMSAN=on -DUSE_CURL=off ..
+cmake -G Ninja -DGUI=on -DBUILD_TESTS=on -DBORINGSSL_BUILD_TESTS=on -DCMAKE_BUILD_TYPE=Release -DMSAN=on -DUSE_CURL=off -DUSE_LIBCXX=on ..
 
 build:
 steps:
@@ -31,7 +31,7 @@ freebsd_task:
 - export CXX=clang++
 - mkdir build
 - cd build
-- cmake -G Ninja -DBUILD_TESTS=on ${configure} ..
+- cmake -G Ninja -DBUILD_TESTS=on -DUSE_LIBCXX=on ${configure} ..
 compile_script:
 - ninja -C build yass_cli yass_server yass_test
 test_script:
2
yass/.github/workflows/releases-mingw.yml
vendored
@@ -119,7 +119,7 @@ jobs:
 REM start to build with workaround
 mkdir build-mingw
 cd build-mingw
-cmake -G Ninja -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DBUILD_BENCHMARKS=on -DBUILD_TESTS=on -DGUI=on -DMINGW_MSVCRT100=on -DMINGW_WORKAROUND=on .. ${{ env.CMAKE_OPTIONS }} -DCMAKE_C_COMPILER_TARGET=${{ matrix.arch }}-pc-windows-gnu -DCMAKE_CXX_COMPILER_TARGET=${{ matrix.arch }}-pc-windows-gnu -DCMAKE_ASM_COMPILER_TARGET=${{ matrix.arch }}-pc-windows-gnu -DCMAKE_SYSROOT="%CD%\..\third_party\${{ matrix.mingw_dir }}\${{ matrix.arch }}-w64-mingw32" -DUSE_TCMALLOC=on -DENABLE_LLD=on
+cmake -G Ninja -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DBUILD_BENCHMARKS=on -DBUILD_TESTS=on -DGUI=on -DMINGW_MSVCRT100=on -DMINGW_WORKAROUND=on .. ${{ env.CMAKE_OPTIONS }} -DCMAKE_C_COMPILER_TARGET=${{ matrix.arch }}-pc-windows-gnu -DCMAKE_CXX_COMPILER_TARGET=${{ matrix.arch }}-pc-windows-gnu -DCMAKE_ASM_COMPILER_TARGET=${{ matrix.arch }}-pc-windows-gnu -DCMAKE_SYSROOT="%CD%\..\third_party\${{ matrix.mingw_dir }}\${{ matrix.arch }}-w64-mingw32" -DUSE_TCMALLOC=on -DUSE_LIBCXX=on -DENABLE_LLD=on
 ninja yass yass_benchmark yass_test
 - name: Packaging
 shell: bash
5
yass/.github/workflows/releases-src.yml
vendored
@@ -44,6 +44,11 @@ jobs:
 # unshallow must come first otherwise submodule may be get unshallowed
 git fetch --tags --unshallow
 git submodule update --init --depth 1
+- name: Patch libcxx for gcc 14 support
+  run: |
+    cd third_party/libc++/trunk
+    patch -p1 < ../gcc14.patch
+    git clean -xfd
 - name: Patch libcxxabi for both of armel and armhf
   run: |
     cd third_party/libc++abi
6
yass/.github/workflows/sanitizers.yml
vendored
@@ -29,13 +29,13 @@ jobs:
 build_type: [Debug, Release]
 sanitizer:
 - name: address
-  cmake_options: -DASAN=on
+  cmake_options: -DASAN=on -DUSE_LIBCXX=on
 - name: undefined behavior
   cmake_options: -DUBSAN=on -DUSE_LIBCXX=off
 - name: thread
-  cmake_options: -DTSAN=on
+  cmake_options: -DTSAN=on -DUSE_LIBCXX=on
 - name: memory
-  cmake_options: -DMSAN=on -DUSE_CURL=off
+  cmake_options: -DMSAN=on -DUSE_LIBCXX=on -DUSE_CURL=off
 runs-on: ubuntu-22.04
 steps:
 - uses: actions/checkout@v4
@@ -445,7 +445,7 @@ cmake_dependent_option(
 USE_SYSTEM_MIMALLOC "Use system or vendored mimalloc" OFF
 USE_MIMALLOC OFF)
 
-option(USE_LIBCXX "Build with libc++" ON)
+option(USE_LIBCXX "Build with custom libc++" OFF)
 
 option(USE_NGHTTP2 "Build with libnghttp2" ON)
 cmake_dependent_option(
@@ -31,14 +31,14 @@ Post Quantum Kyber Support (not enabled by default) is added on all of supported
 See [Protecting Chrome Traffic with Hybrid Kyber KEM](https://blog.chromium.org/2023/08/protecting-chrome-traffic-with-hybrid.html) for more.
 
 ### Prebuilt binaries
-- Android [download apk](https://github.com/Chilledheart/yass/releases/download/1.10.4/yass-android-release-arm64-1.10.4.apk) or [download 32-bit apk](https://github.com/Chilledheart/yass/releases/download/1.10.4/yass-android-release-arm-1.10.4.apk)
+- Android [download apk](https://github.com/Chilledheart/yass/releases/download/1.11.0/yass-android-release-arm64-1.11.0.apk) or [download 32-bit apk](https://github.com/Chilledheart/yass/releases/download/1.11.0/yass-android-release-arm-1.11.0.apk)
 - iOS [join via TestFlight](https://testflight.apple.com/join/6AkiEq09)
-- Windows [download installer](https://github.com/Chilledheart/yass/releases/download/1.10.4/yass-mingw-win7-release-x86_64-1.10.4-system-installer.exe) [(require KB2999226 below windows 10)][KB2999226] or [download 32-bit installer](https://github.com/Chilledheart/yass/releases/download/1.10.4/yass-mingw-winxp-release-i686-1.10.4-system-installer.exe) [(require vc 2010 runtime)][vs2010_x86] or [download woa arm64 installer](https://github.com/Chilledheart/yass/releases/download/1.10.4/yass-mingw-release-aarch64-1.10.4-system-installer.exe)
+- Windows [download installer](https://github.com/Chilledheart/yass/releases/download/1.11.0/yass-mingw-win7-release-x86_64-1.11.0-system-installer.exe) [(require KB2999226 below windows 10)][KB2999226] or [download 32-bit installer](https://github.com/Chilledheart/yass/releases/download/1.11.0/yass-mingw-winxp-release-i686-1.11.0-system-installer.exe) [(require vc 2010 runtime)][vs2010_x86] or [download woa arm64 installer](https://github.com/Chilledheart/yass/releases/download/1.11.0/yass-mingw-release-aarch64-1.11.0-system-installer.exe)
-- macOS [download intel dmg](https://github.com/Chilledheart/yass/releases/download/1.10.4/yass-macos-release-x64-1.10.4.dmg) or [download apple silicon dmg](https://github.com/Chilledheart/yass/releases/download/1.10.4/yass-macos-release-arm64-1.10.4.dmg)
+- macOS [download intel dmg](https://github.com/Chilledheart/yass/releases/download/1.11.0/yass-macos-release-x64-1.11.0.dmg) or [download apple silicon dmg](https://github.com/Chilledheart/yass/releases/download/1.11.0/yass-macos-release-arm64-1.11.0.dmg)
 > via homebrew: `brew install --cask yass`
-- Linux [download rpm](https://github.com/Chilledheart/yass/releases/download/1.10.4/yass.el7.x86_64.1.10.4.rpm) or [download deb](https://github.com/Chilledheart/yass/releases/download/1.10.4/yass-ubuntu-16.04-xenial_amd64.1.10.4.deb)
+- Linux [download rpm](https://github.com/Chilledheart/yass/releases/download/1.11.0/yass.el7.x86_64.1.11.0.rpm) or [download deb](https://github.com/Chilledheart/yass/releases/download/1.11.0/yass-ubuntu-16.04-xenial_amd64.1.11.0.deb)
 
-View more at [Release Page](https://github.com/Chilledheart/yass/releases/tag/1.10.4)
+View more at [Release Page](https://github.com/Chilledheart/yass/releases/tag/1.11.0)
 
 ### NaïveProxy-Compatible Protocol Support
 Cipher http2 and https are NaïveProxy-compatible.
@@ -1,3 +1,9 @@
+yass (1.11.0-1) UNRELEASED; urgency=medium
+
+  * bump to chromium 127 dependents
+  * add gtk3/gtk4/qt6 build profile
+
+ -- Chilledheart <keeyou-cn@outlook.com> Tue, 11 Jun 2024 11:02:21 +0800
 yass (1.10.5-1) UNRELEASED; urgency=medium
 
   * miscellaneous fixes
@@ -58,7 +58,7 @@ ifneq ($(filter cross,$(DEB_BUILD_PROFILES)),)
 override_dh_auto_configure: PKG_CONFIG = ${DEB_HOST_GNU_TYPE}-pkg-config
 endif
 
-override_dh_auto_configure: CMAKE_OPTIONS += -DENABLE_LTO=on -DENABLE_LLD=on
+override_dh_auto_configure: CMAKE_OPTIONS += -DENABLE_LTO=on -DENABLE_LLD=on -DUSE_LIBCXX=on
 override_dh_auto_configure: CMAKE_OPTIONS += -DUSE_SYSTEM_ZLIB=on
 override_dh_auto_configure: CMAKE_OPTIONS += -DCMAKE_SYSTEM_NAME=Linux -DCMAKE_SYSTEM_PROCESSOR=$(DEB_HOST_ARCH)
 override_dh_auto_configure: CMAKE_OPTIONS += -DUSE_OLD_SYSTEMD_SERVICE=on
@@ -410,7 +410,11 @@ void PrintMallocStats() {
   for (auto property : properties) {
     size_t size;
     if (MallocExtension_GetNumericProperty(property, &size)) {
+      if (std::string_view(property).ends_with("_bytes")) {
         LOG(ERROR) << "TCMALLOC: " << property << " = " << size << " bytes";
+      } else {
+        LOG(ERROR) << "TCMALLOC: " << property << " = " << size;
+      }
     }
   }
 #elif defined(HAVE_MIMALLOC)
@@ -19,7 +19,7 @@ OptionDialog::OptionDialog(QWidget* parent) : QDialog(parent) {
   setWindowFlags(windowFlags() & ~Qt::WindowContextHelpButtonHint);
 
   QGridLayout* grid = new QGridLayout;
-  grid->setContentsMargins(10, 0, 20, 0);
+  grid->setContentsMargins(20, 15, 20, 15);
 
   auto tcp_keep_alive_label = new QLabel(tr("TCP keep alive"));
   auto tcp_keep_alive_cnt_label = new QLabel(tr("The number of TCP keep-alive probes"));
173
yass/third_party/libc++/gcc14.patch
vendored
@@ -1,26 +1,167 @@
+From 93dc957bd07760c5d810785707bf6bea2b18676e Mon Sep 17 00:00:00 2001
+From: Nikolas Klauser <nikolasklauser@berlin.de>
+Date: Sat, 1 Jun 2024 12:20:41 +0200
+Subject: [PATCH] [libc++] Fix failures with GCC 14 (#92663)
+
+Fixes #91831
+
+NOKEYCHECK=True
+GitOrigin-RevId: cb7a03b41fff563c0cbb5145eed09f9b17edf9e2
+---
+ include/__string/constexpr_c_functions.h | 2 +-
+ include/__type_traits/remove_pointer.h | 5 +++++
+ include/bitset | 3 +++
+ .../time.zone/time.zone.leap/nonmembers/comparison.pass.cpp | 4 ++--
+ .../expected/expected.expected/monadic/transform.pass.cpp | 2 +-
+ .../expected.expected/monadic/transform_error.pass.cpp | 2 +-
+ .../expected/expected.void/monadic/transform_error.pass.cpp | 2 +-
+ .../format.formatter.spec/formatter.char_array.pass.cpp | 2 +-
+ .../utilities/tuple/tuple.tuple/tuple.cnstr/PR31384.pass.cpp | 3 +++
+ .../variant/variant.visit.member/visit_return_type.pass.cpp | 3 +++
+ 10 files changed, 21 insertions(+), 7 deletions(-)
+
+diff --git a/include/__string/constexpr_c_functions.h b/include/__string/constexpr_c_functions.h
+index 4da8542e3..a978f816f 100644
+--- a/include/__string/constexpr_c_functions.h
++++ b/include/__string/constexpr_c_functions.h
+@@ -123,7 +123,7 @@ __constexpr_memcmp_equal(const _Tp* __lhs, const _Up* __rhs, __element_count __n
+ }
+ return true;
+ } else {
+-  return __builtin_memcmp(__lhs, __rhs, __count * sizeof(_Tp)) == 0;
++  return ::__builtin_memcmp(__lhs, __rhs, __count * sizeof(_Tp)) == 0;
+ }
+ }
+
 diff --git a/include/__type_traits/remove_pointer.h b/include/__type_traits/remove_pointer.h
-index 54390a193..eea523ab2 100644
+index 54390a193..1048f6705 100644
 --- a/include/__type_traits/remove_pointer.h
 +++ b/include/__type_traits/remove_pointer.h
-@@ -22,9 +22,6 @@ template <class _Tp>
+@@ -23,8 +23,13 @@ struct remove_pointer {
-struct remove_pointer {
 using type _LIBCPP_NODEBUG = __remove_pointer(_Tp);
 };
--
--template <class _Tp>
++# ifdef _LIBCPP_COMPILER_GCC
--using __remove_pointer_t = __remove_pointer(_Tp);
++template <class _Tp>
++using __remove_pointer_t = typename remove_pointer<_Tp>::type;
++# else
+template <class _Tp>
+using __remove_pointer_t = __remove_pointer(_Tp);
++# endif
 #else
 // clang-format off
 template <class _Tp> struct _LIBCPP_TEMPLATE_VIS remove_pointer {typedef _LIBCPP_NODEBUG _Tp type;};
-@@ -33,10 +30,10 @@ template <class _Tp> struct _LIBCPP_TEMPLATE_VIS remove_pointer<_Tp* const>
+diff --git a/include/bitset b/include/bitset
-template <class _Tp> struct _LIBCPP_TEMPLATE_VIS remove_pointer<_Tp* volatile> {typedef _LIBCPP_NODEBUG _Tp type;};
+index 8818ab656..6bd7bfe58 100644
-template <class _Tp> struct _LIBCPP_TEMPLATE_VIS remove_pointer<_Tp* const volatile> {typedef _LIBCPP_NODEBUG _Tp type;};
+--- a/include/bitset
-// clang-format on
++++ b/include/bitset
-+#endif // !defined(_LIBCPP_WORKAROUND_OBJCXX_COMPILER_INTRINSICS) && __has_builtin(__remove_pointer)
+@@ -375,8 +375,11 @@ template <size_t _N_words, size_t _Size>
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unsigned long long
+__bitset<_N_words, _Size>::to_ullong(true_type, true_type) const {
+unsigned long long __r = __first_[0];
++ _LIBCPP_DIAGNOSTIC_PUSH
++ _LIBCPP_GCC_DIAGNOSTIC_IGNORED("-Wshift-count-overflow")
+for (size_t __i = 1; __i < sizeof(unsigned long long) / sizeof(__storage_type); ++__i)
+__r |= static_cast<unsigned long long>(__first_[__i]) << (sizeof(__storage_type) * CHAR_BIT);
++ _LIBCPP_DIAGNOSTIC_POP
+return __r;
+}
+
-template <class _Tp>
+diff --git a/test/std/time/time.zone/time.zone.leap/nonmembers/comparison.pass.cpp b/test/std/time/time.zone/time.zone.leap/nonmembers/comparison.pass.cpp
-using __remove_pointer_t = typename remove_pointer<_Tp>::type;
+index 448cd88d1..ccff0248e 100644
--#endif // !defined(_LIBCPP_WORKAROUND_OBJCXX_COMPILER_INTRINSICS) && __has_builtin(__remove_pointer)
+--- a/test/std/time/time.zone/time.zone.leap/nonmembers/comparison.pass.cpp
++++ b/test/std/time/time.zone/time.zone.leap/nonmembers/comparison.pass.cpp
+@@ -9,8 +9,8 @@
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: no-filesystem, no-localization, no-tzdb
+
+-// TODO TZDB test whether this can be enabled with gcc 14.
+-// UNSUPPORTED: gcc-13
++// TODO TZDB investigate why this fails with GCC
++// UNSUPPORTED: gcc-13, gcc-14
+
+// XFAIL: libcpp-has-no-experimental-tzdb
+// XFAIL: availability-tzdb-missing
+diff --git a/test/std/utilities/expected/expected.expected/monadic/transform.pass.cpp b/test/std/utilities/expected/expected.expected/monadic/transform.pass.cpp
+index d38a46f04..aa7106fb9 100644
+--- a/test/std/utilities/expected/expected.expected/monadic/transform.pass.cpp
++++ b/test/std/utilities/expected/expected.expected/monadic/transform.pass.cpp
+@@ -10,7 +10,7 @@
+
+// GCC has a issue for `Guaranteed copy elision for potentially-overlapping non-static data members`,
+// please refer to: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108333
+-// XFAIL: gcc-13
++// XFAIL: gcc-13, gcc-14
+
+// <expected>
+
+diff --git a/test/std/utilities/expected/expected.expected/monadic/transform_error.pass.cpp b/test/std/utilities/expected/expected.expected/monadic/transform_error.pass.cpp
+index ec55f637f..ae9feccb5 100644
+--- a/test/std/utilities/expected/expected.expected/monadic/transform_error.pass.cpp
++++ b/test/std/utilities/expected/expected.expected/monadic/transform_error.pass.cpp
+@@ -10,7 +10,7 @@
+
+// GCC has a issue for `Guaranteed copy elision for potentially-overlapping non-static data members`,
+// please refer to: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108333.
+-// XFAIL: gcc-13
++// XFAIL: gcc-13, gcc-14
+
+// <expected>
+
+diff --git a/test/std/utilities/expected/expected.void/monadic/transform_error.pass.cpp b/test/std/utilities/expected/expected.void/monadic/transform_error.pass.cpp
+index cd6e5a503..f70bddbed 100644
+--- a/test/std/utilities/expected/expected.void/monadic/transform_error.pass.cpp
++++ b/test/std/utilities/expected/expected.void/monadic/transform_error.pass.cpp
+@@ -10,7 +10,7 @@
+
+// GCC has a issue for `Guaranteed copy elision for potentially-overlapping non-static data members`,
+// please refer to: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108333
+-// XFAIL: gcc-13
++// XFAIL: gcc-13, gcc-14
+
+// <expected>
+
+diff --git a/test/std/utilities/format/format.formatter/format.formatter.spec/formatter.char_array.pass.cpp b/test/std/utilities/format/format.formatter/format.formatter.spec/formatter.char_array.pass.cpp
+index b0ee399a1..cad13c1ef 100644
+--- a/test/std/utilities/format/format.formatter/format.formatter.spec/formatter.char_array.pass.cpp
++++ b/test/std/utilities/format/format.formatter/format.formatter.spec/formatter.char_array.pass.cpp
+@@ -7,7 +7,7 @@
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// TODO FMT __builtin_memcpy isn't constexpr in GCC
+-// UNSUPPORTED: gcc-13
++// UNSUPPORTED: gcc-13, gcc-14
+
+// <format>
+
+diff --git a/test/std/utilities/tuple/tuple.tuple/tuple.cnstr/PR31384.pass.cpp b/test/std/utilities/tuple/tuple.tuple/tuple.cnstr/PR31384.pass.cpp
+index c9e7bb6a5..0b40ac9ff 100644
+--- a/test/std/utilities/tuple/tuple.tuple/tuple.cnstr/PR31384.pass.cpp
++++ b/test/std/utilities/tuple/tuple.tuple/tuple.cnstr/PR31384.pass.cpp
+@@ -8,6 +8,9 @@
+
+// UNSUPPORTED: c++03
+
++// FIXME: Why does this start to fail with GCC 14?
++// XFAIL: gcc-14
++
+// See https://llvm.org/PR31384.
+
+#include <tuple>
+diff --git a/test/std/utilities/variant/variant.visit.member/visit_return_type.pass.cpp b/test/std/utilities/variant/variant.visit.member/visit_return_type.pass.cpp
+index 2c1cbb06e..7429cdf80 100644
+--- a/test/std/utilities/variant/variant.visit.member/visit_return_type.pass.cpp
++++ b/test/std/utilities/variant/variant.visit.member/visit_return_type.pass.cpp
+@@ -34,6 +34,9 @@ struct overloaded : Ts... {
+using Ts::operator()...;
+};
+
++template <class... Ts>
++overloaded(Ts...) -> overloaded<Ts...>;
++
+void test_overload_ambiguity() {
+using V = std::variant<float, long, std::string>;
+using namespace std::string_literals;
+--
+2.45.2
+
-#if _LIBCPP_STD_VER >= 14
-template <class _Tp>
@@ -11,20 +11,27 @@
 
 // Basic test for float registers number are accepted.
 
-#include <dlfcn.h>
 #include <stdlib.h>
 #include <string.h>
 #include <unwind.h>
 
+// Using __attribute__((section("main_func"))) is ELF specific, but then
+// this entire test is marked as requiring Linux, so we should be good.
+//
+// We don't use dladdr() because on musl it's a no-op when statically linked.
+extern char __start_main_func;
+extern char __stop_main_func;
+
 _Unwind_Reason_Code frame_handler(struct _Unwind_Context *ctx, void *arg) {
   (void)arg;
-  Dl_info info = {0, 0, 0, 0};
 
-  // Unwind util the main is reached, above frames depend on the platform and
+  // Unwind until the main is reached, above frames depend on the platform and
   // architecture.
-  if (dladdr(reinterpret_cast<void *>(_Unwind_GetIP(ctx)), &info) &&
-      info.dli_sname && !strcmp("main", info.dli_sname))
+  uintptr_t ip = _Unwind_GetIP(ctx);
+  if (ip >= (uintptr_t)&__start_main_func &&
+      ip < (uintptr_t)&__stop_main_func) {
     _Exit(0);
+  }
 
   return _URC_NO_REASON;
 }
@@ -45,7 +52,7 @@ __attribute__((noinline)) void foo() {
   _Unwind_Backtrace(frame_handler, NULL);
 }
 
-int main() {
+__attribute__((section("main_func"))) int main() {
   foo();
   return -2;
 }
@@ -17,7 +17,6 @@
 
 #undef NDEBUG
 #include <assert.h>
-#include <dlfcn.h>
 #include <signal.h>
 #include <stdint.h>
 #include <stdio.h>
@@ -27,6 +26,13 @@
 #include <unistd.h>
 #include <unwind.h>
 
+// Using __attribute__((section("main_func"))) is Linux specific, but then
+// this entire test is marked as requiring Linux, so we should be good.
+//
+// We don't use dladdr() because on musl it's a no-op when statically linked.
+extern char __start_main_func;
+extern char __stop_main_func;
+
 void foo();
 _Unwind_Exception ex;
 
@@ -41,14 +47,14 @@ _Unwind_Reason_Code stop(int version, _Unwind_Action actions,
   assert(exceptionObject == &ex);
   assert(stop_parameter == &foo);
 
-  Dl_info info = {0, 0, 0, 0};
-
-  // Unwind util the main is reached, above frames depend on the platform and
+  // Unwind until the main is reached, above frames depend on the platform and
   // architecture.
-  if (dladdr(reinterpret_cast<void *>(_Unwind_GetIP(context)), &info) &&
-      info.dli_sname && !strcmp("main", info.dli_sname)) {
+  uintptr_t ip = _Unwind_GetIP(context);
+  if (ip >= (uintptr_t)&__start_main_func &&
+      ip < (uintptr_t)&__stop_main_func) {
     _Exit(0);
   }
 
   return _URC_NO_REASON;
 }
 
@@ -66,7 +72,7 @@ __attribute__((noinline)) void foo() {
   _Unwind_ForcedUnwind(e, stop, (void *)&foo);
 }
 
-int main() {
+__attribute__((section("main_func"))) int main() {
   foo();
   return -2;
 }
|
@@ -13,9 +13,15 @@
|
|||||||
// TODO: Figure out why this fails with Memory Sanitizer.
|
// TODO: Figure out why this fails with Memory Sanitizer.
|
||||||
// XFAIL: msan
|
// XFAIL: msan
|
||||||
|
|
||||||
|
// Note: this test fails on musl because:
|
||||||
|
//
|
||||||
|
// (a) musl disables emission of unwind information for its build, and
|
||||||
|
// (b) musl's signal trampolines don't include unwind information
|
||||||
|
//
|
||||||
|
// XFAIL: target={{.*}}-musl
|
||||||
|
|
||||||
#undef NDEBUG
|
#undef NDEBUG
|
||||||
#include <assert.h>
|
#include <assert.h>
|
||||||
#include <dlfcn.h>
|
|
||||||
#include <signal.h>
|
#include <signal.h>
|
||||||
#include <stdio.h>
|
#include <stdio.h>
|
||||||
#include <stdlib.h>
|
#include <stdlib.h>
|
||||||
@@ -24,16 +30,24 @@
|
|||||||
#include <unistd.h>
|
#include <unistd.h>
|
||||||
#include <unwind.h>
|
#include <unwind.h>
|
||||||
|
|
||||||
|
// Using __attribute__((section("main_func"))) is ELF specific, but then
|
||||||
|
// this entire test is marked as requiring Linux, so we should be good.
|
||||||
|
//
|
||||||
|
// We don't use dladdr() because on musl it's a no-op when statically linked.
|
||||||
|
extern char __start_main_func;
|
||||||
|
extern char __stop_main_func;
|
||||||
|
|
||||||
_Unwind_Reason_Code frame_handler(struct _Unwind_Context* ctx, void* arg) {
|
_Unwind_Reason_Code frame_handler(struct _Unwind_Context* ctx, void* arg) {
|
||||||
(void)arg;
|
(void)arg;
|
||||||
Dl_info info = { 0, 0, 0, 0 };
|
|
||||||
|
|
||||||
// Unwind util the main is reached, above frames depend on the platform and
|
// Unwind until the main is reached, above frames depend on the platform and
|
||||||
// architecture.
|
// architecture.
|
||||||
if (dladdr(reinterpret_cast<void *>(_Unwind_GetIP(ctx)), &info) &&
|
uintptr_t ip = _Unwind_GetIP(ctx);
|
||||||
info.dli_sname && !strcmp("main", info.dli_sname)) {
|
if (ip >= (uintptr_t)&__start_main_func &&
|
||||||
|
ip < (uintptr_t)&__stop_main_func) {
|
||||||
_Exit(0);
|
_Exit(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
return _URC_NO_REASON;
|
return _URC_NO_REASON;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -43,7 +57,7 @@ void signal_handler(int signum) {
|
|||||||
_Exit(-1);
|
_Exit(-1);
|
||||||
}
|
}
|
||||||
|
|
||||||
int main(int, char**) {
|
__attribute__((section("main_func"))) int main(int, char **) {
|
||||||
signal(SIGUSR1, signal_handler);
|
signal(SIGUSR1, signal_handler);
|
||||||
kill(getpid(), SIGUSR1);
|
kill(getpid(), SIGUSR1);
|
||||||
return -2;
|
return -2;
|
||||||
|
@@ -13,9 +13,15 @@
 // TODO: Figure out why this fails with Memory Sanitizer.
 // XFAIL: msan
 
+// Note: this test fails on musl because:
+//
+// (a) musl disables emission of unwind information for its build, and
+// (b) musl's signal trampolines don't include unwind information
+//
+// XFAIL: target={{.*}}-musl
+
 #undef NDEBUG
 #include <assert.h>
-#include <dlfcn.h>
 #include <signal.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -24,16 +30,24 @@
 #include <unistd.h>
 #include <unwind.h>
 
+// Using __attribute__((section("main_func"))) is ELF specific, but then
+// this entire test is marked as requiring Linux, so we should be good.
+//
+// We don't use dladdr() because on musl it's a no-op when statically linked.
+extern char __start_main_func;
+extern char __stop_main_func;
+
 _Unwind_Reason_Code frame_handler(struct _Unwind_Context* ctx, void* arg) {
   (void)arg;
-  Dl_info info = { 0, 0, 0, 0 };
 
   // Unwind until the main is reached, above frames depend on the platform and
   // architecture.
-  if (dladdr(reinterpret_cast<void *>(_Unwind_GetIP(ctx)), &info) &&
-      info.dli_sname && !strcmp("main", info.dli_sname)) {
+  uintptr_t ip = _Unwind_GetIP(ctx);
+  if (ip >= (uintptr_t)&__start_main_func &&
+      ip < (uintptr_t)&__stop_main_func) {
     _Exit(0);
   }
 
   return _URC_NO_REASON;
 }
 
@@ -56,7 +70,7 @@ __attribute__((noinline)) void crashing_leaf_func(int do_trap) {
   __builtin_trap();
 }
 
-int main(int, char**) {
+__attribute__((section("main_func"))) int main(int, char **) {
   signal(SIGTRAP, signal_handler);
   signal(SIGILL, signal_handler);
   crashing_leaf_func(1);
@@ -260,6 +260,9 @@ for embedded devices and low end boxes.
 %systemd_postun_with_restart yass-redir.service
 
 %changelog
+* Tue Jun 11 2024 Chilledheart <keeyou-cn@outlook.com> - 1.11.0-1
+- bump to chromium 127 dependents
+- add gtk3/gtk4/qt6 build profile (source)
 * Mon Jun 3 2024 Chilledheart <keeyou-cn@outlook.com> - 1.10.5-1
 - miscellaneous fixes
 - fix gtk3 wayland app icon issue
@@ -5,9 +5,9 @@ import hashlib
 import json
 import os.path
 import re
-import types
 import ssl
 import sys
+import types
 import unittest
 
 import youtube_dl.extractor
@@ -181,18 +181,18 @@ def expect_value(self, got, expected, field):
 op, _, expected_num = expected.partition(':')
 expected_num = int(expected_num)
 if op == 'mincount':
-    assert_func = assertGreaterEqual
+    assert_func = self.assertGreaterEqual
     msg_tmpl = 'Expected %d items in field %s, but only got %d'
 elif op == 'maxcount':
-    assert_func = assertLessEqual
+    assert_func = self.assertLessEqual
     msg_tmpl = 'Expected maximum %d items in field %s, but got %d'
 elif op == 'count':
-    assert_func = assertEqual
+    assert_func = self.assertEqual
     msg_tmpl = 'Expected exactly %d items in field %s, but got %d'
 else:
     assert False
 assert_func(
-    self, len(got), expected_num,
+    len(got), expected_num,
     msg_tmpl % (expected_num, field, len(got)))
 return
 self.assertEqual(
@@ -262,27 +262,6 @@ def assertRegexpMatches(self, text, regexp, msg=None):
 self.assertTrue(m, msg)
 
 
-def assertGreaterEqual(self, got, expected, msg=None):
-    if not (got >= expected):
-        if msg is None:
-            msg = '%r not greater than or equal to %r' % (got, expected)
-    self.assertTrue(got >= expected, msg)
-
-
-def assertLessEqual(self, got, expected, msg=None):
-    if not (got <= expected):
-        if msg is None:
-            msg = '%r not less than or equal to %r' % (got, expected)
-    self.assertTrue(got <= expected, msg)
-
-
-def assertEqual(self, got, expected, msg=None):
-    if not (got == expected):
-        if msg is None:
-            msg = '%r not equal to %r' % (got, expected)
-    self.assertTrue(got == expected, msg)
-
-
 def expect_warnings(ydl, warnings_re):
 real_warning = ydl.report_warning
 
@@ -9,8 +9,6 @@ import unittest
|
|||||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
from test.helper import (
|
from test.helper import (
|
||||||
assertGreaterEqual,
|
|
||||||
assertLessEqual,
|
|
||||||
expect_warnings,
|
expect_warnings,
|
||||||
get_params,
|
get_params,
|
||||||
gettestcases,
|
gettestcases,
|
||||||
@@ -36,12 +34,20 @@ from youtube_dl.utils import (
|
|||||||
ExtractorError,
|
ExtractorError,
|
||||||
error_to_compat_str,
|
error_to_compat_str,
|
||||||
format_bytes,
|
format_bytes,
|
||||||
|
IDENTITY,
|
||||||
|
preferredencoding,
|
||||||
UnavailableVideoError,
|
UnavailableVideoError,
|
||||||
)
|
)
|
||||||
from youtube_dl.extractor import get_info_extractor
|
from youtube_dl.extractor import get_info_extractor
|
||||||
|
|
||||||
RETRIES = 3
|
RETRIES = 3
|
||||||
|
|
||||||
|
# Some unittest APIs require actual str
|
||||||
|
if not isinstance('TEST', str):
|
||||||
|
_encode_str = lambda s: s.encode(preferredencoding())
|
||||||
|
else:
|
||||||
|
_encode_str = IDENTITY
|
||||||
|
|
||||||
|
|
||||||
class YoutubeDL(youtube_dl.YoutubeDL):
|
class YoutubeDL(youtube_dl.YoutubeDL):
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
@@ -102,7 +108,7 @@ def generator(test_case, tname):
|
|||||||
|
|
||||||
def print_skipping(reason):
|
def print_skipping(reason):
|
||||||
print('Skipping %s: %s' % (test_case['name'], reason))
|
print('Skipping %s: %s' % (test_case['name'], reason))
|
||||||
self.skipTest(reason)
|
self.skipTest(_encode_str(reason))
|
||||||
|
|
||||||
if not ie.working():
|
if not ie.working():
|
||||||
print_skipping('IE marked as not _WORKING')
|
print_skipping('IE marked as not _WORKING')
|
||||||
@@ -187,16 +193,14 @@ def generator(test_case, tname):
             expect_info_dict(self, res_dict, test_case.get('info_dict', {}))
 
         if 'playlist_mincount' in test_case:
-            assertGreaterEqual(
-                self,
+            self.assertGreaterEqual(
                 len(res_dict['entries']),
                 test_case['playlist_mincount'],
                 'Expected at least %d in playlist %s, but got only %d' % (
                     test_case['playlist_mincount'], test_case['url'],
                     len(res_dict['entries'])))
         if 'playlist_maxcount' in test_case:
-            assertLessEqual(
-                self,
+            self.assertLessEqual(
                 len(res_dict['entries']),
                 test_case['playlist_maxcount'],
                 'Expected at most %d in playlist %s, but got %d' % (
@@ -243,8 +247,8 @@ def generator(test_case, tname):
                 if params.get('test'):
                     expected_minsize = max(expected_minsize, 10000)
                 got_fsize = os.path.getsize(tc_filename)
-                assertGreaterEqual(
-                    self, got_fsize, expected_minsize,
+                self.assertGreaterEqual(
+                    got_fsize, expected_minsize,
                     'Expected %s to be at least %s, but it\'s only %s ' %
                     (tc_filename, format_bytes(expected_minsize),
                      format_bytes(got_fsize)))
@@ -1039,8 +1039,8 @@ class YoutubeDL(object):
         elif result_type in ('playlist', 'multi_video'):
             # Protect from infinite recursion due to recursively nested playlists
             # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
-            webpage_url = ie_result['webpage_url']
-            if webpage_url in self._playlist_urls:
+            webpage_url = ie_result.get('webpage_url')  # not all pl/mv have this
+            if webpage_url and webpage_url in self._playlist_urls:
                 self.to_screen(
                     '[download] Skipping already downloaded playlist: %s'
                     % ie_result.get('title') or ie_result.get('id'))
@@ -1048,6 +1048,10 @@ class YoutubeDL(object):
 
             self._playlist_level += 1
             self._playlist_urls.add(webpage_url)
+            new_result = dict((k, v) for k, v in extra_info.items() if k not in ie_result)
+            if new_result:
+                new_result.update(ie_result)
+                ie_result = new_result
             try:
                 return self.__process_playlist(ie_result, download)
             finally:
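
The merge added above, shown on invented values as a standalone sketch: keys supplied in extra_info are folded into the extractor result only where the extractor did not already set them itself.

    extra_info = {'playlist': 'Mixes', 'webpage_url': 'https://example.com/list'}
    ie_result = {'id': 'abc123', 'webpage_url': 'https://example.com/video'}

    # Only keys absent from ie_result are taken from extra_info ...
    new_result = dict((k, v) for k, v in extra_info.items() if k not in ie_result)
    if new_result:
        # ... and ie_result's own values win on update
        new_result.update(ie_result)
        ie_result = new_result

    print(ie_result)
    # {'playlist': 'Mixes', 'id': 'abc123', 'webpage_url': 'https://example.com/video'}
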
@@ -1593,6 +1597,28 @@ class YoutubeDL(object):
             self.cookiejar.add_cookie_header(pr)
             return pr.get_header('Cookie')
 
+    def _fill_common_fields(self, info_dict, final=True):
+
+        for ts_key, date_key in (
+                ('timestamp', 'upload_date'),
+                ('release_timestamp', 'release_date'),
+        ):
+            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
+                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
+                # see http://bugs.python.org/issue1646728)
+                try:
+                    upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
+                    info_dict[date_key] = compat_str(upload_date.strftime('%Y%m%d'))
+                except (ValueError, OverflowError, OSError):
+                    pass
+
+        # Auto generate title fields corresponding to the *_number fields when missing
+        # in order to always have clean titles. This is very common for TV series.
+        if final:
+            for field in ('chapter', 'season', 'episode'):
+                if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
+                    info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
+
     def process_video_result(self, info_dict, download=True):
         assert info_dict.get('_type', 'video') == 'video'
 
@@ -1660,24 +1686,7 @@ class YoutubeDL(object):
         if 'display_id' not in info_dict and 'id' in info_dict:
             info_dict['display_id'] = info_dict['id']
 
-        for ts_key, date_key in (
-                ('timestamp', 'upload_date'),
-                ('release_timestamp', 'release_date'),
-        ):
-            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
-                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
-                # see http://bugs.python.org/issue1646728)
-                try:
-                    upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
-                    info_dict[date_key] = compat_str(upload_date.strftime('%Y%m%d'))
-                except (ValueError, OverflowError, OSError):
-                    pass
-
-        # Auto generate title fields corresponding to the *_number fields when missing
-        # in order to always have clean titles. This is very common for TV series.
-        for field in ('chapter', 'season', 'episode'):
-            if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
-                info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
+        self._fill_common_fields(info_dict)
 
         for cc_kind in ('subtitles', 'automatic_captions'):
             cc = info_dict.get(cc_kind)
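
A standalone sketch of what the two hunks above amount to (sample values taken from test data elsewhere in this diff): a POSIX timestamp becomes a YYYYMMDD date, out-of-range timestamps are silently skipped, and *_number fields get matching title fields.

    import datetime

    info_dict = {'timestamp': 1422987057, 'episode_number': 3}

    for ts_key, date_key in (
            ('timestamp', 'upload_date'),
            ('release_timestamp', 'release_date')):
        if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
            try:
                info_dict[date_key] = datetime.datetime.utcfromtimestamp(
                    info_dict[ts_key]).strftime('%Y%m%d')
            except (ValueError, OverflowError, OSError):
                # e.g. negative timestamps on Windows
                pass

    for field in ('chapter', 'season', 'episode'):
        if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
            info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])

    print(info_dict['upload_date'])  # 20150203
    print(info_dict['episode'])      # Episode 3
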
@@ -898,21 +898,13 @@ from .ooyala import (
 )
 from .ora import OraTVIE
 from .orf import (
-    ORFTVthekIE,
-    ORFFM4IE,
+    ORFONIE,
+    ORFONLiveIE,
     ORFFM4StoryIE,
-    ORFOE1IE,
-    ORFOE3IE,
-    ORFNOEIE,
-    ORFWIEIE,
-    ORFBGLIE,
-    ORFOOEIE,
-    ORFSTMIE,
-    ORFKTNIE,
-    ORFSBGIE,
-    ORFTIRIE,
-    ORFVBGIE,
     ORFIPTVIE,
+    ORFPodcastIE,
+    ORFRadioIE,
+    ORFRadioCollectionIE,
 )
 from .outsidetv import OutsideTVIE
 from .packtpub import (
@@ -1,3 +1,4 @@
+# coding: utf-8
 from __future__ import unicode_literals
 
 import itertools
@@ -10,7 +11,7 @@ from ..compat import (
     compat_ord,
     compat_str,
     compat_urllib_parse_unquote,
-    compat_zip
+    compat_zip as zip,
 )
 from ..utils import (
     int_or_none,
@@ -24,7 +25,7 @@ class MixcloudBaseIE(InfoExtractor):
     def _call_api(self, object_type, object_fields, display_id, username, slug=None):
         lookup_key = object_type + 'Lookup'
         return self._download_json(
-            'https://www.mixcloud.com/graphql', display_id, query={
+            'https://app.mixcloud.com/graphql', display_id, query={
                 'query': '''{
   %s(lookup: {username: "%s"%s}) {
     %s
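
Roughly how the lookup query above gets assembled before being sent to the Mixcloud GraphQL endpoint. The object type, field list and names below are invented, and the way the optional slug clause is filled in is an assumption for illustration only.

    object_type, object_fields = 'cloudcast', 'audioLength name'   # invented
    username, slug = 'gillespeterson', 'caribou-7-inch-vinyl-mix-chat'

    lookup_key = object_type + 'Lookup'
    query = '''{
      %s(lookup: {username: "%s"%s}) {
        %s
      }
    }''' % (lookup_key, username, ', slug: "%s"' % slug if slug else '', object_fields)

    print(query)
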
@@ -44,7 +45,7 @@ class MixcloudIE(MixcloudBaseIE):
             'ext': 'm4a',
             'title': 'Cryptkeeper',
             'description': 'After quite a long silence from myself, finally another Drum\'n\'Bass mix with my favourite current dance floor bangers.',
-            'uploader': 'Daniel Holbach',
+            'uploader': 'dholbach',  # was: 'Daniel Holbach',
             'uploader_id': 'dholbach',
             'thumbnail': r're:https?://.*\.jpg',
             'view_count': int,
@@ -57,7 +58,7 @@ class MixcloudIE(MixcloudBaseIE):
             'id': 'gillespeterson_caribou-7-inch-vinyl-mix-chat',
             'ext': 'mp3',
             'title': 'Caribou 7 inch Vinyl Mix & Chat',
-            'description': 'md5:2b8aec6adce69f9d41724647c65875e8',
+            'description': r're:Last week Dan Snaith aka Caribou swung by the Brownswood.{136}',
             'uploader': 'Gilles Peterson Worldwide',
             'uploader_id': 'gillespeterson',
             'thumbnail': 're:https?://.*',
@@ -65,6 +66,23 @@ class MixcloudIE(MixcloudBaseIE):
             'timestamp': 1422987057,
             'upload_date': '20150203',
         },
+        'params': {
+            'skip_download': '404 not found',
+        },
+    }, {
+        'url': 'https://www.mixcloud.com/gillespeterson/carnival-m%C3%BAsica-popular-brasileira-mix/',
+        'info_dict': {
+            'id': 'gillespeterson_carnival-música-popular-brasileira-mix',
+            'ext': 'm4a',
+            'title': 'Carnival Música Popular Brasileira Mix',
+            'description': r're:Gilles was recently in Brazil to play at Boiler Room.{208}',
+            'timestamp': 1454347174,
+            'upload_date': '20160201',
+            'uploader': 'Gilles Peterson Worldwide',
+            'uploader_id': 'gillespeterson',
+            'thumbnail': 're:https?://.*',
+            'view_count': int,
+        },
     }, {
         'url': 'https://beta.mixcloud.com/RedLightRadio/nosedrip-15-red-light-radio-01-18-2016/',
         'only_matching': True,
@@ -76,10 +94,10 @@ class MixcloudIE(MixcloudBaseIE):
         """Encrypt/Decrypt XOR cipher. Both ways are possible because it's XOR."""
         return ''.join([
             compat_chr(compat_ord(ch) ^ compat_ord(k))
-            for ch, k in compat_zip(ciphertext, itertools.cycle(key))])
+            for ch, k in zip(ciphertext, itertools.cycle(key))])
 
     def _real_extract(self, url):
-        username, slug = re.match(self._VALID_URL, url).groups()
+        username, slug = self._match_valid_url(url).groups()
         username, slug = compat_urllib_parse_unquote(username), compat_urllib_parse_unquote(slug)
         track_id = '%s_%s' % (username, slug)
 
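
The XOR de/encryption used above, as a self-contained sketch: XOR with a repeating key is its own inverse, so one function serves both directions. The key and data below are made up for illustration.

    import itertools


    def xor_cipher(data, key):
        # XOR each character with the repeating key
        return ''.join(
            chr(ord(ch) ^ ord(k))
            for ch, k in zip(data, itertools.cycle(key)))


    key = 'EXAMPLEKEY'  # invented; the real key lives in the extractor
    secret = xor_cipher('https://example.com/stream.m4a', key)
    print(xor_cipher(secret, key) == 'https://example.com/stream.m4a')  # True
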
File diff suppressed because it is too large
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+
 from ..utils import (
     float_or_none,
     get_element_by_id,
@@ -11,6 +12,7 @@ from ..utils import (
     strip_or_none,
     unified_strdate,
     urljoin,
+    str_to_int,
 )
 
 
@@ -35,6 +37,26 @@ class VidLiiIE(InfoExtractor):
             'categories': ['News & Politics'],
             'tags': ['Vidlii', 'Jan', 'Videogames'],
         }
+    }, {
+        # HD
+        'url': 'https://www.vidlii.com/watch?v=2Ng8Abj2Fkl',
+        'md5': '450e7da379c884788c3a4fa02a3ce1a4',
+        'info_dict': {
+            'id': '2Ng8Abj2Fkl',
+            'ext': 'mp4',
+            'title': 'test',
+            'description': 'md5:cc55a86032a7b6b3cbfd0f6b155b52e9',
+            'thumbnail': 'https://www.vidlii.com/usfi/thmp/2Ng8Abj2Fkl.jpg',
+            'uploader': 'VidLii',
+            'uploader_url': 'https://www.vidlii.com/user/VidLii',
+            'upload_date': '20200927',
+            'duration': 5,
+            'view_count': int,
+            'comment_count': int,
+            'average_rating': float,
+            'categories': ['Film & Animation'],
+            'tags': list,
+        },
     }, {
         'url': 'https://www.vidlii.com/embed?v=tJluaH4BJ3v&a=0',
         'only_matching': True,
@@ -46,11 +68,32 @@ class VidLiiIE(InfoExtractor):
         webpage = self._download_webpage(
             'https://www.vidlii.com/watch?v=%s' % video_id, video_id)
 
-        video_url = self._search_regex(
-            r'src\s*:\s*(["\'])(?P<url>(?:https?://)?(?:(?!\1).)+)\1', webpage,
-            'video url', group='url')
+        formats = []
+
+        def add_format(format_url, height=None):
+            height = int(self._search_regex(r'(\d+)\.mp4',
+                                            format_url, 'height', default=360))
+
+            formats.append({
+                'url': format_url,
+                'format_id': '%dp' % height if height else None,
+                'height': height,
+            })
+
+        sources = re.findall(
+            r'src\s*:\s*(["\'])(?P<url>(?:https?://)?(?:(?!\1).)+)\1',
+            webpage)
+
+        formats = []
+        if len(sources) > 1:
+            add_format(sources[1][1])
+            self._check_formats(formats, video_id)
+        if len(sources) > 0:
+            add_format(sources[0][1])
+
+        self._sort_formats(formats)
 
-        title = self._search_regex(
+        title = self._html_search_regex(
             (r'<h1>([^<]+)</h1>', r'<title>([^<]+) - VidLii<'), webpage,
             'title')
 
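
A rough sketch of the format collection above: the height is read from an "<n>.mp4" suffix on each source URL, falling back to 360, and the HD source (the second one on the page), when present, is listed alongside the default one. The URLs here are invented.

    import re


    def parse_height(format_url, default=360):
        m = re.search(r'(\d+)\.mp4', format_url)
        return int(m.group(1)) if m else default


    formats = []
    for src in ('https://cdn.example.com/2Ng8Abj2Fkl.mp4',        # SD, no height suffix
                'https://cdn.example.com/2Ng8Abj2Fkl.720.mp4'):   # HD
        height = parse_height(src)
        formats.append({'url': src, 'format_id': '%dp' % height, 'height': height})

    print([f['format_id'] for f in formats])  # ['360p', '720p']
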
@@ -82,9 +125,9 @@ class VidLiiIE(InfoExtractor):
             default=None) or self._search_regex(
             r'duration\s*:\s*(\d+)', webpage, 'duration', fatal=False))
 
-        view_count = int_or_none(self._search_regex(
-            (r'<strong>(\d+)</strong> views',
-             r'Views\s*:\s*<strong>(\d+)</strong>'),
+        view_count = str_to_int(self._html_search_regex(
+            (r'<strong>([\d,.]+)</strong> views',
+             r'Views\s*:\s*<strong>([\d,.]+)</strong>'),
             webpage, 'view count', fatal=False))
 
         comment_count = int_or_none(self._search_regex(
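
Why int_or_none gives way to str_to_int above: the rendered view count may contain thousands separators that int() rejects. A minimal stand-in for illustration (youtube_dl's own str_to_int is more general):

    import re


    def str_to_int(int_str):
        # Strip common thousands separators before converting
        if int_str is None:
            return None
        return int(re.sub(r'[,.+]', '', int_str))


    print(str_to_int('12,345'))  # 12345
    print(str_to_int('678'))     # 678
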
@@ -109,7 +152,7 @@ class VidLiiIE(InfoExtractor):
 
         return {
             'id': video_id,
-            'url': video_url,
+            'formats': formats,
             'title': title,
             'description': description,
             'thumbnail': thumbnail,