mirror of
https://github.com/guoyk93/minit.git
synced 2025-12-24 12:37:54 +08:00
complete minit
This commit is contained in:
65
.github/workflows/release.yml
vendored
Normal file
65
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
name: release
|
||||
|
||||
on:
|
||||
workflow_dispatch: { }
|
||||
push:
|
||||
branches: [ "main" ]
|
||||
tags: [ "v*.*.*" ]
|
||||
|
||||
env:
|
||||
DOCKERHUB_USERNAME: guoyk
|
||||
DOCKERHUB_IMAGE_NAME: guoyk/minit
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
|
||||
steps:
|
||||
- name: checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: docker-setup
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: docker-login-ghcr
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: docker-login-dockerhub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ env.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: docker-meta
|
||||
id: meta
|
||||
uses: docker/metadata-action@v4
|
||||
with:
|
||||
images: |-
|
||||
ghcr.io/${{ github.repository }}
|
||||
${{env.DOCKERHUB_IMAGE_NAME}}
|
||||
tags: |-
|
||||
type=raw,value=latest,enable={{is_default_branch}}
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
type=semver,pattern={{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }}
|
||||
|
||||
- name: docker-build-and-push
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
context: .
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
pull: true
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
28
.gitignore
vendored
Normal file
28
.gitignore
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
# Dependency directories (remove the comment below to include it)
|
||||
# vendor/
|
||||
|
||||
# GoLand
|
||||
.idea/
|
||||
|
||||
# Binary
|
||||
/minit
|
||||
|
||||
# Log
|
||||
*.log
|
||||
|
||||
# macOS
|
||||
.DS_Store
|
||||
._*
|
||||
10
Dockerfile
Normal file
10
Dockerfile
Normal file
@@ -0,0 +1,10 @@
|
||||
FROM golang:1.19 AS builder
|
||||
ENV CGO_ENABLED 0
|
||||
ARG VERSION
|
||||
WORKDIR /go/src/app
|
||||
ADD . .
|
||||
RUN go build -mod vendor -ldflags="-X main.GitHash=$(git rev-parse --short HEAD)" -o /minit
|
||||
|
||||
FROM busybox
|
||||
COPY --from=builder /minit /minit
|
||||
ENTRYPOINT ["/minit"]
|
||||
21
LICENSE
Normal file
21
LICENSE
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2022 GUO YANKE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
391
README.md
Normal file
391
README.md
Normal file
@@ -0,0 +1,391 @@
|
||||
# minit
|
||||
|
||||

|
||||
[](https://github.com/guoyk93/minit/actions/workflows/release.yml)
|
||||
[](https://hub.docker.com/r/guoyk/minit)
|
||||
[](https://www.patreon.com/guoyk)
|
||||
[](https://github.com/sponsors/guoyk93)
|
||||
|
||||
The missing `init` daemon for container
|
||||
|
||||
[简体中文](README.zh.md)
|
||||
|
||||
## 1. Installation
|
||||
|
||||
You can install `minit` to your own container image by a multi-stage `Dockerfile`
|
||||
|
||||
```dockerfile
|
||||
FROM guoyk/minit:VERSION AS minit
|
||||
# Or using Github Packages
|
||||
# FROM ghcr.io/guoyk93/minit:VERSION AS minit
|
||||
|
||||
# Your own build stage
|
||||
FROM ubuntu:22.04
|
||||
|
||||
# ...
|
||||
|
||||
# Copy minit binary
|
||||
COPY --from=minit /minit /minit
|
||||
|
||||
# Set ENTRYPOINT to minit
|
||||
ENTRYPOINT ["/minit"]
|
||||
|
||||
# Add a unit file to /etc/minit.d
|
||||
ADD my-service.yml /etc/minit.d/my-service.yml
|
||||
```
|
||||
|
||||
## 2. Unit Loading
|
||||
|
||||
### 2.1 From Files
|
||||
|
||||
Add Unit `YAML` files to `/etc/minit.d`
|
||||
|
||||
Override default directory by environment variable `MINIT_UNIT_DIR`
|
||||
|
||||
Use `---` to separate multiple units in single `YAML` file
|
||||
|
||||
### 2.2 From Environment Variable
|
||||
|
||||
**Example:**
|
||||
|
||||
```dockerfile
|
||||
ENV MINIT_MAIN="redis-server /etc/redis.conf"
|
||||
ENV MINIT_MAIN_DIR="/work"
|
||||
ENV MINIT_MAIN_NAME="main-program"
|
||||
ENV MINIT_MAIN_GROUP="super-main"
|
||||
ENV MINIT_MAIN_KIND="cron"
|
||||
ENV MINIT_MAIN_IMMEDIATE=true
|
||||
ENV MINIT_MAIN_CRON="* * * * *"
|
||||
ENV MINIT_MAIN_CHARSET=gbk18030
|
||||
```
|
||||
|
||||
### 2.3 From Command Arguments
|
||||
|
||||
**Example:**
|
||||
|
||||
```dockerfile
|
||||
ENTRYPOINT ["/minit"]
|
||||
CMD ["redis-server", "/etc/redis.conf"]
|
||||
```
|
||||
|
||||
|
||||
## 3. Unit Types
|
||||
|
||||
### 3.1 Type: `render`
|
||||
|
||||
`render` units execute at the very first stage. It renders template files.
|
||||
|
||||
See [pkg/mtmpl/funcs.go](pkg/mtmpl/funcs.go) for available functions.
|
||||
|
||||
**Example:**
|
||||
|
||||
* `/etc/minit.d/render-demo.yaml`
|
||||
|
||||
```yaml
|
||||
kind: render
|
||||
name: render-demo
|
||||
files:
|
||||
- /opt/*.txt
|
||||
```
|
||||
|
||||
* `/opt/demo.txt`
|
||||
|
||||
```text
|
||||
Hello, {{stringsToUpper .Env.HOME}}
|
||||
```
|
||||
|
||||
Upon startup, `minit` will render file `/opt/demo.txt`
|
||||
|
||||
Since default user for container is `root`, the content of file `/opt/demo.txt` will become:
|
||||
|
||||
```text
|
||||
Hello, ROOT
|
||||
```
|
||||
|
||||
### 3.2 Type: `once`
|
||||
|
||||
`once` units execute after `render` units. It runs command once.
|
||||
|
||||
**Example:**
|
||||
|
||||
```yaml
|
||||
kind: once
|
||||
name: once-demo
|
||||
command:
|
||||
- echo
|
||||
- once
|
||||
```
|
||||
|
||||
### 3.3 Type: `daemon`
|
||||
|
||||
`daemon` units execute after `render` and `once`. It runs long-running command.
|
||||
|
||||
**Example:**
|
||||
|
||||
```yaml
|
||||
kind: daemon
|
||||
name: daemon-demo
|
||||
command:
|
||||
- sleep
|
||||
- 9999
|
||||
```
|
||||
|
||||
### 3.4 Type: `cron`
|
||||
|
||||
`cron` units execute after `render` and `once`. It runs command at cron basis.
|
||||
|
||||
**Example:**
|
||||
|
||||
```yaml
|
||||
kind: cron
|
||||
name: cron-demo
|
||||
cron: "* * * * *" # cron expression, support extended syntax by https://github.com/robfig/cron
|
||||
immediate: true # execute once on started
|
||||
command:
|
||||
- echo
|
||||
- cron
|
||||
```
|
||||
|
||||
## 4. Unit Features
|
||||
|
||||
### 4.1 Replicas
|
||||
|
||||
If `count` field is set, `minit` will replicate this unit with sequence number suffixed
|
||||
|
||||
**Example:**
|
||||
|
||||
```yaml
|
||||
kind: once
|
||||
name: once-demo-replicas
|
||||
count: 2
|
||||
command:
|
||||
- echo
|
||||
- $MINIT_UNIT_SUB_ID
|
||||
```
|
||||
|
||||
Is equal to:
|
||||
|
||||
```yaml
|
||||
kind: once
|
||||
name: once-demo-replicas-1
|
||||
command:
|
||||
- echo
|
||||
- 1
|
||||
---
|
||||
kind: once
|
||||
name: once-demo-replicas-2
|
||||
command:
|
||||
- echo
|
||||
- 2
|
||||
```
|
||||
|
||||
### 4.2 Logging
|
||||
|
||||
**Log Files**
|
||||
|
||||
`minit` write console logs of every command unit into `/var/log/minit`
|
||||
|
||||
This directory can be overridden by environment `MINIT_LOG_DIR`
|
||||
|
||||
Set `MINIT_LOG_DIR=none` to disable file logging and optimize performance of `minit`
|
||||
|
||||
**Console Encoding**
|
||||
|
||||
If `charset` field is set, `minit` will transcode command console output from other encodings to `utf8`
|
||||
|
||||
**Example:**
|
||||
|
||||
```yaml
|
||||
kind: once
|
||||
name: once-demo-transcode
|
||||
charset: gbk # supports gbk, gb18030 only
|
||||
command:
|
||||
- command-that-produces-gbk-logs
|
||||
```
|
||||
|
||||
### 4.3 Extra Environment Variables
|
||||
|
||||
If `env` field is set, `minit` will append extra environment variables while launching command.
|
||||
|
||||
**Example:**
|
||||
|
||||
```yaml
|
||||
kind: daemon
|
||||
name: daemon-demo-env
|
||||
env:
|
||||
AAA: BBB
|
||||
command:
|
||||
- echo
|
||||
- $AAA
|
||||
```
|
||||
|
||||
### 4.4 Render Environment Variables
|
||||
|
||||
Any environment with prefix `MINIT_ENV_` will be rendered before passing to command.
|
||||
|
||||
**Example:**
|
||||
|
||||
```yaml
|
||||
kind: daemon
|
||||
name: daemon-demo-render-env
|
||||
env:
|
||||
MINIT_ENV_MY_IP: '{{netResolveIP "google.com"}}'
|
||||
command:
|
||||
- echo
|
||||
- $MY_IP
|
||||
```
|
||||
|
||||
### 4.5 Using `shell` in command units
|
||||
|
||||
By default, `command` field will be passed to `exec` syscall, `minit` won't modify it, except simple environment variable substitution.
|
||||
|
||||
If `shell` field is set, `command` field will act as a simple script file.
|
||||
|
||||
**Example:**
|
||||
|
||||
```yaml
|
||||
kind: once
|
||||
name: once-demo-shell
|
||||
shell: "/bin/bash -eu"
|
||||
command: # this is merely a script file
|
||||
- if [ -n "${HELLO}" ]; then
|
||||
- echo "world"
|
||||
- fi
|
||||
```
|
||||
|
||||
### 4.6 Unit Enabling / Disabling
|
||||
|
||||
**Grouping**
|
||||
|
||||
Use `group` field to set a group name to units.
|
||||
|
||||
Default unit group name is `default`
|
||||
|
||||
**Allowlist Mode**
|
||||
|
||||
If environment `MINIT_ENABLE` is set, `minit` will run in **Allowlist Mode**, only units with name existed
|
||||
in `MINIT_ENABLE` will be loaded.
|
||||
|
||||
Use format `@group-name` to enable a group of units
|
||||
|
||||
Use format `&daemon` to enable a kind of units
|
||||
|
||||
Example:
|
||||
|
||||
```text
|
||||
MINIT_ENABLE=once-demo,@demo
|
||||
```
|
||||
|
||||
**Denylist Mode**
|
||||
|
||||
If environment `MINIT_DISABLE` is set, `minit` will run in **Denylist Mode**, units with name existed in `MINIT_DISABLE`
|
||||
will NOT be loaded.
|
||||
|
||||
Use format `@group-name` to disable a group of units
|
||||
|
||||
Example:
|
||||
|
||||
```text
|
||||
MINIT_DISABLE=once-demo,@demo
|
||||
```
|
||||
|
||||
## 5. Extra Features
|
||||
|
||||
### 5.1 Zombie Processes Cleaning
|
||||
|
||||
When running as `PID 1`, `minit` will do zombie process cleaning
|
||||
|
||||
This is the responsibility of `PID 1`
|
||||
|
||||
### 5.2 Quick Exit
|
||||
|
||||
By default, `minit` will keep running even without `daemon` or `cron` units defined.
|
||||
|
||||
If you want to use `minit` in `initContainers` or outside of container, you can set environment
|
||||
variable `MINIT_QUICK_EXIT=true` to let `minit` exit as soon as possible
|
||||
|
||||
### 5.3 Resource limits (ulimit)
|
||||
|
||||
**Warning: this feature need container running at Privileged mode**
|
||||
|
||||
Use environment variable `MINIT_RLIMIT_XXX` to set resource limits
|
||||
|
||||
* `unlimited` means no limitation
|
||||
* `-` means unchanged
|
||||
|
||||
**Supported:**
|
||||
|
||||
```text
|
||||
MINIT_RLIMIT_AS
|
||||
MINIT_RLIMIT_CORE
|
||||
MINIT_RLIMIT_CPU
|
||||
MINIT_RLIMIT_DATA
|
||||
MINIT_RLIMIT_FSIZE
|
||||
MINIT_RLIMIT_LOCKS
|
||||
MINIT_RLIMIT_MEMLOCK
|
||||
MINIT_RLIMIT_MSGQUEUE
|
||||
MINIT_RLIMIT_NICE
|
||||
MINIT_RLIMIT_NOFILE
|
||||
MINIT_RLIMIT_NPROC
|
||||
MINIT_RLIMIT_RTPRIO
|
||||
MINIT_RLIMIT_SIGPENDING
|
||||
MINIT_RLIMIT_STACK
|
||||
```
|
||||
|
||||
**Example:**
|
||||
|
||||
```text
|
||||
MINIT_RLIMIT_NOFILE=unlimited # set soft limit and hard limit to 'unlimited'
|
||||
MINIT_RLIMIT_NOFILE=128:unlimited # set soft limit to 128,set hard limit to 'unlimited'
|
||||
MINIT_RLIMIT_NOFILE=128:- # set soft limit to 128,dont change hard limit
|
||||
MINIT_RLIMIT_NOFILE=-:unlimited # don't change soft limit,set hard limit to 'unlimited'
|
||||
```
|
||||
|
||||
### 5.4 Kernel Parameters (sysctl)
|
||||
|
||||
**Warning: this feature need container running at Privileged mode**
|
||||
|
||||
Use environment variable `MINIT_SYSCTL` to set kernel parameters
|
||||
|
||||
Separate multiple entries with `,`
|
||||
|
||||
**Example:**
|
||||
|
||||
```
|
||||
MINIT_SYSCTL=vm.max_map_count=262144,vm.swappiness=60
|
||||
```
|
||||
|
||||
### 5.5 Transparent Huge Page (THP)
|
||||
|
||||
**Warning: this feature need container running at Privileged mode and host `/sys` mounted**
|
||||
|
||||
Use environment variable `MINIT_THP` to set THP configuration.
|
||||
|
||||
**Example:**
|
||||
|
||||
```
|
||||
# available values: never, madvise, always
|
||||
MINIT_THP=madvise
|
||||
```
|
||||
|
||||
### 5.6 Built-in WebDAV server
|
||||
|
||||
By setting environment variable `MINIT_WEBDAV_ROOT`, `minit` will start a built-in WebDAV server at port `7486`
|
||||
|
||||
Environment Variables:
|
||||
|
||||
* `MINIT_WEBDAV_ROOT`, path to serve, `/srv` for example
|
||||
* `MINIT_WEBDAV_PORT`, port of WebDAV server, default to `7486`
|
||||
* `MINIT_WEBDAV_USERNAME` and `MINIT_WEBDAV_PASSWORD`, optional basic auth for WebDAV server
|
||||
|
||||
### 5.7 Banner file
|
||||
|
||||
By putting a file at `/etc/banner.minit.txt`, `minit` will print it's content at startup
|
||||
|
||||
## 6. Donation
|
||||
|
||||
View https://guoyk.xyz/donation
|
||||
|
||||
## 7. Credits
|
||||
|
||||
GUO YANKE, MIT License
|
||||
298
README.zh.md
Normal file
298
README.zh.md
Normal file
@@ -0,0 +1,298 @@
|
||||
# minit
|
||||
|
||||
一个用 Go 编写的进程管理工具,用以在容器内启动多个进程
|
||||
|
||||
## 获取镜像
|
||||
|
||||
```
|
||||
guoyk/minit:VERSION
|
||||
```
|
||||
|
||||
## 使用方法
|
||||
|
||||
使用多阶段 Dockerfile 来从上述镜像地址导入 `minit` 可执行程序
|
||||
|
||||
```dockerfile
|
||||
FROM guoyk/minit AS minit
|
||||
|
||||
FROM xxxxxxx
|
||||
|
||||
# 添加一份服务配置到 /etc/minit.d/
|
||||
ADD my-service.yml /etc/minit.d/my-service.yml
|
||||
# 这将从 minit 镜像中,将可执行文件 /minit 拷贝到最终镜像的 /minit 位置
|
||||
COPY --from=minit /minit /minit
|
||||
# 这将指定 /minit 作为主启动入口,允许后续的 CMD 传入
|
||||
ENTRYPOINT ["/minit"]
|
||||
```
|
||||
|
||||
## 配置文件
|
||||
|
||||
配置文件默认从 `/etc/minit.d/*.yml` 读取
|
||||
|
||||
允许使用 `---` 分割在单个 `yaml` 文件中,写入多条配置单元
|
||||
|
||||
当前支持以下类型
|
||||
|
||||
* `render`
|
||||
|
||||
`render` 类型配置单元最先运行(优先级 L1),一般用于渲染配置文件,可使用函数参考 [pkg/mtmpl/funcs.go] 文件
|
||||
|
||||
如下示例
|
||||
|
||||
`/etc/minit.d/render-test.yml`
|
||||
|
||||
```yaml
|
||||
kind: render
|
||||
name: render-test
|
||||
files:
|
||||
- /tmp/*.txt
|
||||
```
|
||||
|
||||
`/tmp/sample.txt`
|
||||
|
||||
```text
|
||||
Hello, {{stringsToUpper .Env.HOME}}
|
||||
```
|
||||
|
||||
`minit` 启动时,会按照配置规则,渲染 `/tmp/sample.txt` 文件
|
||||
|
||||
由于容器用户默认为 `root`,因此 `/tmp/sample.txt` 文件会被渲染为
|
||||
|
||||
```text
|
||||
Hello, /ROOT
|
||||
```
|
||||
|
||||
可用渲染函数,参见代码中的 `pkg/tmplfuncs/tmplfuncs.go`
|
||||
|
||||
* `once`
|
||||
|
||||
`once` 类型的配置单元随后运行(优先级 L2),用于执行一次性进程
|
||||
|
||||
`/etc/minit.d/sample.yml`
|
||||
|
||||
```yaml
|
||||
kind: once
|
||||
name: once-sample
|
||||
dir: /work # 指定工作目录
|
||||
command:
|
||||
- echo
|
||||
- once
|
||||
```
|
||||
|
||||
* `daemon`
|
||||
|
||||
`daemon` 类型的配置单元,最后启动(优先级 L3),用于执行常驻进程
|
||||
|
||||
```yaml
|
||||
kind: daemon
|
||||
name: daemon-sample
|
||||
dir: /work # 指定工作目录
|
||||
count: 3 # 如果指定了 count,会启动多个副本
|
||||
command:
|
||||
- sleep
|
||||
- 9999
|
||||
```
|
||||
|
||||
* `cron`
|
||||
|
||||
`cron` 类型的配置单元,最后启动(优先级 L3),用于按照 cron 表达式,执行命令
|
||||
|
||||
```yaml
|
||||
kind: cron
|
||||
name: cron-sample
|
||||
cron: "* * * * *"
|
||||
immediate: true # 启动后立即执行一次
|
||||
dir: /work # 指定工作目录
|
||||
command:
|
||||
- echo
|
||||
- cron
|
||||
```
|
||||
|
||||
## 日志文件
|
||||
|
||||
`minit` 会把每个单元的日志记录在 `/var/log/minit` 文件夹内,使用环境变量 `MINIT_LOG_DIR` 来修改这个目录
|
||||
|
||||
设置 `MINIT_LOG_DIR=none` 禁用日志文件功能,同时缩减内存使用量,优化标准输出性能
|
||||
|
||||
## 日志字符集转换
|
||||
|
||||
上述所有配置单元,均可以追加 `charset` 字段,会将命令输出的日志,从其他字符集转义到 `utf-8`
|
||||
|
||||
当前支持
|
||||
|
||||
* `gbk18030`
|
||||
* `gbk`
|
||||
|
||||
## 增加环境变量
|
||||
|
||||
在 `once`, `daemon` 和 `cron` 类型的单元中,可以使用 `env` 字段增加额外的环境变量
|
||||
|
||||
比如
|
||||
|
||||
```yaml
|
||||
kind: daemon
|
||||
name: demo-daemon-1
|
||||
env:
|
||||
AAA: BBB
|
||||
command:
|
||||
- echo
|
||||
- $AAA
|
||||
```
|
||||
|
||||
## 渲染环境变量
|
||||
|
||||
凡是以 `MINIT_ENV_` 为前缀开头的环境变量,会执行模板渲染,并传递给进程,可使用函数参考 [pkg/mtmpl/funcs.go] 文件。
|
||||
|
||||
比如:
|
||||
|
||||
```
|
||||
MINIT_ENV_MY_IP={{netResolveIP "google.com"}}
|
||||
```
|
||||
|
||||
会设置对应的环境变量
|
||||
|
||||
```
|
||||
MY_IP=172.217.160.110
|
||||
```
|
||||
|
||||
## 使用 `Shell`
|
||||
|
||||
上述配置单元的 `command` 数组默认状态下等价于 `argv` 系统调用,如果想要使用基于 `Shell` 的多行命令,使用以下方式
|
||||
|
||||
```yaml
|
||||
name: demo-for-shell
|
||||
kind: once
|
||||
# 追加要使用的 shell
|
||||
shell: "/bin/bash -eu"
|
||||
command:
|
||||
- if [ -n "${HELLO}" ]; then
|
||||
- echo "world"
|
||||
- fi
|
||||
```
|
||||
|
||||
支持所有带 `command` 参数的工作单元类型,比如 `once`, `daemon`, `cron`
|
||||
|
||||
## 快速创建单元
|
||||
|
||||
如果懒得写 `YAML` 文件,可以直接用环境变量,或者 `CMD` 来创建 `daemon` 类型的配置单元
|
||||
|
||||
**使用环境变量创建单元**
|
||||
|
||||
```
|
||||
MINIT_MAIN=redis-server /etc/redis.conf
|
||||
MINIT_MAIN_DIR=/work
|
||||
MINIT_MAIN_NAME=main-program
|
||||
MINIT_MAIN_GROUP=super-main
|
||||
MINIT_MAIN_KIND=cron
|
||||
MINIT_MAIN_CRON="* * * * *"
|
||||
MINIT_MAIN_IMMEDIATE=true
|
||||
MINIT_MAIN_CHARSET=gbk18030
|
||||
```
|
||||
|
||||
**使用命令行参数创建单元**
|
||||
|
||||
```
|
||||
ENTRYPOINT ["/minit"]
|
||||
CMD ["redis-server", "/etc/redis.conf"]
|
||||
```
|
||||
|
||||
## 打开/关闭单元
|
||||
|
||||
可以通过环境变量,打开/关闭特定的单元
|
||||
|
||||
* `MINIT_ENABLE`, 逗号分隔, 如果值存在,则为 `白名单模式`,只有指定名称的单元会执行
|
||||
* `MINIT_DISABLE`, 逗号分隔, 如果值存在,则为 `黑名单模式`,除了指定名称外的单元会执行
|
||||
|
||||
可以为配置单元设置字段 `group`,然后在上述环境变量使用 `@group` ,设置一组单元的开启和关闭。
|
||||
|
||||
使用 `&daemon` 这样的格式,控制一个类型的控制单元的开启和关闭
|
||||
|
||||
没有设置 `group` 字段的单元,默认组名为 `default`
|
||||
|
||||
## 快速退出
|
||||
|
||||
默认情况下,即便是没有 L3 类型任务 (`daemon`, `cron`, `logrotate` 等),`minit` 也会持续运行,以支撑起容器主进程。
|
||||
|
||||
如果要在 `initContainers` 中,或者容器外使用 `minit`,可以将环境变量 `MINIT_QUICK_EXIT` 设置为 `true`
|
||||
|
||||
此时,如果没有 L3 类型任务,`minit` 会自动退出
|
||||
|
||||
## 资源限制 (ulimit)
|
||||
|
||||
**注意,使用此功能可能需要容器运行在高权限 (Privileged) 模式**
|
||||
|
||||
使用环境变量 `MINIT_RLIMIT_XXXX` 来设置容器的资源限制,`unlimited` 代表无限制, `-` 表示不修改
|
||||
|
||||
比如:
|
||||
|
||||
```
|
||||
MINIT_RLIMIT_NOFILE=unlimited # 同时设置软硬限制为 unlimited
|
||||
MINIT_RLIMIT_NOFILE=128:unlimited # 设置软限制为 128,设置硬限制为 unlimited
|
||||
MINIT_RLIMIT_NOFILE=128:- # 设置软限制为 128,硬限制不变
|
||||
MINIT_RLIMIT_NOFILE=-:unlimited # 软限制不变,硬限制修改为 unlimited
|
||||
```
|
||||
|
||||
可用的环境变量有:
|
||||
|
||||
```
|
||||
MINIT_RLIMIT_AS
|
||||
MINIT_RLIMIT_CORE
|
||||
MINIT_RLIMIT_CPU
|
||||
MINIT_RLIMIT_DATA
|
||||
MINIT_RLIMIT_FSIZE
|
||||
MINIT_RLIMIT_LOCKS
|
||||
MINIT_RLIMIT_MEMLOCK
|
||||
MINIT_RLIMIT_MSGQUEUE
|
||||
MINIT_RLIMIT_NICE
|
||||
MINIT_RLIMIT_NOFILE
|
||||
MINIT_RLIMIT_NPROC
|
||||
MINIT_RLIMIT_RTPRIO
|
||||
MINIT_RLIMIT_SIGPENDING
|
||||
MINIT_RLIMIT_STACK
|
||||
```
|
||||
|
||||
## 内核参数 (sysctl)
|
||||
|
||||
**注意,使用此功能可能需要容器运行在高权限 (Privileged) 模式**
|
||||
|
||||
使用环境变量 `MINIT_SYSCTL` 来写入 `sysctl` 配置项,`minit` 会自动写入 `/proc/sys` 目录下对应的参数
|
||||
|
||||
使用 `,` 分隔多个值
|
||||
|
||||
比如:
|
||||
|
||||
```
|
||||
MINIT_SYSCTL=vm.max_map_count=262144,vm.swappiness=60
|
||||
```
|
||||
|
||||
## 透明大页 (THP)
|
||||
|
||||
**注意,使用此功能可能需要容器运行在高权限 (Privileged) 模式,并且需要挂载 /sys 目录**
|
||||
|
||||
使用环境变量 `MINIT_THP` 修改 透明大页配置,可选值为 `never`, `madvise` 和 `always`
|
||||
|
||||
## WebDAV 服务
|
||||
|
||||
我懂你的痛,当你在容器里面生成了一份调试信息,比如 `Arthas` 或者 `Go pprof` 的火焰图,然后你开始绞尽脑汁想办法把这个文件传输出来
|
||||
|
||||
现在,不再需要这份痛苦了,`minit` 内置 `WebDAV` 服务,你可以像暴露一个标准服务一样暴露出来,省去了调度主机+映射主机目录等一堆烦心事
|
||||
|
||||
环境变量:
|
||||
|
||||
* `MINIT_WEBDAV_ROOT` 指定要暴露的路径并启动 WebDAV 服务,比如 `/srv`
|
||||
* `MINIT_WEBDAV_PORT` 指定 `WebDAV` 服务的端口,默认为 `7486`
|
||||
* `MINIT_WEBDAV_USERNAME` 和 `MINIT_WEBDAV_PASSWORD` 指定 `WebDAV` 服务的用户密码,默认不设置用户密码
|
||||
|
||||
可以使用 Cyberduck 来连接 WebDAV 服务器 https://cyberduck.io/
|
||||
|
||||
## 展示自述文件
|
||||
|
||||
如果把一个文件放在 `/etc/banner.minit.txt` ,则 `minit` 在启动时会打印其内容
|
||||
|
||||
## 赞助
|
||||
|
||||
访问 <https://guoyk.net/donation>
|
||||
|
||||
## 许可证
|
||||
|
||||
GUO YANKE, MIT License
|
||||
18
go.mod
Normal file
18
go.mod
Normal file
@@ -0,0 +1,18 @@
|
||||
module github.com/guoyk93/minit
|
||||
|
||||
go 1.19
|
||||
|
||||
require (
|
||||
github.com/guoyk93/rg v1.0.1
|
||||
github.com/robfig/cron/v3 v3.0.1
|
||||
github.com/stretchr/testify v1.8.1
|
||||
golang.org/x/net v0.7.0
|
||||
golang.org/x/sys v0.5.0
|
||||
golang.org/x/text v0.7.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
)
|
||||
27
go.sum
Normal file
27
go.sum
Normal file
@@ -0,0 +1,27 @@
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/guoyk93/rg v1.0.1 h1:Rnca+1JYfuGqPRMIQkuxAoZhhmrPpMFyS5XwLz0U0ds=
|
||||
github.com/guoyk93/rg v1.0.1/go.mod h1:tLaoLk8bo/PQld1xGvJvAfCl3K0Nckzh0gsnykFoQYg=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
205
main.go
Normal file
205
main.go
Normal file
@@ -0,0 +1,205 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/guoyk93/minit/pkg/mexec"
|
||||
"github.com/guoyk93/minit/pkg/mlog"
|
||||
"github.com/guoyk93/minit/pkg/mrunners"
|
||||
"github.com/guoyk93/minit/pkg/msetups"
|
||||
"github.com/guoyk93/minit/pkg/munit"
|
||||
"github.com/guoyk93/rg"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
	// GitHash is the short git revision of the build; overridden at
	// build time via -ldflags "-X main.GitHash=..." (see Dockerfile).
	GitHash = "UNKNOWN"
)

const (
	// dirNone is the sentinel directory value that disables directory
	// creation and file logging (e.g. MINIT_LOG_DIR=none).
	dirNone = "none"
)
|
||||
|
||||
func mkdirUnlessNone(dir string) error {
|
||||
if dir == dirNone {
|
||||
return nil
|
||||
}
|
||||
return os.MkdirAll(dir, 0755)
|
||||
}
|
||||
|
||||
// exit reports minit's final status: on success it prints a farewell to
// stdout; on error it prints the message to stderr and terminates the
// process with exit code 1. Meant to be deferred with a pointer to the
// enclosing function's error variable.
func exit(err *error) {
	if *err == nil {
		_, _ = fmt.Fprintf(os.Stdout, "%s: exited\n", "minit")
		return
	}
	_, _ = fmt.Fprintf(os.Stderr, "%s: exited with error: %s\n", "minit", (*err).Error())
	os.Exit(1)
}
|
||||
|
||||
// envStr overwrites *out with the value of environment variable key,
// but only when the whitespace-trimmed value is non-empty; otherwise
// *out keeps its current (default) value.
func envStr(key string, out *string) {
	val := strings.TrimSpace(os.Getenv(key))
	if val == "" {
		return
	}
	*out = val
}
|
||||
|
||||
// envBool overwrites *out with the parsed boolean value of environment
// variable key when the trimmed value is non-empty. A value that fails
// to parse sets *out to false (the parse error is deliberately ignored).
func envBool(key string, out *bool) {
	val := strings.TrimSpace(os.Getenv(key))
	if val == "" {
		return
	}
	*out, _ = strconv.ParseBool(val)
}
|
||||
|
||||
// main wires the whole minit lifecycle together:
//  1. read configuration from environment variables
//  2. optionally expose a pprof HTTP endpoint
//  3. run one-time setups, load units, and build runners
//  4. execute short (one-shot) runners, then start long runners
//  5. wait for SIGINT/SIGTERM, cancel, relay the signal, and wait
func main() {
	var err error
	// exit must be deferred before rg.Guard so it runs after the guard
	// has recovered any panic into err
	defer exit(&err)
	defer rg.Guard(&err)

	var (
		optPprofPort = ""

		optUnitDir   = "/etc/minit.d"
		optLogDir    = "/var/log/minit"
		optQuickExit bool
	)

	envStr("MINIT_PPROF_PORT", &optPprofPort)

	envStr("MINIT_UNIT_DIR", &optUnitDir)
	envStr("MINIT_LOG_DIR", &optLogDir)
	envBool("MINIT_QUICK_EXIT", &optQuickExit)

	// serve net/http/pprof (blank-imported above) when a port is configured;
	// serve errors are intentionally ignored — pprof is best-effort
	if optPprofPort != "" {
		go func() {
			_ = http.ListenAndServe(":"+optPprofPort, nil)
		}()
	}

	// both directories honor the "none" sentinel (creation skipped)
	rg.Must0(mkdirUnlessNone(optUnitDir))
	rg.Must0(mkdirUnlessNone(optLogDir))

	// createLogger builds a per-unit logger; file rotation is disabled
	// entirely when MINIT_LOG_DIR=none
	createLogger := func(name string, pfx string) (mlog.ProcLogger, error) {
		var rfo *mlog.RotatingFileOptions
		if optLogDir != dirNone {
			rfo = &mlog.RotatingFileOptions{
				Dir:      optLogDir,
				Filename: name,
			}
		}
		return mlog.NewProcLogger(mlog.ProcLoggerOptions{
			ConsolePrefix: pfx,
			FileOptions:   rfo,
		})
	}

	log := rg.Must(createLogger("minit", "minit: "))

	// process/exec manager; also used later to broadcast signals to children
	exem := mexec.NewManager()

	log.Print("starting (#" + GitHash + ")")

	// run through setups
	rg.Must0(msetups.Setup(log))

	// load units from CLI args, environment and the unit directory
	loader := munit.NewLoader()
	units, skips := rg.Must2(
		loader.Load(
			munit.LoadOptions{
				Args: os.Args[1:],
				Env:  true,
				Dir:  optUnitDir,
			},
		),
	)

	for _, skip := range skips {
		log.Print("unit skipped: " + skip.Name)
	}

	// load runners
	var (
		runnersS []mrunners.Runner // short (one-shot) runners
		runnersL []mrunners.Runner // long (daemon/cron-style) runners
	)

	{
		var runners []mrunners.Runner

		// convert units to runners
		for _, unit := range units {
			runners = append(
				runners,
				rg.Must(mrunners.Create(mrunners.RunnerOptions{
					Unit:   unit,
					Exec:   exem,
					Logger: rg.Must(createLogger(unit.Name, "")),
				})),
			)
		}

		// sort runners by their Order field so lower orders start first
		sort.Slice(runners, func(i, j int) bool {
			return runners[i].Order < runners[j].Order
		})

		// split short runners and long runners
		for _, runner := range runners {
			if runner.Long {
				runnersL = append(runnersL, runner)
			} else {
				runnersS = append(runnersS, runner)
			}
		}
	}

	// execute short runners sequentially, in order
	for _, runner := range runnersS {
		runner.Action.Do(context.Background())
	}

	// quick exit: nothing long-running to babysit and the user opted in
	if len(runnersL) == 0 && optQuickExit {
		log.Printf("no long runners and MINIT_QUICK_EXIT is set")
		return
	}

	// run long runners concurrently under a cancellable context
	ctx, cancel := context.WithCancel(context.Background())
	wg := &sync.WaitGroup{}

	for _, runner := range runnersL {
		wg.Add(1)
		// runner is passed as an argument to avoid loop-variable capture
		// (this file predates Go 1.22 per-iteration loop variables)
		go func(runner mrunners.Runner) {
			runner.Action.Do(ctx)
			wg.Done()
		}(runner)
	}

	log.Printf("started")

	// wait for signals
	chSig := make(chan os.Signal, 1)
	signal.Notify(chSig, syscall.SIGINT, syscall.SIGTERM)
	sig := <-chSig
	log.Printf("signal caught: %s", sig.String())

	// shutdown context
	cancel()

	// delay 3 seconds — presumably a grace period for runners to react to
	// cancellation before the signal is relayed; TODO confirm intent
	time.Sleep(time.Second * 3)

	// broadcast the caught signal to all managed child processes
	exem.Signal(sig)

	// wait for long runners
	wg.Wait()
}
|
||||
44
pkg/menv/construct.go
Normal file
44
pkg/menv/construct.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package menv
|
||||
|
||||
import (
|
||||
"github.com/guoyk93/minit/pkg/mtmpl"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
	// PrefixMinitEnv marks environment variables whose values are rendered
	// as templates before being passed on; the rendered result is stored
	// under the key with this prefix stripped.
	PrefixMinitEnv = "MINIT_ENV_"
)
|
||||
|
||||
// Construct create the env map with current system environ, extra and rendering MINIT_ENV_ prefixed keys
|
||||
func Construct(extra map[string]string) (envs map[string]string, err error) {
|
||||
envs = make(map[string]string)
|
||||
// system env
|
||||
for _, item := range os.Environ() {
|
||||
splits := strings.SplitN(item, "=", 2)
|
||||
var k, v string
|
||||
if len(splits) > 0 {
|
||||
k = splits[0]
|
||||
if len(splits) > 1 {
|
||||
v = splits[1]
|
||||
}
|
||||
envs[k] = v
|
||||
}
|
||||
}
|
||||
// merge extra env
|
||||
Merge(envs, extra)
|
||||
// render MINIT_ENV_XXX
|
||||
for k, v := range envs {
|
||||
if !strings.HasPrefix(k, PrefixMinitEnv) {
|
||||
continue
|
||||
}
|
||||
k = strings.TrimPrefix(k, PrefixMinitEnv)
|
||||
var buf []byte
|
||||
if buf, err = mtmpl.Execute(v, map[string]any{"Env": envs}); err != nil {
|
||||
return
|
||||
}
|
||||
envs[k] = string(buf)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
16
pkg/menv/construct_test.go
Normal file
16
pkg/menv/construct_test.go
Normal file
@@ -0,0 +1,16 @@
|
||||
package menv
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestBuild verifies Construct end to end: a key with the '-' suffix
// removes the corresponding variable (HOME), and a MINIT_ENV_-prefixed
// value is template-rendered and stored under the stripped key (BUF).
func TestBuild(t *testing.T) {
	envs, err := Construct(map[string]string{
		"HOME-":         "NONE",
		"MINIT_ENV_BUF": "{{stringsToUpper \"bbb\"}}",
	})
	require.NoError(t, err)
	// "HOME-" deletes HOME, so the lookup yields the zero value
	require.Equal(t, "", envs["HOME"])
	// rendered template result, keyed without the MINIT_ENV_ prefix
	require.Equal(t, "BBB", envs["BUF"])
}
|
||||
15
pkg/menv/merge.go
Normal file
15
pkg/menv/merge.go
Normal file
@@ -0,0 +1,15 @@
|
||||
package menv
|
||||
|
||||
import "strings"
|
||||
|
||||
// Merge merge two env map, if keys in src has a suffix '-', this will delete the key from dst.
// All other keys in src overwrite (or insert into) dst in place.
func Merge(dst map[string]string, src map[string]string) {
	for key, val := range src {
		if strings.HasSuffix(key, "-") {
			// "NAME-" is a delete marker: remove "NAME" from dst
			delete(dst, strings.TrimSuffix(key, "-"))
			continue
		}
		dst[key] = val
	}
}
|
||||
22
pkg/menv/merge_test.go
Normal file
22
pkg/menv/merge_test.go
Normal file
@@ -0,0 +1,22 @@
|
||||
package menv
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestMerge verifies Merge semantics: "a-" deletes key "a", "c" is
// overwritten and "h" is inserted.
func TestMerge(t *testing.T) {
	m := map[string]string{
		"a": "b",
		"c": "d",
	}
	m2 := map[string]string{
		"a-": "",
		"c":  "e",
		"h":  "j",
	}
	Merge(m, m2)
	require.Equal(t, 2, len(m))
	require.Equal(t, "e", m["c"])
	require.Equal(t, "j", m["h"])
}
|
||||
74
pkg/merrs/errors.go
Normal file
74
pkg/merrs/errors.go
Normal file
@@ -0,0 +1,74 @@
|
||||
package merrs
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Errors []error
|
||||
|
||||
func (errs Errors) Error() string {
|
||||
sb := &strings.Builder{}
|
||||
for i, err := range errs {
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
if sb.Len() > 0 {
|
||||
sb.WriteString("; ")
|
||||
}
|
||||
sb.WriteRune('#')
|
||||
sb.WriteString(strconv.Itoa(i))
|
||||
sb.WriteString(": ")
|
||||
sb.WriteString(err.Error())
|
||||
}
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// ErrorGroup collects errors, typically from concurrent workers.
type ErrorGroup interface {
	// Add appends an error (possibly nil) to the group.
	Add(err error)
	// Set stores an error at index i, growing the group as needed.
	Set(i int, err error)
	// Unwrap returns the collected errors as an error value, or nil when
	// no non-nil error was recorded.
	Unwrap() error
}

// errorGroup is the RWMutex-guarded implementation of ErrorGroup.
type errorGroup struct {
	errors Errors
	locker *sync.RWMutex
}

// NewErrorGroup creates an empty, ready-to-use ErrorGroup.
func NewErrorGroup() ErrorGroup {
	return &errorGroup{
		locker: &sync.RWMutex{},
	}
}
|
||||
|
||||
// Add appends err (possibly nil) to the group; positions stay stable so
// Errors.Error can report "#<index>: ...".
func (eg *errorGroup) Add(err error) {
	eg.locker.Lock()
	defer eg.locker.Unlock()

	eg.errors = append(eg.errors, err)
}

// Set stores err at index i; when i is beyond the current length the slice
// is padded with nil entries first.
func (eg *errorGroup) Set(i int, err error) {
	eg.locker.Lock()
	defer eg.locker.Unlock()

	if i >= len(eg.errors) {
		// grow with nil entries up to and including index i
		eg.errors = append(eg.errors, make([]error, i+1-len(eg.errors))...)
	}

	eg.errors[i] = err
}
|
||||
|
||||
func (eg *errorGroup) Unwrap() error {
|
||||
eg.locker.RLock()
|
||||
defer eg.locker.RUnlock()
|
||||
|
||||
for _, err := range eg.errors {
|
||||
if err != nil {
|
||||
return eg.errors
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
30
pkg/merrs/errors_test.go
Normal file
30
pkg/merrs/errors_test.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package merrs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/stretchr/testify/require"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestNewErrorGroup covers Add (nil entries skipped in the message), the
// all-nil case (Unwrap returns nil), and Set growing the group with nil
// padding up to the requested index.
func TestNewErrorGroup(t *testing.T) {
	eg := NewErrorGroup()
	eg.Add(errors.New("hello"))
	eg.Add(nil)
	eg.Add(errors.New("world"))
	require.Equal(t, "#0: hello; #2: world", eg.Unwrap().Error())

	eg = NewErrorGroup()
	eg.Add(nil)
	eg.Add(nil)
	require.NoError(t, eg.Unwrap())

	eg.Set(3, errors.New("BBB"))
	require.Error(t, eg.Unwrap())

	errs := eg.Unwrap().(Errors)
	require.Equal(t, 4, len(errs))
	require.NoError(t, errs[0])
	require.NoError(t, errs[1])
	require.NoError(t, errs[2])
	require.Error(t, errs[3])
}
|
||||
180
pkg/mexec/manager.go
Normal file
180
pkg/mexec/manager.go
Normal file
@@ -0,0 +1,180 @@
|
||||
package mexec
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/guoyk93/minit/pkg/menv"
|
||||
"github.com/guoyk93/minit/pkg/mlog"
|
||||
"github.com/guoyk93/minit/pkg/shellquote"
|
||||
"golang.org/x/text/encoding"
|
||||
"golang.org/x/text/encoding/simplifiedchinese"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// ExecuteOptions describes a single command execution.
type ExecuteOptions struct {
	// Name tags log lines, e.g. "minit: <Name>: process started".
	Name string

	// Dir is the working directory; when set it must exist and be a directory.
	Dir string
	// Shell, when non-empty, is the command line to launch; Command lines
	// are then fed to that process over stdin.
	Shell string
	// Env is merged over the inherited environment (see menv.Construct).
	Env map[string]string
	// Command is the argv to run, or the script lines when Shell is set.
	Command []string
	// Charset optionally decodes child output; "gb18030" and "gbk" are
	// registered by NewManager.
	Charset string

	// Logger receives the child's stdout/stderr and controller messages.
	Logger mlog.ProcLogger
	// IgnoreExecError suppresses non-zero-exit errors from Execute.
	IgnoreExecError bool
}

// Manager runs child processes and can broadcast signals to all of them.
type Manager interface {
	// Signal sends sig to every currently tracked child process.
	Signal(sig os.Signal)
	// Execute runs one command to completion according to opts.
	Execute(opts ExecuteOptions) (err error)
}

// manager tracks PIDs of running children under a lock so Signal and
// process startup cannot race.
type manager struct {
	childPIDs    map[int]struct{}
	childPIDLock sync.Locker
	charsets     map[string]encoding.Encoding
}

// NewManager creates a Manager with the built-in charset decoders registered.
func NewManager() Manager {
	return &manager{
		childPIDs:    map[int]struct{}{},
		childPIDLock: &sync.Mutex{},
		charsets: map[string]encoding.Encoding{
			"gb18030": simplifiedchinese.GB18030,
			"gbk":     simplifiedchinese.GBK,
		},
	}
}
|
||||
|
||||
// addChildPID runs fn (which starts a process and returns its PID) while
// holding the child-PID lock, and records the PID on success. Starting
// under the lock ensures Signal cannot miss a child that is just starting.
func (m *manager) addChildPID(fn func() (pid int, err error)) error {
	m.childPIDLock.Lock()
	defer m.childPIDLock.Unlock()
	pid, err := fn()
	if err == nil {
		m.childPIDs[pid] = struct{}{}
	}
	return err
}

// delChildPID removes a PID from the tracked set once the process exits.
func (m *manager) delChildPID(pid int) {
	m.childPIDLock.Lock()
	defer m.childPIDLock.Unlock()
	delete(m.childPIDs, pid)
}

// Signal sends sig to every tracked child; delivery is best-effort and
// errors are ignored (a child may already have exited).
func (m *manager) Signal(sig os.Signal) {
	m.childPIDLock.Lock()
	defer m.childPIDLock.Unlock()
	for pid := range m.childPIDs {
		if process, _ := os.FindProcess(pid); process != nil {
			_ = process.Signal(sig)
		}
	}
}
|
||||
|
||||
// Execute runs a single command (or shell snippet) to completion according
// to opts, streaming its stdout/stderr into opts.Logger.
//
// Flow:
//  1. validate opts.Dir (must exist and be a directory)
//  2. build the child environment via menv.Construct(opts.Env)
//  3. build argv: either the shell command line (opts.Shell) with
//     opts.Command fed over stdin, or opts.Command itself with $VAR
//     references expanded from the constructed environment
//  4. start the process under the child-PID lock (so Signal never misses a
//     freshly started child), stream output, wait for exit
//
// When opts.IgnoreExecError is set, a non-zero exit is logged but not
// returned as an error.
func (m *manager) Execute(opts ExecuteOptions) (err error) {
	var argv []string

	// check opts.Dir
	if opts.Dir != "" {
		var info os.FileInfo
		if info, err = os.Stat(opts.Dir); err != nil {
			err = errors.New("failed to stat opts.Dir: " + err.Error())
			return
		}
		if !info.IsDir() {
			err = errors.New("opts.Dir is not a directory: " + opts.Dir)
			return
		}
	}

	// build env
	var env map[string]string
	if env, err = menv.Construct(opts.Env); err != nil {
		err = errors.New("failed constructing environment variables: " + err.Error())
		return
	}

	// build argv: shell mode splits opts.Shell; direct mode expands $VAR in
	// each Command element from the constructed environment
	if opts.Shell != "" {
		if argv, err = shellquote.Split(opts.Shell); err != nil {
			err = errors.New("opts.Shell is invalid: " + err.Error())
			return
		}
	} else {
		for _, arg := range opts.Command {
			argv = append(argv, os.Expand(arg, func(s string) string {
				return env[s]
			}))
		}
	}

	// build exec.Cmd
	var outPipe, errPipe io.Reader
	cmd := exec.Command(argv[0], argv[1:]...)
	if opts.Shell != "" {
		// shell mode: the Command lines become the script fed over stdin
		cmd.Stdin = strings.NewReader(strings.Join(opts.Command, "\n"))
	}
	for k, v := range env {
		cmd.Env = append(cmd.Env, k+"="+v)
	}
	cmd.Dir = opts.Dir
	// Setpgid: give the child its own process group
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Setpgid: true,
	}

	// build out / err pipe
	if outPipe, err = cmd.StdoutPipe(); err != nil {
		return
	}
	if errPipe, err = cmd.StderrPipe(); err != nil {
		return
	}

	// charset: wrap the pipes with a decoder; an unknown charset is logged
	// and output passes through undecoded (best-effort, not fatal)
	if opts.Charset != "" {
		enc := m.charsets[strings.ToLower(opts.Charset)]
		if enc == nil {
			opts.Logger.Error("unknown charset:", opts.Charset)
		} else {
			outPipe = enc.NewDecoder().Reader(outPipe)
			errPipe = enc.NewDecoder().Reader(errPipe)
		}
	}

	// start process in the same lock with signal children
	if err = m.addChildPID(func() (pid int, err error) {
		if err = cmd.Start(); err != nil {
			return
		}
		pid = cmd.Process.Pid
		return
	}); err != nil {
		return
	}

	opts.Logger.Print("minit: " + opts.Name + ": process started")

	// streaming
	// NOTE(review): these goroutines are not waited for; output may still be
	// in flight when Execute returns after cmd.Wait — confirm acceptable.
	go opts.Logger.Out().ReadFrom(outPipe)
	go opts.Logger.Err().ReadFrom(errPipe)

	// wait for process
	if err = cmd.Wait(); err != nil {
		opts.Logger.Error("minit: " + opts.Name + ": process exited with error: " + err.Error())

		if opts.IgnoreExecError {
			err = nil
		}
	} else {
		opts.Logger.Print("minit: " + opts.Name + ": process exited")
	}

	m.delChildPID(cmd.Process.Pid)

	return
}
|
||||
64
pkg/mexec/manager_test.go
Normal file
64
pkg/mexec/manager_test.go
Normal file
@@ -0,0 +1,64 @@
|
||||
package mexec
|
||||
|
||||
import (
|
||||
"github.com/guoyk93/minit/pkg/mlog"
|
||||
"github.com/stretchr/testify/require"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestNewManager(t *testing.T) {
|
||||
m := NewManager()
|
||||
|
||||
os.RemoveAll(filepath.Join("testdata", "test.out.log"))
|
||||
os.RemoveAll(filepath.Join("testdata", "test.err.log"))
|
||||
|
||||
logger, err := mlog.NewProcLogger(mlog.ProcLoggerOptions{
|
||||
FileOptions: &mlog.RotatingFileOptions{
|
||||
Dir: "testdata",
|
||||
Filename: "test",
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = m.Execute(ExecuteOptions{
|
||||
Dir: "testdata",
|
||||
Env: map[string]string{
|
||||
"AAA": "BBB",
|
||||
},
|
||||
Command: []string{
|
||||
"echo", "$AAA",
|
||||
},
|
||||
Logger: logger,
|
||||
IgnoreExecError: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
buf, err := os.ReadFile(filepath.Join("testdata", "test.out.log"))
|
||||
require.Contains(t, string(buf), "BBB")
|
||||
|
||||
go func() {
|
||||
time.Sleep(time.Second)
|
||||
m.Signal(syscall.SIGINT)
|
||||
}()
|
||||
|
||||
t1 := time.Now()
|
||||
|
||||
err = m.Execute(ExecuteOptions{
|
||||
Dir: "testdata",
|
||||
Env: map[string]string{
|
||||
"AAA": "10",
|
||||
},
|
||||
Command: []string{
|
||||
"sleep", "$AAA",
|
||||
},
|
||||
Logger: logger,
|
||||
IgnoreExecError: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.True(t, time.Now().Sub(t1) < time.Second*2)
|
||||
}
|
||||
1
pkg/mexec/testdata/.gitignore
vendored
Normal file
1
pkg/mexec/testdata/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
*.log
|
||||
118
pkg/mlog/logger.go
Normal file
118
pkg/mlog/logger.go
Normal file
@@ -0,0 +1,118 @@
|
||||
package mlog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/guoyk93/minit/pkg/merrs"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ProcLoggerOptions configures a ProcLogger.
type ProcLoggerOptions struct {
	// ConsoleOut / ConsoleErr default to os.Stdout / os.Stderr when nil.
	ConsoleOut io.Writer
	ConsoleErr io.Writer
	// ConsolePrefix is prepended to every console line.
	ConsolePrefix string

	// FilePrefix is prepended to every file line.
	FilePrefix string
	// FileOptions, when set, additionally writes to rotating files named
	// "<Filename>.out" / "<Filename>.err" under Dir.
	FileOptions *RotatingFileOptions
}

// ProcLogger is a dual-stream (stdout/stderr) logger for one process.
type ProcLogger interface {
	Print(items ...interface{})
	Printf(layout string, items ...interface{})
	Error(items ...interface{})
	Errorf(layout string, items ...interface{})

	ProcOutput
}

// procLogger implements ProcLogger over a pair of Outputs.
type procLogger struct {
	out Output
	err Output
}
|
||||
|
||||
// NewProcLogger builds a ProcLogger from opts.
//
// Without FileOptions the logger writes to console only. With FileOptions it
// fans each stream out to both a rotating file (with FilePrefix) and the
// console (with ConsolePrefix); MaxFileSize defaults to 128 MiB and
// MaxFileCount to 5.
func NewProcLogger(opts ProcLoggerOptions) (pl ProcLogger, err error) {
	if opts.ConsoleOut == nil {
		opts.ConsoleOut = os.Stdout
	}
	if opts.ConsoleErr == nil {
		opts.ConsoleErr = os.Stderr
	}

	// console-only logger
	if opts.FileOptions == nil {
		pl = &procLogger{
			out: NewWriterOutput(opts.ConsoleOut, []byte(opts.ConsolePrefix), nil),
			err: NewWriterOutput(opts.ConsoleErr, []byte(opts.ConsolePrefix), nil),
		}
		return
	}

	if opts.FileOptions.MaxFileSize == 0 {
		opts.FileOptions.MaxFileSize = 128 * 1024 * 1024
	}
	if opts.FileOptions.MaxFileCount == 0 {
		opts.FileOptions.MaxFileCount = 5
	}

	var fileOut io.WriteCloser
	if fileOut, err = NewRotatingFile(RotatingFileOptions{
		Dir:          opts.FileOptions.Dir,
		Filename:     opts.FileOptions.Filename + ".out",
		MaxFileSize:  opts.FileOptions.MaxFileSize,
		MaxFileCount: opts.FileOptions.MaxFileCount,
	}); err != nil {
		return
	}

	var fileErr io.WriteCloser
	if fileErr, err = NewRotatingFile(RotatingFileOptions{
		Dir:          opts.FileOptions.Dir,
		Filename:     opts.FileOptions.Filename + ".err",
		MaxFileSize:  opts.FileOptions.MaxFileSize,
		MaxFileCount: opts.FileOptions.MaxFileCount,
	}); err != nil {
		return
	}

	// each stream goes to both the rotating file and the console
	pl = &procLogger{
		out: MultiOutput(
			NewWriterOutput(fileOut, []byte(opts.FilePrefix), nil),
			NewWriterOutput(opts.ConsoleOut, []byte(opts.ConsolePrefix), nil),
		),
		err: MultiOutput(
			NewWriterOutput(fileErr, []byte(opts.FilePrefix), nil),
			NewWriterOutput(opts.ConsoleErr, []byte(opts.ConsolePrefix), nil),
		),
	}
	return
}
|
||||
|
||||
// Close closes both underlying Outputs, collecting errors from each.
func (pl *procLogger) Close() error {
	eg := merrs.NewErrorGroup()
	eg.Add(pl.out.Close())
	eg.Add(pl.err.Close())
	return eg.Unwrap()
}

// Print writes items (fmt.Sprint-joined) as one stdout line.
func (pl *procLogger) Print(items ...interface{}) {
	_, _ = pl.out.Write(append([]byte(fmt.Sprint(items...)), '\n'))
}

// Error writes items (fmt.Sprint-joined) as one stderr line.
func (pl *procLogger) Error(items ...interface{}) {
	_, _ = pl.err.Write(append([]byte(fmt.Sprint(items...)), '\n'))
}

// Printf writes a formatted stdout line.
func (pl *procLogger) Printf(pattern string, items ...interface{}) {
	_, _ = pl.out.Write(append([]byte(fmt.Sprintf(pattern, items...)), '\n'))
}

// Errorf writes a formatted stderr line.
func (pl *procLogger) Errorf(pattern string, items ...interface{}) {
	_, _ = pl.err.Write(append([]byte(fmt.Sprintf(pattern, items...)), '\n'))
}

// Out returns the stdout Output stream.
func (pl *procLogger) Out() Output {
	return pl.out
}

// Err returns the stderr Output stream.
func (pl *procLogger) Err() Output {
	return pl.err
}
|
||||
25
pkg/mlog/logger_test.go
Normal file
25
pkg/mlog/logger_test.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package mlog
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestLog is a smoke test: build a file-backed ProcLogger and exercise all
// four print methods (output content is not asserted here).
func TestLog(t *testing.T) {
	os.MkdirAll(filepath.Join("testdata", "logger"), 0755)
	os.WriteFile(filepath.Join("testdata", "logger", ".gitignore"), []byte("*.log"), 0644)
	log, err := NewProcLogger(ProcLoggerOptions{
		FileOptions: &RotatingFileOptions{
			Dir:      filepath.Join("testdata", "logger"),
			Filename: "test",
		},
		ConsolePrefix: "test",
	})
	require.NoError(t, err)
	log.Print("hello", "world")
	log.Printf("hello, %s", "world")
	log.Error("error", "world")
	log.Errorf("error, %s", "world")
}
|
||||
154
pkg/mlog/output.go
Normal file
154
pkg/mlog/output.go
Normal file
@@ -0,0 +1,154 @@
|
||||
package mlog
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"github.com/guoyk93/minit/pkg/merrs"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Output interface for single stream log output
|
||||
type Output interface {
|
||||
// WriteCloser is for single line writing
|
||||
io.WriteCloser
|
||||
|
||||
// ReaderFrom is for streaming
|
||||
io.ReaderFrom
|
||||
}
|
||||
|
||||
// ProcOutput interface for process
|
||||
type ProcOutput interface {
|
||||
// Out stdout
|
||||
Out() Output
|
||||
// Err stderr
|
||||
Err() Output
|
||||
}
|
||||
|
||||
type writerOutput struct {
|
||||
pfx []byte
|
||||
sfx []byte
|
||||
w io.Writer
|
||||
}
|
||||
|
||||
func (w *writerOutput) Write(p []byte) (n int, err error) {
|
||||
if len(w.pfx) == 0 && len(w.sfx) == 0 {
|
||||
n, err = w.w.Write(p)
|
||||
return
|
||||
}
|
||||
if n, err = w.w.Write(
|
||||
append(
|
||||
append(w.pfx, p...),
|
||||
w.sfx...,
|
||||
),
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
n = len(p)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (w *writerOutput) Close() error {
|
||||
if c, ok := w.w.(io.Closer); ok {
|
||||
return c.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *writerOutput) ReadFrom(r io.Reader) (n int64, err error) {
|
||||
br := bufio.NewReader(r)
|
||||
for {
|
||||
var line []byte
|
||||
if line, err = br.ReadBytes('\n'); err == nil {
|
||||
_, _ = w.Write(line)
|
||||
n += int64(len(line))
|
||||
} else {
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
if len(line) != 0 {
|
||||
_, _ = w.Write(append(line, '\n'))
|
||||
n += int64(len(line))
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// NewWriterOutput wrap a writer as a Output, with optional line Prefix and Suffix
|
||||
func NewWriterOutput(w io.Writer, pfx, sfx []byte) Output {
|
||||
return &writerOutput{w: w, pfx: pfx, sfx: sfx}
|
||||
}
|
||||
|
||||
type multiOutput struct {
|
||||
outputs []Output
|
||||
}
|
||||
|
||||
// MultiOutput create a new Output for proc logging
|
||||
func MultiOutput(outputs ...Output) Output {
|
||||
return &multiOutput{outputs: outputs}
|
||||
}
|
||||
|
||||
func (pc *multiOutput) Close() error {
|
||||
eg := merrs.NewErrorGroup()
|
||||
for _, output := range pc.outputs {
|
||||
eg.Add(output.Close())
|
||||
}
|
||||
return eg.Unwrap()
|
||||
}
|
||||
|
||||
// Write this method is used to write a single line of log
|
||||
func (pc *multiOutput) Write(buf []byte) (n int, err error) {
|
||||
for _, output := range pc.outputs {
|
||||
if n, err = output.Write(buf); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
n = len(buf)
|
||||
return
|
||||
}
|
||||
|
||||
// ReadFrom implements ReaderFrom
|
||||
func (pc *multiOutput) ReadFrom(r io.Reader) (n int64, err error) {
|
||||
eg := merrs.NewErrorGroup()
|
||||
wg := &sync.WaitGroup{}
|
||||
|
||||
var (
|
||||
cs []io.Closer
|
||||
ws []io.Writer
|
||||
)
|
||||
|
||||
for _, _out := range pc.outputs {
|
||||
out := _out
|
||||
|
||||
childR, childW := io.Pipe()
|
||||
cs, ws = append(cs, childW), append(ws, childW)
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
_, err := out.ReadFrom(childR)
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
eg.Add(err)
|
||||
}()
|
||||
}
|
||||
|
||||
_, err = io.Copy(io.MultiWriter(ws...), r)
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
for _, c := range cs {
|
||||
_ = c.Close()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if err == nil {
|
||||
err = eg.Unwrap()
|
||||
}
|
||||
return
|
||||
}
|
||||
39
pkg/mlog/output_test.go
Normal file
39
pkg/mlog/output_test.go
Normal file
@@ -0,0 +1,39 @@
|
||||
package mlog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/stretchr/testify/require"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestNewWriterOutput checks prefix/suffix wrapping for a single-line Write
// and for streaming ReadFrom (the trailing fragment gains a newline).
func TestNewWriterOutput(t *testing.T) {
	buf := &bytes.Buffer{}

	o := NewWriterOutput(buf, []byte("a"), []byte("b"))

	_, err := o.Write([]byte("hello\n"))
	require.NoError(t, err)

	_, err = o.ReadFrom(bytes.NewReader([]byte("hello\nworld")))
	require.NoError(t, err)

	require.Equal(t, "ahello\nbahello\nbaworld\nb", buf.String())
}

// TestMultiOutput checks that MultiOutput fans each line out to every
// wrapped Output with that Output's own prefix/suffix applied.
func TestMultiOutput(t *testing.T) {
	buf1 := &bytes.Buffer{}
	o1 := NewWriterOutput(buf1, []byte("a"), []byte("b"))
	buf2 := &bytes.Buffer{}
	o2 := NewWriterOutput(buf2, []byte("c"), []byte("d"))

	o := MultiOutput(o1, o2)

	_, err := o.Write([]byte("hello\n"))
	require.NoError(t, err)

	_, err = o.ReadFrom(bytes.NewReader([]byte("hello\nworld")))
	require.NoError(t, err)

	require.Equal(t, "ahello\nbahello\nbaworld\nb", buf1.String())
	require.Equal(t, "chello\ndchello\ndcworld\nd", buf2.String())
}
|
||||
165
pkg/mlog/rotating.go
Normal file
165
pkg/mlog/rotating.go
Normal file
@@ -0,0 +1,165 @@
|
||||
package mlog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// rotatingFile is an io.WriteCloser that writes to "<Dir>/<Filename>.log"
// and renames it to "<Filename>.<id>.log" once MaxFileSize is exceeded.
type rotatingFile struct {
	opts RotatingFileOptions

	fd *os.File
	// size is the current file size in bytes; updated atomically from Write
	size int64
	lock sync.Locker
}

// RotatingFileOptions options for creating a RotatingFile
type RotatingFileOptions struct {
	// Dir directory
	Dir string
	// Filename filename prefix
	Filename string
	// MaxFileSize max size of a single file, default to 128mb
	MaxFileSize int64
	// MaxFileCount max count of rotated files
	MaxFileCount int64
}

// NewRotatingFile create a new io.WriteCloser as a rotating log file
func NewRotatingFile(opts RotatingFileOptions) (w io.WriteCloser, err error) {
	if opts.MaxFileSize == 0 {
		// NOTE(review): this default is 128*1000*1000 while NewProcLogger
		// defaults to 128*1024*1024 — confirm which is intended.
		opts.MaxFileSize = 128 * 1000 * 1000
	}
	rf := &rotatingFile{opts: opts, lock: &sync.Mutex{}}
	if err = rf.open(); err != nil {
		return
	}
	w = rf
	return
}

// currentPath is the path of the actively written log file.
func (rf *rotatingFile) currentPath() string {
	return filepath.Join(rf.opts.Dir, rf.opts.Filename+".log")
}

// rotatedPath is the path a rotated file is renamed to for a given id.
func (rf *rotatingFile) rotatedPath(id int64) string {
	return filepath.Join(rf.opts.Dir, fmt.Sprintf("%s.%d.log", rf.opts.Filename, id))
}

// nextRotatedID scans Dir for existing "<Filename>.<id>.log" files and
// returns max(id)+1, wrapping back to 1 once MaxFileCount is exceeded.
// Non-numeric middle segments parse as 0 and are effectively ignored.
func (rf *rotatingFile) nextRotatedID() (id int64, err error) {
	var entries []os.DirEntry
	if entries, err = os.ReadDir(rf.opts.Dir); err != nil {
		return
	}

	for _, entry := range entries {
		name := entry.Name()
		if strings.HasPrefix(name, rf.opts.Filename+".") &&
			strings.HasSuffix(name, ".log") {
			eIDStr := strings.TrimSuffix(strings.TrimPrefix(name, rf.opts.Filename+"."), ".log")
			eID, _ := strconv.ParseInt(eIDStr, 10, 64)
			if eID > id {
				id = eID
			}
		}
	}

	id += 1

	// if id exceeded MaxFileCount, back to 1
	if rf.opts.MaxFileCount > 0 && id > rf.opts.MaxFileCount {
		id = 1
	}
	return
}
|
||||
|
||||
// open opens (appending, creating if needed) the current log file, records
// its size, swaps it in as rf.fd and closes the previously open file.
func (rf *rotatingFile) open() (err error) {
	var fd *os.File
	if fd, err = os.OpenFile(
		rf.currentPath(),
		os.O_WRONLY|os.O_CREATE|os.O_APPEND,
		0644,
	); err != nil {
		return
	}

	var info os.FileInfo
	if info, err = fd.Stat(); err != nil {
		_ = fd.Close()
		return
	}

	existed := rf.fd

	rf.fd = fd
	rf.size = info.Size()

	if existed != nil {
		_ = existed.Close()
	}

	return
}

// reallocate rotates the current file: rename it to the next rotated path
// and reopen a fresh current file. Runs under rf.lock.
func (rf *rotatingFile) reallocate() (err error) {
	rf.lock.Lock()
	defer rf.lock.Unlock()

	// recheck, in case of race condition
	if atomic.LoadInt64(&rf.size) <= rf.opts.MaxFileSize {
		return
	}

	// find next rotated id
	var id int64
	if id, err = rf.nextRotatedID(); err != nil {
		return
	}

	// try remove existed, in case id looped due to maxCount
	_ = os.Remove(rf.rotatedPath(id))

	// remove current file to rotated path
	if err = os.Rename(rf.currentPath(), rf.rotatedPath(id)); err != nil {
		return
	}

	// open current file, this will close existing file
	if err = rf.open(); err != nil {
		return
	}

	return nil
}

// Write appends p to the current file and triggers a rotation when the
// atomically tracked size exceeds MaxFileSize.
//
// NOTE(review): rf.fd is read here without holding rf.lock while
// reallocate/Close may swap or nil it — confirm writers are externally
// serialized with rotation/close.
func (rf *rotatingFile) Write(p []byte) (n int, err error) {
	if n, err = rf.fd.Write(p); err != nil {
		return
	}

	// reallocate if exceeded
	if atomic.AddInt64(&rf.size, int64(n)) > rf.opts.MaxFileSize {
		if err = rf.reallocate(); err != nil {
			return
		}
	}

	return
}

// Close closes the current file; further Writes after Close would hit a
// nil fd.
func (rf *rotatingFile) Close() (err error) {
	rf.lock.Lock()
	defer rf.lock.Unlock()

	if rf.fd != nil {
		err = rf.fd.Close()
		rf.fd = nil
	}
	return
}
|
||||
43
pkg/mlog/rotating_test.go
Normal file
43
pkg/mlog/rotating_test.go
Normal file
@@ -0,0 +1,43 @@
|
||||
package mlog
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestNewRotatingFile writes past MaxFileSize repeatedly to exercise
// rotation, both unbounded and with MaxFileCount wrapping the rotated ids.
func TestNewRotatingFile(t *testing.T) {
	_ = os.RemoveAll(filepath.Join("testdata", "logs"))
	_ = os.MkdirAll(filepath.Join("testdata", "logs"), 0755)
	_ = os.WriteFile(filepath.Join("testdata", "logs", ".gitignore"), []byte("*.log"), 0644)
	f, err := NewRotatingFile(RotatingFileOptions{
		Dir:         filepath.Join("testdata", "logs"),
		Filename:    "test",
		MaxFileSize: 10,
	})
	require.NoError(t, err)
	_, err = f.Write([]byte("hello, world, hello, world, hello, world"))
	require.NoError(t, err)
	_, err = f.Write([]byte("hello, world, hello, world, hello, world"))
	require.NoError(t, err)
	_, err = f.Write([]byte("hello, world, hello, world, hello, world"))
	require.NoError(t, err)
	err = f.Close()
	require.NoError(t, err)
	f, err = NewRotatingFile(RotatingFileOptions{
		Dir:          filepath.Join("testdata", "logs"),
		Filename:     "test-maxcount",
		MaxFileSize:  10,
		MaxFileCount: 2,
	})
	require.NoError(t, err)
	_, err = f.Write([]byte("hello, world, hello, world, hello, world"))
	require.NoError(t, err)
	_, err = f.Write([]byte("hello, world, hello, world, hello, world"))
	require.NoError(t, err)
	_, err = f.Write([]byte("hello, world, hello, world, hello, world"))
	require.NoError(t, err)
	err = f.Close()
	require.NoError(t, err)
}
|
||||
1
pkg/mlog/testdata/logger/.gitignore
vendored
Normal file
1
pkg/mlog/testdata/logger/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
*.log
|
||||
1
pkg/mlog/testdata/logs/.gitignore
vendored
Normal file
1
pkg/mlog/testdata/logs/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
*.log
|
||||
80
pkg/mlog/writer.go
Normal file
80
pkg/mlog/writer.go
Normal file
@@ -0,0 +1,80 @@
|
||||
package mlog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"log"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type loggerWriter struct {
|
||||
logger *log.Logger
|
||||
buf *bytes.Buffer
|
||||
pfx string
|
||||
|
||||
lock sync.Locker
|
||||
}
|
||||
|
||||
// NewLoggerWriter create a new io.WriteCloser that append each line to log.procLogger
|
||||
func NewLoggerWriter(logger *log.Logger, prefix string) io.WriteCloser {
|
||||
return &loggerWriter{
|
||||
logger: logger,
|
||||
buf: &bytes.Buffer{},
|
||||
pfx: prefix,
|
||||
|
||||
lock: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (w *loggerWriter) finish(force bool) (err error) {
|
||||
var line string
|
||||
|
||||
for {
|
||||
// read till new line
|
||||
if line, err = w.buf.ReadString('\n'); err == nil {
|
||||
// output
|
||||
if err = w.logger.Output(3, w.pfx+line); err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if force {
|
||||
// if forced, output to logger
|
||||
if err = w.logger.Output(3, w.pfx+line); err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// write back
|
||||
if _, err = w.buf.WriteString(line); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (w *loggerWriter) Close() (err error) {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
|
||||
if err = w.finish(true); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (w *loggerWriter) Write(p []byte) (n int, err error) {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
|
||||
if n, err = w.buf.Write(p); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = w.finish(false); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
19
pkg/mlog/writer_test.go
Normal file
19
pkg/mlog/writer_test.go
Normal file
@@ -0,0 +1,19 @@
|
||||
package mlog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/stretchr/testify/require"
|
||||
"log"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestLoggerWriter checks that complete lines are logged on Write and the
// trailing fragment ("bbb") is flushed on Close; with Lshortfile the
// expected file:line values pin the Output calldepth of 3.
func TestLoggerWriter(t *testing.T) {
	out := &bytes.Buffer{}
	l := log.New(out, "aaa", log.Lshortfile)
	w := NewLoggerWriter(l, "bbb ")
	_, err := w.Write([]byte("hello,world\nbbb"))
	require.NoError(t, err)
	err = w.Close()
	require.NoError(t, err)
	require.Equal(t, "aaawriter_test.go:14: bbb hello,world\naaawriter_test.go:16: bbb bbb\n", out.String())
}
|
||||
59
pkg/mrunners/runner.go
Normal file
59
pkg/mrunners/runner.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package mrunners
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"github.com/guoyk93/minit/pkg/mexec"
|
||||
"github.com/guoyk93/minit/pkg/mlog"
|
||||
"github.com/guoyk93/minit/pkg/munit"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// RunnerAction is the controller behaviour of a unit.
type RunnerAction interface {
	// Do runs the controller; long-running actions return when ctx is done.
	Do(ctx context.Context)
}

// Runner couples a unit's action with its scheduling metadata.
type Runner struct {
	// Order decides startup ordering among runners.
	Order int
	// Long marks controllers that keep running (e.g. daemon, cron).
	Long   bool
	Action RunnerAction
}

var (
	// factories maps unit kind to its RunnerFactory; guarded by factoriesLock.
	factories                 = map[string]RunnerFactory{}
	factoriesLock sync.Locker = &sync.Mutex{}
)

// RunnerOptions carries the dependencies a runner needs.
type RunnerOptions struct {
	Unit   munit.Unit
	Exec   mexec.Manager
	Logger mlog.ProcLogger
}

// Print logs message tagged with the unit's kind and name.
func (ro RunnerOptions) Print(message string) {
	ro.Logger.Print("minit: " + ro.Unit.Kind + "/" + ro.Unit.Name + ": " + message)
}

// Error logs message to the error stream, tagged with kind and name.
func (ro RunnerOptions) Error(message string) {
	ro.Logger.Error("minit: " + ro.Unit.Kind + "/" + ro.Unit.Name + ": " + message)
}

// RunnerFactory builds a Runner from options for one unit.
type RunnerFactory = func(opts RunnerOptions) (Runner, error)

// Register installs a factory for a unit kind (called from init functions).
func Register(name string, factory RunnerFactory) {
	factoriesLock.Lock()
	defer factoriesLock.Unlock()

	factories[name] = factory
}

// Create builds a Runner for opts.Unit.Kind, or errors on an unknown kind.
func Create(opts RunnerOptions) (Runner, error) {
	factoriesLock.Lock()
	defer factoriesLock.Unlock()

	if fac, ok := factories[opts.Unit.Kind]; ok {
		return fac(opts)
	} else {
		return Runner{}, errors.New("unknown runner kind: " + opts.Unit.Kind)
	}
}
|
||||
58
pkg/mrunners/runner_cron.go
Normal file
58
pkg/mrunners/runner_cron.go
Normal file
@@ -0,0 +1,58 @@
|
||||
package mrunners
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/guoyk93/minit/pkg/munit"
|
||||
"github.com/robfig/cron/v3"
|
||||
)
|
||||
|
||||
// Registers the "cron" runner: validates the unit's command and cron
// expression up front, then schedules repeated executions.
func init() {
	Register(munit.KindCron, func(opts RunnerOptions) (runner Runner, err error) {
		if err = opts.Unit.RequireCommand(); err != nil {
			return
		}
		if err = opts.Unit.RequireCron(); err != nil {
			return
		}
		// validate the cron expression early so Do can rely on it
		if _, err = cron.ParseStandard(opts.Unit.Cron); err != nil {
			return
		}

		runner.Order = 30
		runner.Long = true
		runner.Action = &runnerCron{RunnerOptions: opts}
		return
	})
}

// runnerCron runs the unit's command on a cron schedule.
type runnerCron struct {
	RunnerOptions
}

// Do optionally runs the command once immediately, then executes it on each
// cron trigger until ctx is cancelled, waiting for in-flight jobs on stop.
func (r *runnerCron) Do(ctx context.Context) {
	r.Print("controller started")
	defer r.Print("controller exited")

	if r.Unit.Immediate {
		if err := r.Exec.Execute(r.Unit.ExecuteOptions(r.Logger)); err != nil {
			r.Error("failed executing: " + err.Error())
		}
	}

	cr := cron.New(cron.WithLogger(cron.PrintfLogger(r.Logger)))
	_, err := cr.AddFunc(r.Unit.Cron, func() {
		r.Print("triggered")
		if err := r.Exec.Execute(r.Unit.ExecuteOptions(r.Logger)); err != nil {
			r.Error("failed executing: " + err.Error())
		}
	})

	// the expression was validated by ParseStandard in the factory, so a
	// failure here indicates a programming error
	if err != nil {
		panic(err)
	}

	cr.Start()

	<-ctx.Done()
	// Stop returns a context that is done once running jobs finish
	<-cr.Stop().Done()
}
|
||||
55
pkg/mrunners/runner_daemon.go
Normal file
55
pkg/mrunners/runner_daemon.go
Normal file
@@ -0,0 +1,55 @@
|
||||
package mrunners
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/guoyk93/minit/pkg/munit"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Registers the "daemon" runner: the unit's command is kept running,
// restarted after each exit.
func init() {
	Register(munit.KindDaemon, func(opts RunnerOptions) (runner Runner, err error) {
		if err = opts.Unit.RequireCommand(); err != nil {
			return
		}

		runner.Order = 40
		runner.Long = true
		runner.Action = &runnerDaemon{RunnerOptions: opts}
		return
	})
}

// runnerDaemon restarts the unit's command in a loop until ctx is done.
type runnerDaemon struct {
	RunnerOptions
}

// Do executes the command, and whenever it exits (success or failure)
// restarts it after a fixed 5-second pause, stopping once ctx is cancelled.
func (r *runnerDaemon) Do(ctx context.Context) {
	r.Print("controller started")
	defer r.Print("controller exited")

forLoop:
	for {
		if ctx.Err() != nil {
			break forLoop
		}

		var err error
		// NOTE(review): message lacks a space after the colon, unlike the
		// other runners ("failed executing: ")
		if err = r.Exec.Execute(r.Unit.ExecuteOptions(r.Logger)); err != nil {
			r.Error("failed executing:" + err.Error())
		}

		if ctx.Err() != nil {
			break forLoop
		}

		r.Print("restarting")

		// wait 5s before restart, but wake immediately on cancellation
		timer := time.NewTimer(time.Second * 5)
		select {
		case <-timer.C:
		case <-ctx.Done():
			break forLoop
		}
	}

}
|
||||
32
pkg/mrunners/runner_once.go
Normal file
32
pkg/mrunners/runner_once.go
Normal file
@@ -0,0 +1,32 @@
|
||||
package mrunners
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/guoyk93/minit/pkg/munit"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Register(munit.KindOnce, func(opts RunnerOptions) (runner Runner, err error) {
|
||||
if err = opts.Unit.RequireCommand(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
runner.Order = 20
|
||||
runner.Action = &runnerOnce{RunnerOptions: opts}
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
type runnerOnce struct {
|
||||
RunnerOptions
|
||||
}
|
||||
|
||||
func (r *runnerOnce) Do(ctx context.Context) {
|
||||
r.Print("controller started")
|
||||
defer r.Print("controller exited")
|
||||
|
||||
if err := r.Exec.Execute(r.Unit.ExecuteOptions(r.Logger)); err != nil {
|
||||
r.Error("failed executing: " + err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
92
pkg/mrunners/runner_render.go
Normal file
92
pkg/mrunners/runner_render.go
Normal file
@@ -0,0 +1,92 @@
|
||||
package mrunners
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/guoyk93/minit/pkg/menv"
|
||||
"github.com/guoyk93/minit/pkg/mtmpl"
|
||||
"github.com/guoyk93/minit/pkg/munit"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Register(munit.KindRender, func(opts RunnerOptions) (runner Runner, err error) {
|
||||
if err = opts.Unit.RequireFiles(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
runner.Order = 10
|
||||
runner.Action = &runnerRender{RunnerOptions: opts}
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
// runnerRender renders template files in place before other runners start.
type runnerRender struct {
	RunnerOptions
}

// doFile renders the template file at name in place: read it, execute it as
// a template with {{.Env}} bound to env, optionally strip blank lines and
// surrounding whitespace, and write the result back.
// The ctx parameter is currently unused.
func (r *runnerRender) doFile(ctx context.Context, name string, env map[string]string) (err error) {
	var buf []byte
	if buf, err = os.ReadFile(name); err != nil {
		err = fmt.Errorf("failed reading %s: %s", name, err.Error())
		return
	}
	var content []byte
	if content, err = mtmpl.Execute(string(buf), map[string]any{
		"Env": env,
	}); err != nil {
		err = fmt.Errorf("failed rendering %s: %s", name, err.Error())
		return
	}
	// unless the unit asked for raw output, trim each line and drop blanks
	if !r.Unit.Raw {
		content = sanitizeLines(content)
	}
	// NOTE(review): the 0755 mode only applies if the file were created;
	// since it was just read, it exists and keeps its original mode.
	if err = os.WriteFile(name, content, 0755); err != nil {
		err = fmt.Errorf("failed writing %s: %s", name, err.Error())
		return
	}
	return
}
|
||||
|
||||
func (r *runnerRender) Do(ctx context.Context) {
|
||||
r.Print("controller started")
|
||||
defer r.Print("controller exited")
|
||||
|
||||
env, err := menv.Construct(r.Unit.Env)
|
||||
|
||||
if err != nil {
|
||||
r.Error("failed constructing environments variables: " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
for _, filePattern := range r.Unit.Files {
|
||||
var names []string
|
||||
if names, err = filepath.Glob(filePattern); err != nil {
|
||||
r.Error(fmt.Sprintf("failed globbing: %s: %s", filePattern, err.Error()))
|
||||
continue
|
||||
}
|
||||
for _, name := range names {
|
||||
if err = r.doFile(ctx, name, env); err == nil {
|
||||
r.Print("done rendering: " + name)
|
||||
} else {
|
||||
r.Error("failed rendering: " + name + ": " + err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sanitizeLines trims leading/trailing whitespace from every line and drops
// lines that end up empty; each surviving line is terminated with '\n'.
func sanitizeLines(s []byte) []byte {
	var out bytes.Buffer
	for _, raw := range bytes.Split(s, []byte{'\n'}) {
		trimmed := bytes.TrimSpace(raw)
		if len(trimmed) > 0 {
			out.Write(trimmed)
			out.WriteByte('\n')
		}
	}
	return out.Bytes()
}
|
||||
43
pkg/msetups/setup.go
Normal file
43
pkg/msetups/setup.go
Normal file
@@ -0,0 +1,43 @@
|
||||
package msetups
|
||||
|
||||
import (
|
||||
"github.com/guoyk93/minit/pkg/mlog"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// SetupFunc is a one-shot setup routine executed during process startup.
type SetupFunc = func(log mlog.ProcLogger) error

// setupItem pairs a setup routine with its execution order.
type setupItem struct {
	order int // ordering key used by Setup; lower values first (see Setup)
	fn    SetupFunc
}

var (
	setupsLock sync.Locker = &sync.Mutex{} // guards setups
	setups     []setupItem                 // routines collected via Register
)
|
||||
|
||||
func Register(order int, fn SetupFunc) {
|
||||
setupsLock.Lock()
|
||||
defer setupsLock.Unlock()
|
||||
|
||||
setups = append(setups, setupItem{order: order, fn: fn})
|
||||
}
|
||||
|
||||
func Setup(logger mlog.ProcLogger) (err error) {
|
||||
setupsLock.Lock()
|
||||
defer setupsLock.Unlock()
|
||||
|
||||
sort.Slice(setups, func(i, j int) bool {
|
||||
return setups[i].order > setups[j].order
|
||||
})
|
||||
|
||||
for _, setup := range setups {
|
||||
if err = setup.fn(logger); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
30
pkg/msetups/setup_banner.go
Normal file
30
pkg/msetups/setup_banner.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package msetups
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/guoyk93/minit/pkg/mlog"
|
||||
"os"
|
||||
)
|
||||
|
||||
const (
	// BannerFile is an optional text file printed line-by-line at startup.
	BannerFile = "/etc/banner.minit.txt"
)

// register banner printing as the earliest setup step (order 10)
func init() {
	Register(10, setupBanner)
}
|
||||
|
||||
func setupBanner(logger mlog.ProcLogger) (err error) {
|
||||
var buf []byte
|
||||
if buf, err = os.ReadFile(BannerFile); err != nil {
|
||||
err = nil
|
||||
return
|
||||
}
|
||||
|
||||
lines := bytes.Split(buf, []byte{'\n'})
|
||||
for _, line := range lines {
|
||||
logger.Print(string(line))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
104
pkg/msetups/setup_rlimits.go
Normal file
104
pkg/msetups/setup_rlimits.go
Normal file
@@ -0,0 +1,104 @@
|
||||
//go:build linux
|
||||
|
||||
package msetups
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/guoyk93/minit/pkg/mlog"
|
||||
"golang.org/x/sys/unix"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// Unlimited is the sentinel string mapped to RLIM_INFINITY in environment
// values and in formatted log output.
const Unlimited = "unlimited"

var (
	// knownRLimitNames maps the <NAME> suffix of MINIT_RLIMIT_<NAME>
	// environment variables to the corresponding rlimit resource constant.
	knownRLimitNames = map[string]int{
		"AS":         unix.RLIMIT_AS,
		"CORE":       unix.RLIMIT_CORE,
		"CPU":        unix.RLIMIT_CPU,
		"DATA":       unix.RLIMIT_DATA,
		"FSIZE":      unix.RLIMIT_FSIZE,
		"LOCKS":      unix.RLIMIT_LOCKS,
		"MEMLOCK":    unix.RLIMIT_MEMLOCK,
		"MSGQUEUE":   unix.RLIMIT_MSGQUEUE,
		"NICE":       unix.RLIMIT_NICE,
		"NOFILE":     unix.RLIMIT_NOFILE,
		"NPROC":      unix.RLIMIT_NPROC,
		"RTPRIO":     unix.RLIMIT_RTPRIO,
		"SIGPENDING": unix.RLIMIT_SIGPENDING,
		"STACK":      unix.RLIMIT_STACK,
	}
)
|
||||
|
||||
func decodeRLimitValue(v *uint64, s string) (err error) {
|
||||
s = strings.TrimSpace(s)
|
||||
if s == "-" || s == "" {
|
||||
return
|
||||
}
|
||||
if strings.ToLower(s) == Unlimited {
|
||||
*v = unix.RLIM_INFINITY
|
||||
} else {
|
||||
if *v, err = strconv.ParseUint(s, 10, 64); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func formatRLimitValue(v uint64) string {
|
||||
if v == unix.RLIM_INFINITY {
|
||||
return Unlimited
|
||||
} else {
|
||||
return strconv.FormatUint(v, 10)
|
||||
}
|
||||
}
|
||||
|
||||
// register rlimit configuration as setup step order 30
func init() {
	Register(30, setupRLimits)
}
|
||||
|
||||
// setupRLimits applies resource limits configured through environment
// variables of the form MINIT_RLIMIT_<NAME>=<soft>[:<hard>]. Each field may
// be a base-10 number, "unlimited", or "-" to keep the current value; with
// no ":" the single value is applied to both soft and hard limits.
func setupRLimits(logger mlog.ProcLogger) (err error) {
	for name, res := range knownRLimitNames {
		key := "MINIT_RLIMIT_" + name
		val := strings.TrimSpace(os.Getenv(key))
		// unset or all-"keep" values mean: leave this limit alone entirely
		if val == "-" || val == "-:-" || val == "" {
			continue
		}
		// start from the current limit so "-" fields retain their value
		var limit syscall.Rlimit
		if err = syscall.Getrlimit(res, &limit); err != nil {
			err = fmt.Errorf("failed getting rlimit_%s: %s", name, err.Error())
			return
		}
		logger.Printf("current rlimit_%s=%s:%s", name, formatRLimitValue(limit.Cur), formatRLimitValue(limit.Max))
		if strings.Contains(val, ":") {
			// explicit "<soft>:<hard>" form
			splits := strings.Split(val, ":")
			if len(splits) != 2 {
				err = fmt.Errorf("invalid environment variable %s=%s", key, val)
				return
			}
			if err = decodeRLimitValue(&limit.Cur, splits[0]); err != nil {
				err = fmt.Errorf("invalid environment variable %s=%s: %s", key, val, err.Error())
				return
			}
			if err = decodeRLimitValue(&limit.Max, splits[1]); err != nil {
				err = fmt.Errorf("invalid environment variable %s=%s: %s", key, val, err.Error())
				return
			}
		} else {
			// single value applies to both soft and hard limits
			if err = decodeRLimitValue(&limit.Cur, val); err != nil {
				return
			}
			limit.Max = limit.Cur
		}
		logger.Printf("setting rlimit_%s=%s:%s", name, formatRLimitValue(limit.Cur), formatRLimitValue(limit.Max))
		if err = syscall.Setrlimit(res, &limit); err != nil {
			err = fmt.Errorf("failed setting rlimit_%s=%s: %s", name, val, err.Error())
			return
		}
	}

	return
}
|
||||
45
pkg/msetups/setup_sysctl.go
Normal file
45
pkg/msetups/setup_sysctl.go
Normal file
@@ -0,0 +1,45 @@
|
||||
//go:build linux
|
||||
|
||||
package msetups
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/guoyk93/minit/pkg/mlog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// register sysctl configuration as setup step order 20
func init() {
	Register(20, setupSysctl)
}
|
||||
|
||||
func setupSysctl(logger mlog.ProcLogger) (err error) {
|
||||
items := strings.Split(os.Getenv("MINIT_SYSCTL"), ",")
|
||||
for _, item := range items {
|
||||
splits := strings.SplitN(item, "=", 2)
|
||||
if len(splits) != 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
k, v := strings.TrimSpace(splits[0]), strings.TrimSpace(splits[1])
|
||||
if k == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
filename := filepath.Join(
|
||||
append(
|
||||
[]string{"/proc", "sys"},
|
||||
strings.Split(k, ".")...,
|
||||
)...,
|
||||
)
|
||||
|
||||
logger.Printf("writing sysctl %s=%s", k, v)
|
||||
|
||||
if err = os.WriteFile(filename, []byte(v), 0644); err != nil {
|
||||
err = fmt.Errorf("failed writing sysctl %s=%s: %s", k, v, err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
10
pkg/msetups/setup_test.go
Normal file
10
pkg/msetups/setup_test.go
Normal file
@@ -0,0 +1,10 @@
|
||||
package msetups
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
require.Equal(t, 10, setups[0].order)
|
||||
}
|
||||
43
pkg/msetups/setup_thp.go
Normal file
43
pkg/msetups/setup_thp.go
Normal file
@@ -0,0 +1,43 @@
|
||||
//go:build linux
|
||||
|
||||
package msetups
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/guoyk93/minit/pkg/mlog"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
	// controlFileTHP is the kernel control file for transparent huge pages.
	controlFileTHP = "/sys/kernel/mm/transparent_hugepage/enabled"
)

// register THP configuration as setup step order 40
func init() {
	Register(40, setupTHP)
}
|
||||
|
||||
func setupTHP(logger mlog.ProcLogger) (err error) {
|
||||
val := strings.TrimSpace(os.Getenv("MINIT_THP"))
|
||||
if val == "" {
|
||||
return
|
||||
}
|
||||
var buf []byte
|
||||
if buf, err = os.ReadFile(controlFileTHP); err != nil {
|
||||
err = fmt.Errorf("failed reading THP configuration %s: %s", controlFileTHP, err.Error())
|
||||
return
|
||||
}
|
||||
logger.Printf("current THP configuration: %s", bytes.TrimSpace(buf))
|
||||
logger.Printf("writing THP configuration: %s", val)
|
||||
if err = os.WriteFile(controlFileTHP, []byte(val), 644); err != nil {
|
||||
err = fmt.Errorf("fialed writting THP configuration %s: %s", controlFileTHP, err.Error())
|
||||
return
|
||||
}
|
||||
if buf, err = os.ReadFile(controlFileTHP); err != nil {
|
||||
err = fmt.Errorf("failed reading THP configuration %s: %s", controlFileTHP, err.Error())
|
||||
return
|
||||
}
|
||||
logger.Printf("current THP configuration: %s", bytes.TrimSpace(buf))
|
||||
return
|
||||
}
|
||||
66
pkg/msetups/setup_webdav.go
Normal file
66
pkg/msetups/setup_webdav.go
Normal file
@@ -0,0 +1,66 @@
|
||||
package msetups
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/guoyk93/minit/pkg/mlog"
|
||||
"golang.org/x/net/webdav"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// register the optional WebDAV server as setup step order 50
func init() {
	Register(50, setupWebDAV)
}
|
||||
|
||||
// setupWebDAV optionally starts an embedded WebDAV server for ad-hoc file
// access. It is enabled by setting MINIT_WEBDAV_ROOT; MINIT_WEBDAV_PORT
// (default "7486") selects the listen port, and MINIT_WEBDAV_USERNAME plus
// MINIT_WEBDAV_PASSWORD (both must be non-empty) enable HTTP basic auth.
func setupWebDAV(logger mlog.ProcLogger) (err error) {
	envRoot := strings.TrimSpace(os.Getenv("MINIT_WEBDAV_ROOT"))
	if envRoot == "" {
		return
	}
	if err = os.MkdirAll(envRoot, 0755); err != nil {
		err = fmt.Errorf("failed initializing WebDAV root: %s: %s", envRoot, err.Error())
		return
	}
	envPort := strings.TrimSpace(os.Getenv("MINIT_WEBDAV_PORT"))
	if envPort == "" {
		envPort = "7486"
	}
	logger.Printf("WebDAV started: root=%s, port=%s", envRoot, envPort)
	h := &webdav.Handler{
		FileSystem: webdav.Dir(envRoot),
		LockSystem: webdav.NewMemLS(),
		// per-request access log, with error detail when present
		Logger: func(req *http.Request, err error) {
			if err != nil {
				logger.Printf("WebDAV: %s %s: %s", req.Method, req.URL.Path, err.Error())
			} else {
				logger.Printf("WebDAV: %s %s", req.Method, req.URL.Path)
			}
		},
	}
	envUsername := strings.TrimSpace(os.Getenv("MINIT_WEBDAV_USERNAME"))
	envPassword := strings.TrimSpace(os.Getenv("MINIT_WEBDAV_PASSWORD"))
	// NOTE(review): no Read/WriteTimeout is set on this server — confirm
	// that is acceptable for an internal utility endpoint.
	s := http.Server{
		Addr: ":" + envPort,
		Handler: http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
			// basic auth is enforced only when BOTH credentials are set
			if envUsername != "" && envPassword != "" {
				if username, password, ok := req.BasicAuth(); !ok || username != envUsername || password != envPassword {
					rw.Header().Add("WWW-Authenticate", `Basic realm=Minit WebDAV`)
					rw.WriteHeader(http.StatusUnauthorized)
					return
				}
			}
			h.ServeHTTP(rw, req)
		}),
	}
	// serve in the background, restarting 10s after any failure; this
	// goroutine intentionally has no shutdown path (process lifetime)
	go func() {
		for {
			if err := s.ListenAndServe(); err != nil {
				logger.Printf("failed running WebDAV: %s", err.Error())
			}
			time.Sleep(time.Second * 10)
		}
	}()
	return
}
|
||||
143
pkg/msetups/setup_zombies.go
Normal file
143
pkg/msetups/setup_zombies.go
Normal file
@@ -0,0 +1,143 @@
|
||||
//go:build linux
|
||||
|
||||
package msetups
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/guoyk93/minit/pkg/mlog"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strconv"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// register zombie-process reaping as setup step order 60
func init() {
	Register(60, setupZombies)
}
|
||||
|
||||
// setupZombies starts the background zombie reaper, but only when minit is
// running as PID 1; otherwise orphaned processes are not re-parented to us.
func setupZombies(log mlog.ProcLogger) (err error) {
	// not PID 1: cleaning up zombie processes is some other init's job
	if os.Getpid() != 1 {
		log.Print("minit is not running as PID 1, skipping cleaning up zombies")
		return
	}

	go runZombieCleaner(log)

	return
}
|
||||
|
||||
// runZombieCleaner waits for reap triggers and scans for zombie processes.
// Scans are debounced: a SIGCHLD schedules one 3s later, the 30s ticker
// schedules one 5s later, and at most one scan is pending at a time.
// It runs forever and is intended to be launched as a goroutine.
func runZombieCleaner(log mlog.ProcLogger) {
	// triggered when a child process changes state (SIGCHLD)
	chSig := make(chan os.Signal, 10)
	signal.Notify(chSig, syscall.SIGCHLD)

	// periodic fallback trigger
	tk := time.NewTicker(time.Second * 30)

	// non-nil while a scan is already scheduled (debounce gate)
	var chT <-chan time.Time

	for {
		select {
		case <-chSig:
			if chT == nil {
				chT = time.After(time.Second * 3)
			}
		case <-tk.C:
			if chT == nil {
				chT = time.After(time.Second * 5)
			}
		case <-chT:
			chT = nil
			cleanZombieProcesses(log)
		}
	}
}
|
||||
|
||||
func cleanZombieProcesses(log mlog.ProcLogger) {
|
||||
var (
|
||||
err error
|
||||
pids []int
|
||||
)
|
||||
if pids, err = findZombieProcesses(); err != nil {
|
||||
log.Print("failed checking zombies:", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
for _, pid := range pids {
|
||||
go waitZombieProcess(log, pid)
|
||||
}
|
||||
}
|
||||
|
||||
// findZombieProcesses lists the PIDs of all zombie processes by scanning
// the numeric entries of /proc and inspecting each process's stat file.
func findZombieProcesses() (pids []int, err error) {
	var f *os.File
	if f, err = os.Open("/proc"); err != nil {
		return
	}
	defer f.Close()
	var dirnames []string
	if dirnames, err = f.Readdirnames(-1); err != nil {
		return
	}
	for _, dirname := range dirnames {
		// only directories whose name starts with a digit are PIDs
		if dirname[0] < '0' || dirname[0] > '9' {
			continue
		}
		var pid int
		if pid, err = strconv.Atoi(dirname); err != nil {
			return
		}
		var zombie bool
		if zombie, err = checkProcessIsZombie(pid); err != nil {
			// the process may have exited between listing and reading its
			// stat file; skip it and keep scanning
			err = nil
			continue
		}
		if zombie {
			pids = append(pids, pid)
		}
	}
	return
}
|
||||
|
||||
// checkProcessIsZombie reports whether the process identified by pid is in
// the zombie state, based on the contents of /proc/<pid>/stat.
func checkProcessIsZombie(pid int) (zombie bool, err error) {
	var buf []byte
	// NOTE(review): io/ioutil is deprecated; switch to os.ReadFile when the
	// file's import block is next touched.
	if buf, err = ioutil.ReadFile(fmt.Sprintf("/proc/%d/stat", pid)); err != nil {
		return
	}
	zombie = checkProcStatIsZombie(buf)
	return
}
|
||||
|
||||
// checkProcStatIsZombie reports whether a /proc/<pid>/stat payload
// describes a zombie, i.e. the state field following the parenthesized
// command name is 'Z'.
func checkProcStatIsZombie(buf []byte) bool {
	if len(buf) == 0 {
		return false
	}
	// the comm field may itself contain ')', so locate the LAST ')' and
	// read the state character that follows it
	idx := bytes.LastIndexByte(buf, ')')
	if idx < 0 {
		return false
	}
	rest := bytes.TrimSpace(buf[idx+1:])
	return len(rest) > 0 && rest[0] == 'Z'
}
|
||||
|
||||
// waitZombieProcess reaps a single zombie by calling wait4 until the child
// has been collected (ECHILD), retrying when interrupted by a signal.
func waitZombieProcess(log mlog.ProcLogger, pid int) {
	var err error
	var ws syscall.WaitStatus
	for {
		_, err = syscall.Wait4(pid, &ws, 0, nil)
		// retry immediately while interrupted by signals
		for syscall.EINTR == err {
			_, err = syscall.Wait4(pid, &ws, 0, nil)
		}
		// ECHILD means no such child remains: it has been reaped
		if syscall.ECHILD == err {
			break
		}
	}
	log.Printf("zombie cleaned %d", pid)
}
|
||||
16
pkg/msetups/setup_zombies_test.go
Normal file
16
pkg/msetups/setup_zombies_test.go
Normal file
@@ -0,0 +1,16 @@
|
||||
//go:build linux
|
||||
|
||||
package msetups
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCheckProcStatIsZombie(t *testing.T) {
|
||||
var res bool
|
||||
res = checkProcStatIsZombie([]byte("299923 (kworker/2:1-cgroup_pidlist_destroy) R 2 0 0 0 -1 69238880 0 0 0 0 9 153 0 0 20 0 1 0 78232531 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 0 0 0 17 2 0 0 0 0 0 0 0 0 0 0 0 0 0"))
|
||||
require.False(t, res)
|
||||
res = checkProcStatIsZombie([]byte("299923 (kworker/2:1-cgroup_pidlist_destroy) Z 2 0 0 0 -1 69238880 0 0 0 0 9 153 0 0 20 0 1 0 78232531 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 0 0 0 17 2 0 0 0 0 0 0 0 0 0 0 0 0 0"))
|
||||
require.True(t, res)
|
||||
}
|
||||
24
pkg/mtmpl/execute.go
Normal file
24
pkg/mtmpl/execute.go
Normal file
@@ -0,0 +1,24 @@
|
||||
package mtmpl
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
// Execute render text template with predefined funcs
|
||||
func Execute(src string, data any) (out []byte, err error) {
|
||||
var t *template.Template
|
||||
if t, err = template.
|
||||
New("__main__").
|
||||
Funcs(Funcs).
|
||||
Option("missingkey=zero").
|
||||
Parse(src); err != nil {
|
||||
return
|
||||
}
|
||||
o := &bytes.Buffer{}
|
||||
if err = t.Execute(o, data); err != nil {
|
||||
return
|
||||
}
|
||||
out = o.Bytes()
|
||||
return
|
||||
}
|
||||
13
pkg/mtmpl/execute_test.go
Normal file
13
pkg/mtmpl/execute_test.go
Normal file
@@ -0,0 +1,13 @@
|
||||
package mtmpl
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestExecute(t *testing.T) {
|
||||
buf, err := Execute(TEST_TMPL, map[string]interface{}{"A": "B"})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "2\nB", strings.TrimSpace(string(buf)))
|
||||
}
|
||||
140
pkg/mtmpl/funcs.go
Normal file
140
pkg/mtmpl/funcs.go
Normal file
@@ -0,0 +1,140 @@
|
||||
package mtmpl
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
"os"
|
||||
"os/user"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Funcs provided funcs for render
|
||||
var Funcs = map[string]interface{}{
|
||||
"netResolveIPAddr": net.ResolveIPAddr,
|
||||
"netResolveIP": netResolveIP,
|
||||
"osHostname": os.Hostname,
|
||||
"osUserCacheDir": os.UserCacheDir,
|
||||
"osUserConfigDir": os.UserConfigDir,
|
||||
"osUserHomeDir": os.UserHomeDir,
|
||||
"osGetegid": os.Getegid,
|
||||
"osGetenv": os.Getenv,
|
||||
"osGeteuid": os.Geteuid,
|
||||
"osGetgid": os.Getgid,
|
||||
"osGetgroups": os.Getgroups,
|
||||
"osGetpagesize": os.Getpagesize,
|
||||
"osGetpid": os.Getpid,
|
||||
"osGetppid": os.Getppid,
|
||||
"osGetuid": os.Getuid,
|
||||
"osGetwd": os.Getwd,
|
||||
"osTempDir": os.TempDir,
|
||||
"osUserLookupGroup": user.LookupGroup,
|
||||
"osUserLookupGroupId": user.LookupGroupId,
|
||||
"osUserCurrent": user.Current,
|
||||
"osUserLookup": user.Lookup,
|
||||
"osUserLookupId": user.LookupId,
|
||||
"stringsContains": strings.Contains,
|
||||
"stringsFields": strings.Fields,
|
||||
"stringsIndex": strings.Index,
|
||||
"stringsLastIndex": strings.LastIndex,
|
||||
"stringsHasPrefix": strings.HasPrefix,
|
||||
"stringsHasSuffix": strings.HasSuffix,
|
||||
"stringsRepeat": strings.Repeat,
|
||||
"stringsReplaceAll": strings.ReplaceAll,
|
||||
"stringsSplit": strings.Split,
|
||||
"stringsSplitN": strings.SplitN,
|
||||
"stringsToLower": strings.ToLower,
|
||||
"stringsToUpper": strings.ToUpper,
|
||||
"stringsTrimPrefix": strings.TrimPrefix,
|
||||
"stringsTrimSpace": strings.TrimSpace,
|
||||
"stringsTrimSuffix": strings.TrimSuffix,
|
||||
"strconvQuote": strconv.Quote,
|
||||
"strconvUnquote": strconv.Unquote,
|
||||
"strconvParseBool": strconv.ParseBool,
|
||||
"strconvParseInt": strconv.ParseInt,
|
||||
"strconvParseUint": strconv.ParseUint,
|
||||
"strconvParseFloat": strconv.ParseFloat,
|
||||
"strconvFormatBool": strconv.FormatBool,
|
||||
"strconvFormatInt": strconv.FormatInt,
|
||||
"strconvFormatUint": strconv.FormatUint,
|
||||
"strconvFormatFloat": strconv.FormatFloat,
|
||||
"strconvAoti": strconv.Atoi,
|
||||
"strconvItoa": strconv.Itoa,
|
||||
|
||||
"add": add,
|
||||
"neg": neg,
|
||||
"intAdd": add,
|
||||
"intNeg": neg,
|
||||
"int64Add": add,
|
||||
"int64Neg": neg,
|
||||
"float32Add": add,
|
||||
"float32Neg": neg,
|
||||
"float64Add": add,
|
||||
"float64Neg": neg,
|
||||
|
||||
"osHostnameSequenceID": osHostnameSequenceID,
|
||||
"k8sStatefulSetID": osHostnameSequenceID,
|
||||
}
|
||||
|
||||
// netResolveIP resolves host s to an IP address and returns its textual
// representation.
func netResolveIP(s string) (ip string, err error) {
	addr, resolveErr := net.ResolveIPAddr("ip", s)
	if resolveErr != nil {
		return "", resolveErr
	}
	return addr.IP.String(), nil
}
|
||||
|
||||
// add returns a+b for matching numeric or string operands, logical OR for
// bools, and nil when the first operand's type is unsupported. Both
// operands must share the same dynamic type.
func add(a, b interface{}) interface{} {
	switch x := a.(type) {
	case bool:
		return x || b.(bool)
	case int:
		return x + b.(int)
	case int64:
		return x + b.(int64)
	case int32:
		return x + b.(int32)
	case float32:
		return x + b.(float32)
	case float64:
		return x + b.(float64)
	case string:
		return x + b.(string)
	default:
		return nil
	}
}
|
||||
|
||||
// neg returns the arithmetic negation of numeric operands, logical NOT for
// bools, and nil when the operand's type is unsupported.
func neg(a interface{}) interface{} {
	switch x := a.(type) {
	case bool:
		return !x
	case int:
		return -x
	case int64:
		return -x
	case int32:
		return -x
	case float32:
		return -x
	case float64:
		return -x
	default:
		return nil
	}
}
|
||||
|
||||
func osHostnameSequenceID() (id int, err error) {
|
||||
var hostname string
|
||||
if hostname = os.Getenv("HOSTNAME"); hostname == "" {
|
||||
if hostname, err = os.Hostname(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
splits := strings.Split(hostname, "-")
|
||||
if len(splits) < 2 {
|
||||
err = errors.New("invalid stateful-set hostname")
|
||||
return
|
||||
}
|
||||
id, err = strconv.Atoi(splits[len(splits)-1])
|
||||
return
|
||||
}
|
||||
26
pkg/mtmpl/funcs_test.go
Normal file
26
pkg/mtmpl/funcs_test.go
Normal file
@@ -0,0 +1,26 @@
|
||||
package mtmpl
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/stretchr/testify/require"
|
||||
"strings"
|
||||
"testing"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
const TEST_TMPL = `
|
||||
{{$a := 3}}
|
||||
{{$b := 1}}
|
||||
{{add (neg $b) $a}}
|
||||
{{.A}}
|
||||
`
|
||||
|
||||
func TestFuncs(t *testing.T) {
|
||||
tmpl := template.New("__main__").Funcs(Funcs).Option("missingkey=zero")
|
||||
tmpl, err := tmpl.Parse(TEST_TMPL)
|
||||
require.NoError(t, err)
|
||||
buf := &bytes.Buffer{}
|
||||
err = tmpl.Execute(buf, map[string]interface{}{"A": "B"})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "2\nB", strings.TrimSpace(buf.String()))
|
||||
}
|
||||
62
pkg/munit/filter.go
Normal file
62
pkg/munit/filter.go
Normal file
@@ -0,0 +1,62 @@
|
||||
package munit
|
||||
|
||||
import "strings"
|
||||
|
||||
type FilterMap map[string]struct{}
|
||||
|
||||
func (fm FilterMap) Match(unit Unit) bool {
|
||||
if fm == nil {
|
||||
return false
|
||||
}
|
||||
if _, ok := fm[unit.Name]; ok {
|
||||
return true
|
||||
}
|
||||
if _, ok := fm[PrefixGroup+unit.Group]; ok {
|
||||
return true
|
||||
}
|
||||
if _, ok := fm[PrefixKind+unit.Kind]; ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func NewFilterMap(s string) (out FilterMap) {
|
||||
s = strings.TrimSpace(s)
|
||||
for _, item := range strings.Split(s, ",") {
|
||||
item = strings.TrimSpace(item)
|
||||
if item == "" || item == PrefixGroup || item == PrefixKind {
|
||||
continue
|
||||
}
|
||||
if out == nil {
|
||||
out = FilterMap{}
|
||||
}
|
||||
out[item] = struct{}{}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type Filter struct {
|
||||
pass FilterMap
|
||||
deny FilterMap
|
||||
}
|
||||
|
||||
func NewFilter(pass, deny string) (uf *Filter) {
|
||||
return &Filter{
|
||||
pass: NewFilterMap(pass),
|
||||
deny: NewFilterMap(deny),
|
||||
}
|
||||
}
|
||||
|
||||
func (uf *Filter) Match(unit Unit) bool {
|
||||
if uf.pass != nil {
|
||||
if !uf.pass.Match(unit) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if uf.deny != nil {
|
||||
if uf.deny.Match(unit) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
117
pkg/munit/filter_test.go
Normal file
117
pkg/munit/filter_test.go
Normal file
@@ -0,0 +1,117 @@
|
||||
package munit
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"github.com/stretchr/testify/require"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewFilterMap(t *testing.T) {
|
||||
fm := NewFilterMap("")
|
||||
require.Nil(t, fm)
|
||||
|
||||
fm = NewFilterMap(",, ,")
|
||||
require.Nil(t, fm)
|
||||
|
||||
fm = NewFilterMap("unit-a,&daemon")
|
||||
require.True(t, fm.Match(Unit{
|
||||
Name: "unit-a",
|
||||
}))
|
||||
require.True(t, fm.Match(Unit{
|
||||
Name: "unit-b",
|
||||
Kind: "daemon",
|
||||
}))
|
||||
|
||||
fm = NewFilterMap("unit-a, ,, @group-b, unit-c,,")
|
||||
require.NotNil(t, fm)
|
||||
require.True(t, fm.Match(Unit{
|
||||
Name: "unit-a",
|
||||
}))
|
||||
require.True(t, fm.Match(Unit{
|
||||
Name: "unit-b",
|
||||
Group: "group-b",
|
||||
}))
|
||||
require.True(t, fm.Match(Unit{
|
||||
Name: "unit-c",
|
||||
Group: "group-c",
|
||||
}))
|
||||
require.False(t, fm.Match(Unit{
|
||||
Name: "unit-d",
|
||||
Group: "group-d",
|
||||
}))
|
||||
}
|
||||
|
||||
func TestNewFilter(t *testing.T) {
|
||||
f := NewFilter(" , , , ", ",, ,")
|
||||
for i := 0; i < 10; i++ {
|
||||
buf := make([]byte, 10)
|
||||
rand.Read(buf)
|
||||
require.True(t, f.Match(Unit{
|
||||
Name: hex.EncodeToString(buf),
|
||||
Group: hex.EncodeToString(buf),
|
||||
}))
|
||||
}
|
||||
|
||||
f = NewFilter("unit-a,&daemon", "")
|
||||
require.False(t, f.Match(Unit{
|
||||
Name: "bla",
|
||||
Kind: KindCron,
|
||||
}))
|
||||
require.True(t, f.Match(Unit{
|
||||
Name: "bla",
|
||||
Kind: KindDaemon,
|
||||
}))
|
||||
require.True(t, f.Match(Unit{
|
||||
Name: "unit-a",
|
||||
Kind: KindCron,
|
||||
}))
|
||||
|
||||
f = NewFilter("", "unit-a,&daemon")
|
||||
require.True(t, f.Match(Unit{
|
||||
Name: "bla",
|
||||
Kind: KindCron,
|
||||
}))
|
||||
require.False(t, f.Match(Unit{
|
||||
Name: "bla",
|
||||
Kind: KindDaemon,
|
||||
}))
|
||||
require.False(t, f.Match(Unit{
|
||||
Name: "unit-a",
|
||||
Kind: KindCron,
|
||||
}))
|
||||
|
||||
f = NewFilter("", "unit-a,,,@group-c,,")
|
||||
require.True(t, f.Match(Unit{
|
||||
Name: "unit-b",
|
||||
Group: "group-b",
|
||||
}))
|
||||
require.False(t, f.Match(Unit{
|
||||
Name: "unit-c",
|
||||
Group: "group-c",
|
||||
}))
|
||||
|
||||
f = NewFilter("unit-a,,,@group-c,,", "")
|
||||
require.False(t, f.Match(Unit{
|
||||
Name: "unit-b",
|
||||
Group: "group-b",
|
||||
}))
|
||||
require.True(t, f.Match(Unit{
|
||||
Name: "unit-c",
|
||||
Group: "group-c",
|
||||
}))
|
||||
|
||||
f = NewFilter("unit-a,,,@group-c,,", "unit-c2")
|
||||
require.False(t, f.Match(Unit{
|
||||
Name: "unit-b",
|
||||
Group: "group-b",
|
||||
}))
|
||||
require.True(t, f.Match(Unit{
|
||||
Name: "unit-c",
|
||||
Group: "group-c",
|
||||
}))
|
||||
require.False(t, f.Match(Unit{
|
||||
Name: "unit-c2",
|
||||
Group: "group-c",
|
||||
}))
|
||||
}
|
||||
160
pkg/munit/load.go
Normal file
160
pkg/munit/load.go
Normal file
@@ -0,0 +1,160 @@
|
||||
package munit
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/guoyk93/minit/pkg/shellquote"
|
||||
"gopkg.in/yaml.v3"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func LoadArgs(args []string) (unit Unit, ok bool, err error) {
|
||||
var opts []string
|
||||
|
||||
// fix a history issue
|
||||
for len(args) > 0 {
|
||||
if filepath.Base(args[0]) == "minit" {
|
||||
args = args[1:]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// extract arguments after '--' if existed
|
||||
for i, item := range args {
|
||||
if item == "--" {
|
||||
opts = args[0:i]
|
||||
args = args[i+1:]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
unit = Unit{
|
||||
Name: "arg-main",
|
||||
Kind: KindDaemon,
|
||||
Command: args,
|
||||
}
|
||||
|
||||
// opts decoding
|
||||
for _, opt := range opts {
|
||||
if strings.HasSuffix(opt, "-"+KindOnce) {
|
||||
unit.Kind = KindOnce
|
||||
}
|
||||
}
|
||||
|
||||
ok = true
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// LoadEnv builds the "env-main" unit from MINIT_MAIN* environment
// variables. ok is false when MINIT_MAIN is unset or blank.
//
// Recognized variables: MINIT_MAIN (command line), MINIT_MAIN_NAME,
// MINIT_MAIN_GROUP, MINIT_MAIN_KIND (daemon/once/cron), MINIT_MAIN_CRON and
// MINIT_MAIN_IMMEDIATE (cron only), MINIT_MAIN_ONCE (legacy bool consulted
// when KIND is empty), MINIT_MAIN_DIR and MINIT_MAIN_CHARSET.
func LoadEnv() (unit Unit, ok bool, err error) {
	cmd := strings.TrimSpace(os.Getenv("MINIT_MAIN"))
	if cmd == "" {
		return
	}

	name := strings.TrimSpace(os.Getenv("MINIT_MAIN_NAME"))
	if name == "" {
		name = "env-main"
	}

	var (
		cron      string
		immediate bool
	)

	kind := strings.TrimSpace(os.Getenv("MINIT_MAIN_KIND"))

	switch kind {
	case KindDaemon, KindOnce:
	case KindCron:
		// cron units additionally need a schedule and may run immediately
		cron = strings.TrimSpace(os.Getenv("MINIT_MAIN_CRON"))

		if cron == "" {
			err = errors.New("missing environment variable $MINIT_MAIN_CRON while $MINIT_MAIN_KIND is 'cron'")
			return
		}

		immediate, _ = strconv.ParseBool(os.Getenv("MINIT_MAIN_IMMEDIATE"))
	case "":
		// legacy behavior: MINIT_MAIN_ONCE toggles between once and daemon
		if once, _ := strconv.ParseBool(strings.TrimSpace(os.Getenv("MINIT_MAIN_ONCE"))); once {
			kind = KindOnce
		} else {
			kind = KindDaemon
		}
	default:
		err = errors.New("unsupported $MINIT_MAIN_KIND: " + kind)
		return
	}

	// split the command line with shell-style quoting rules
	var cmds []string
	if cmds, err = shellquote.Split(cmd); err != nil {
		return
	}

	unit = Unit{
		Name:      name,
		Group:     strings.TrimSpace(os.Getenv("MINIT_MAIN_GROUP")),
		Kind:      kind,
		Cron:      cron,
		Immediate: immediate,
		Command:   cmds,
		Dir:       strings.TrimSpace(os.Getenv("MINIT_MAIN_DIR")),
		Charset:   strings.TrimSpace(os.Getenv("MINIT_MAIN_CHARSET")),
	}

	ok = true
	return
}
|
||||
|
||||
func LoadFile(filename string) (units []Unit, err error) {
|
||||
var f *os.File
|
||||
if f, err = os.Open(filename); err != nil {
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
dec := yaml.NewDecoder(f)
|
||||
for {
|
||||
var unit Unit
|
||||
if err = dec.Decode(&unit); err != nil {
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
} else {
|
||||
err = fmt.Errorf("failed to decode unit file %s: %s", filename, err.Error())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if unit.Kind == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
units = append(units, unit)
|
||||
}
|
||||
}
|
||||
|
||||
func LoadDir(dir string) (units []Unit, err error) {
|
||||
for _, ext := range []string{"*.yml", "*.yaml"} {
|
||||
var files []string
|
||||
if files, err = filepath.Glob(filepath.Join(dir, ext)); err != nil {
|
||||
return
|
||||
}
|
||||
for _, file := range files {
|
||||
var _units []Unit
|
||||
if _units, err = LoadFile(file); err != nil {
|
||||
return
|
||||
}
|
||||
units = append(units, _units...)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
105
pkg/munit/load_test.go
Normal file
105
pkg/munit/load_test.go
Normal file
@@ -0,0 +1,105 @@
|
||||
package munit
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestLoadArgs exercises LoadArgs with the argv shapes minit accepts:
// a bare command, leading program names matching "minit", flag prefixes
// terminated by "--", an empty command after "--", and the "--once" flag
// switching the resulting unit kind to KindOnce.
func TestLoadArgs(t *testing.T) {
	// bare command: passed through as-is
	unit, ok, err := LoadArgs([]string{
		"hello",
		"world",
	})
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, []string{"hello", "world"}, unit.Command)

	// leading "minit" / ".../minit" program names are dropped from the command
	unit, ok, err = LoadArgs([]string{
		"minit",
		"/usr/bin/minit",
		"hello",
		"world",
	})
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, []string{"hello", "world"}, unit.Command)

	// flags before "--" are treated as options, not command words
	unit, ok, err = LoadArgs([]string{
		"minit",
		"--a",
		"--b",
		"--",
		"hello",
		"world",
	})
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, []string{"hello", "world"}, unit.Command)

	// nothing after "--": no unit is produced (ok == false)
	unit, ok, err = LoadArgs([]string{
		"minit",
		"--a",
		"--b",
		"--",
	})
	require.NoError(t, err)
	require.False(t, ok)

	// same, without the leading program name
	unit, ok, err = LoadArgs([]string{
		"--a",
		"--b",
		"--",
	})
	require.NoError(t, err)
	require.False(t, ok)

	// an option ending in "-once" switches the unit kind from daemon to once
	unit, ok, err = LoadArgs([]string{
		"minit",
		"--once",
		"--b",
		"--",
		"sleep",
		"30",
	})
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, []string{"sleep", "30"}, unit.Command)
	require.Equal(t, KindOnce, unit.Kind)

	// "--once" also works without the leading program name
	unit, ok, err = LoadArgs([]string{
		"--once",
		"--b",
		"--",
		"sleep",
		"30",
	})
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, []string{"sleep", "30"}, unit.Command)
	require.Equal(t, KindOnce, unit.Kind)
}
|
||||
|
||||
// TestLoadEnv verifies that LoadEnv assembles a cron Unit from the
// MINIT_MAIN* environment variables, including shell-style word splitting
// of the MINIT_MAIN command string (single quotes keep words together).
//
// NOTE(review): these variables are never unset, so they leak into any
// test that runs later in the same process — confirm ordering is safe.
func TestLoadEnv(t *testing.T) {
	os.Setenv("MINIT_MAIN", "hello 'world destroyer'")
	os.Setenv("MINIT_MAIN_KIND", "cron")
	os.Setenv("MINIT_MAIN_NAME", "test-main")
	os.Setenv("MINIT_MAIN_CRON", "1 2 3 4 5")
	os.Setenv("MINIT_MAIN_GROUP", "bbb")
	os.Setenv("MINIT_MAIN_CHARSET", "gbk")

	unit, ok, err := LoadEnv()
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, Unit{
		Kind:  "cron",
		Name:  "test-main",
		Cron:  "1 2 3 4 5",
		Group: "bbb",
		// "world destroyer" stays one argument because of the single quotes
		Command: []string{
			"hello",
			"world destroyer",
		},
		Charset: "gbk",
	}, unit)
}
|
||||
145
pkg/munit/loader.go
Normal file
145
pkg/munit/loader.go
Normal file
@@ -0,0 +1,145 @@
|
||||
package munit
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
	// regexpName constrains unit names: must start with a letter, end with a
	// letter or digit, and may contain letters, digits, '_' and '-' in
	// between. Note the pattern requires at least two characters.
	regexpName = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9_-]*[a-zA-Z0-9]$`)
)

const (
	// NameMinit is reserved; Loader.Load rejects any unit using it.
	NameMinit = "minit"

	// PrefixGroup marks an entry as a group name rather than a unit name;
	// PrefixKind marks an entry as a unit kind. Presumably consumed by
	// Filter when parsing MINIT_ENABLE / MINIT_DISABLE — confirm there.
	PrefixGroup = "@"
	PrefixKind  = "&"
)
|
||||
|
||||
// Loader gathers units from all configured sources, validates them, and
// applies the enable/disable filter.
type Loader struct {
	filter *Filter // built from MINIT_ENABLE / MINIT_DISABLE at construction
}

// NewLoader creates a Loader whose filter is initialized from the
// MINIT_ENABLE and MINIT_DISABLE environment variables (whitespace-trimmed).
func NewLoader() (ld *Loader) {
	return &Loader{
		filter: NewFilter(
			strings.TrimSpace(os.Getenv("MINIT_ENABLE")),
			strings.TrimSpace(os.Getenv("MINIT_DISABLE")),
		),
	}
}
|
||||
|
||||
// LoadOptions selects which sources Loader.Load gathers units from.
type LoadOptions struct {
	Args []string // command-line arguments, parsed by LoadArgs
	Env  bool     // when true, also read the MINIT_MAIN* environment variables
	Dir  string   // when non-empty, load every *.yml / *.yaml unit file in this directory
}
|
||||
|
||||
// Load gathers units from the sources selected in opts, validates each
// unit (known kind, legal name, no duplicates), applies the enable/disable
// filter, and expands replicas.
//
// output holds the units to run — one entry per replica, each with
// MINIT_UNIT_NAME / MINIT_UNIT_SUB_ID injected into its Env. skipped holds
// units rejected by the filter (validated, default group applied, but not
// replica-expanded). err is non-nil on the first invalid or duplicated
// unit, or when any source fails to load.
func (ld *Loader) Load(opts LoadOptions) (output []Unit, skipped []Unit, err error) {
	var units []Unit

	// load units from all requested sources, in a fixed order:
	// directory files first, then command-line args, then environment
	if opts.Dir != "" {
		var dUnits []Unit
		if dUnits, err = LoadDir(opts.Dir); err != nil {
			return
		}
		units = append(units, dUnits...)
	}
	if len(opts.Args) > 0 {
		var unit Unit
		var ok bool
		if unit, ok, err = LoadArgs(opts.Args); err != nil {
			return
		}
		if ok {
			units = append(units, unit)
		}
	}
	if opts.Env {
		var unit Unit
		var ok bool
		if unit, ok, err = LoadEnv(); err != nil {
			return
		}
		if ok {
			units = append(units, unit)
		}
	}

	// track seen names to reject duplicates
	names := map[string]struct{}{}

	// reserve 'minit' so no unit can claim it

	names[NameMinit] = struct{}{}

	// validate, filter, and expand replicas
	for _, unit := range units {
		// check unit kind
		if _, ok := knownUnitKind[unit.Kind]; !ok {
			err = errors.New("invalid unit kind: " + unit.Kind)
			return
		}

		// check unit name against the naming rule
		if !regexpName.MatchString(unit.Name) {
			err = errors.New("invalid unit name: " + unit.Name)
			return
		}

		// check duplicated names (includes the reserved 'minit');
		// note skipped units still consume their name
		if _, found := names[unit.Name]; found {
			err = errors.New("duplicated unit name: " + unit.Name)
			return
		}
		names[unit.Name] = struct{}{}

		// fix default group (range gave us a copy, safe to mutate)
		if unit.Group == "" {
			unit.Group = DefaultGroup
		}

		// skip units rejected by the enable/disable filter
		if !ld.filter.Match(unit) {
			skipped = append(skipped, unit)
			continue
		}

		// replicas: Count > 1 expands into "<name>-<i>" copies, each with
		// Count reset to 1 and its identity injected into a private Env copy
		if unit.Count > 1 {
			for i := 0; i < unit.Count; i++ {
				subUnit := unit
				subUnit.Name = unit.Name + "-" + strconv.Itoa(i+1)
				subUnit.Count = 1
				// copy Env so replicas don't share one map
				dupOrMakeMap(&subUnit.Env)
				subUnit.Env["MINIT_UNIT_NAME"] = subUnit.Name
				subUnit.Env["MINIT_UNIT_SUB_ID"] = strconv.Itoa(i + 1)

				output = append(output, subUnit)
			}
		} else {
			// single replica: normalize Count (0 or negative becomes 1)
			unit.Count = 1
			dupOrMakeMap(&unit.Env)
			unit.Env["MINIT_UNIT_NAME"] = unit.Name
			unit.Env["MINIT_UNIT_SUB_ID"] = "1"

			output = append(output, unit)
		}
	}

	return
}
|
||||
|
||||
// dupOrMakeMap replaces *m with a shallow copy of itself, or with a fresh
// empty map when *m is nil, so the caller can safely mutate the result
// without aliasing the original.
func dupOrMakeMap[T comparable, U any](m *map[T]U) {
	// ranging over a nil map is a no-op, so no nil check is needed
	out := make(map[T]U, len(*m))
	for k, v := range *m {
		out[k] = v
	}
	*m = out
}
|
||||
36
pkg/munit/loader_test.go
Normal file
36
pkg/munit/loader_test.go
Normal file
@@ -0,0 +1,36 @@
|
||||
package munit
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestNewLoader loads the five units under testdata with only group
// "default" enabled and task-3/task-5 disabled: task-4 (default group,
// not disabled) survives; task-1, task-2 (group-echo), task-3 and task-5
// (disabled) are skipped. Filtering happens before replica expansion, so
// task-2 (count: 3) counts once in skipped.
//
// NOTE(review): MINIT_ENABLE / MINIT_DISABLE are never unset afterwards —
// confirm no later test depends on them being empty.
func TestNewLoader(t *testing.T) {
	os.Setenv("MINIT_ENABLE", "@default")
	os.Setenv("MINIT_DISABLE", "task-3,task-5")
	ld := NewLoader()
	units, skipped, err := ld.Load(LoadOptions{
		Dir: "testdata",
	})

	require.NoError(t, err)
	require.Len(t, units, 1)
	require.Len(t, skipped, 4)
	require.Equal(t, "task-4", units[0].Name)
}
|
||||
|
||||
// TestDupOrMakeMap checks both behaviors of dupOrMakeMap: a nil map is
// replaced by a fresh empty map, and a non-nil map is replaced by a copy
// detached from the original backing storage.
func TestDupOrMakeMap(t *testing.T) {
	var o map[string]any
	dupOrMakeMap(&o)
	require.NotNil(t, o)

	m1a := map[string]string{
		"a": "b",
	}
	m1b := m1a
	dupOrMakeMap(&m1a)
	m1a["c"] = "d"
	// the duplicated map received the write...
	require.Equal(t, "d", m1a["c"])
	// ...while the alias of the original map did not
	require.Equal(t, "", m1b["c"])
}
|
||||
16
pkg/munit/testdata/test1.yml
vendored
Normal file
16
pkg/munit/testdata/test1.yml
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
name: task-1
|
||||
kind: once
|
||||
group: group-echo
|
||||
command:
|
||||
- echo
|
||||
- once
|
||||
- $HOME
|
||||
---
|
||||
name: task-2
|
||||
kind: daemon
|
||||
group: group-echo
|
||||
count: 3
|
||||
shell: /bin/bash
|
||||
command:
|
||||
- sleep 1 && echo hello world
|
||||
---
|
||||
18
pkg/munit/testdata/test2.yml
vendored
Normal file
18
pkg/munit/testdata/test2.yml
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
name: task-3
|
||||
kind: daemon
|
||||
count: 3
|
||||
command:
|
||||
- sleep
|
||||
- 5
|
||||
---
|
||||
name: task-4
|
||||
kind: cron
|
||||
cron: "@every 10s"
|
||||
command:
|
||||
- echo
|
||||
- cron
|
||||
---
|
||||
name: task-5
|
||||
kind: render
|
||||
files:
|
||||
- testdata/conf/*.conf
|
||||
85
pkg/munit/unit.go
Normal file
85
pkg/munit/unit.go
Normal file
@@ -0,0 +1,85 @@
|
||||
package munit
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/guoyk93/minit/pkg/mexec"
|
||||
"github.com/guoyk93/minit/pkg/mlog"
|
||||
)
|
||||
|
||||
const (
	// DefaultGroup is assigned to units that declare no group of their own.
	DefaultGroup = "default"
)

// Unit kinds.
const (
	KindDaemon = "daemon" // long-running process — restart semantics live in the runner; confirm there
	KindOnce   = "once"   // run a single time
	KindCron   = "cron"   // run on a cron schedule (see Unit.Cron / Unit.Immediate)
	KindRender = "render" // render template files (see Unit.Files / Unit.Raw)
)

var (
	// knownUnitKind is the set of valid values for Unit.Kind,
	// used by Loader.Load for validation.
	knownUnitKind = map[string]struct{}{
		KindDaemon: {},
		KindOnce:   {},
		KindCron:   {},
		KindRender: {},
	}
)
|
||||
|
||||
// Unit describes a single workload managed by minit, declared via a YAML
// unit file, command-line arguments, or MINIT_MAIN* environment variables.
type Unit struct {
	Kind  string `yaml:"kind"`  // kind of unit: daemon / once / cron / render
	Name  string `yaml:"name"`  // unique name of unit ("minit" is reserved)
	Group string `yaml:"group"` // group of unit, matched by the MINIT_ENABLE / MINIT_DISABLE filter
	Count int    `yaml:"count"` // replicas of unit; values > 1 expand into "<name>-<i>" copies

	// execution options
	Dir     string            `yaml:"dir"`     // working directory
	Shell   string            `yaml:"shell"`   // shell to run the command with — exact semantics in mexec; confirm there
	Env     map[string]string `yaml:"env"`     // extra environment variables
	Command []string          `yaml:"command"` // command and arguments
	Charset string            `yaml:"charset"` // output charset (e.g. "gbk")

	// for 'render' only
	Raw   bool     `yaml:"raw"`   // don't trim white spaces for 'render'
	Files []string `yaml:"files"` // files to process

	// for 'cron' only
	Cron      string `yaml:"cron"`      // cron syntax
	Immediate bool   `yaml:"immediate"` // presumably also run once at startup — confirm in runner
}
|
||||
|
||||
func (u Unit) RequireCommand() error {
|
||||
if len(u.Command) == 0 {
|
||||
return errors.New("missing unit field: command")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u Unit) RequireFiles() error {
|
||||
if len(u.Files) == 0 {
|
||||
return errors.New("missing unit field: command")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u Unit) RequireCron() error {
|
||||
if len(u.Cron) == 0 {
|
||||
return errors.New("missing unit field: cron")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExecuteOptions converts the unit's execution-related fields into
// mexec.ExecuteOptions, attaching the given logger. The process is named
// "<kind>/<name>". IgnoreExecError is always set — presumably so the
// unit's lifecycle handling stays with the caller; confirm in mexec.
func (u Unit) ExecuteOptions(logger mlog.ProcLogger) mexec.ExecuteOptions {
	return mexec.ExecuteOptions{
		Name: u.Kind + "/" + u.Name,

		Dir:     u.Dir,
		Shell:   u.Shell,
		Env:     u.Env,
		Command: u.Command,
		Charset: u.Charset,

		Logger:          logger,
		IgnoreExecError: true,
	}
}
|
||||
29
pkg/shellquote/both_test.go
Normal file
29
pkg/shellquote/both_test.go
Normal file
@@ -0,0 +1,29 @@
|
||||
package shellquote
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
)
|
||||
|
||||
// this is called bothtest because it tests Split and Join together
|
||||
|
||||
// TestJoinSplit property-tests the round trip: for arbitrary string
// slices, Split(Join(strs...)) must reproduce the input exactly.
// testing/quick generates the random inputs.
func TestJoinSplit(t *testing.T) {
	f := func(strs []string) bool {
		// Join, then split, the input
		combined := Join(strs...)
		split, err := Split(combined)
		if err != nil {
			t.Logf("Error splitting %#v: %v", combined, err)
			return false
		}
		if !reflect.DeepEqual(strs, split) {
			t.Logf("Input %q did not match output %q", strs, split)
			return false
		}
		return true
	}
	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}
|
||||
3
pkg/shellquote/doc.go
Normal file
3
pkg/shellquote/doc.go
Normal file
@@ -0,0 +1,3 @@
|
||||
// Package shellquote provides utilities for joining/splitting strings using sh's
|
||||
// word-splitting rules.
|
||||
package shellquote
|
||||
99
pkg/shellquote/quote.go
Normal file
99
pkg/shellquote/quote.go
Normal file
@@ -0,0 +1,99 @@
|
||||
package shellquote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Join quotes each argument and joins them with a single space so that the
// result, when interpreted by /bin/sh, splits back into the original
// arguments.
func Join(args ...string) string {
	var out bytes.Buffer
	for idx, arg := range args {
		if idx > 0 {
			out.WriteByte(' ')
		}
		quote(arg, &out)
	}
	return out.String()
}

const (
	// specialChars can each be neutralized with a single backslash escape.
	specialChars = "\\'\"`${[|&;<>()*?!~"
	// extraSpecialChars force the whole word into single-quote mode, since
	// backslash-escaping them produces hard-to-read output.
	extraSpecialChars = " \t\n"
)

// quote appends a shell-safe rendering of word to buf. It prefers
// backslash-escaping individual special characters; the moment it meets
// whitespace (an extra-special character) it rolls the word back and emits
// it entirely in single-quote mode, which reads better for multi-word
// arguments.
func quote(word string, buf *bytes.Buffer) {
	if word == "" {
		// an empty argument still needs a visible token
		buf.WriteString("''")
		return
	}

	mark := buf.Len() // rollback point if we switch to quote mode
	flushed := 0      // bytes of word already copied to buf

	for i := 0; i < len(word); {
		r, size := utf8.DecodeRuneInString(word[i:])
		if strings.ContainsRune(extraSpecialChars, r) {
			// undo everything written for this word and single-quote it whole
			buf.Truncate(mark)
			quoteSingle(word, buf)
			return
		}
		if strings.ContainsRune(specialChars, r) {
			buf.WriteString(word[flushed:i])
			buf.WriteByte('\\')
			buf.WriteRune(r)
			flushed = i + size
		}
		i += size
	}
	buf.WriteString(word[flushed:])
}

// quoteSingle writes word wrapped in single quotes. A literal single quote
// cannot appear inside a single-quoted string, so each one terminates the
// current quoted run, is emitted as \', and a new quoted run is opened for
// whatever follows.
func quoteSingle(word string, buf *bytes.Buffer) {
	open := false
	for {
		i := strings.IndexByte(word, '\'')
		if i < 0 {
			break
		}
		if i > 0 {
			if !open {
				buf.WriteByte('\'')
				open = true
			}
			buf.WriteString(word[:i])
		}
		word = word[i+1:]
		if open {
			buf.WriteByte('\'')
			open = false
		}
		buf.WriteString(`\'`)
	}
	if len(word) > 0 {
		if !open {
			buf.WriteByte('\'')
		}
		buf.WriteString(word)
		buf.WriteByte('\'')
	}
}
|
||||
31
pkg/shellquote/quote_test.go
Normal file
31
pkg/shellquote/quote_test.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package shellquote
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestSimpleJoin runs Join over the simpleJoinTest table, comparing each
// quoted result against the expected shell-safe string.
func TestSimpleJoin(t *testing.T) {
	for _, elem := range simpleJoinTest {
		output := Join(elem.input...)
		if output != elem.output {
			t.Errorf("Input %q, got %q, expected %q", elem.input, output, elem.output)
		}
	}
}
|
||||
|
||||
var simpleJoinTest = []struct {
|
||||
input []string
|
||||
output string
|
||||
}{
|
||||
{[]string{"test"}, "test"},
|
||||
{[]string{"hello goodbye"}, "'hello goodbye'"},
|
||||
{[]string{"hello", "goodbye"}, "hello goodbye"},
|
||||
{[]string{"don't you know the dewey decimal system?"}, "'don'\\''t you know the dewey decimal system?'"},
|
||||
{[]string{"don't", "you", "know", "the", "dewey", "decimal", "system?"}, "don\\'t you know the dewey decimal system\\?"},
|
||||
{[]string{"~user", "u~ser", " ~user", "!~user"}, "\\~user u\\~ser ' ~user' \\!\\~user"},
|
||||
{[]string{"foo*", "M{ovies,usic}", "ab[cd]", "%3"}, "foo\\* M\\{ovies,usic} ab\\[cd] %3"},
|
||||
{[]string{"one", "", "three"}, "one '' three"},
|
||||
{[]string{"some(parentheses)"}, "some\\(parentheses\\)"},
|
||||
{[]string{"$some_ot~her_)spe!cial_*_characters"}, "\\$some_ot\\~her_\\)spe\\!cial_\\*_characters"},
|
||||
{[]string{"' "}, "\\'' '"},
|
||||
}
|
||||
156
pkg/shellquote/unquote.go
Normal file
156
pkg/shellquote/unquote.go
Normal file
@@ -0,0 +1,156 @@
|
||||
package shellquote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Errors reported by Split for malformed input.
//
// NOTE(review): Go convention (ST1012) would name these
// ErrUnterminatedSingleQuote etc.; the existing names are kept because
// they are part of the package's public API.
var (
	UnterminatedSingleQuoteError = errors.New("unterminated single-quoted string")
	UnterminatedDoubleQuoteError = errors.New("unterminated double-quoted string")
	UnterminatedEscapeError      = errors.New("unterminated backslash-escape")
)

var (
	splitChars        = " \n\t" // unquoted whitespace separates words
	singleChar        = '\''
	doubleChar        = '"'
	escapeChar        = '\\'
	doubleEscapeChars = "$`\"\n\\" // the only characters a backslash may escape inside double quotes
)

// Split splits a string according to /bin/sh's word-splitting rules. It
// supports backslash-escapes, single quotes, and double quotes; it does
// not support $'' quoting and performs no expansion of any kind (no
// variables, globs, or braces).
//
// A dangling quote or trailing backslash yields one of the
// Unterminated*Error values.
func Split(input string) (words []string, err error) {
	var buf bytes.Buffer
	words = make([]string, 0)

	for len(input) > 0 {
		c, l := utf8.DecodeRuneInString(input)

		// drop unquoted whitespace between words
		if strings.ContainsRune(splitChars, c) {
			input = input[l:]
			continue
		}

		// a backslash-escaped newline between words is elided entirely
		if c == escapeChar {
			tail := input[l:]
			if len(tail) == 0 {
				err = UnterminatedEscapeError
				return
			}
			if c2, l2 := utf8.DecodeRuneInString(tail); c2 == '\n' {
				input = tail[l2:]
				continue
			}
		}

		var word string
		if word, input, err = splitWord(input, &buf); err != nil {
			return
		}
		words = append(words, word)
	}
	return
}

// splitWord consumes exactly one word from input, using buf as scratch
// space, and returns the word plus the unconsumed remainder. It is a small
// state machine whose labels are the lexer states: raw text, backslash
// escape, single-quoted, and double-quoted.
func splitWord(input string, buf *bytes.Buffer) (word string, remainder string, err error) {
	buf.Reset()

raw:
	{
		rest := input
		for len(rest) > 0 {
			r, n := utf8.DecodeRuneInString(rest)
			rest = rest[n:]
			switch {
			case r == singleChar:
				buf.WriteString(input[0 : len(input)-len(rest)-n])
				input = rest
				goto single
			case r == doubleChar:
				buf.WriteString(input[0 : len(input)-len(rest)-n])
				input = rest
				goto double
			case r == escapeChar:
				buf.WriteString(input[0 : len(input)-len(rest)-n])
				input = rest
				goto escape
			case strings.ContainsRune(splitChars, r):
				// the word ends at unquoted whitespace
				buf.WriteString(input[0 : len(input)-len(rest)-n])
				return buf.String(), rest, nil
			}
		}
		if len(input) > 0 {
			buf.WriteString(input)
			input = ""
		}
		goto done
	}

escape:
	{
		if len(input) == 0 {
			return "", "", UnterminatedEscapeError
		}
		r, n := utf8.DecodeRuneInString(input)
		// a backslash-escaped newline is elided from the output entirely
		if r != '\n' {
			buf.WriteString(input[:n])
		}
		input = input[n:]
	}
	goto raw

single:
	{
		i := strings.IndexRune(input, singleChar)
		if i == -1 {
			return "", "", UnterminatedSingleQuoteError
		}
		buf.WriteString(input[0:i])
		input = input[i+1:]
		goto raw
	}

double:
	{
		rest := input
		for len(rest) > 0 {
			r, n := utf8.DecodeRuneInString(rest)
			rest = rest[n:]
			if r == doubleChar {
				buf.WriteString(input[0 : len(input)-len(rest)-n])
				input = rest
				goto raw
			}
			if r == escapeChar {
				// sh only honors a backslash before $ ` " \ or newline here;
				// any other pair is left in place verbatim
				r2, n2 := utf8.DecodeRuneInString(rest)
				rest = rest[n2:]
				if strings.ContainsRune(doubleEscapeChars, r2) {
					buf.WriteString(input[0 : len(input)-len(rest)-n-n2])
					if r2 != '\n' {
						// escaped newline is dropped, backslash and all
						buf.WriteRune(r2)
					}
					input = rest
				}
			}
		}
		return "", "", UnterminatedDoubleQuoteError
	}

done:
	return buf.String(), input, nil
}
|
||||
55
pkg/shellquote/unquote_test.go
Normal file
55
pkg/shellquote/unquote_test.go
Normal file
@@ -0,0 +1,55 @@
|
||||
package shellquote
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestSimpleSplit runs Split over the simpleSplitTest table and compares
// each resulting word slice against the expected output.
func TestSimpleSplit(t *testing.T) {
	for _, elem := range simpleSplitTest {
		output, err := Split(elem.input)
		if err != nil {
			t.Errorf("Input %q, got error %#v", elem.input, err)
		} else if !reflect.DeepEqual(output, elem.output) {
			t.Errorf("Input %q, got %q, expected %q", elem.input, output, elem.output)
		}
	}
}
|
||||
|
||||
// TestErrorSplit runs Split over the errorSplitTest table of malformed
// inputs and checks that the exact expected sentinel error is returned.
func TestErrorSplit(t *testing.T) {
	for _, elem := range errorSplitTest {
		_, err := Split(elem.input)
		if err != elem.error {
			t.Errorf("Input %q, got error %#v, expected error %#v", elem.input, err, elem.error)
		}
	}
}
|
||||
|
||||
var simpleSplitTest = []struct {
|
||||
input string
|
||||
output []string
|
||||
}{
|
||||
{"hello", []string{"hello"}},
|
||||
{"hello goodbye", []string{"hello", "goodbye"}},
|
||||
{"hello goodbye", []string{"hello", "goodbye"}},
|
||||
{"glob* test?", []string{"glob*", "test?"}},
|
||||
{"don\\'t you know the dewey decimal system\\?", []string{"don't", "you", "know", "the", "dewey", "decimal", "system?"}},
|
||||
{"'don'\\''t you know the dewey decimal system?'", []string{"don't you know the dewey decimal system?"}},
|
||||
{"one '' two", []string{"one", "", "two"}},
|
||||
{"text with\\\na backslash-escaped newline", []string{"text", "witha", "backslash-escaped", "newline"}},
|
||||
{"text \"with\na\" quoted newline", []string{"text", "with\na", "quoted", "newline"}},
|
||||
{"\"quoted\\d\\\\\\\" text with\\\na backslash-escaped newline\"", []string{"quoted\\d\\\" text witha backslash-escaped newline"}},
|
||||
{"text with an escaped \\\n newline in the middle", []string{"text", "with", "an", "escaped", "newline", "in", "the", "middle"}},
|
||||
{"foo\"bar\"baz", []string{"foobarbaz"}},
|
||||
}
|
||||
|
||||
var errorSplitTest = []struct {
|
||||
input string
|
||||
error error
|
||||
}{
|
||||
{"don't worry", UnterminatedSingleQuoteError},
|
||||
{"'test'\\''ing", UnterminatedSingleQuoteError},
|
||||
{"\"foo'bar", UnterminatedDoubleQuoteError},
|
||||
{"foo\\", UnterminatedEscapeError},
|
||||
{" \\", UnterminatedEscapeError},
|
||||
}
|
||||
1
testdata/conf/sample.conf
vendored
Normal file
1
testdata/conf/sample.conf
vendored
Normal file
@@ -0,0 +1 @@
|
||||
This is a sample conf {{ uppercase .Env.HOME }}
|
||||
32
testdata/minit.d/test.yml
vendored
Normal file
32
testdata/minit.d/test.yml
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
name: echo
|
||||
kind: once
|
||||
command:
|
||||
- echo
|
||||
- once
|
||||
- $HOME
|
||||
---
|
||||
name: shell-test
|
||||
kind: daemon
|
||||
count: 3
|
||||
shell: /bin/bash
|
||||
command:
|
||||
- sleep 1 && echo hello world
|
||||
---
|
||||
name: sleep
|
||||
kind: daemon
|
||||
count: 3
|
||||
command:
|
||||
- sleep
|
||||
- 5
|
||||
---
|
||||
name: echo-cron
|
||||
kind: cron
|
||||
cron: "@every 10s"
|
||||
command:
|
||||
- echo
|
||||
- cron
|
||||
---
|
||||
name: render-test
|
||||
kind: render
|
||||
files:
|
||||
- testdata/conf/*.conf
|
||||
15
vendor/github.com/davecgh/go-spew/LICENSE
generated
vendored
Normal file
15
vendor/github.com/davecgh/go-spew/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
ISC License
|
||||
|
||||
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
145
vendor/github.com/davecgh/go-spew/spew/bypass.go
generated
vendored
Normal file
145
vendor/github.com/davecgh/go-spew/spew/bypass.go
generated
vendored
Normal file
@@ -0,0 +1,145 @@
|
||||
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when the code is not running on Google App Engine, compiled by GopherJS, and
|
||||
// "-tags safe" is not added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// Go versions prior to 1.4 are disabled because they use a different layout
|
||||
// for interfaces which make the implementation of unsafeReflectValue more complex.
|
||||
// +build !js,!appengine,!safe,!disableunsafe,go1.4
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// UnsafeDisabled is a build-time constant which specifies whether or
|
||||
// not access to the unsafe package is available.
|
||||
UnsafeDisabled = false
|
||||
|
||||
// ptrSize is the size of a pointer on the current arch.
|
||||
ptrSize = unsafe.Sizeof((*byte)(nil))
|
||||
)
|
||||
|
||||
type flag uintptr
|
||||
|
||||
var (
|
||||
// flagRO indicates whether the value field of a reflect.Value
|
||||
// is read-only.
|
||||
flagRO flag
|
||||
|
||||
// flagAddr indicates whether the address of the reflect.Value's
|
||||
// value may be taken.
|
||||
flagAddr flag
|
||||
)
|
||||
|
||||
// flagKindMask holds the bits that make up the kind
|
||||
// part of the flags field. In all the supported versions,
|
||||
// it is in the lower 5 bits.
|
||||
const flagKindMask = flag(0x1f)
|
||||
|
||||
// Different versions of Go have used different
|
||||
// bit layouts for the flags type. This table
|
||||
// records the known combinations.
|
||||
var okFlags = []struct {
|
||||
ro, addr flag
|
||||
}{{
|
||||
// From Go 1.4 to 1.5
|
||||
ro: 1 << 5,
|
||||
addr: 1 << 7,
|
||||
}, {
|
||||
// Up to Go tip.
|
||||
ro: 1<<5 | 1<<6,
|
||||
addr: 1 << 8,
|
||||
}}
|
||||
|
||||
var flagValOffset = func() uintptr {
|
||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||
if !ok {
|
||||
panic("reflect.Value has no flag field")
|
||||
}
|
||||
return field.Offset
|
||||
}()
|
||||
|
||||
// flagField returns a pointer to the flag field of a reflect.Value.
|
||||
func flagField(v *reflect.Value) *flag {
|
||||
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
|
||||
}
|
||||
|
||||
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
|
||||
// the typical safety restrictions preventing access to unaddressable and
|
||||
// unexported data. It works by digging the raw pointer to the underlying
|
||||
// value out of the protected value and generating a new unprotected (unsafe)
|
||||
// reflect.Value to it.
|
||||
//
|
||||
// This allows us to check for implementations of the Stringer and error
|
||||
// interfaces to be used for pretty printing ordinarily unaddressable and
|
||||
// inaccessible values such as unexported struct fields.
|
||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||
if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
|
||||
return v
|
||||
}
|
||||
flagFieldPtr := flagField(&v)
|
||||
*flagFieldPtr &^= flagRO
|
||||
*flagFieldPtr |= flagAddr
|
||||
return v
|
||||
}
|
||||
|
||||
// Sanity checks against future reflect package changes
|
||||
// to the type or semantics of the Value.flag field.
|
||||
func init() {
|
||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||
if !ok {
|
||||
panic("reflect.Value has no flag field")
|
||||
}
|
||||
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
|
||||
panic("reflect.Value flag field has changed kind")
|
||||
}
|
||||
type t0 int
|
||||
var t struct {
|
||||
A t0
|
||||
// t0 will have flagEmbedRO set.
|
||||
t0
|
||||
// a will have flagStickyRO set
|
||||
a t0
|
||||
}
|
||||
vA := reflect.ValueOf(t).FieldByName("A")
|
||||
va := reflect.ValueOf(t).FieldByName("a")
|
||||
vt0 := reflect.ValueOf(t).FieldByName("t0")
|
||||
|
||||
// Infer flagRO from the difference between the flags
|
||||
// for the (otherwise identical) fields in t.
|
||||
flagPublic := *flagField(&vA)
|
||||
flagWithRO := *flagField(&va) | *flagField(&vt0)
|
||||
flagRO = flagPublic ^ flagWithRO
|
||||
|
||||
// Infer flagAddr from the difference between a value
|
||||
// taken from a pointer and not.
|
||||
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
|
||||
flagNoPtr := *flagField(&vA)
|
||||
flagPtr := *flagField(&vPtrA)
|
||||
flagAddr = flagNoPtr ^ flagPtr
|
||||
|
||||
// Check that the inferred flags tally with one of the known versions.
|
||||
for _, f := range okFlags {
|
||||
if flagRO == f.ro && flagAddr == f.addr {
|
||||
return
|
||||
}
|
||||
}
|
||||
panic("reflect.Value read-only flag has changed semantics")
|
||||
}
|
||||
38
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
generated
vendored
Normal file
38
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when the code is running on Google App Engine, compiled by GopherJS, or
|
||||
// "-tags safe" is added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// +build js appengine safe disableunsafe !go1.4
|
||||
|
||||
package spew
|
||||
|
||||
import "reflect"
|
||||
|
||||
const (
|
||||
// UnsafeDisabled is a build-time constant which specifies whether or
|
||||
// not access to the unsafe package is available.
|
||||
UnsafeDisabled = true
|
||||
)
|
||||
|
||||
// unsafeReflectValue typically converts the passed reflect.Value into a one
|
||||
// that bypasses the typical safety restrictions preventing access to
|
||||
// unaddressable and unexported data. However, doing this relies on access to
|
||||
// the unsafe package. This is a stub version which simply returns the passed
|
||||
// reflect.Value when the unsafe package is not available.
|
||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||
return v
|
||||
}
|
||||
341
vendor/github.com/davecgh/go-spew/spew/common.go
generated
vendored
Normal file
341
vendor/github.com/davecgh/go-spew/spew/common.go
generated
vendored
Normal file
@@ -0,0 +1,341 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Some constants in the form of bytes to avoid string overhead. This mirrors
|
||||
// the technique used in the fmt package.
|
||||
var (
|
||||
panicBytes = []byte("(PANIC=")
|
||||
plusBytes = []byte("+")
|
||||
iBytes = []byte("i")
|
||||
trueBytes = []byte("true")
|
||||
falseBytes = []byte("false")
|
||||
interfaceBytes = []byte("(interface {})")
|
||||
commaNewlineBytes = []byte(",\n")
|
||||
newlineBytes = []byte("\n")
|
||||
openBraceBytes = []byte("{")
|
||||
openBraceNewlineBytes = []byte("{\n")
|
||||
closeBraceBytes = []byte("}")
|
||||
asteriskBytes = []byte("*")
|
||||
colonBytes = []byte(":")
|
||||
colonSpaceBytes = []byte(": ")
|
||||
openParenBytes = []byte("(")
|
||||
closeParenBytes = []byte(")")
|
||||
spaceBytes = []byte(" ")
|
||||
pointerChainBytes = []byte("->")
|
||||
nilAngleBytes = []byte("<nil>")
|
||||
maxNewlineBytes = []byte("<max depth reached>\n")
|
||||
maxShortBytes = []byte("<max>")
|
||||
circularBytes = []byte("<already shown>")
|
||||
circularShortBytes = []byte("<shown>")
|
||||
invalidAngleBytes = []byte("<invalid>")
|
||||
openBracketBytes = []byte("[")
|
||||
closeBracketBytes = []byte("]")
|
||||
percentBytes = []byte("%")
|
||||
precisionBytes = []byte(".")
|
||||
openAngleBytes = []byte("<")
|
||||
closeAngleBytes = []byte(">")
|
||||
openMapBytes = []byte("map[")
|
||||
closeMapBytes = []byte("]")
|
||||
lenEqualsBytes = []byte("len=")
|
||||
capEqualsBytes = []byte("cap=")
|
||||
)
|
||||
|
||||
// hexDigits is used to map a decimal value to a hex digit.
|
||||
var hexDigits = "0123456789abcdef"
|
||||
|
||||
// catchPanic handles any panics that might occur during the handleMethods
|
||||
// calls.
|
||||
func catchPanic(w io.Writer, v reflect.Value) {
|
||||
if err := recover(); err != nil {
|
||||
w.Write(panicBytes)
|
||||
fmt.Fprintf(w, "%v", err)
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// handleMethods attempts to call the Error and String methods on the underlying
|
||||
// type the passed reflect.Value represents and outputes the result to Writer w.
|
||||
//
|
||||
// It handles panics in any called methods by catching and displaying the error
|
||||
// as the formatted value.
|
||||
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
|
||||
// We need an interface to check if the type implements the error or
|
||||
// Stringer interface. However, the reflect package won't give us an
|
||||
// interface on certain things like unexported struct fields in order
|
||||
// to enforce visibility rules. We use unsafe, when it's available,
|
||||
// to bypass these restrictions since this package does not mutate the
|
||||
// values.
|
||||
if !v.CanInterface() {
|
||||
if UnsafeDisabled {
|
||||
return false
|
||||
}
|
||||
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
|
||||
// Choose whether or not to do error and Stringer interface lookups against
|
||||
// the base type or a pointer to the base type depending on settings.
|
||||
// Technically calling one of these methods with a pointer receiver can
|
||||
// mutate the value, however, types which choose to satisify an error or
|
||||
// Stringer interface with a pointer receiver should not be mutating their
|
||||
// state inside these interface methods.
|
||||
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
if v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
|
||||
// Is it an error or Stringer?
|
||||
switch iface := v.Interface().(type) {
|
||||
case error:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(iface.Error()))
|
||||
w.Write(closeParenBytes)
|
||||
w.Write(spaceBytes)
|
||||
return false
|
||||
}
|
||||
|
||||
w.Write([]byte(iface.Error()))
|
||||
return true
|
||||
|
||||
case fmt.Stringer:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(iface.String()))
|
||||
w.Write(closeParenBytes)
|
||||
w.Write(spaceBytes)
|
||||
return false
|
||||
}
|
||||
w.Write([]byte(iface.String()))
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// printBool outputs a boolean value as true or false to Writer w.
|
||||
func printBool(w io.Writer, val bool) {
|
||||
if val {
|
||||
w.Write(trueBytes)
|
||||
} else {
|
||||
w.Write(falseBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// printInt outputs a signed integer value to Writer w.
|
||||
func printInt(w io.Writer, val int64, base int) {
|
||||
w.Write([]byte(strconv.FormatInt(val, base)))
|
||||
}
|
||||
|
||||
// printUint outputs an unsigned integer value to Writer w.
|
||||
func printUint(w io.Writer, val uint64, base int) {
|
||||
w.Write([]byte(strconv.FormatUint(val, base)))
|
||||
}
|
||||
|
||||
// printFloat outputs a floating point value using the specified precision,
|
||||
// which is expected to be 32 or 64bit, to Writer w.
|
||||
func printFloat(w io.Writer, val float64, precision int) {
|
||||
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
|
||||
}
|
||||
|
||||
// printComplex outputs a complex value using the specified float precision
|
||||
// for the real and imaginary parts to Writer w.
|
||||
func printComplex(w io.Writer, c complex128, floatPrecision int) {
|
||||
r := real(c)
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
|
||||
i := imag(c)
|
||||
if i >= 0 {
|
||||
w.Write(plusBytes)
|
||||
}
|
||||
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
|
||||
w.Write(iBytes)
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
|
||||
// prefix to Writer w.
|
||||
func printHexPtr(w io.Writer, p uintptr) {
|
||||
// Null pointer.
|
||||
num := uint64(p)
|
||||
if num == 0 {
|
||||
w.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
|
||||
buf := make([]byte, 18)
|
||||
|
||||
// It's simpler to construct the hex string right to left.
|
||||
base := uint64(16)
|
||||
i := len(buf) - 1
|
||||
for num >= base {
|
||||
buf[i] = hexDigits[num%base]
|
||||
num /= base
|
||||
i--
|
||||
}
|
||||
buf[i] = hexDigits[num]
|
||||
|
||||
// Add '0x' prefix.
|
||||
i--
|
||||
buf[i] = 'x'
|
||||
i--
|
||||
buf[i] = '0'
|
||||
|
||||
// Strip unused leading bytes.
|
||||
buf = buf[i:]
|
||||
w.Write(buf)
|
||||
}
|
||||
|
||||
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
|
||||
// elements to be sorted.
|
||||
type valuesSorter struct {
|
||||
values []reflect.Value
|
||||
strings []string // either nil or same len and values
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// newValuesSorter initializes a valuesSorter instance, which holds a set of
|
||||
// surrogate keys on which the data should be sorted. It uses flags in
|
||||
// ConfigState to decide if and how to populate those surrogate keys.
|
||||
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
|
||||
vs := &valuesSorter{values: values, cs: cs}
|
||||
if canSortSimply(vs.values[0].Kind()) {
|
||||
return vs
|
||||
}
|
||||
if !cs.DisableMethods {
|
||||
vs.strings = make([]string, len(values))
|
||||
for i := range vs.values {
|
||||
b := bytes.Buffer{}
|
||||
if !handleMethods(cs, &b, vs.values[i]) {
|
||||
vs.strings = nil
|
||||
break
|
||||
}
|
||||
vs.strings[i] = b.String()
|
||||
}
|
||||
}
|
||||
if vs.strings == nil && cs.SpewKeys {
|
||||
vs.strings = make([]string, len(values))
|
||||
for i := range vs.values {
|
||||
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
|
||||
}
|
||||
}
|
||||
return vs
|
||||
}
|
||||
|
||||
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
|
||||
// directly, or whether it should be considered for sorting by surrogate keys
|
||||
// (if the ConfigState allows it).
|
||||
func canSortSimply(kind reflect.Kind) bool {
|
||||
// This switch parallels valueSortLess, except for the default case.
|
||||
switch kind {
|
||||
case reflect.Bool:
|
||||
return true
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return true
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return true
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return true
|
||||
case reflect.String:
|
||||
return true
|
||||
case reflect.Uintptr:
|
||||
return true
|
||||
case reflect.Array:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Len returns the number of values in the slice. It is part of the
|
||||
// sort.Interface implementation.
|
||||
func (s *valuesSorter) Len() int {
|
||||
return len(s.values)
|
||||
}
|
||||
|
||||
// Swap swaps the values at the passed indices. It is part of the
|
||||
// sort.Interface implementation.
|
||||
func (s *valuesSorter) Swap(i, j int) {
|
||||
s.values[i], s.values[j] = s.values[j], s.values[i]
|
||||
if s.strings != nil {
|
||||
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
|
||||
}
|
||||
}
|
||||
|
||||
// valueSortLess returns whether the first value should sort before the second
|
||||
// value. It is used by valueSorter.Less as part of the sort.Interface
|
||||
// implementation.
|
||||
func valueSortLess(a, b reflect.Value) bool {
|
||||
switch a.Kind() {
|
||||
case reflect.Bool:
|
||||
return !a.Bool() && b.Bool()
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return a.Int() < b.Int()
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return a.Float() < b.Float()
|
||||
case reflect.String:
|
||||
return a.String() < b.String()
|
||||
case reflect.Uintptr:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Array:
|
||||
// Compare the contents of both arrays.
|
||||
l := a.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
av := a.Index(i)
|
||||
bv := b.Index(i)
|
||||
if av.Interface() == bv.Interface() {
|
||||
continue
|
||||
}
|
||||
return valueSortLess(av, bv)
|
||||
}
|
||||
}
|
||||
return a.String() < b.String()
|
||||
}
|
||||
|
||||
// Less returns whether the value at index i should sort before the
|
||||
// value at index j. It is part of the sort.Interface implementation.
|
||||
func (s *valuesSorter) Less(i, j int) bool {
|
||||
if s.strings == nil {
|
||||
return valueSortLess(s.values[i], s.values[j])
|
||||
}
|
||||
return s.strings[i] < s.strings[j]
|
||||
}
|
||||
|
||||
// sortValues is a sort function that handles both native types and any type that
|
||||
// can be converted to error or Stringer. Other inputs are sorted according to
|
||||
// their Value.String() value to ensure display stability.
|
||||
func sortValues(values []reflect.Value, cs *ConfigState) {
|
||||
if len(values) == 0 {
|
||||
return
|
||||
}
|
||||
sort.Sort(newValuesSorter(values, cs))
|
||||
}
|
||||
306
vendor/github.com/davecgh/go-spew/spew/config.go
generated
vendored
Normal file
306
vendor/github.com/davecgh/go-spew/spew/config.go
generated
vendored
Normal file
@@ -0,0 +1,306 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ConfigState houses the configuration options used by spew to format and
|
||||
// display values. There is a global instance, Config, that is used to control
|
||||
// all top-level Formatter and Dump functionality. Each ConfigState instance
|
||||
// provides methods equivalent to the top-level functions.
|
||||
//
|
||||
// The zero value for ConfigState provides no indentation. You would typically
|
||||
// want to set it to a space or a tab.
|
||||
//
|
||||
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
|
||||
// with default settings. See the documentation of NewDefaultConfig for default
|
||||
// values.
|
||||
type ConfigState struct {
|
||||
// Indent specifies the string to use for each indentation level. The
|
||||
// global config instance that all top-level functions use set this to a
|
||||
// single space by default. If you would like more indentation, you might
|
||||
// set this to a tab with "\t" or perhaps two spaces with " ".
|
||||
Indent string
|
||||
|
||||
// MaxDepth controls the maximum number of levels to descend into nested
|
||||
// data structures. The default, 0, means there is no limit.
|
||||
//
|
||||
// NOTE: Circular data structures are properly detected, so it is not
|
||||
// necessary to set this value unless you specifically want to limit deeply
|
||||
// nested data structures.
|
||||
MaxDepth int
|
||||
|
||||
// DisableMethods specifies whether or not error and Stringer interfaces are
|
||||
// invoked for types that implement them.
|
||||
DisableMethods bool
|
||||
|
||||
// DisablePointerMethods specifies whether or not to check for and invoke
|
||||
// error and Stringer interfaces on types which only accept a pointer
|
||||
// receiver when the current type is not a pointer.
|
||||
//
|
||||
// NOTE: This might be an unsafe action since calling one of these methods
|
||||
// with a pointer receiver could technically mutate the value, however,
|
||||
// in practice, types which choose to satisify an error or Stringer
|
||||
// interface with a pointer receiver should not be mutating their state
|
||||
// inside these interface methods. As a result, this option relies on
|
||||
// access to the unsafe package, so it will not have any effect when
|
||||
// running in environments without access to the unsafe package such as
|
||||
// Google App Engine or with the "safe" build tag specified.
|
||||
DisablePointerMethods bool
|
||||
|
||||
// DisablePointerAddresses specifies whether to disable the printing of
|
||||
// pointer addresses. This is useful when diffing data structures in tests.
|
||||
DisablePointerAddresses bool
|
||||
|
||||
// DisableCapacities specifies whether to disable the printing of capacities
|
||||
// for arrays, slices, maps and channels. This is useful when diffing
|
||||
// data structures in tests.
|
||||
DisableCapacities bool
|
||||
|
||||
// ContinueOnMethod specifies whether or not recursion should continue once
|
||||
// a custom error or Stringer interface is invoked. The default, false,
|
||||
// means it will print the results of invoking the custom error or Stringer
|
||||
// interface and return immediately instead of continuing to recurse into
|
||||
// the internals of the data type.
|
||||
//
|
||||
// NOTE: This flag does not have any effect if method invocation is disabled
|
||||
// via the DisableMethods or DisablePointerMethods options.
|
||||
ContinueOnMethod bool
|
||||
|
||||
// SortKeys specifies map keys should be sorted before being printed. Use
|
||||
// this to have a more deterministic, diffable output. Note that only
|
||||
// native types (bool, int, uint, floats, uintptr and string) and types
|
||||
// that support the error or Stringer interfaces (if methods are
|
||||
// enabled) are supported, with other types sorted according to the
|
||||
// reflect.Value.String() output which guarantees display stability.
|
||||
SortKeys bool
|
||||
|
||||
// SpewKeys specifies that, as a last resort attempt, map keys should
|
||||
// be spewed to strings and sorted by those strings. This is only
|
||||
// considered if SortKeys is true.
|
||||
SpewKeys bool
|
||||
}
|
||||
|
||||
// Config is the active configuration of the top-level functions.
|
||||
// The configuration can be changed by modifying the contents of spew.Config.
|
||||
var Config = ConfigState{Indent: " "}
|
||||
|
||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the formatted string as a value that satisfies error. See NewFormatter
|
||||
// for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
|
||||
return fmt.Errorf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprint(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintf(w, format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||
// passed with a Formatter interface returned by c.NewFormatter. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintln(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
|
||||
return fmt.Print(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Printf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
|
||||
return fmt.Println(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprint(a ...interface{}) string {
|
||||
return fmt.Sprint(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
|
||||
return fmt.Sprintf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||
// were passed with a Formatter interface returned by c.NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintln(a ...interface{}) string {
|
||||
return fmt.Sprintln(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
c.Printf, c.Println, or c.Printf.
|
||||
*/
|
||||
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(c, v)
|
||||
}
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(c, w, a...)
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by modifying the public members
|
||||
of c. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func (c *ConfigState) Dump(a ...interface{}) {
|
||||
fdump(c, os.Stdout, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func (c *ConfigState) Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(c, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||
// length with each argument converted to a spew Formatter interface using
|
||||
// the ConfigState associated with s.
|
||||
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
|
||||
formatters = make([]interface{}, len(args))
|
||||
for index, arg := range args {
|
||||
formatters[index] = newFormatter(c, arg)
|
||||
}
|
||||
return formatters
|
||||
}
|
||||
|
||||
// NewDefaultConfig returns a ConfigState with the following default settings.
|
||||
//
|
||||
// Indent: " "
|
||||
// MaxDepth: 0
|
||||
// DisableMethods: false
|
||||
// DisablePointerMethods: false
|
||||
// ContinueOnMethod: false
|
||||
// SortKeys: false
|
||||
func NewDefaultConfig() *ConfigState {
|
||||
return &ConfigState{Indent: " "}
|
||||
}
|
||||
211
vendor/github.com/davecgh/go-spew/spew/doc.go
generated
vendored
Normal file
211
vendor/github.com/davecgh/go-spew/spew/doc.go
generated
vendored
Normal file
@@ -0,0 +1,211 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/*
|
||||
Package spew implements a deep pretty printer for Go data structures to aid in
|
||||
debugging.
|
||||
|
||||
A quick overview of the additional features spew provides over the built-in
|
||||
printing facilities for Go data types are as follows:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output (only when using
|
||||
Dump style)
|
||||
|
||||
There are two different approaches spew allows for dumping Go data structures:
|
||||
|
||||
* Dump style which prints with newlines, customizable indentation,
|
||||
and additional debug information such as types and all pointer addresses
|
||||
used to indirect to the final value
|
||||
* A custom Formatter interface that integrates cleanly with the standard fmt
|
||||
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
|
||||
similar to the default %v while providing the additional functionality
|
||||
outlined above and passing unsupported format verbs such as %x and %q
|
||||
along to fmt
|
||||
|
||||
Quick Start
|
||||
|
||||
This section demonstrates how to quickly get started with spew. See the
|
||||
sections below for further details on formatting and configuration options.
|
||||
|
||||
To dump a variable with full newlines, indentation, type, and pointer
|
||||
information use Dump, Fdump, or Sdump:
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
spew.Fdump(someWriter, myVar1, myVar2, ...)
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Alternatively, if you would prefer to use format strings with a compacted inline
|
||||
printing style, use the convenience wrappers Printf, Fprintf, etc with
|
||||
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
|
||||
%#+v (adds types and pointer addresses):
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
Configuration Options
|
||||
|
||||
Configuration of spew is handled by fields in the ConfigState type. For
|
||||
convenience, all of the top-level functions use a global state available
|
||||
via the spew.Config global.
|
||||
|
||||
It is also possible to create a ConfigState instance that provides methods
|
||||
equivalent to the top-level functions. This allows concurrent configuration
|
||||
options. See the ConfigState documentation for more details.
|
||||
|
||||
The following configuration options are available:
|
||||
* Indent
|
||||
String to use for each indentation level for Dump functions.
|
||||
It is a single space by default. A popular alternative is "\t".
|
||||
|
||||
* MaxDepth
|
||||
Maximum number of levels to descend into nested data structures.
|
||||
There is no limit by default.
|
||||
|
||||
* DisableMethods
|
||||
Disables invocation of error and Stringer interface methods.
|
||||
Method invocation is enabled by default.
|
||||
|
||||
* DisablePointerMethods
|
||||
Disables invocation of error and Stringer interface methods on types
|
||||
which only accept pointer receivers from non-pointer variables.
|
||||
Pointer method invocation is enabled by default.
|
||||
|
||||
* DisablePointerAddresses
|
||||
DisablePointerAddresses specifies whether to disable the printing of
|
||||
pointer addresses. This is useful when diffing data structures in tests.
|
||||
|
||||
* DisableCapacities
|
||||
DisableCapacities specifies whether to disable the printing of
|
||||
capacities for arrays, slices, maps and channels. This is useful when
|
||||
diffing data structures in tests.
|
||||
|
||||
* ContinueOnMethod
|
||||
Enables recursion into types after invoking error and Stringer interface
|
||||
methods. Recursion after method invocation is disabled by default.
|
||||
|
||||
* SortKeys
|
||||
Specifies map keys should be sorted before being printed. Use
|
||||
this to have a more deterministic, diffable output. Note that
|
||||
only native types (bool, int, uint, floats, uintptr and string)
|
||||
and types which implement error or Stringer interfaces are
|
||||
supported with other types sorted according to the
|
||||
reflect.Value.String() output which guarantees display
|
||||
stability. Natural map order is used by default.
|
||||
|
||||
* SpewKeys
|
||||
Specifies that, as a last resort attempt, map keys should be
|
||||
spewed to strings and sorted by those strings. This is only
|
||||
considered if SortKeys is true.
|
||||
|
||||
Dump Usage
|
||||
|
||||
Simply call spew.Dump with a list of variables you want to dump:
|
||||
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
|
||||
You may also call spew.Fdump if you would prefer to output to an arbitrary
|
||||
io.Writer. For example, to dump to standard error:
|
||||
|
||||
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
|
||||
|
||||
A third option is to call spew.Sdump to get the formatted output as a string:
|
||||
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Sample Dump Output
|
||||
|
||||
See the Dump example for details on the setup of the types and variables being
|
||||
shown here.
|
||||
|
||||
(main.Foo) {
|
||||
unexportedField: (*main.Bar)(0xf84002e210)({
|
||||
flag: (main.Flag) flagTwo,
|
||||
data: (uintptr) <nil>
|
||||
}),
|
||||
ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||
(string) (len=3) "one": (bool) true
|
||||
}
|
||||
}
|
||||
|
||||
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
|
||||
command as shown.
|
||||
([]uint8) (len=32 cap=32) {
|
||||
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
||||
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
||||
00000020 31 32 |12|
|
||||
}
|
||||
|
||||
Custom Formatter
|
||||
|
||||
Spew provides a custom formatter that implements the fmt.Formatter interface
|
||||
so that it integrates cleanly with standard fmt package printing functions. The
|
||||
formatter is useful for inline printing of smaller data types similar to the
|
||||
standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Custom Formatter Usage
|
||||
|
||||
The simplest way to make use of the spew custom formatter is to call one of the
|
||||
convenience functions such as spew.Printf, spew.Println, or spew.Printf. The
|
||||
functions have syntax you are most likely already familiar with:
|
||||
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Println(myVar, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
See the Index for the full list convenience functions.
|
||||
|
||||
Sample Formatter Output
|
||||
|
||||
Double pointer to a uint8:
|
||||
%v: <**>5
|
||||
%+v: <**>(0xf8400420d0->0xf8400420c8)5
|
||||
%#v: (**uint8)5
|
||||
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
|
||||
|
||||
Pointer to circular struct with a uint8 field and a pointer to itself:
|
||||
%v: <*>{1 <*><shown>}
|
||||
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
|
||||
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
|
||||
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
|
||||
|
||||
See the Printf example for details on the setup of variables being shown
|
||||
here.
|
||||
|
||||
Errors
|
||||
|
||||
Since it is possible for custom Stringer/error interfaces to panic, spew
|
||||
detects them and handles them internally by printing the panic information
|
||||
inline with the output. Since spew is intended to provide deep pretty printing
|
||||
capabilities on structures, it intentionally does not return any errors.
|
||||
*/
|
||||
package spew
|
||||
509
vendor/github.com/davecgh/go-spew/spew/dump.go
generated
vendored
Normal file
509
vendor/github.com/davecgh/go-spew/spew/dump.go
generated
vendored
Normal file
@@ -0,0 +1,509 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// uint8Type is a reflect.Type representing a uint8. It is used to
|
||||
// convert cgo types to uint8 slices for hexdumping.
|
||||
uint8Type = reflect.TypeOf(uint8(0))
|
||||
|
||||
// cCharRE is a regular expression that matches a cgo char.
|
||||
// It is used to detect character arrays to hexdump them.
|
||||
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
|
||||
|
||||
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
|
||||
// char. It is used to detect unsigned character arrays to hexdump
|
||||
// them.
|
||||
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
|
||||
|
||||
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
|
||||
// It is used to detect uint8_t arrays to hexdump them.
|
||||
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
|
||||
)
|
||||
|
||||
// dumpState contains information about the state of a dump operation.
|
||||
type dumpState struct {
|
||||
w io.Writer
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
ignoreNextIndent bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// indent performs indentation according to the depth level and cs.Indent
|
||||
// option.
|
||||
func (d *dumpState) indent() {
|
||||
if d.ignoreNextIndent {
|
||||
d.ignoreNextIndent = false
|
||||
return
|
||||
}
|
||||
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface && !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// dumpPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (d *dumpState) dumpPtr(v reflect.Value) {
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range d.pointers {
|
||||
if depth >= d.depth {
|
||||
delete(d.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by dereferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
d.pointers[addr] = d.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type information.
|
||||
d.w.Write(openParenBytes)
|
||||
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
d.w.Write([]byte(ve.Type().String()))
|
||||
d.w.Write(closeParenBytes)
|
||||
|
||||
// Display pointer information.
|
||||
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
|
||||
d.w.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
d.w.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(d.w, addr)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
d.w.Write(openParenBytes)
|
||||
switch {
|
||||
case nilFound:
|
||||
d.w.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound:
|
||||
d.w.Write(circularBytes)
|
||||
|
||||
default:
|
||||
d.ignoreNextType = true
|
||||
d.dump(ve)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
|
||||
// reflection) arrays and slices are dumped in hexdump -C fashion.
|
||||
func (d *dumpState) dumpSlice(v reflect.Value) {
|
||||
// Determine whether this type should be hex dumped or not. Also,
|
||||
// for types which should be hexdumped, try to use the underlying data
|
||||
// first, then fall back to trying to convert them to a uint8 slice.
|
||||
var buf []uint8
|
||||
doConvert := false
|
||||
doHexDump := false
|
||||
numEntries := v.Len()
|
||||
if numEntries > 0 {
|
||||
vt := v.Index(0).Type()
|
||||
vts := vt.String()
|
||||
switch {
|
||||
// C types that need to be converted.
|
||||
case cCharRE.MatchString(vts):
|
||||
fallthrough
|
||||
case cUnsignedCharRE.MatchString(vts):
|
||||
fallthrough
|
||||
case cUint8tCharRE.MatchString(vts):
|
||||
doConvert = true
|
||||
|
||||
// Try to use existing uint8 slices and fall back to converting
|
||||
// and copying if that fails.
|
||||
case vt.Kind() == reflect.Uint8:
|
||||
// We need an addressable interface to convert the type
|
||||
// to a byte slice. However, the reflect package won't
|
||||
// give us an interface on certain things like
|
||||
// unexported struct fields in order to enforce
|
||||
// visibility rules. We use unsafe, when available, to
|
||||
// bypass these restrictions since this package does not
|
||||
// mutate the values.
|
||||
vs := v
|
||||
if !vs.CanInterface() || !vs.CanAddr() {
|
||||
vs = unsafeReflectValue(vs)
|
||||
}
|
||||
if !UnsafeDisabled {
|
||||
vs = vs.Slice(0, numEntries)
|
||||
|
||||
// Use the existing uint8 slice if it can be
|
||||
// type asserted.
|
||||
iface := vs.Interface()
|
||||
if slice, ok := iface.([]uint8); ok {
|
||||
buf = slice
|
||||
doHexDump = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// The underlying data needs to be converted if it can't
|
||||
// be type asserted to a uint8 slice.
|
||||
doConvert = true
|
||||
}
|
||||
|
||||
// Copy and convert the underlying type if needed.
|
||||
if doConvert && vt.ConvertibleTo(uint8Type) {
|
||||
// Convert and copy each element into a uint8 byte
|
||||
// slice.
|
||||
buf = make([]uint8, numEntries)
|
||||
for i := 0; i < numEntries; i++ {
|
||||
vv := v.Index(i)
|
||||
buf[i] = uint8(vv.Convert(uint8Type).Uint())
|
||||
}
|
||||
doHexDump = true
|
||||
}
|
||||
}
|
||||
|
||||
// Hexdump the entire slice as needed.
|
||||
if doHexDump {
|
||||
indent := strings.Repeat(d.cs.Indent, d.depth)
|
||||
str := indent + hex.Dump(buf)
|
||||
str = strings.Replace(str, "\n", "\n"+indent, -1)
|
||||
str = strings.TrimRight(str, d.cs.Indent)
|
||||
d.w.Write([]byte(str))
|
||||
return
|
||||
}
|
||||
|
||||
// Recursively call dump for each item.
|
||||
for i := 0; i < numEntries; i++ {
|
||||
d.dump(d.unpackValue(v.Index(i)))
|
||||
if i < (numEntries - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// dump is the main workhorse for dumping a value. It uses the passed reflect
|
||||
// value to figure out what kind of object we are dealing with and formats it
|
||||
// appropriately. It is a recursive function, however circular data structures
|
||||
// are detected and handled properly.
|
||||
func (d *dumpState) dump(v reflect.Value) {
|
||||
// Handle invalid reflect values immediately.
|
||||
kind := v.Kind()
|
||||
if kind == reflect.Invalid {
|
||||
d.w.Write(invalidAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle pointers specially.
|
||||
if kind == reflect.Ptr {
|
||||
d.indent()
|
||||
d.dumpPtr(v)
|
||||
return
|
||||
}
|
||||
|
||||
// Print type information unless already handled elsewhere.
|
||||
if !d.ignoreNextType {
|
||||
d.indent()
|
||||
d.w.Write(openParenBytes)
|
||||
d.w.Write([]byte(v.Type().String()))
|
||||
d.w.Write(closeParenBytes)
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
d.ignoreNextType = false
|
||||
|
||||
// Display length and capacity if the built-in len and cap functions
|
||||
// work with the value's kind and the len/cap itself is non-zero.
|
||||
valueLen, valueCap := 0, 0
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.Chan:
|
||||
valueLen, valueCap = v.Len(), v.Cap()
|
||||
case reflect.Map, reflect.String:
|
||||
valueLen = v.Len()
|
||||
}
|
||||
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
|
||||
d.w.Write(openParenBytes)
|
||||
if valueLen != 0 {
|
||||
d.w.Write(lenEqualsBytes)
|
||||
printInt(d.w, int64(valueLen), 10)
|
||||
}
|
||||
if !d.cs.DisableCapacities && valueCap != 0 {
|
||||
if valueLen != 0 {
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
d.w.Write(capEqualsBytes)
|
||||
printInt(d.w, int64(valueCap), 10)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
|
||||
// Call Stringer/error interfaces if they exist and the handle methods flag
|
||||
// is enabled
|
||||
if !d.cs.DisableMethods {
|
||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||
if handled := handleMethods(d.cs, d.w, v); handled {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Invalid:
|
||||
// Do nothing. We should never get here since invalid has already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Bool:
|
||||
printBool(d.w, v.Bool())
|
||||
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
printInt(d.w, v.Int(), 10)
|
||||
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
printUint(d.w, v.Uint(), 10)
|
||||
|
||||
case reflect.Float32:
|
||||
printFloat(d.w, v.Float(), 32)
|
||||
|
||||
case reflect.Float64:
|
||||
printFloat(d.w, v.Float(), 64)
|
||||
|
||||
case reflect.Complex64:
|
||||
printComplex(d.w, v.Complex(), 32)
|
||||
|
||||
case reflect.Complex128:
|
||||
printComplex(d.w, v.Complex(), 64)
|
||||
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
d.dumpSlice(v)
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.String:
|
||||
d.w.Write([]byte(strconv.Quote(v.String())))
|
||||
|
||||
case reflect.Interface:
|
||||
// The only time we should get here is for nil interfaces due to
|
||||
// unpackValue calls.
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// Do nothing. We should never get here since pointers have already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Map:
|
||||
// nil maps should be indicated as different than empty maps
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
numEntries := v.Len()
|
||||
keys := v.MapKeys()
|
||||
if d.cs.SortKeys {
|
||||
sortValues(keys, d.cs)
|
||||
}
|
||||
for i, key := range keys {
|
||||
d.dump(d.unpackValue(key))
|
||||
d.w.Write(colonSpaceBytes)
|
||||
d.ignoreNextIndent = true
|
||||
d.dump(d.unpackValue(v.MapIndex(key)))
|
||||
if i < (numEntries - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Struct:
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
vt := v.Type()
|
||||
numFields := v.NumField()
|
||||
for i := 0; i < numFields; i++ {
|
||||
d.indent()
|
||||
vtf := vt.Field(i)
|
||||
d.w.Write([]byte(vtf.Name))
|
||||
d.w.Write(colonSpaceBytes)
|
||||
d.ignoreNextIndent = true
|
||||
d.dump(d.unpackValue(v.Field(i)))
|
||||
if i < (numFields - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Uintptr:
|
||||
printHexPtr(d.w, uintptr(v.Uint()))
|
||||
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
printHexPtr(d.w, v.Pointer())
|
||||
|
||||
// There were not any other types at the time this code was written, but
|
||||
// fall back to letting the default fmt package handle it in case any new
|
||||
// types are added.
|
||||
default:
|
||||
if v.CanInterface() {
|
||||
fmt.Fprintf(d.w, "%v", v.Interface())
|
||||
} else {
|
||||
fmt.Fprintf(d.w, "%v", v.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fdump is a helper function to consolidate the logic from the various public
|
||||
// methods which take varying writers and config states.
|
||||
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
|
||||
for _, arg := range a {
|
||||
if arg == nil {
|
||||
w.Write(interfaceBytes)
|
||||
w.Write(spaceBytes)
|
||||
w.Write(nilAngleBytes)
|
||||
w.Write(newlineBytes)
|
||||
continue
|
||||
}
|
||||
|
||||
d := dumpState{w: w, cs: cs}
|
||||
d.pointers = make(map[uintptr]int)
|
||||
d.dump(reflect.ValueOf(arg))
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(&Config, w, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(&Config, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by an exported package global,
|
||||
spew.Config. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func Dump(a ...interface{}) {
|
||||
fdump(&Config, os.Stdout, a...)
|
||||
}
|
||||
419
vendor/github.com/davecgh/go-spew/spew/format.go
generated
vendored
Normal file
419
vendor/github.com/davecgh/go-spew/spew/format.go
generated
vendored
Normal file
@@ -0,0 +1,419 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// supportedFlags is a list of all the character flags supported by fmt package.
|
||||
const supportedFlags = "0-+# "
|
||||
|
||||
// formatState implements the fmt.Formatter interface and contains information
|
||||
// about the state of a formatting operation. The NewFormatter function can
|
||||
// be used to get a new Formatter which can be used directly as arguments
|
||||
// in standard fmt package printing calls.
|
||||
type formatState struct {
|
||||
value interface{}
|
||||
fs fmt.State
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// buildDefaultFormat recreates the original format string without precision
|
||||
// and width information to pass in to fmt.Sprintf in the case of an
|
||||
// unrecognized type. Unless new types are added to the language, this
|
||||
// function won't ever be called.
|
||||
func (f *formatState) buildDefaultFormat() (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteRune('v')
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// constructOrigFormat recreates the original format string including precision
|
||||
// and width information to pass along to the standard fmt package. This allows
|
||||
// automatic deferral of all format strings this package doesn't support.
|
||||
func (f *formatState) constructOrigFormat(verb rune) (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
if width, ok := f.fs.Width(); ok {
|
||||
buf.WriteString(strconv.Itoa(width))
|
||||
}
|
||||
|
||||
if precision, ok := f.fs.Precision(); ok {
|
||||
buf.Write(precisionBytes)
|
||||
buf.WriteString(strconv.Itoa(precision))
|
||||
}
|
||||
|
||||
buf.WriteRune(verb)
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible and
|
||||
// ensures that types for values which have been unpacked from an interface
|
||||
// are displayed when the show types flag is also set.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface {
|
||||
f.ignoreNextType = false
|
||||
if !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// formatPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (f *formatState) formatPtr(v reflect.Value) {
|
||||
// Display nil if top level pointer is nil.
|
||||
showTypes := f.fs.Flag('#')
|
||||
if v.IsNil() && (!showTypes || f.ignoreNextType) {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range f.pointers {
|
||||
if depth >= f.depth {
|
||||
delete(f.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to possibly show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by derferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
f.pointers[addr] = f.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type or indirection level depending on flags.
|
||||
if showTypes && !f.ignoreNextType {
|
||||
f.fs.Write(openParenBytes)
|
||||
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
f.fs.Write([]byte(ve.Type().String()))
|
||||
f.fs.Write(closeParenBytes)
|
||||
} else {
|
||||
if nilFound || cycleFound {
|
||||
indirects += strings.Count(ve.Type().String(), "*")
|
||||
}
|
||||
f.fs.Write(openAngleBytes)
|
||||
f.fs.Write([]byte(strings.Repeat("*", indirects)))
|
||||
f.fs.Write(closeAngleBytes)
|
||||
}
|
||||
|
||||
// Display pointer information depending on flags.
|
||||
if f.fs.Flag('+') && (len(pointerChain) > 0) {
|
||||
f.fs.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
f.fs.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(f.fs, addr)
|
||||
}
|
||||
f.fs.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
switch {
|
||||
case nilFound:
|
||||
f.fs.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound:
|
||||
f.fs.Write(circularShortBytes)
|
||||
|
||||
default:
|
||||
f.ignoreNextType = true
|
||||
f.format(ve)
|
||||
}
|
||||
}
|
||||
|
||||
// format is the main workhorse for providing the Formatter interface. It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately. It is a recursive function,
// however circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
	// Handle invalid reflect values immediately.
	kind := v.Kind()
	if kind == reflect.Invalid {
		f.fs.Write(invalidAngleBytes)
		return
	}

	// Handle pointers specially.
	if kind == reflect.Ptr {
		f.formatPtr(v)
		return
	}

	// Print type information unless already handled elsewhere.
	if !f.ignoreNextType && f.fs.Flag('#') {
		f.fs.Write(openParenBytes)
		f.fs.Write([]byte(v.Type().String()))
		f.fs.Write(closeParenBytes)
	}
	// ignoreNextType is a one-shot flag set by container cases below so
	// element types aren't repeated for every entry.
	f.ignoreNextType = false

	// Call Stringer/error interfaces if they exist and the handle methods
	// flag is enabled.
	if !f.cs.DisableMethods {
		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
			if handled := handleMethods(f.cs, f.fs, v); handled {
				return
			}
		}
	}

	switch kind {
	case reflect.Invalid:
		// Do nothing. We should never get here since invalid has already
		// been handled above.

	case reflect.Bool:
		printBool(f.fs, v.Bool())

	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		printInt(f.fs, v.Int(), 10)

	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		printUint(f.fs, v.Uint(), 10)

	case reflect.Float32:
		printFloat(f.fs, v.Float(), 32)

	case reflect.Float64:
		printFloat(f.fs, v.Float(), 64)

	case reflect.Complex64:
		printComplex(f.fs, v.Complex(), 32)

	case reflect.Complex128:
		printComplex(f.fs, v.Complex(), 64)

	case reflect.Slice:
		// nil slices render as <nil>; non-nil slices share the array path
		// via fallthrough.
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
			break
		}
		fallthrough

	case reflect.Array:
		f.fs.Write(openBracketBytes)
		f.depth++
		// MaxDepth == 0 means unlimited depth.
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			numEntries := v.Len()
			for i := 0; i < numEntries; i++ {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				f.ignoreNextType = true
				f.format(f.unpackValue(v.Index(i)))
			}
		}
		f.depth--
		f.fs.Write(closeBracketBytes)

	case reflect.String:
		f.fs.Write([]byte(v.String()))

	case reflect.Interface:
		// The only time we should get here is for nil interfaces due to
		// unpackValue calls.
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
		}

	case reflect.Ptr:
		// Do nothing. We should never get here since pointers have already
		// been handled above.

	case reflect.Map:
		// nil maps should be indicated as different than empty maps
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
			break
		}

		f.fs.Write(openMapBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			keys := v.MapKeys()
			// SortKeys gives deterministic output; map iteration order is
			// otherwise random.
			if f.cs.SortKeys {
				sortValues(keys, f.cs)
			}
			for i, key := range keys {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				f.ignoreNextType = true
				f.format(f.unpackValue(key))
				f.fs.Write(colonBytes)
				f.ignoreNextType = true
				f.format(f.unpackValue(v.MapIndex(key)))
			}
		}
		f.depth--
		f.fs.Write(closeMapBytes)

	case reflect.Struct:
		numFields := v.NumField()
		f.fs.Write(openBraceBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			vt := v.Type()
			for i := 0; i < numFields; i++ {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				vtf := vt.Field(i)
				// Field names are only emitted for the %+v and %#v verbs.
				if f.fs.Flag('+') || f.fs.Flag('#') {
					f.fs.Write([]byte(vtf.Name))
					f.fs.Write(colonBytes)
				}
				f.format(f.unpackValue(v.Field(i)))
			}
		}
		f.depth--
		f.fs.Write(closeBraceBytes)

	case reflect.Uintptr:
		printHexPtr(f.fs, uintptr(v.Uint()))

	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
		printHexPtr(f.fs, v.Pointer())

	// There were not any other types at the time this code was written, but
	// fall back to letting the default fmt package handle it if any get added.
	default:
		format := f.buildDefaultFormat()
		if v.CanInterface() {
			fmt.Fprintf(f.fs, format, v.Interface())
		} else {
			fmt.Fprintf(f.fs, format, v.String())
		}
	}
}
|
||||
|
||||
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
|
||||
// details.
|
||||
func (f *formatState) Format(fs fmt.State, verb rune) {
|
||||
f.fs = fs
|
||||
|
||||
// Use standard formatting for verbs that are not v.
|
||||
if verb != 'v' {
|
||||
format := f.constructOrigFormat(verb)
|
||||
fmt.Fprintf(fs, format, f.value)
|
||||
return
|
||||
}
|
||||
|
||||
if f.value == nil {
|
||||
if fs.Flag('#') {
|
||||
fs.Write(interfaceBytes)
|
||||
}
|
||||
fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
f.format(reflect.ValueOf(f.value))
|
||||
}
|
||||
|
||||
// newFormatter is a helper function to consolidate the logic from the various
|
||||
// public methods which take varying config states.
|
||||
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
|
||||
fs := &formatState{value: v, cs: cs}
|
||||
fs.pointers = make(map[uintptr]int)
|
||||
return fs
|
||||
}
|
||||
|
||||
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.

The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).

Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
Printf, Println, or Fprintf.
*/
func NewFormatter(v interface{}) fmt.Formatter {
	// Uses the package-level default Config.
	return newFormatter(&Config, v)
}
|
||||
148
vendor/github.com/davecgh/go-spew/spew/spew.go
generated
vendored
Normal file
148
vendor/github.com/davecgh/go-spew/spew/spew.go
generated
vendored
Normal file
@@ -0,0 +1,148 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the formatted string as a value that satisfies error. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Errorf(format string, a ...interface{}) (err error) {
	return fmt.Errorf(format, convertArgs(a)...) // each arg wrapped in a spew Formatter
}
|
||||
|
||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprint(w, convertArgs(a)...) // each arg wrapped in a spew Formatter
}
|
||||
|
||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
	return fmt.Fprintf(w, format, convertArgs(a)...) // each arg wrapped in a spew Formatter
}
|
||||
|
||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprintln(w, convertArgs(a)...) // each arg wrapped in a spew Formatter
}
|
||||
|
||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
func Print(a ...interface{}) (n int, err error) {
	return fmt.Print(convertArgs(a)...) // each arg wrapped in a spew Formatter
}
|
||||
|
||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Printf(format string, a ...interface{}) (n int, err error) {
	return fmt.Printf(format, convertArgs(a)...) // each arg wrapped in a spew Formatter
}
|
||||
|
||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
func Println(a ...interface{}) (n int, err error) {
	return fmt.Println(convertArgs(a)...) // each arg wrapped in a spew Formatter
}
|
||||
|
||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprint(a ...interface{}) string {
	return fmt.Sprint(convertArgs(a)...) // each arg wrapped in a spew Formatter
}
|
||||
|
||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintf(format string, a ...interface{}) string {
	return fmt.Sprintf(format, convertArgs(a)...) // each arg wrapped in a spew Formatter
}
|
||||
|
||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintln(a ...interface{}) string {
	return fmt.Sprintln(convertArgs(a)...) // each arg wrapped in a spew Formatter
}
|
||||
|
||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||
// length with each argument converted to a default spew Formatter interface.
|
||||
func convertArgs(args []interface{}) (formatters []interface{}) {
|
||||
formatters = make([]interface{}, len(args))
|
||||
for index, arg := range args {
|
||||
formatters[index] = NewFormatter(arg)
|
||||
}
|
||||
return formatters
|
||||
}
|
||||
18
vendor/github.com/guoyk93/rg/.gitignore
generated
vendored
Normal file
18
vendor/github.com/guoyk93/rg/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
# Dependency directories (remove the comment below to include it)
|
||||
# vendor/
|
||||
|
||||
# GoLand
|
||||
.idea/
|
||||
21
vendor/github.com/guoyk93/rg/LICENSE
generated
vendored
Normal file
21
vendor/github.com/guoyk93/rg/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2023 GUO YANKE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
62
vendor/github.com/guoyk93/rg/README.md
generated
vendored
Normal file
62
vendor/github.com/guoyk93/rg/README.md
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
# rg
|
||||
|
||||
[](https://pkg.go.dev/github.com/guoyk93/rg)
|
||||
[](https://github.com/guoyk93/rg/actions/workflows/go.yml)
|
||||
|
||||
`rg (Royal Guard)` is a generics based throw-catch approach in Go
|
||||
|
||||
## Usage
|
||||
|
||||
Any function with the latest return value of type `error` can be wrapped by `rg.Must` (or `rg.Must2`, `rg.Must3` ...)
|
||||
|
||||
## Example
|
||||
|
||||
```go
|
||||
package demo
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/guoyk93/rg"
|
||||
"gopkg.in/yaml.v3"
|
||||
"os"
|
||||
)
|
||||
|
||||
// jsonFileToYAMLUgly this is a demo function WITHOUT rg
|
||||
func jsonFileToYAMLUgly(filename string) (err error) {
|
||||
var buf []byte
|
||||
if buf, err = os.ReadFile(filename); err != nil {
|
||||
return
|
||||
}
|
||||
var m map[string]interface{}
|
||||
if err = json.Unmarshal(buf, &m); err != nil {
|
||||
return
|
||||
}
|
||||
if buf, err = yaml.Marshal(m); err != nil {
|
||||
return
|
||||
	}
	if err = os.WriteFile(filename+".yaml", buf, 0640); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// jsonFileToYAML this is a demo function WITH rg
|
||||
func jsonFileToYAML(filename string) (err error) {
|
||||
defer rg.Guard(&err)
|
||||
buf := rg.Must(os.ReadFile(filename))
|
||||
var m map[string]interface{}
|
||||
rg.Must0(json.Unmarshal(buf, &m))
|
||||
buf = rg.Must(yaml.Marshal(m))
|
||||
rg.Must0(os.WriteFile(filename+".yaml", buf, 0640))
|
||||
return
|
||||
}
|
||||
```
|
||||
|
||||
## Donation
|
||||
|
||||
See https://guoyk.xyz/donation
|
||||
|
||||
## Credits
|
||||
|
||||
GUO YANKE, MIT License
|
||||
80
vendor/github.com/guoyk93/rg/guard.go
generated
vendored
Normal file
80
vendor/github.com/guoyk93/rg/guard.go
generated
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Guard is meant to be deferred: it recovers from an in-flight panic and
// stores it into *err. A panic value that already satisfies error is stored
// as-is; any other value is wrapped as "panic: <value>".
func Guard(err *error) {
	r := recover()
	if r == nil {
		return
	}
	if re, ok := r.(error); ok {
		*err = re
		return
	}
	*err = fmt.Errorf("panic: %v", r)
}
|
||||
|
||||
// Must0 panics when err is non-nil; a nil err is a no-op.
func Must0(err error) {
	if err == nil {
		return
	}
	panic(err)
}
|
||||
|
||||
// Must panics when err is non-nil; otherwise it returns v.
func Must[T any](v T, err error) T {
	// Guard-clause form: idiomatic (no else after a terminating branch) and
	// consistent with Must2..Must7 in this file.
	if err != nil {
		panic(err)
	}
	return v
}
|
||||
|
||||
// Must2 panics when err is non-nil; otherwise it returns the two values.
func Must2[T1 any, T2 any](v1 T1, v2 T2, err error) (T1, T2) {
	if err == nil {
		return v1, v2
	}
	panic(err)
}
|
||||
|
||||
// Must3 panics when err is non-nil; otherwise it returns the three values.
func Must3[T1 any, T2 any, T3 any](v1 T1, v2 T2, v3 T3, err error) (T1, T2, T3) {
	if err == nil {
		return v1, v2, v3
	}
	panic(err)
}
|
||||
|
||||
// Must4 panics when err is non-nil; otherwise it returns the four values.
func Must4[T1 any, T2 any, T3 any, T4 any](v1 T1, v2 T2, v3 T3, v4 T4, err error) (T1, T2, T3, T4) {
	if err == nil {
		return v1, v2, v3, v4
	}
	panic(err)
}
|
||||
|
||||
// Must5 panics when err is non-nil; otherwise it returns the five values.
func Must5[T1 any, T2 any, T3 any, T4 any, T5 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, err error) (T1, T2, T3, T4, T5) {
	if err == nil {
		return v1, v2, v3, v4, v5
	}
	panic(err)
}
|
||||
|
||||
// Must6 panics when err is non-nil; otherwise it returns the six values.
func Must6[T1 any, T2 any, T3 any, T4 any, T5 any, T6 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, err error) (T1, T2, T3, T4, T5, T6) {
	if err == nil {
		return v1, v2, v3, v4, v5, v6
	}
	panic(err)
}
|
||||
|
||||
// Must7 panics when err is non-nil; otherwise it returns the seven values.
func Must7[T1 any, T2 any, T3 any, T4 any, T5 any, T6 any, T7 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, err error) (T1, T2, T3, T4, T5, T6, T7) {
	if err == nil {
		return v1, v2, v3, v4, v5, v6, v7
	}
	panic(err)
}
|
||||
27
vendor/github.com/pmezard/go-difflib/LICENSE
generated
vendored
Normal file
27
vendor/github.com/pmezard/go-difflib/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2013, Patrick Mezard
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
The names of its contributors may not be used to endorse or promote
|
||||
products derived from this software without specific prior written
|
||||
permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
772
vendor/github.com/pmezard/go-difflib/difflib/difflib.go
generated
vendored
Normal file
772
vendor/github.com/pmezard/go-difflib/difflib/difflib.go
generated
vendored
Normal file
@@ -0,0 +1,772 @@
|
||||
// Package difflib is a partial port of Python difflib module.
|
||||
//
|
||||
// It provides tools to compare sequences of strings and generate textual diffs.
|
||||
//
|
||||
// The following class and functions have been ported:
|
||||
//
|
||||
// - SequenceMatcher
|
||||
//
|
||||
// - unified_diff
|
||||
//
|
||||
// - context_diff
|
||||
//
|
||||
// Getting unified diffs was the main goal of the port. Keep in mind this code
|
||||
// is mostly suitable to output text differences in a human friendly way, there
|
||||
// are no guarantees generated diffs are consumable by patch(1).
|
||||
package difflib
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// min returns the smaller of a and b.
func min(a, b int) int {
	if a > b {
		return b
	}
	return a
}
|
||||
|
||||
// max returns the larger of a and b.
func max(a, b int) int {
	if a < b {
		return b
	}
	return a
}
|
||||
|
||||
// calculateRatio computes 2*matches/length, the similarity measure used by
// SequenceMatcher. A non-positive length (an empty comparison) counts as a
// perfect match and yields 1.0.
func calculateRatio(matches, length int) float64 {
	if length <= 0 {
		return 1.0
	}
	return 2.0 * float64(matches) / float64(length)
}
|
||||
|
||||
// Match describes a run of equal elements: a[A : A+Size] == b[B : B+Size].
type Match struct {
	A    int // start index of the run in sequence a
	B    int // start index of the run in sequence b
	Size int // number of matching elements in the run
}
|
||||
|
||||
// OpCode describes a single edit operation transforming a[I1:I2] into
// b[J1:J2]. Tag encodes the kind of operation (difflib-style; presumably
// 'r' replace, 'd' delete, 'i' insert, 'e' equal — confirm against the
// opcode-producing method defined elsewhere in this file).
type OpCode struct {
	Tag byte
	I1  int
	I2  int
	J1  int
	J2  int
}
|
||||
|
||||
// SequenceMatcher compares sequence of strings. The basic
|
||||
// algorithm predates, and is a little fancier than, an algorithm
|
||||
// published in the late 1980's by Ratcliff and Obershelp under the
|
||||
// hyperbolic name "gestalt pattern matching". The basic idea is to find
|
||||
// the longest contiguous matching subsequence that contains no "junk"
|
||||
// elements (R-O doesn't address junk). The same idea is then applied
|
||||
// recursively to the pieces of the sequences to the left and to the right
|
||||
// of the matching subsequence. This does not yield minimal edit
|
||||
// sequences, but does tend to yield matches that "look right" to people.
|
||||
//
|
||||
// SequenceMatcher tries to compute a "human-friendly diff" between two
|
||||
// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
|
||||
// longest *contiguous* & junk-free matching subsequence. That's what
|
||||
// catches peoples' eyes. The Windows(tm) windiff has another interesting
|
||||
// notion, pairing up elements that appear uniquely in each sequence.
|
||||
// That, and the method here, appear to yield more intuitive difference
|
||||
// reports than does diff. This method appears to be the least vulnerable
|
||||
// to synching up on blocks of "junk lines", though (like blank lines in
|
||||
// ordinary text files, or maybe "<P>" lines in HTML files). That may be
|
||||
// because this is the only method of the 3 that has a *concept* of
|
||||
// "junk" <wink>.
|
||||
//
|
||||
// Timing: Basic R-O is cubic time worst case and quadratic time expected
|
||||
// case. SequenceMatcher is quadratic time for the worst case and has
|
||||
// expected-case behavior dependent in a complicated way on how many
|
||||
// elements the sequences have in common; best case time is linear.
|
||||
type SequenceMatcher struct {
	a              []string            // first sequence
	b              []string            // second sequence
	b2j            map[string][]int    // b line -> indices of its occurrences (junk/popular purged by chainB)
	IsJunk         func(string) bool   // optional predicate marking junk lines in b
	autoJunk       bool                // when true and len(b) >= 200, lines occurring in >1% of b are treated as popular and purged
	bJunk          map[string]struct{} // set of junk lines found in b
	matchingBlocks []Match             // cache for GetMatchingBlocks
	fullBCount     map[string]int      // lazily-built line counts of b — used elsewhere; TODO confirm
	bPopular       map[string]struct{} // set of "popular" lines purged by autoJunk
	opCodes        []OpCode            // opcode cache — filled elsewhere in this file; TODO confirm
}
|
||||
|
||||
func NewMatcher(a, b []string) *SequenceMatcher {
|
||||
m := SequenceMatcher{autoJunk: true}
|
||||
m.SetSeqs(a, b)
|
||||
return &m
|
||||
}
|
||||
|
||||
func NewMatcherWithJunk(a, b []string, autoJunk bool,
|
||||
isJunk func(string) bool) *SequenceMatcher {
|
||||
|
||||
m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
|
||||
m.SetSeqs(a, b)
|
||||
return &m
|
||||
}
|
||||
|
||||
// SetSeqs sets the two sequences to be compared.
func (m *SequenceMatcher) SetSeqs(a, b []string) {
	m.SetSeq1(a)
	m.SetSeq2(b)
}
|
||||
|
||||
// SetSeq1 sets the first sequence to be compared. The second sequence is
// not changed.
//
// SequenceMatcher computes and caches detailed information about the second
// sequence, so if you want to compare one sequence S against many sequences,
// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
// sequences.
//
// See also SetSeqs() and SetSeq2().
func (m *SequenceMatcher) SetSeq1(a []string) {
	// NOTE(review): &a is the address of the local parameter, which can
	// never equal &m.a, so this guard is always false and the caches below
	// are always invalidated. Harmless, but effectively dead code.
	if &a == &m.a {
		return
	}
	m.a = a
	m.matchingBlocks = nil
	m.opCodes = nil
}
|
||||
|
||||
// SetSeq2 sets the second sequence to be compared and rebuilds its cached
// index (chainB). The first sequence is not changed.
func (m *SequenceMatcher) SetSeq2(b []string) {
	// NOTE(review): &b is the address of the local parameter, which can
	// never equal &m.b, so this guard is always false and the caches are
	// always rebuilt. Harmless, but effectively dead code.
	if &b == &m.b {
		return
	}
	m.b = b
	m.matchingBlocks = nil
	m.opCodes = nil
	m.fullBCount = nil
	m.chainB()
}
|
||||
|
||||
func (m *SequenceMatcher) chainB() {
|
||||
// Populate line -> index mapping
|
||||
b2j := map[string][]int{}
|
||||
for i, s := range m.b {
|
||||
indices := b2j[s]
|
||||
indices = append(indices, i)
|
||||
b2j[s] = indices
|
||||
}
|
||||
|
||||
// Purge junk elements
|
||||
m.bJunk = map[string]struct{}{}
|
||||
if m.IsJunk != nil {
|
||||
junk := m.bJunk
|
||||
for s, _ := range b2j {
|
||||
if m.IsJunk(s) {
|
||||
junk[s] = struct{}{}
|
||||
}
|
||||
}
|
||||
for s, _ := range junk {
|
||||
delete(b2j, s)
|
||||
}
|
||||
}
|
||||
|
||||
// Purge remaining popular elements
|
||||
popular := map[string]struct{}{}
|
||||
n := len(m.b)
|
||||
if m.autoJunk && n >= 200 {
|
||||
ntest := n/100 + 1
|
||||
for s, indices := range b2j {
|
||||
if len(indices) > ntest {
|
||||
popular[s] = struct{}{}
|
||||
}
|
||||
}
|
||||
for s, _ := range popular {
|
||||
delete(b2j, s)
|
||||
}
|
||||
}
|
||||
m.bPopular = popular
|
||||
m.b2j = b2j
|
||||
}
|
||||
|
||||
func (m *SequenceMatcher) isBJunk(s string) bool {
|
||||
_, ok := m.bJunk[s]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Find longest matching block in a[alo:ahi] and b[blo:bhi].
|
||||
//
|
||||
// If IsJunk is not defined:
|
||||
//
|
||||
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
|
||||
// alo <= i <= i+k <= ahi
|
||||
// blo <= j <= j+k <= bhi
|
||||
// and for all (i',j',k') meeting those conditions,
|
||||
// k >= k'
|
||||
// i <= i'
|
||||
// and if i == i', j <= j'
|
||||
//
|
||||
// In other words, of all maximal matching blocks, return one that
|
||||
// starts earliest in a, and of all those maximal matching blocks that
|
||||
// start earliest in a, return the one that starts earliest in b.
|
||||
//
|
||||
// If IsJunk is defined, first the longest matching block is
|
||||
// determined as above, but with the additional restriction that no
|
||||
// junk element appears in the block. Then that block is extended as
|
||||
// far as possible by matching (only) junk elements on both sides. So
|
||||
// the resulting block never matches on junk except as identical junk
|
||||
// happens to be adjacent to an "interesting" match.
|
||||
//
|
||||
// If no blocks match, return (alo, blo, 0).
|
||||
func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
	// CAUTION: stripping common prefix or suffix would be incorrect.
	// E.g.,
	// ab
	// acab
	// Longest matching block is "ab", but if common prefix is
	// stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
	// strip, so ends up claiming that ab is changed to acab by
	// inserting "ca" in the middle. That's minimal but unintuitive:
	// "it's obvious" that someone inserted "ac" at the front.
	// Windiff ends up at the same place as diff, but by pairing up
	// the unique 'b's and then matching the first two 'a's.
	besti, bestj, bestsize := alo, blo, 0

	// find longest junk-free match
	// during an iteration of the loop, j2len[j] = length of longest
	// junk-free match ending with a[i-1] and b[j]
	j2len := map[int]int{}
	for i := alo; i != ahi; i++ {
		// look at all instances of a[i] in b; note that because
		// b2j has no junk keys, the loop is skipped if a[i] is junk
		newj2len := map[int]int{}
		for _, j := range m.b2j[m.a[i]] {
			// a[i] matches b[j]
			if j < blo {
				continue
			}
			if j >= bhi {
				break
			}
			// extend the run that ended at a[i-1]/b[j-1] by one element
			k := j2len[j-1] + 1
			newj2len[j] = k
			if k > bestsize {
				besti, bestj, bestsize = i-k+1, j-k+1, k
			}
		}
		j2len = newj2len
	}

	// Extend the best by non-junk elements on each end. In particular,
	// "popular" non-junk elements aren't in b2j, which greatly speeds
	// the inner loop above, but also means "the best" match so far
	// doesn't contain any junk *or* popular non-junk elements.
	for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
		m.a[besti-1] == m.b[bestj-1] {
		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
	}
	for besti+bestsize < ahi && bestj+bestsize < bhi &&
		!m.isBJunk(m.b[bestj+bestsize]) &&
		m.a[besti+bestsize] == m.b[bestj+bestsize] {
		bestsize += 1
	}

	// Now that we have a wholly interesting match (albeit possibly
	// empty!), we may as well suck up the matching junk on each
	// side of it too. Can't think of a good reason not to, and it
	// saves post-processing the (possibly considerable) expense of
	// figuring out what to do with it. In the case of an empty
	// interesting match, this is clearly the right thing to do,
	// because no other kind of match is possible in the regions.
	for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
		m.a[besti-1] == m.b[bestj-1] {
		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
	}
	for besti+bestsize < ahi && bestj+bestsize < bhi &&
		m.isBJunk(m.b[bestj+bestsize]) &&
		m.a[besti+bestsize] == m.b[bestj+bestsize] {
		bestsize += 1
	}

	return Match{A: besti, B: bestj, Size: bestsize}
}
|
||||
|
||||
// Return list of triples describing matching subsequences.
//
// Each triple is of the form (i, j, n), and means that
// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
// adjacent triples in the list, and the second is not the last triple in the
// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
// adjacent equal blocks.
//
// The last triple is a dummy, (len(a), len(b), 0), and is the only
// triple with n==0.
func (m *SequenceMatcher) GetMatchingBlocks() []Match {
	// Return the memoized result if it has already been computed.
	if m.matchingBlocks != nil {
		return m.matchingBlocks
	}

	// Divide and conquer: find the longest match in the region, then
	// recurse on the unmatched regions to its left and right. Because the
	// left region is processed before the match and the match before the
	// right region, the appended matches come out sorted by position.
	var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
	matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
		match := m.findLongestMatch(alo, ahi, blo, bhi)
		i, j, k := match.A, match.B, match.Size
		if match.Size > 0 {
			if alo < i && blo < j {
				matched = matchBlocks(alo, i, blo, j, matched)
			}
			matched = append(matched, match)
			if i+k < ahi && j+k < bhi {
				matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
			}
		}
		return matched
	}
	matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)

	// It's possible that we have adjacent equal blocks in the
	// matching_blocks list now. Collapse any such run into a single block.
	nonAdjacent := []Match{}
	i1, j1, k1 := 0, 0, 0
	for _, b := range matched {
		// Is this block adjacent to i1, j1, k1?
		i2, j2, k2 := b.A, b.B, b.Size
		if i1+k1 == i2 && j1+k1 == j2 {
			// Yes, so collapse them -- this just increases the length of
			// the first block by the length of the second, and the first
			// block so lengthened remains the block to compare against.
			k1 += k2
		} else {
			// Not adjacent. Remember the first block (k1==0 means it's
			// the dummy we started with), and make the second block the
			// new block to compare against.
			if k1 > 0 {
				nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
			}
			i1, j1, k1 = i2, j2, k2
		}
	}
	// Flush the final pending block, if any.
	if k1 > 0 {
		nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
	}

	// Terminate with the sentinel (len(a), len(b), 0) and memoize.
	nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
	m.matchingBlocks = nonAdjacent
	return m.matchingBlocks
}
|
||||
|
||||
// Return list of 5-tuples describing how to turn a into b.
|
||||
//
|
||||
// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
|
||||
// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
|
||||
// tuple preceding it, and likewise for j1 == the previous j2.
|
||||
//
|
||||
// The tags are characters, with these meanings:
|
||||
//
|
||||
// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
|
||||
//
|
||||
// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
|
||||
//
|
||||
// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
|
||||
//
|
||||
// 'e' (equal): a[i1:i2] == b[j1:j2]
|
||||
func (m *SequenceMatcher) GetOpCodes() []OpCode {
|
||||
if m.opCodes != nil {
|
||||
return m.opCodes
|
||||
}
|
||||
i, j := 0, 0
|
||||
matching := m.GetMatchingBlocks()
|
||||
opCodes := make([]OpCode, 0, len(matching))
|
||||
for _, m := range matching {
|
||||
// invariant: we've pumped out correct diffs to change
|
||||
// a[:i] into b[:j], and the next matching block is
|
||||
// a[ai:ai+size] == b[bj:bj+size]. So we need to pump
|
||||
// out a diff to change a[i:ai] into b[j:bj], pump out
|
||||
// the matching block, and move (i,j) beyond the match
|
||||
ai, bj, size := m.A, m.B, m.Size
|
||||
tag := byte(0)
|
||||
if i < ai && j < bj {
|
||||
tag = 'r'
|
||||
} else if i < ai {
|
||||
tag = 'd'
|
||||
} else if j < bj {
|
||||
tag = 'i'
|
||||
}
|
||||
if tag > 0 {
|
||||
opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
|
||||
}
|
||||
i, j = ai+size, bj+size
|
||||
// the list of matching blocks is terminated by a
|
||||
// sentinel with size 0
|
||||
if size > 0 {
|
||||
opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
|
||||
}
|
||||
}
|
||||
m.opCodes = opCodes
|
||||
return m.opCodes
|
||||
}
|
||||
|
||||
// Isolate change clusters by eliminating ranges with no changes.
|
||||
//
|
||||
// Return a generator of groups with up to n lines of context.
|
||||
// Each group is in the same format as returned by GetOpCodes().
|
||||
func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
|
||||
if n < 0 {
|
||||
n = 3
|
||||
}
|
||||
codes := m.GetOpCodes()
|
||||
if len(codes) == 0 {
|
||||
codes = []OpCode{OpCode{'e', 0, 1, 0, 1}}
|
||||
}
|
||||
// Fixup leading and trailing groups if they show no changes.
|
||||
if codes[0].Tag == 'e' {
|
||||
c := codes[0]
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
|
||||
}
|
||||
if codes[len(codes)-1].Tag == 'e' {
|
||||
c := codes[len(codes)-1]
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
|
||||
}
|
||||
nn := n + n
|
||||
groups := [][]OpCode{}
|
||||
group := []OpCode{}
|
||||
for _, c := range codes {
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
// End the current group and start a new one whenever
|
||||
// there is a large range with no changes.
|
||||
if c.Tag == 'e' && i2-i1 > nn {
|
||||
group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
|
||||
j1, min(j2, j1+n)})
|
||||
groups = append(groups, group)
|
||||
group = []OpCode{}
|
||||
i1, j1 = max(i1, i2-n), max(j1, j2-n)
|
||||
}
|
||||
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
|
||||
}
|
||||
if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
|
||||
groups = append(groups, group)
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
||||
// Return a measure of the sequences' similarity (float in [0,1]).
|
||||
//
|
||||
// Where T is the total number of elements in both sequences, and
|
||||
// M is the number of matches, this is 2.0*M / T.
|
||||
// Note that this is 1 if the sequences are identical, and 0 if
|
||||
// they have nothing in common.
|
||||
//
|
||||
// .Ratio() is expensive to compute if you haven't already computed
|
||||
// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
|
||||
// want to try .QuickRatio() or .RealQuickRation() first to get an
|
||||
// upper bound.
|
||||
func (m *SequenceMatcher) Ratio() float64 {
|
||||
matches := 0
|
||||
for _, m := range m.GetMatchingBlocks() {
|
||||
matches += m.Size
|
||||
}
|
||||
return calculateRatio(matches, len(m.a)+len(m.b))
|
||||
}
|
||||
|
||||
// Return an upper bound on ratio() relatively quickly.
//
// This isn't defined beyond that it is an upper bound on .Ratio(), and
// is faster to compute.
func (m *SequenceMatcher) QuickRatio() float64 {
	// viewing a and b as multisets, set matches to the cardinality
	// of their intersection; this counts the number of matches
	// without regard to order, so is clearly an upper bound
	if m.fullBCount == nil {
		// Lazily build and memoize a histogram of the lines of b.
		m.fullBCount = map[string]int{}
		for _, s := range m.b {
			m.fullBCount[s] = m.fullBCount[s] + 1
		}
	}

	// avail[x] is the number of times x appears in 'b' less the
	// number of times we've seen it in 'a' so far ... kinda
	avail := map[string]int{}
	matches := 0
	for _, s := range m.a {
		n, ok := avail[s]
		if !ok {
			// First time seeing s in a: start from b's full count.
			n = m.fullBCount[s]
		}
		avail[s] = n - 1
		if n > 0 {
			// s still has unmatched occurrences remaining in b.
			matches += 1
		}
	}
	return calculateRatio(matches, len(m.a)+len(m.b))
}
|
||||
|
||||
// Return an upper bound on ratio() very quickly.
|
||||
//
|
||||
// This isn't defined beyond that it is an upper bound on .Ratio(), and
|
||||
// is faster to compute than either .Ratio() or .QuickRatio().
|
||||
func (m *SequenceMatcher) RealQuickRatio() float64 {
|
||||
la, lb := len(m.a), len(m.b)
|
||||
return calculateRatio(min(la, lb), la+lb)
|
||||
}
|
||||
|
||||
// Convert range to the "ed" format
func formatRangeUnified(start, stop int) string {
	// Per the diff spec at http://www.unix.org/single_unix_specification/
	// Diff output numbers lines starting at one.
	first := start + 1
	count := stop - start
	if count == 1 {
		// A one-line range is written as a bare line number.
		return fmt.Sprintf("%d", first)
	}
	if count == 0 {
		// Empty ranges begin at the line just before the range.
		first -= 1
	}
	return fmt.Sprintf("%d,%d", first, count)
}
|
||||
|
||||
// Unified diff parameters
type UnifiedDiff struct {
	A        []string // First sequence lines
	FromFile string   // First file name
	FromDate string   // First file time
	B        []string // Second sequence lines
	ToFile   string   // Second file name
	ToDate   string   // Second file time
	Eol      string   // Headers end of line, defaults to LF
	Context  int      // Number of context lines
}
|
||||
|
||||
// Compare two sequences of lines; generate the delta as a unified diff.
|
||||
//
|
||||
// Unified diffs are a compact way of showing line changes and a few
|
||||
// lines of context. The number of context lines is set by 'n' which
|
||||
// defaults to three.
|
||||
//
|
||||
// By default, the diff control lines (those with ---, +++, or @@) are
|
||||
// created with a trailing newline. This is helpful so that inputs
|
||||
// created from file.readlines() result in diffs that are suitable for
|
||||
// file.writelines() since both the inputs and outputs have trailing
|
||||
// newlines.
|
||||
//
|
||||
// For inputs that do not have trailing newlines, set the lineterm
|
||||
// argument to "" so that the output will be uniformly newline free.
|
||||
//
|
||||
// The unidiff format normally has a header for filenames and modification
|
||||
// times. Any or all of these may be specified using strings for
|
||||
// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
|
||||
// The modification times are normally expressed in the ISO 8601 format.
|
||||
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
|
||||
buf := bufio.NewWriter(writer)
|
||||
defer buf.Flush()
|
||||
wf := func(format string, args ...interface{}) error {
|
||||
_, err := buf.WriteString(fmt.Sprintf(format, args...))
|
||||
return err
|
||||
}
|
||||
ws := func(s string) error {
|
||||
_, err := buf.WriteString(s)
|
||||
return err
|
||||
}
|
||||
|
||||
if len(diff.Eol) == 0 {
|
||||
diff.Eol = "\n"
|
||||
}
|
||||
|
||||
started := false
|
||||
m := NewMatcher(diff.A, diff.B)
|
||||
for _, g := range m.GetGroupedOpCodes(diff.Context) {
|
||||
if !started {
|
||||
started = true
|
||||
fromDate := ""
|
||||
if len(diff.FromDate) > 0 {
|
||||
fromDate = "\t" + diff.FromDate
|
||||
}
|
||||
toDate := ""
|
||||
if len(diff.ToDate) > 0 {
|
||||
toDate = "\t" + diff.ToDate
|
||||
}
|
||||
if diff.FromFile != "" || diff.ToFile != "" {
|
||||
err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
first, last := g[0], g[len(g)-1]
|
||||
range1 := formatRangeUnified(first.I1, last.I2)
|
||||
range2 := formatRangeUnified(first.J1, last.J2)
|
||||
if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, c := range g {
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
if c.Tag == 'e' {
|
||||
for _, line := range diff.A[i1:i2] {
|
||||
if err := ws(" " + line); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if c.Tag == 'r' || c.Tag == 'd' {
|
||||
for _, line := range diff.A[i1:i2] {
|
||||
if err := ws("-" + line); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if c.Tag == 'r' || c.Tag == 'i' {
|
||||
for _, line := range diff.B[j1:j2] {
|
||||
if err := ws("+" + line); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Like WriteUnifiedDiff but returns the diff a string.
|
||||
func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
|
||||
w := &bytes.Buffer{}
|
||||
err := WriteUnifiedDiff(w, diff)
|
||||
return string(w.Bytes()), err
|
||||
}
|
||||
|
||||
// Convert range to the "ed" format.
func formatRangeContext(start, stop int) string {
	// Per the diff spec at http://www.unix.org/single_unix_specification/
	// Diff output numbers lines starting at one.
	first := start + 1
	count := stop - start
	if count == 0 {
		// Empty ranges begin at the line just before the range.
		first -= 1
	}
	if count <= 1 {
		// Zero- or one-line ranges are written as a bare line number.
		return fmt.Sprintf("%d", first)
	}
	// Context format writes an inclusive first,last pair.
	return fmt.Sprintf("%d,%d", first, first+count-1)
}
|
||||
|
||||
// ContextDiff carries the same parameters as UnifiedDiff, rendered in the
// "context diff" format by WriteContextDiff.
type ContextDiff UnifiedDiff
|
||||
|
||||
// Compare two sequences of lines; generate the delta as a context diff.
|
||||
//
|
||||
// Context diffs are a compact way of showing line changes and a few
|
||||
// lines of context. The number of context lines is set by diff.Context
|
||||
// which defaults to three.
|
||||
//
|
||||
// By default, the diff control lines (those with *** or ---) are
|
||||
// created with a trailing newline.
|
||||
//
|
||||
// For inputs that do not have trailing newlines, set the diff.Eol
|
||||
// argument to "" so that the output will be uniformly newline free.
|
||||
//
|
||||
// The context diff format normally has a header for filenames and
|
||||
// modification times. Any or all of these may be specified using
|
||||
// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
|
||||
// The modification times are normally expressed in the ISO 8601 format.
|
||||
// If not specified, the strings default to blanks.
|
||||
func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
|
||||
buf := bufio.NewWriter(writer)
|
||||
defer buf.Flush()
|
||||
var diffErr error
|
||||
wf := func(format string, args ...interface{}) {
|
||||
_, err := buf.WriteString(fmt.Sprintf(format, args...))
|
||||
if diffErr == nil && err != nil {
|
||||
diffErr = err
|
||||
}
|
||||
}
|
||||
ws := func(s string) {
|
||||
_, err := buf.WriteString(s)
|
||||
if diffErr == nil && err != nil {
|
||||
diffErr = err
|
||||
}
|
||||
}
|
||||
|
||||
if len(diff.Eol) == 0 {
|
||||
diff.Eol = "\n"
|
||||
}
|
||||
|
||||
prefix := map[byte]string{
|
||||
'i': "+ ",
|
||||
'd': "- ",
|
||||
'r': "! ",
|
||||
'e': " ",
|
||||
}
|
||||
|
||||
started := false
|
||||
m := NewMatcher(diff.A, diff.B)
|
||||
for _, g := range m.GetGroupedOpCodes(diff.Context) {
|
||||
if !started {
|
||||
started = true
|
||||
fromDate := ""
|
||||
if len(diff.FromDate) > 0 {
|
||||
fromDate = "\t" + diff.FromDate
|
||||
}
|
||||
toDate := ""
|
||||
if len(diff.ToDate) > 0 {
|
||||
toDate = "\t" + diff.ToDate
|
||||
}
|
||||
if diff.FromFile != "" || diff.ToFile != "" {
|
||||
wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
|
||||
wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
|
||||
}
|
||||
}
|
||||
|
||||
first, last := g[0], g[len(g)-1]
|
||||
ws("***************" + diff.Eol)
|
||||
|
||||
range1 := formatRangeContext(first.I1, last.I2)
|
||||
wf("*** %s ****%s", range1, diff.Eol)
|
||||
for _, c := range g {
|
||||
if c.Tag == 'r' || c.Tag == 'd' {
|
||||
for _, cc := range g {
|
||||
if cc.Tag == 'i' {
|
||||
continue
|
||||
}
|
||||
for _, line := range diff.A[cc.I1:cc.I2] {
|
||||
ws(prefix[cc.Tag] + line)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
range2 := formatRangeContext(first.J1, last.J2)
|
||||
wf("--- %s ----%s", range2, diff.Eol)
|
||||
for _, c := range g {
|
||||
if c.Tag == 'r' || c.Tag == 'i' {
|
||||
for _, cc := range g {
|
||||
if cc.Tag == 'd' {
|
||||
continue
|
||||
}
|
||||
for _, line := range diff.B[cc.J1:cc.J2] {
|
||||
ws(prefix[cc.Tag] + line)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return diffErr
|
||||
}
|
||||
|
||||
// Like WriteContextDiff but returns the diff a string.
|
||||
func GetContextDiffString(diff ContextDiff) (string, error) {
|
||||
w := &bytes.Buffer{}
|
||||
err := WriteContextDiff(w, diff)
|
||||
return string(w.Bytes()), err
|
||||
}
|
||||
|
||||
// Split a string on "\n" while preserving them. The output can be used
// as input for UnifiedDiff and ContextDiff structures.
func SplitLines(s string) []string {
	result := strings.SplitAfter(s, "\n")
	// The final element (the text after the last "\n", possibly empty) is
	// given a trailing newline so every returned line ends with one.
	last := len(result) - 1
	result[last] = result[last] + "\n"
	return result
}
|
||||
22
vendor/github.com/robfig/cron/v3/.gitignore
generated
vendored
Normal file
22
vendor/github.com/robfig/cron/v3/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
1
vendor/github.com/robfig/cron/v3/.travis.yml
generated
vendored
Normal file
1
vendor/github.com/robfig/cron/v3/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
language: go
|
||||
21
vendor/github.com/robfig/cron/v3/LICENSE
generated
vendored
Normal file
21
vendor/github.com/robfig/cron/v3/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
Copyright (C) 2012 Rob Figueiredo
|
||||
All Rights Reserved.
|
||||
|
||||
MIT LICENSE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
125
vendor/github.com/robfig/cron/v3/README.md
generated
vendored
Normal file
125
vendor/github.com/robfig/cron/v3/README.md
generated
vendored
Normal file
@@ -0,0 +1,125 @@
|
||||
[](http://godoc.org/github.com/robfig/cron)
|
||||
[](https://travis-ci.org/robfig/cron)
|
||||
|
||||
# cron
|
||||
|
||||
Cron V3 has been released!
|
||||
|
||||
To download the specific tagged release, run:
|
||||
|
||||
go get github.com/robfig/cron/v3@v3.0.0
|
||||
|
||||
Import it in your program as:
|
||||
|
||||
import "github.com/robfig/cron/v3"
|
||||
|
||||
It requires Go 1.11 or later due to usage of Go Modules.
|
||||
|
||||
Refer to the documentation here:
|
||||
http://godoc.org/github.com/robfig/cron
|
||||
|
||||
The rest of this document describes the advances in v3 and a list of
|
||||
breaking changes for users that wish to upgrade from an earlier version.
|
||||
|
||||
## Upgrading to v3 (June 2019)
|
||||
|
||||
cron v3 is a major upgrade to the library that addresses all outstanding bugs,
|
||||
feature requests, and rough edges. It is based on a merge of master which
|
||||
contains various fixes to issues found over the years and the v2 branch which
|
||||
contains some backwards-incompatible features like the ability to remove cron
|
||||
jobs. In addition, v3 adds support for Go Modules, cleans up rough edges like
|
||||
the timezone support, and fixes a number of bugs.
|
||||
|
||||
New features:
|
||||
|
||||
- Support for Go modules. Callers must now import this library as
|
||||
`github.com/robfig/cron/v3`, instead of `gopkg.in/...`
|
||||
|
||||
- Fixed bugs:
|
||||
- 0f01e6b parser: fix combining of Dow and Dom (#70)
|
||||
- dbf3220 adjust times when rolling the clock forward to handle non-existent midnight (#157)
|
||||
- eeecf15 spec_test.go: ensure an error is returned on 0 increment (#144)
|
||||
- 70971dc cron.Entries(): update request for snapshot to include a reply channel (#97)
|
||||
- 1cba5e6 cron: fix: removing a job causes the next scheduled job to run too late (#206)
|
||||
|
||||
- Standard cron spec parsing by default (first field is "minute"), with an easy
|
||||
way to opt into the seconds field (quartz-compatible). Although, note that the
|
||||
year field (optional in Quartz) is not supported.
|
||||
|
||||
- Extensible, key/value logging via an interface that complies with
|
||||
the https://github.com/go-logr/logr project.
|
||||
|
||||
- The new Chain & JobWrapper types allow you to install "interceptors" to add
|
||||
cross-cutting behavior like the following:
|
||||
- Recover any panics from jobs
|
||||
- Delay a job's execution if the previous run hasn't completed yet
|
||||
- Skip a job's execution if the previous run hasn't completed yet
|
||||
- Log each job's invocations
|
||||
- Notification when jobs are completed
|
||||
|
||||
It is backwards incompatible with both v1 and v2. These updates are required:
|
||||
|
||||
- The v1 branch accepted an optional seconds field at the beginning of the cron
|
||||
spec. This is non-standard and has led to a lot of confusion. The new default
|
||||
parser conforms to the standard as described by [the Cron wikipedia page].
|
||||
|
||||
UPDATING: To retain the old behavior, construct your Cron with a custom
|
||||
parser:
|
||||
|
||||
// Seconds field, required
|
||||
cron.New(cron.WithSeconds())
|
||||
|
||||
// Seconds field, optional
|
||||
cron.New(
|
||||
cron.WithParser(
|
||||
cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor))
|
||||
|
||||
- The Cron type now accepts functional options on construction rather than the
|
||||
previous ad-hoc behavior modification mechanisms (setting a field, calling a setter).
|
||||
|
||||
UPDATING: Code that sets Cron.ErrorLogger or calls Cron.SetLocation must be
|
||||
updated to provide those values on construction.
|
||||
|
||||
- CRON_TZ is now the recommended way to specify the timezone of a single
|
||||
schedule, which is sanctioned by the specification. The legacy "TZ=" prefix
|
||||
will continue to be supported since it is unambiguous and easy to do so.
|
||||
|
||||
UPDATING: No update is required.
|
||||
|
||||
- By default, cron will no longer recover panics in jobs that it runs.
|
||||
Recovering can be surprising (see issue #192) and seems to be at odds with
|
||||
typical behavior of libraries. Relatedly, the `cron.WithPanicLogger` option
|
||||
has been removed to accommodate the more general JobWrapper type.
|
||||
|
||||
UPDATING: To opt into panic recovery and configure the panic logger:
|
||||
|
||||
cron.New(cron.WithChain(
|
||||
cron.Recover(logger), // or use cron.DefaultLogger
|
||||
))
|
||||
|
||||
- In adding support for https://github.com/go-logr/logr, `cron.WithVerboseLogger` was
|
||||
removed, since it is duplicative with the leveled logging.
|
||||
|
||||
UPDATING: Callers should use `WithLogger` and specify a logger that does not
|
||||
discard `Info` logs. For convenience, one is provided that wraps `*log.Logger`:
|
||||
|
||||
cron.New(
|
||||
cron.WithLogger(cron.VerbosePrintfLogger(logger)))
|
||||
|
||||
|
||||
### Background - Cron spec format
|
||||
|
||||
There are two cron spec formats in common usage:
|
||||
|
||||
- The "standard" cron format, described on [the Cron wikipedia page] and used by
|
||||
the cron Linux system utility.
|
||||
|
||||
- The cron format used by [the Quartz Scheduler], commonly used for scheduled
|
||||
jobs in Java software
|
||||
|
||||
[the Cron wikipedia page]: https://en.wikipedia.org/wiki/Cron
|
||||
[the Quartz Scheduler]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/tutorial-lesson-06.html
|
||||
|
||||
The original version of this package included an optional "seconds" field, which
|
||||
made it incompatible with both of these formats. Now, the "standard" format is
|
||||
the default format accepted, and the Quartz format is opt-in.
|
||||
92
vendor/github.com/robfig/cron/v3/chain.go
generated
vendored
Normal file
92
vendor/github.com/robfig/cron/v3/chain.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// JobWrapper decorates the given Job with some behavior.
type JobWrapper func(Job) Job

// Chain is a sequence of JobWrappers that decorates submitted jobs with
// cross-cutting behaviors like logging or synchronization.
type Chain struct {
	wrappers []JobWrapper // applied in reverse order by Then, so the first wrapper is outermost
}
||||
|
||||
// NewChain returns a Chain consisting of the given JobWrappers.
|
||||
func NewChain(c ...JobWrapper) Chain {
|
||||
return Chain{c}
|
||||
}
|
||||
|
||||
// Then decorates the given job with all JobWrappers in the chain.
|
||||
//
|
||||
// This:
|
||||
// NewChain(m1, m2, m3).Then(job)
|
||||
// is equivalent to:
|
||||
// m1(m2(m3(job)))
|
||||
func (c Chain) Then(j Job) Job {
|
||||
for i := range c.wrappers {
|
||||
j = c.wrappers[len(c.wrappers)-i-1](j)
|
||||
}
|
||||
return j
|
||||
}
|
||||
|
||||
// Recover panics in wrapped jobs and logs them with the provided logger.
func Recover(logger Logger) JobWrapper {
	return func(j Job) Job {
		return FuncJob(func() {
			// recover only works inside a deferred function in the same
			// goroutine as the panic, hence this wrapper shape.
			defer func() {
				if r := recover(); r != nil {
					// Capture a stack trace (up to 64 KiB) for the log.
					const size = 64 << 10
					buf := make([]byte, size)
					buf = buf[:runtime.Stack(buf, false)]
					// Coerce the panic value to an error for the logger.
					err, ok := r.(error)
					if !ok {
						err = fmt.Errorf("%v", r)
					}
					logger.Error(err, "panic", "stack", "...\n"+string(buf))
				}
			}()
			j.Run()
		})
	}
}
|
||||
|
||||
// DelayIfStillRunning serializes jobs, delaying subsequent runs until the
// previous one is complete. Jobs running after a delay of more than a minute
// have the delay logged at Info.
func DelayIfStillRunning(logger Logger) JobWrapper {
	return func(j Job) Job {
		// One mutex per wrapped job serializes its invocations.
		var mu sync.Mutex
		return FuncJob(func() {
			// start is captured before Lock so the measured duration is
			// the time spent waiting for the previous run to finish.
			start := time.Now()
			mu.Lock()
			defer mu.Unlock()
			if dur := time.Since(start); dur > time.Minute {
				logger.Info("delay", "duration", dur)
			}
			j.Run()
		})
	}
}
|
||||
|
||||
// SkipIfStillRunning skips an invocation of the Job if a previous invocation is
// still running. It logs skips to the given logger at Info level.
func SkipIfStillRunning(logger Logger) JobWrapper {
	return func(j Job) Job {
		// A one-slot channel holds a token; an invocation may only run
		// while it possesses the token.
		var ch = make(chan struct{}, 1)
		ch <- struct{}{}
		return FuncJob(func() {
			select {
			case v := <-ch:
				j.Run()
				ch <- v
			default:
				// Token unavailable: a previous invocation is running.
				logger.Info("skip")
			}
		})
	}
}
|
||||
27
vendor/github.com/robfig/cron/v3/constantdelay.go
generated
vendored
Normal file
27
vendor/github.com/robfig/cron/v3/constantdelay.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
package cron
|
||||
|
||||
import "time"
|
||||
|
||||
// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes".
// It does not support jobs more frequent than once a second.
type ConstantDelaySchedule struct {
	Delay time.Duration // interval between activations; Every produces whole-second values
}
||||
|
||||
// Every returns a crontab Schedule that activates once every duration.
|
||||
// Delays of less than a second are not supported (will round up to 1 second).
|
||||
// Any fields less than a Second are truncated.
|
||||
func Every(duration time.Duration) ConstantDelaySchedule {
|
||||
if duration < time.Second {
|
||||
duration = time.Second
|
||||
}
|
||||
return ConstantDelaySchedule{
|
||||
Delay: duration - time.Duration(duration.Nanoseconds())%time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
// Next returns the next time this should be run.
// This rounds so that the next activation time will be on the second.
func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time {
	// Subtracting t's sub-second nanoseconds aligns the result to a whole
	// second before the delay is added.
	return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond)
}
355
vendor/github.com/robfig/cron/v3/cron.go
generated
vendored
Normal file
355
vendor/github.com/robfig/cron/v3/cron.go
generated
vendored
Normal file
@@ -0,0 +1,355 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Cron keeps track of any number of entries, invoking the associated func as
// specified by the schedule. It may be started, stopped, and the entries may
// be inspected while running.
type Cron struct {
	entries   []*Entry          // scheduled entries
	chain     Chain             // JobWrappers applied to submitted jobs
	stop      chan struct{}     // signals the scheduler loop to stop
	add       chan *Entry       // delivers new entries to a running loop
	remove    chan EntryID      // delivers removal requests to a running loop
	snapshot  chan chan []Entry // snapshot requests; the reply is sent on the inner channel
	running   bool              // whether the scheduler loop is active
	logger    Logger            // destination for logs
	runningMu sync.Mutex        // NOTE(review): appears to guard running; run loop not visible here — confirm
	location  *time.Location    // time zone in which schedules are interpreted
	parser    ScheduleParser    // converts spec strings into Schedules
	nextID    EntryID           // source of entry IDs (0 is reserved as invalid; see Entry.Valid)
	jobWaiter sync.WaitGroup    // tracks in-flight jobs
}
|
||||
|
||||
// ScheduleParser is an interface for schedule spec parsers that return a Schedule
type ScheduleParser interface {
	// Parse converts a spec string into a Schedule, or reports why it cannot.
	Parse(spec string) (Schedule, error)
}

// Job is an interface for submitted cron jobs.
type Job interface {
	Run()
}

// Schedule describes a job's duty cycle.
type Schedule interface {
	// Next returns the next activation time, later than the given time.
	// Next is invoked initially, and then each time the job is run.
	Next(time.Time) time.Time
}

// EntryID identifies an entry within a Cron instance.
// The zero value is never a valid ID; see Entry.Valid.
type EntryID int
|
||||
|
||||
// Entry consists of a schedule and the func to execute on that schedule.
type Entry struct {
	// ID is the cron-assigned ID of this entry, which may be used to look up a
	// snapshot or remove it.
	ID EntryID

	// Schedule on which this job should be run.
	Schedule Schedule

	// Next time the job will run, or the zero time if Cron has not been
	// started or this entry's schedule is unsatisfiable
	Next time.Time

	// Prev is the last time this job was run, or the zero time if never.
	Prev time.Time

	// WrappedJob is the thing to run when the Schedule is activated.
	WrappedJob Job

	// Job is the thing that was submitted to cron.
	// It is kept around so that user code that needs to get at the job later,
	// e.g. via Entries() can do so.
	Job Job
}
|
||||
|
||||
// Valid returns true if this is not the zero entry.
func (e Entry) Valid() bool { return e.ID != 0 }

// byTime is a wrapper for sorting the entry array by time
// (with zero time at the end).
type byTime []*Entry

func (s byTime) Len() int      { return len(s) }
func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byTime) Less(i, j int) bool {
	// Two zero times should return false.
	// Otherwise, zero is "greater" than any other time.
	// (To sort it at the end of the list.)
	if s[i].Next.IsZero() {
		return false
	}
	if s[j].Next.IsZero() {
		return true
	}
	return s[i].Next.Before(s[j].Next)
}
|
||||
|
||||
// New returns a new Cron job runner, modified by the given options.
//
// Available Settings
//
//	Time Zone
//	  Description: The time zone in which schedules are interpreted
//	  Default:     time.Local
//
//	Parser
//	  Description: Parser converts cron spec strings into cron.Schedules.
//	  Default:     Accepts this spec: https://en.wikipedia.org/wiki/Cron
//
//	Chain
//	  Description: Wrap submitted jobs to customize behavior.
//	  Default:     A chain that recovers panics and logs them to stderr.
//
// See "cron.With*" to modify the default behavior.
func New(opts ...Option) *Cron {
	// All channels are unbuffered: the run loop is the single consumer and
	// senders rendezvous with it directly.
	c := &Cron{
		entries:   nil,
		chain:     NewChain(),
		add:       make(chan *Entry),
		stop:      make(chan struct{}),
		snapshot:  make(chan chan []Entry),
		remove:    make(chan EntryID),
		running:   false,
		runningMu: sync.Mutex{},
		logger:    DefaultLogger,
		location:  time.Local,
		parser:    standardParser,
	}
	// Options are applied in order, so later options override earlier ones.
	for _, opt := range opts {
		opt(c)
	}
	return c
}
|
||||
|
||||
// FuncJob adapts an ordinary func() so that it satisfies the Job interface.
type FuncJob func()

// Run invokes the wrapped function.
func (fn FuncJob) Run() { fn() }
|
||||
|
||||
// AddFunc adds a func to the Cron to be run on the given schedule.
// The spec is parsed using the time zone of this Cron instance as the default.
// An opaque ID is returned that can be used to later remove it.
func (c *Cron) AddFunc(spec string, cmd func()) (EntryID, error) {
	// Delegates to AddJob after adapting the func with FuncJob.
	return c.AddJob(spec, FuncJob(cmd))
}
|
||||
|
||||
// AddJob adds a Job to the Cron to be run on the given schedule.
// The spec is parsed using the time zone of this Cron instance as the default.
// An opaque ID is returned that can be used to later remove it.
func (c *Cron) AddJob(spec string, cmd Job) (EntryID, error) {
	schedule, err := c.parser.Parse(spec)
	if err != nil {
		return 0, err
	}
	return c.Schedule(schedule, cmd), nil
}
|
||||
|
||||
// Schedule adds a Job to the Cron to be run on the given schedule.
// The job is wrapped with the configured Chain.
func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID {
	c.runningMu.Lock()
	defer c.runningMu.Unlock()
	// IDs are allocated under the lock, so they are unique even with
	// concurrent callers.
	c.nextID++
	entry := &Entry{
		ID:         c.nextID,
		Schedule:   schedule,
		WrappedJob: c.chain.Then(cmd),
		Job:        cmd,
	}
	if !c.running {
		// Scheduler not running: safe to mutate the slice directly.
		c.entries = append(c.entries, entry)
	} else {
		// Scheduler running: hand the entry to the run loop, which owns
		// the entries slice while active.
		c.add <- entry
	}
	return entry.ID
}
|
||||
|
||||
// Entries returns a snapshot of the cron entries.
func (c *Cron) Entries() []Entry {
	c.runningMu.Lock()
	defer c.runningMu.Unlock()
	if c.running {
		// Ask the run loop for a consistent snapshot. The reply channel is
		// buffered so the loop can answer without blocking on this caller.
		replyChan := make(chan []Entry, 1)
		c.snapshot <- replyChan
		return <-replyChan
	}
	// Not running: the entries slice is only mutated under runningMu, so it
	// is safe to copy it directly.
	return c.entrySnapshot()
}
|
||||
|
||||
// Location gets the time zone location in which this Cron interprets
// schedules (set at construction via WithLocation; defaults to time.Local).
func (c *Cron) Location() *time.Location {
	return c.location
}
|
||||
|
||||
// Entry returns a snapshot of the given entry, or nil if it couldn't be found.
|
||||
func (c *Cron) Entry(id EntryID) Entry {
|
||||
for _, entry := range c.Entries() {
|
||||
if id == entry.ID {
|
||||
return entry
|
||||
}
|
||||
}
|
||||
return Entry{}
|
||||
}
|
||||
|
||||
// Remove an entry from being run in the future.
func (c *Cron) Remove(id EntryID) {
	c.runningMu.Lock()
	defer c.runningMu.Unlock()
	if c.running {
		// Delegate to the run loop, which owns the entries while running.
		c.remove <- id
	} else {
		c.removeEntry(id)
	}
}
|
||||
|
||||
// Start the cron scheduler in its own goroutine, or no-op if already started.
func (c *Cron) Start() {
	c.runningMu.Lock()
	defer c.runningMu.Unlock()
	if c.running {
		return
	}
	// Set the flag before launching so concurrent Start/Run calls see it.
	c.running = true
	go c.run()
}
|
||||
|
||||
// Run the cron scheduler, or no-op if already running.
// Unlike Start, this blocks the calling goroutine in the scheduler loop.
func (c *Cron) Run() {
	c.runningMu.Lock()
	if c.running {
		c.runningMu.Unlock()
		return
	}
	c.running = true
	// Unlock before entering the (blocking) run loop so that other methods
	// such as Stop and Entries remain usable while the scheduler runs.
	c.runningMu.Unlock()
	c.run()
}
|
||||
|
||||
// run the scheduler.. this is private just due to the need to synchronize
// access to the 'running' state variable.
func (c *Cron) run() {
	c.logger.Info("start")

	// Figure out the next activation times for each entry.
	now := c.now()
	for _, entry := range c.entries {
		entry.Next = entry.Schedule.Next(now)
		c.logger.Info("schedule", "now", now, "entry", entry.ID, "next", entry.Next)
	}

	for {
		// Determine the next entry to run: after sorting, entries[0] has the
		// soonest Next (zero times sort to the end).
		sort.Sort(byTime(c.entries))

		var timer *time.Timer
		if len(c.entries) == 0 || c.entries[0].Next.IsZero() {
			// If there are no entries yet, just sleep - it still handles new entries
			// and stop requests.
			timer = time.NewTimer(100000 * time.Hour)
		} else {
			timer = time.NewTimer(c.entries[0].Next.Sub(now))
		}

		// Inner loop: every case except snapshot falls through to the break
		// below, re-sorting and re-arming the timer. Snapshot replies
		// `continue` since they change nothing.
		for {
			select {
			case now = <-timer.C:
				now = now.In(c.location)
				c.logger.Info("wake", "now", now)

				// Run every entry whose next time was less than now.
				// The slice is sorted, so the first future/zero entry ends
				// the scan.
				for _, e := range c.entries {
					if e.Next.After(now) || e.Next.IsZero() {
						break
					}
					c.startJob(e.WrappedJob)
					e.Prev = e.Next
					e.Next = e.Schedule.Next(now)
					c.logger.Info("run", "now", now, "entry", e.ID, "next", e.Next)
				}

			case newEntry := <-c.add:
				timer.Stop()
				now = c.now()
				newEntry.Next = newEntry.Schedule.Next(now)
				c.entries = append(c.entries, newEntry)
				c.logger.Info("added", "now", now, "entry", newEntry.ID, "next", newEntry.Next)

			case replyChan := <-c.snapshot:
				// replyChan is buffered (see Entries), so this cannot block.
				replyChan <- c.entrySnapshot()
				continue

			case <-c.stop:
				timer.Stop()
				c.logger.Info("stop")
				return

			case id := <-c.remove:
				timer.Stop()
				now = c.now()
				c.removeEntry(id)
				c.logger.Info("removed", "entry", id)
			}

			break
		}
	}
}
|
||||
|
||||
// startJob runs the given job in a new goroutine.
// jobWaiter tracks in-flight jobs so Stop can wait for them to finish.
func (c *Cron) startJob(j Job) {
	c.jobWaiter.Add(1)
	go func() {
		defer c.jobWaiter.Done()
		j.Run()
	}()
}
|
||||
|
||||
// now returns current time in c location.
func (c *Cron) now() time.Time {
	return time.Now().In(c.location)
}
|
||||
|
||||
// Stop stops the cron scheduler if it is running; otherwise it does nothing.
// A context is returned so the caller can wait for running jobs to complete.
func (c *Cron) Stop() context.Context {
	c.runningMu.Lock()
	defer c.runningMu.Unlock()
	if c.running {
		c.stop <- struct{}{}
		c.running = false
	}
	// The returned context is cancelled only once every in-flight job has
	// returned; already-running jobs are not interrupted.
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		c.jobWaiter.Wait()
		cancel()
	}()
	return ctx
}
|
||||
|
||||
// entrySnapshot returns a copy of the current cron entry list.
|
||||
func (c *Cron) entrySnapshot() []Entry {
|
||||
var entries = make([]Entry, len(c.entries))
|
||||
for i, e := range c.entries {
|
||||
entries[i] = *e
|
||||
}
|
||||
return entries
|
||||
}
|
||||
|
||||
func (c *Cron) removeEntry(id EntryID) {
|
||||
var entries []*Entry
|
||||
for _, e := range c.entries {
|
||||
if e.ID != id {
|
||||
entries = append(entries, e)
|
||||
}
|
||||
}
|
||||
c.entries = entries
|
||||
}
|
||||
231
vendor/github.com/robfig/cron/v3/doc.go
generated
vendored
Normal file
231
vendor/github.com/robfig/cron/v3/doc.go
generated
vendored
Normal file
@@ -0,0 +1,231 @@
|
||||
/*
|
||||
Package cron implements a cron spec parser and job runner.
|
||||
|
||||
Installation
|
||||
|
||||
To download the specific tagged release, run:
|
||||
|
||||
go get github.com/robfig/cron/v3@v3.0.0
|
||||
|
||||
Import it in your program as:
|
||||
|
||||
import "github.com/robfig/cron/v3"
|
||||
|
||||
It requires Go 1.11 or later due to usage of Go Modules.
|
||||
|
||||
Usage
|
||||
|
||||
Callers may register Funcs to be invoked on a given schedule. Cron will run
|
||||
them in their own goroutines.
|
||||
|
||||
c := cron.New()
|
||||
c.AddFunc("30 * * * *", func() { fmt.Println("Every hour on the half hour") })
|
||||
c.AddFunc("30 3-6,20-23 * * *", func() { fmt.Println(".. in the range 3-6am, 8-11pm") })
|
||||
c.AddFunc("CRON_TZ=Asia/Tokyo 30 04 * * *", func() { fmt.Println("Runs at 04:30 Tokyo time every day") })
|
||||
c.AddFunc("@hourly", func() { fmt.Println("Every hour, starting an hour from now") })
|
||||
c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty, starting an hour thirty from now") })
|
||||
c.Start()
|
||||
..
|
||||
// Funcs are invoked in their own goroutine, asynchronously.
|
||||
...
|
||||
// Funcs may also be added to a running Cron
|
||||
c.AddFunc("@daily", func() { fmt.Println("Every day") })
|
||||
..
|
||||
// Inspect the cron job entries' next and previous run times.
|
||||
inspect(c.Entries())
|
||||
..
|
||||
c.Stop() // Stop the scheduler (does not stop any jobs already running).
|
||||
|
||||
CRON Expression Format
|
||||
|
||||
A cron expression represents a set of times, using 5 space-separated fields.
|
||||
|
||||
Field name | Mandatory? | Allowed values | Allowed special characters
|
||||
---------- | ---------- | -------------- | --------------------------
|
||||
Minutes | Yes | 0-59 | * / , -
|
||||
Hours | Yes | 0-23 | * / , -
|
||||
Day of month | Yes | 1-31 | * / , - ?
|
||||
Month | Yes | 1-12 or JAN-DEC | * / , -
|
||||
Day of week | Yes | 0-6 or SUN-SAT | * / , - ?
|
||||
|
||||
Month and Day-of-week field values are case insensitive. "SUN", "Sun", and
|
||||
"sun" are equally accepted.
|
||||
|
||||
The specific interpretation of the format is based on the Cron Wikipedia page:
|
||||
https://en.wikipedia.org/wiki/Cron
|
||||
|
||||
Alternative Formats
|
||||
|
||||
Alternative Cron expression formats support other fields like seconds. You can
|
||||
implement that by creating a custom Parser as follows.
|
||||
|
||||
cron.New(
|
||||
cron.WithParser(
|
||||
cron.NewParser(
|
||||
cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)))
|
||||
|
||||
Since adding Seconds is the most common modification to the standard cron spec,
|
||||
cron provides a builtin function to do that, which is equivalent to the custom
|
||||
parser you saw earlier, except that its seconds field is REQUIRED:
|
||||
|
||||
cron.New(cron.WithSeconds())
|
||||
|
||||
That emulates Quartz, the most popular alternative Cron schedule format:
|
||||
http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html
|
||||
|
||||
Special Characters
|
||||
|
||||
Asterisk ( * )
|
||||
|
||||
The asterisk indicates that the cron expression will match for all values of the
|
||||
field; e.g., using an asterisk in the 5th field (month) would indicate every
|
||||
month.
|
||||
|
||||
Slash ( / )
|
||||
|
||||
Slashes are used to describe increments of ranges. For example 3-59/15 in the
|
||||
1st field (minutes) would indicate the 3rd minute of the hour and every 15
|
||||
minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...",
|
||||
that is, an increment over the largest possible range of the field. The form
|
||||
"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the
|
||||
increment until the end of that specific range. It does not wrap around.
|
||||
|
||||
Comma ( , )
|
||||
|
||||
Commas are used to separate items of a list. For example, using "MON,WED,FRI" in
|
||||
the 5th field (day of week) would mean Mondays, Wednesdays and Fridays.
|
||||
|
||||
Hyphen ( - )
|
||||
|
||||
Hyphens are used to define ranges. For example, 9-17 would indicate every
|
||||
hour between 9am and 5pm inclusive.
|
||||
|
||||
Question mark ( ? )
|
||||
|
||||
Question mark may be used instead of '*' for leaving either day-of-month or
|
||||
day-of-week blank.
|
||||
|
||||
Predefined schedules
|
||||
|
||||
You may use one of several pre-defined schedules in place of a cron expression.
|
||||
|
||||
Entry | Description | Equivalent To
|
||||
----- | ----------- | -------------
|
||||
@yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 *
|
||||
@monthly | Run once a month, midnight, first of month | 0 0 1 * *
|
||||
@weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0
|
||||
@daily (or @midnight) | Run once a day, midnight | 0 0 * * *
|
||||
@hourly | Run once an hour, beginning of hour | 0 * * * *
|
||||
|
||||
Intervals
|
||||
|
||||
You may also schedule a job to execute at fixed intervals, starting at the time it's added
|
||||
or cron is run. This is supported by formatting the cron spec like this:
|
||||
|
||||
@every <duration>
|
||||
|
||||
where "duration" is a string accepted by time.ParseDuration
|
||||
(http://golang.org/pkg/time/#ParseDuration).
|
||||
|
||||
For example, "@every 1h30m10s" would indicate a schedule that activates after
|
||||
1 hour, 30 minutes, 10 seconds, and then every interval after that.
|
||||
|
||||
Note: The interval does not take the job runtime into account. For example,
|
||||
if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
|
||||
it will have only 2 minutes of idle time between each run.
|
||||
|
||||
Time zones
|
||||
|
||||
By default, all interpretation and scheduling is done in the machine's local
|
||||
time zone (time.Local). You can specify a different time zone on construction:
|
||||
|
||||
cron.New(
|
||||
cron.WithLocation(time.UTC))
|
||||
|
||||
Individual cron schedules may also override the time zone they are to be
|
||||
interpreted in by providing an additional space-separated field at the beginning
|
||||
of the cron spec, of the form "CRON_TZ=Asia/Tokyo".
|
||||
|
||||
For example:
|
||||
|
||||
# Runs at 6am in time.Local
|
||||
cron.New().AddFunc("0 6 * * ?", ...)
|
||||
|
||||
# Runs at 6am in America/New_York
|
||||
nyc, _ := time.LoadLocation("America/New_York")
|
||||
c := cron.New(cron.WithLocation(nyc))
|
||||
c.AddFunc("0 6 * * ?", ...)
|
||||
|
||||
# Runs at 6am in Asia/Tokyo
|
||||
cron.New().AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...)
|
||||
|
||||
	# Runs at 6am in Asia/Tokyo: the CRON_TZ prefix in the spec overrides
	# the instance-wide location given to New
	c := cron.New(cron.WithLocation(nyc))
	c.AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...)
|
||||
|
||||
The prefix "TZ=(TIME ZONE)" is also supported for legacy compatibility.
|
||||
|
||||
Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
|
||||
not be run!
|
||||
|
||||
Job Wrappers
|
||||
|
||||
A Cron runner may be configured with a chain of job wrappers to add
|
||||
cross-cutting functionality to all submitted jobs. For example, they may be used
|
||||
to achieve the following effects:
|
||||
|
||||
- Recover any panics from jobs (activated by default)
|
||||
- Delay a job's execution if the previous run hasn't completed yet
|
||||
- Skip a job's execution if the previous run hasn't completed yet
|
||||
- Log each job's invocations
|
||||
|
||||
Install wrappers for all jobs added to a cron using the `cron.WithChain` option:
|
||||
|
||||
cron.New(cron.WithChain(
|
||||
cron.SkipIfStillRunning(logger),
|
||||
))
|
||||
|
||||
Install wrappers for individual jobs by explicitly wrapping them:
|
||||
|
||||
job = cron.NewChain(
|
||||
cron.SkipIfStillRunning(logger),
|
||||
).Then(job)
|
||||
|
||||
Thread safety
|
||||
|
||||
Since the Cron service runs concurrently with the calling code, some amount of
|
||||
care must be taken to ensure proper synchronization.
|
||||
|
||||
All cron methods are designed to be correctly synchronized as long as the caller
|
||||
ensures that invocations have a clear happens-before ordering between them.
|
||||
|
||||
Logging
|
||||
|
||||
Cron defines a Logger interface that is a subset of the one defined in
|
||||
github.com/go-logr/logr. It has two logging levels (Info and Error), and
|
||||
parameters are key/value pairs. This makes it possible for cron logging to plug
|
||||
into structured logging systems. An adapter, [Verbose]PrintfLogger, is provided
|
||||
to wrap the standard library *log.Logger.
|
||||
|
||||
For additional insight into Cron operations, verbose logging may be activated
|
||||
which will record job runs, scheduling decisions, and added or removed jobs.
|
||||
Activate it with a one-off logger as follows:
|
||||
|
||||
cron.New(
|
||||
cron.WithLogger(
|
||||
cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags))))
|
||||
|
||||
|
||||
Implementation
|
||||
|
||||
Cron entries are stored in an array, sorted by their next activation time. Cron
|
||||
sleeps until the next job is due to be run.
|
||||
|
||||
Upon waking:
|
||||
- it runs each entry that is active on that second
|
||||
- it calculates the next run times for the jobs that were run
|
||||
- it re-sorts the array of entries by next activation time.
|
||||
- it goes to sleep until the soonest job.
|
||||
*/
|
||||
package cron
|
||||
86
vendor/github.com/robfig/cron/v3/logger.go
generated
vendored
Normal file
86
vendor/github.com/robfig/cron/v3/logger.go
generated
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DefaultLogger is used by Cron if none is specified.
// It prints errors only (Info is suppressed); see VerbosePrintfLogger for more.
var DefaultLogger Logger = PrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags))

// DiscardLogger can be used by callers to discard all log messages.
// NOTE(review): io.Discard (Go 1.16+) could replace ioutil.Discard once the
// module's minimum Go version allows — confirm before changing.
var DiscardLogger Logger = PrintfLogger(log.New(ioutil.Discard, "", 0))
|
||||
|
||||
// Logger is the interface used in this package for logging, so that any backend
// can be plugged in. It is a subset of the github.com/go-logr/logr interface.
// keysAndValues are alternating key/value pairs, logfmt-style.
type Logger interface {
	// Info logs routine messages about cron's operation.
	Info(msg string, keysAndValues ...interface{})
	// Error logs an error condition.
	Error(err error, msg string, keysAndValues ...interface{})
}
|
||||
|
||||
// PrintfLogger wraps a Printf-based logger (such as the standard library "log")
|
||||
// into an implementation of the Logger interface which logs errors only.
|
||||
func PrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger {
|
||||
return printfLogger{l, false}
|
||||
}
|
||||
|
||||
// VerbosePrintfLogger wraps a Printf-based logger (such as the standard library
|
||||
// "log") into an implementation of the Logger interface which logs everything.
|
||||
func VerbosePrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger {
|
||||
return printfLogger{l, true}
|
||||
}
|
||||
|
||||
// printfLogger adapts any Printf-style logger to the Logger interface.
type printfLogger struct {
	logger  interface{ Printf(string, ...interface{}) }
	logInfo bool // when false, Info messages are suppressed entirely
}
|
||||
|
||||
// Info logs routine messages, but only when verbose logging was requested
// (logInfo true); otherwise it is a no-op.
func (pl printfLogger) Info(msg string, keysAndValues ...interface{}) {
	if pl.logInfo {
		// Render time.Time values as RFC3339 before formatting.
		keysAndValues = formatTimes(keysAndValues)
		pl.logger.Printf(
			formatString(len(keysAndValues)),
			append([]interface{}{msg}, keysAndValues...)...)
	}
}
|
||||
|
||||
// Error always logs. The error is injected as an "error" key/value pair
// ahead of the caller's pairs — hence the +2 when sizing the format string.
func (pl printfLogger) Error(err error, msg string, keysAndValues ...interface{}) {
	keysAndValues = formatTimes(keysAndValues)
	pl.logger.Printf(
		formatString(len(keysAndValues)+2),
		append([]interface{}{msg, "error", err}, keysAndValues...)...)
}
|
||||
|
||||
// formatString returns a logfmt-like format string for the number of
// key/values: "%s" for the message, then one "%v=%v" per pair, all
// comma-separated.
func formatString(numKeysAndValues int) string {
	format := "%s"
	if numKeysAndValues > 0 {
		format += ", "
	}
	pairs := make([]string, numKeysAndValues/2)
	for i := range pairs {
		pairs[i] = "%v=%v"
	}
	return format + strings.Join(pairs, ", ")
}
|
||||
|
||||
// formatTimes formats any time.Time values as RFC3339.
|
||||
func formatTimes(keysAndValues []interface{}) []interface{} {
|
||||
var formattedArgs []interface{}
|
||||
for _, arg := range keysAndValues {
|
||||
if t, ok := arg.(time.Time); ok {
|
||||
arg = t.Format(time.RFC3339)
|
||||
}
|
||||
formattedArgs = append(formattedArgs, arg)
|
||||
}
|
||||
return formattedArgs
|
||||
}
|
||||
45
vendor/github.com/robfig/cron/v3/option.go
generated
vendored
Normal file
45
vendor/github.com/robfig/cron/v3/option.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// Option represents a modification to the default behavior of a Cron.
// Options are applied by New in the order given.
type Option func(*Cron)
|
||||
|
||||
// WithLocation overrides the timezone of the cron instance.
|
||||
func WithLocation(loc *time.Location) Option {
|
||||
return func(c *Cron) {
|
||||
c.location = loc
|
||||
}
|
||||
}
|
||||
|
||||
// WithSeconds overrides the parser used for interpreting job schedules to
|
||||
// include a seconds field as the first one.
|
||||
func WithSeconds() Option {
|
||||
return WithParser(NewParser(
|
||||
Second | Minute | Hour | Dom | Month | Dow | Descriptor,
|
||||
))
|
||||
}
|
||||
|
||||
// WithParser overrides the parser used for interpreting job schedules.
|
||||
func WithParser(p ScheduleParser) Option {
|
||||
return func(c *Cron) {
|
||||
c.parser = p
|
||||
}
|
||||
}
|
||||
|
||||
// WithChain specifies Job wrappers to apply to all jobs added to this cron.
|
||||
// Refer to the Chain* functions in this package for provided wrappers.
|
||||
func WithChain(wrappers ...JobWrapper) Option {
|
||||
return func(c *Cron) {
|
||||
c.chain = NewChain(wrappers...)
|
||||
}
|
||||
}
|
||||
|
||||
// WithLogger uses the provided logger.
|
||||
func WithLogger(logger Logger) Option {
|
||||
return func(c *Cron) {
|
||||
c.logger = logger
|
||||
}
|
||||
}
|
||||
434
vendor/github.com/robfig/cron/v3/parser.go
generated
vendored
Normal file
434
vendor/github.com/robfig/cron/v3/parser.go
generated
vendored
Normal file
@@ -0,0 +1,434 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Configuration options for creating a parser. Most options specify which
// fields should be included, while others enable features. If a field is not
// included the parser will assume a default value. These options do not change
// the order fields are parsed in.
type ParseOption int
|
||||
|
||||
// Bit flags combinable with | and passed to NewParser.
const (
	Second         ParseOption = 1 << iota // Seconds field, default 0
	SecondOptional                         // Optional seconds field, default 0
	Minute                                 // Minutes field, default 0
	Hour                                   // Hours field, default 0
	Dom                                    // Day of month field, default *
	Month                                  // Month field, default *
	Dow                                    // Day of week field, default *
	DowOptional                            // Optional day of week field, default *
	Descriptor                             // Allow descriptors such as @monthly, @weekly, etc.
)
|
||||
|
||||
// places lists the spec fields in the order they appear in a full spec.
var places = []ParseOption{
	Second,
	Minute,
	Hour,
	Dom,
	Month,
	Dow,
}

// defaults holds, index-aligned with places, the value assumed for each
// field when it is not part of the configured options.
var defaults = []string{
	"0",
	"0",
	"0",
	"*",
	"*",
	"*",
}
|
||||
|
||||
// A custom Parser that can be configured.
type Parser struct {
	options ParseOption // bitmask of ParseOption flags fixed at construction
}
|
||||
|
||||
// NewParser creates a Parser with custom options.
//
// It panics if more than one Optional is given, since it would be impossible to
// correctly infer which optional is provided or missing in general.
//
// Examples
//
//	// Standard parser without descriptors
//	specParser := NewParser(Minute | Hour | Dom | Month | Dow)
//	sched, err := specParser.Parse("0 0 15 */3 *")
//
//	// Same as above, just excludes time fields
//	subsParser := NewParser(Dom | Month | Dow)
//	sched, err := specParser.Parse("15 */3 *")
//
//	// Same as above, just makes Dow optional
//	subsParser := NewParser(Dom | Month | DowOptional)
//	sched, err := specParser.Parse("15 */3")
func NewParser(options ParseOption) Parser {
	// Count the optional-field flags; more than one makes field positions
	// ambiguous, which is rejected up front.
	optionals := 0
	if options&DowOptional > 0 {
		optionals++
	}
	if options&SecondOptional > 0 {
		optionals++
	}
	if optionals > 1 {
		panic("multiple optionals may not be configured")
	}
	return Parser{options}
}
|
||||
|
||||
// Parse returns a new crontab schedule representing the given spec.
// It returns a descriptive error if the spec is not valid.
// It accepts crontab specs and features configured by NewParser.
func (p Parser) Parse(spec string) (Schedule, error) {
	if len(spec) == 0 {
		return nil, fmt.Errorf("empty spec string")
	}

	// Extract timezone if present: a leading "TZ=..." or "CRON_TZ=..."
	// token (up to the first space) overrides the default of time.Local.
	var loc = time.Local
	if strings.HasPrefix(spec, "TZ=") || strings.HasPrefix(spec, "CRON_TZ=") {
		var err error
		i := strings.Index(spec, " ")
		eq := strings.Index(spec, "=")
		if loc, err = time.LoadLocation(spec[eq+1 : i]); err != nil {
			return nil, fmt.Errorf("provided bad location %s: %v", spec[eq+1:i], err)
		}
		spec = strings.TrimSpace(spec[i:])
	}

	// Handle named schedules (descriptors, e.g. "@daily"), if configured
	if strings.HasPrefix(spec, "@") {
		if p.options&Descriptor == 0 {
			return nil, fmt.Errorf("parser does not accept descriptors: %v", spec)
		}
		return parseDescriptor(spec, loc)
	}

	// Split on whitespace.
	fields := strings.Fields(spec)

	// Validate & fill in any omitted or optional fields
	var err error
	fields, err = normalizeFields(fields, p.options)
	if err != nil {
		return nil, err
	}

	// field parses one spec field into a bitset. It captures err so the
	// first failure short-circuits the remaining parses; only that first
	// error is reported below.
	field := func(field string, r bounds) uint64 {
		if err != nil {
			return 0
		}
		var bits uint64
		bits, err = getField(field, r)
		return bits
	}

	var (
		second     = field(fields[0], seconds)
		minute     = field(fields[1], minutes)
		hour       = field(fields[2], hours)
		dayofmonth = field(fields[3], dom)
		month      = field(fields[4], months)
		dayofweek  = field(fields[5], dow)
	)
	if err != nil {
		return nil, err
	}

	return &SpecSchedule{
		Second:   second,
		Minute:   minute,
		Hour:     hour,
		Dom:      dayofmonth,
		Month:    month,
		Dow:      dayofweek,
		Location: loc,
	}, nil
}
|
||||
|
||||
// normalizeFields takes a subset set of the time fields and returns the full set
// with defaults (zeroes) populated for unset fields.
//
// As part of performing this function, it also validates that the provided
// fields are compatible with the configured options.
func normalizeFields(fields []string, options ParseOption) ([]string, error) {
	// Validate optionals & add their field to options
	optionals := 0
	if options&SecondOptional > 0 {
		options |= Second
		optionals++
	}
	if options&DowOptional > 0 {
		options |= Dow
		optionals++
	}
	if optionals > 1 {
		return nil, fmt.Errorf("multiple optionals may not be configured")
	}

	// Figure out how many fields we need
	max := 0
	for _, place := range places {
		if options&place > 0 {
			max++
		}
	}
	min := max - optionals

	// Validate number of fields
	if count := len(fields); count < min || count > max {
		if min == max {
			return nil, fmt.Errorf("expected exactly %d fields, found %d: %s", min, count, fields)
		}
		return nil, fmt.Errorf("expected %d to %d fields, found %d: %s", min, max, count, fields)
	}

	// Populate the optional field if not provided: an optional Dow goes at
	// the end, an optional Second at the front.
	if min < max && len(fields) == min {
		switch {
		case options&DowOptional > 0:
			fields = append(fields, defaults[5]) // TODO: improve access to default
		case options&SecondOptional > 0:
			fields = append([]string{defaults[0]}, fields...)
		default:
			return nil, fmt.Errorf("unknown optional field")
		}
	}

	// Populate all fields not part of options with their defaults.
	// n walks through the caller's fields as each configured place is hit.
	n := 0
	expandedFields := make([]string, len(places))
	copy(expandedFields, defaults)
	for i, place := range places {
		if options&place > 0 {
			expandedFields[i] = fields[n]
			n++
		}
	}
	return expandedFields, nil
}
|
||||
|
||||
// standardParser is the 5-field parser used by ParseStandard and by Cron
// instances built without WithParser/WithSeconds.
var standardParser = NewParser(
	Minute | Hour | Dom | Month | Dow | Descriptor,
)

// ParseStandard returns a new crontab schedule representing the given
// standardSpec (https://en.wikipedia.org/wiki/Cron). It requires 5 entries
// representing: minute, hour, day of month, month and day of week, in that
// order. It returns a descriptive error if the spec is not valid.
//
// It accepts
//   - Standard crontab specs, e.g. "* * * * ?"
//   - Descriptors, e.g. "@midnight", "@every 1h30m"
func ParseStandard(standardSpec string) (Schedule, error) {
	return standardParser.Parse(standardSpec)
}
|
||||
|
||||
// getField returns an Int with the bits set representing all of the times that
|
||||
// the field represents or error parsing field value. A "field" is a comma-separated
|
||||
// list of "ranges".
|
||||
func getField(field string, r bounds) (uint64, error) {
|
||||
var bits uint64
|
||||
ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' })
|
||||
for _, expr := range ranges {
|
||||
bit, err := getRange(expr, r)
|
||||
if err != nil {
|
||||
return bits, err
|
||||
}
|
||||
bits |= bit
|
||||
}
|
||||
return bits, nil
|
||||
}
|
||||
|
||||
// getRange returns the bits indicated by the given expression:
//
//	number | number "-" number [ "/" number ]
//
// or error parsing range.
func getRange(expr string, r bounds) (uint64, error) {
	var (
		start, end, step uint
		rangeAndStep     = strings.Split(expr, "/")
		lowAndHigh       = strings.Split(rangeAndStep[0], "-")
		singleDigit      = len(lowAndHigh) == 1
		err              error
	)

	// extra carries the starBit marker when the range is "*" or "?", letting
	// dom/dow matching distinguish "any value" from an explicit full range.
	var extra uint64
	if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" {
		start = r.min
		end = r.max
		extra = starBit
	} else {
		start, err = parseIntOrName(lowAndHigh[0], r.names)
		if err != nil {
			return 0, err
		}
		switch len(lowAndHigh) {
		case 1:
			end = start
		case 2:
			end, err = parseIntOrName(lowAndHigh[1], r.names)
			if err != nil {
				return 0, err
			}
		default:
			return 0, fmt.Errorf("too many hyphens: %s", expr)
		}
	}

	switch len(rangeAndStep) {
	case 1:
		step = 1
	case 2:
		step, err = mustParseInt(rangeAndStep[1])
		if err != nil {
			return 0, err
		}

		// Special handling: "N/step" means "N-max/step".
		if singleDigit {
			end = r.max
		}
		// A step greater than 1 no longer matches every value, so the star
		// marker is dropped.
		if step > 1 {
			extra = 0
		}
	default:
		return 0, fmt.Errorf("too many slashes: %s", expr)
	}

	// Validate the resolved range against the field's bounds.
	if start < r.min {
		return 0, fmt.Errorf("beginning of range (%d) below minimum (%d): %s", start, r.min, expr)
	}
	if end > r.max {
		return 0, fmt.Errorf("end of range (%d) above maximum (%d): %s", end, r.max, expr)
	}
	if start > end {
		return 0, fmt.Errorf("beginning of range (%d) beyond end of range (%d): %s", start, end, expr)
	}
	if step == 0 {
		return 0, fmt.Errorf("step of range should be a positive number: %s", expr)
	}

	return getBits(start, end, step) | extra, nil
}
|
||||
|
||||
// parseIntOrName returns the (possibly-named) integer contained in expr.
|
||||
func parseIntOrName(expr string, names map[string]uint) (uint, error) {
|
||||
if names != nil {
|
||||
if namedInt, ok := names[strings.ToLower(expr)]; ok {
|
||||
return namedInt, nil
|
||||
}
|
||||
}
|
||||
return mustParseInt(expr)
|
||||
}
|
||||
|
||||
// mustParseInt parses the given expression as an int or returns an error.
// Only non-negative values are accepted, since cron fields are never negative.
func mustParseInt(expr string) (uint, error) {
	parsed, err := strconv.Atoi(expr)
	switch {
	case err != nil:
		return 0, fmt.Errorf("failed to parse int from %s: %s", expr, err)
	case parsed < 0:
		return 0, fmt.Errorf("negative number (%d) not allowed: %s", parsed, expr)
	}
	return uint(parsed), nil
}
|
||||
|
||||
// getBits sets all bits in the range [min, max], modulo the given step size.
// Callers guarantee max < 64, so every set bit fits in the uint64 result.
func getBits(min, max, step uint) uint64 {
	// Dense range: build it from two masks instead of looping.
	if step == 1 {
		return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min)
	}

	// Sparse range: set every step-th bit individually.
	var set uint64
	for v := min; v <= max; v += step {
		set |= 1 << v
	}
	return set
}
|
||||
|
||||
// all returns all bits within the given bounds. (plus the star bit)
// The star bit marks the field as unrestricted, which matters for the
// day-of-month / day-of-week OR-vs-AND rule (see dayMatches).
func all(r bounds) uint64 {
	return getBits(r.min, r.max, 1) | starBit
}
|
||||
|
||||
// parseDescriptor returns a predefined schedule for the expression, or error if none matches.
// Recognized descriptors: @yearly/@annually, @monthly, @weekly, @daily/@midnight,
// @hourly, and "@every <duration>". The returned schedule is evaluated in loc.
func parseDescriptor(descriptor string, loc *time.Location) (Schedule, error) {
	switch descriptor {
	case "@yearly", "@annually":
		// Midnight on January 1st: every field pinned to its minimum,
		// day-of-week left unrestricted.
		return &SpecSchedule{
			Second:   1 << seconds.min,
			Minute:   1 << minutes.min,
			Hour:     1 << hours.min,
			Dom:      1 << dom.min,
			Month:    1 << months.min,
			Dow:      all(dow),
			Location: loc,
		}, nil

	case "@monthly":
		// Midnight on the 1st of every month.
		return &SpecSchedule{
			Second:   1 << seconds.min,
			Minute:   1 << minutes.min,
			Hour:     1 << hours.min,
			Dom:      1 << dom.min,
			Month:    all(months),
			Dow:      all(dow),
			Location: loc,
		}, nil

	case "@weekly":
		// Midnight on the first day of the week (dow.min, i.e. Sunday).
		return &SpecSchedule{
			Second:   1 << seconds.min,
			Minute:   1 << minutes.min,
			Hour:     1 << hours.min,
			Dom:      all(dom),
			Month:    all(months),
			Dow:      1 << dow.min,
			Location: loc,
		}, nil

	case "@daily", "@midnight":
		// Midnight every day.
		return &SpecSchedule{
			Second:   1 << seconds.min,
			Minute:   1 << minutes.min,
			Hour:     1 << hours.min,
			Dom:      all(dom),
			Month:    all(months),
			Dow:      all(dow),
			Location: loc,
		}, nil

	case "@hourly":
		// The top of every hour.
		return &SpecSchedule{
			Second:   1 << seconds.min,
			Minute:   1 << minutes.min,
			Hour:     all(hours),
			Dom:      all(dom),
			Month:    all(months),
			Dow:      all(dow),
			Location: loc,
		}, nil

	}

	// "@every <duration>" yields an interval-based schedule rather than a
	// field-based SpecSchedule.
	const every = "@every "
	if strings.HasPrefix(descriptor, every) {
		duration, err := time.ParseDuration(descriptor[len(every):])
		if err != nil {
			return nil, fmt.Errorf("failed to parse duration %s: %s", descriptor, err)
		}
		return Every(duration), nil
	}

	return nil, fmt.Errorf("unrecognized descriptor: %s", descriptor)
}
|
||||
188
vendor/github.com/robfig/cron/v3/spec.go
generated
vendored
Normal file
188
vendor/github.com/robfig/cron/v3/spec.go
generated
vendored
Normal file
@@ -0,0 +1,188 @@
|
||||
package cron
|
||||
|
||||
import "time"
|
||||
|
||||
// SpecSchedule specifies a duty cycle (to the second granularity), based on a
// traditional crontab specification. It is computed initially and stored as bit sets.
type SpecSchedule struct {
	// Each field is a bit set: bit N is set when value N is permitted for
	// that field (e.g. Minute bit 30 means "minute 30 matches"). The top
	// bit (starBit) records that the field was written as "*"/"?".
	Second, Minute, Hour, Dom, Month, Dow uint64

	// Override location for this schedule.
	Location *time.Location
}
|
||||
|
||||
// bounds provides a range of acceptable values (plus a map of name to value).
type bounds struct {
	min, max uint            // inclusive range of valid values for the field
	names    map[string]uint // optional lowercase aliases, e.g. "jan" -> 1; nil when none
}
|
||||
|
||||
// The bounds for each field.
var (
	seconds = bounds{0, 59, nil}
	minutes = bounds{0, 59, nil}
	hours   = bounds{0, 23, nil}
	// Day of month is 1-based.
	dom = bounds{1, 31, nil}
	// Months accept case-insensitive three-letter names (see parseIntOrName).
	months = bounds{1, 12, map[string]uint{
		"jan": 1,
		"feb": 2,
		"mar": 3,
		"apr": 4,
		"may": 5,
		"jun": 6,
		"jul": 7,
		"aug": 8,
		"sep": 9,
		"oct": 10,
		"nov": 11,
		"dec": 12,
	}}
	// Day of week is 0-based with Sunday = 0, matching time.Weekday.
	dow = bounds{0, 6, map[string]uint{
		"sun": 0,
		"mon": 1,
		"tue": 2,
		"wed": 3,
		"thu": 4,
		"fri": 5,
		"sat": 6,
	}}
)
|
||||
|
||||
const (
	// Set the top bit if a star was included in the expression.
	// Consumers (e.g. dayMatches) use it to tell "*" apart from an explicit
	// full range, which decides the Dom/Dow OR-vs-AND matching rule.
	starBit = 1 << 63
)
|
||||
|
||||
// Next returns the next time this schedule is activated, greater than the given
// time. If no time can be found to satisfy the schedule, return the zero time.
func (s *SpecSchedule) Next(t time.Time) time.Time {
	// General approach
	//
	// For Month, Day, Hour, Minute, Second:
	// Check if the time value matches. If yes, continue to the next field.
	// If the field doesn't match the schedule, then increment the field until it matches.
	// While incrementing the field, a wrap-around brings it back to the beginning
	// of the field list (since it is necessary to re-verify previous field
	// values)

	// Convert the given time into the schedule's timezone, if one is specified.
	// Save the original timezone so we can convert back after we find a time.
	// Note that schedules without a time zone specified (time.Local) are treated
	// as local to the time provided.
	origLocation := t.Location()
	loc := s.Location
	if loc == time.Local {
		loc = t.Location()
	}
	if s.Location != time.Local {
		t = t.In(s.Location)
	}

	// Start at the earliest possible time (the upcoming second).
	// Dropping the nanoseconds guarantees the result is strictly after t.
	t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)

	// This flag indicates whether a field has been incremented.
	// Once any field is bumped, all finer-grained fields are reset to their
	// minimum (the current values become irrelevant).
	added := false

	// If no time is found within five years, return zero.
	yearLimit := t.Year() + 5

WRAP:
	if t.Year() > yearLimit {
		return time.Time{}
	}

	// Find the first applicable month.
	// If it's this month, then do nothing.
	for 1<<uint(t.Month())&s.Month == 0 {
		// If we have to add a month, reset the other parts to 0.
		if !added {
			added = true
			// Otherwise, set the date at the beginning (since the current time is irrelevant).
			t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, loc)
		}
		t = t.AddDate(0, 1, 0)

		// Wrapped around.
		if t.Month() == time.January {
			goto WRAP
		}
	}

	// Now get a day in that month.
	//
	// NOTE: This causes issues for daylight savings regimes where midnight does
	// not exist. For example: Sao Paulo has DST that transforms midnight on
	// 11/3 into 1am. Handle that by noticing when the Hour ends up != 0.
	for !dayMatches(s, t) {
		if !added {
			added = true
			t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, loc)
		}
		t = t.AddDate(0, 0, 1)
		// Notice if the hour is no longer midnight due to DST.
		// Add an hour if it's 23, subtract an hour if it's 1.
		if t.Hour() != 0 {
			if t.Hour() > 12 {
				t = t.Add(time.Duration(24-t.Hour()) * time.Hour)
			} else {
				t = t.Add(time.Duration(-t.Hour()) * time.Hour)
			}
		}

		// Wrapped into the next month: re-verify from the month field.
		if t.Day() == 1 {
			goto WRAP
		}
	}

	for 1<<uint(t.Hour())&s.Hour == 0 {
		if !added {
			added = true
			t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, loc)
		}
		t = t.Add(1 * time.Hour)

		// Wrapped into the next day: re-verify from the month field.
		if t.Hour() == 0 {
			goto WRAP
		}
	}

	for 1<<uint(t.Minute())&s.Minute == 0 {
		if !added {
			added = true
			t = t.Truncate(time.Minute)
		}
		t = t.Add(1 * time.Minute)

		if t.Minute() == 0 {
			goto WRAP
		}
	}

	for 1<<uint(t.Second())&s.Second == 0 {
		if !added {
			added = true
			t = t.Truncate(time.Second)
		}
		t = t.Add(1 * time.Second)

		if t.Second() == 0 {
			goto WRAP
		}
	}

	// Report the result in the caller's original timezone.
	return t.In(origLocation)
}
|
||||
|
||||
// dayMatches returns true if the schedule's day-of-week and day-of-month
|
||||
// restrictions are satisfied by the given time.
|
||||
func dayMatches(s *SpecSchedule, t time.Time) bool {
|
||||
var (
|
||||
domMatch bool = 1<<uint(t.Day())&s.Dom > 0
|
||||
dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
|
||||
)
|
||||
if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
|
||||
return domMatch && dowMatch
|
||||
}
|
||||
return domMatch || dowMatch
|
||||
}
|
||||
21
vendor/github.com/stretchr/testify/LICENSE
generated
vendored
Normal file
21
vendor/github.com/stretchr/testify/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
458
vendor/github.com/stretchr/testify/assert/assertion_compare.go
generated
vendored
Normal file
458
vendor/github.com/stretchr/testify/assert/assertion_compare.go
generated
vendored
Normal file
@@ -0,0 +1,458 @@
|
||||
package assert
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
// CompareType represents the outcome of an ordered comparison between two values.
type CompareType int

const (
	// The numeric values deliberately mirror bytes.Compare semantics:
	// -1 for less, 0 for equal, 1 for greater (see the Slice case in compare).
	compareLess CompareType = iota - 1
	compareEqual
	compareGreater
)
|
||||
|
||||
// Cached reflect.Type values for the comparable kinds, so compare does not
// call reflect.TypeOf on every invocation.
var (
	intType   = reflect.TypeOf(int(1))
	int8Type  = reflect.TypeOf(int8(1))
	int16Type = reflect.TypeOf(int16(1))
	int32Type = reflect.TypeOf(int32(1))
	int64Type = reflect.TypeOf(int64(1))

	uintType   = reflect.TypeOf(uint(1))
	uint8Type  = reflect.TypeOf(uint8(1))
	uint16Type = reflect.TypeOf(uint16(1))
	uint32Type = reflect.TypeOf(uint32(1))
	uint64Type = reflect.TypeOf(uint64(1))

	float32Type = reflect.TypeOf(float32(1))
	float64Type = reflect.TypeOf(float64(1))

	stringType = reflect.TypeOf("")

	// timeType and bytesType gate the struct/slice fast paths in compare.
	timeType  = reflect.TypeOf(time.Time{})
	bytesType = reflect.TypeOf([]byte{})
)
|
||||
|
||||
func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
obj1Value := reflect.ValueOf(obj1)
|
||||
obj2Value := reflect.ValueOf(obj2)
|
||||
|
||||
// throughout this switch we try and avoid calling .Convert() if possible,
|
||||
// as this has a pretty big performance impact
|
||||
switch kind {
|
||||
case reflect.Int:
|
||||
{
|
||||
intobj1, ok := obj1.(int)
|
||||
if !ok {
|
||||
intobj1 = obj1Value.Convert(intType).Interface().(int)
|
||||
}
|
||||
intobj2, ok := obj2.(int)
|
||||
if !ok {
|
||||
intobj2 = obj2Value.Convert(intType).Interface().(int)
|
||||
}
|
||||
if intobj1 > intobj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if intobj1 == intobj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if intobj1 < intobj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Int8:
|
||||
{
|
||||
int8obj1, ok := obj1.(int8)
|
||||
if !ok {
|
||||
int8obj1 = obj1Value.Convert(int8Type).Interface().(int8)
|
||||
}
|
||||
int8obj2, ok := obj2.(int8)
|
||||
if !ok {
|
||||
int8obj2 = obj2Value.Convert(int8Type).Interface().(int8)
|
||||
}
|
||||
if int8obj1 > int8obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if int8obj1 == int8obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if int8obj1 < int8obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Int16:
|
||||
{
|
||||
int16obj1, ok := obj1.(int16)
|
||||
if !ok {
|
||||
int16obj1 = obj1Value.Convert(int16Type).Interface().(int16)
|
||||
}
|
||||
int16obj2, ok := obj2.(int16)
|
||||
if !ok {
|
||||
int16obj2 = obj2Value.Convert(int16Type).Interface().(int16)
|
||||
}
|
||||
if int16obj1 > int16obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if int16obj1 == int16obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if int16obj1 < int16obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Int32:
|
||||
{
|
||||
int32obj1, ok := obj1.(int32)
|
||||
if !ok {
|
||||
int32obj1 = obj1Value.Convert(int32Type).Interface().(int32)
|
||||
}
|
||||
int32obj2, ok := obj2.(int32)
|
||||
if !ok {
|
||||
int32obj2 = obj2Value.Convert(int32Type).Interface().(int32)
|
||||
}
|
||||
if int32obj1 > int32obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if int32obj1 == int32obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if int32obj1 < int32obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Int64:
|
||||
{
|
||||
int64obj1, ok := obj1.(int64)
|
||||
if !ok {
|
||||
int64obj1 = obj1Value.Convert(int64Type).Interface().(int64)
|
||||
}
|
||||
int64obj2, ok := obj2.(int64)
|
||||
if !ok {
|
||||
int64obj2 = obj2Value.Convert(int64Type).Interface().(int64)
|
||||
}
|
||||
if int64obj1 > int64obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if int64obj1 == int64obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if int64obj1 < int64obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Uint:
|
||||
{
|
||||
uintobj1, ok := obj1.(uint)
|
||||
if !ok {
|
||||
uintobj1 = obj1Value.Convert(uintType).Interface().(uint)
|
||||
}
|
||||
uintobj2, ok := obj2.(uint)
|
||||
if !ok {
|
||||
uintobj2 = obj2Value.Convert(uintType).Interface().(uint)
|
||||
}
|
||||
if uintobj1 > uintobj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if uintobj1 == uintobj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if uintobj1 < uintobj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Uint8:
|
||||
{
|
||||
uint8obj1, ok := obj1.(uint8)
|
||||
if !ok {
|
||||
uint8obj1 = obj1Value.Convert(uint8Type).Interface().(uint8)
|
||||
}
|
||||
uint8obj2, ok := obj2.(uint8)
|
||||
if !ok {
|
||||
uint8obj2 = obj2Value.Convert(uint8Type).Interface().(uint8)
|
||||
}
|
||||
if uint8obj1 > uint8obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if uint8obj1 == uint8obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if uint8obj1 < uint8obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Uint16:
|
||||
{
|
||||
uint16obj1, ok := obj1.(uint16)
|
||||
if !ok {
|
||||
uint16obj1 = obj1Value.Convert(uint16Type).Interface().(uint16)
|
||||
}
|
||||
uint16obj2, ok := obj2.(uint16)
|
||||
if !ok {
|
||||
uint16obj2 = obj2Value.Convert(uint16Type).Interface().(uint16)
|
||||
}
|
||||
if uint16obj1 > uint16obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if uint16obj1 == uint16obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if uint16obj1 < uint16obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Uint32:
|
||||
{
|
||||
uint32obj1, ok := obj1.(uint32)
|
||||
if !ok {
|
||||
uint32obj1 = obj1Value.Convert(uint32Type).Interface().(uint32)
|
||||
}
|
||||
uint32obj2, ok := obj2.(uint32)
|
||||
if !ok {
|
||||
uint32obj2 = obj2Value.Convert(uint32Type).Interface().(uint32)
|
||||
}
|
||||
if uint32obj1 > uint32obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if uint32obj1 == uint32obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if uint32obj1 < uint32obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Uint64:
|
||||
{
|
||||
uint64obj1, ok := obj1.(uint64)
|
||||
if !ok {
|
||||
uint64obj1 = obj1Value.Convert(uint64Type).Interface().(uint64)
|
||||
}
|
||||
uint64obj2, ok := obj2.(uint64)
|
||||
if !ok {
|
||||
uint64obj2 = obj2Value.Convert(uint64Type).Interface().(uint64)
|
||||
}
|
||||
if uint64obj1 > uint64obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if uint64obj1 == uint64obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if uint64obj1 < uint64obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Float32:
|
||||
{
|
||||
float32obj1, ok := obj1.(float32)
|
||||
if !ok {
|
||||
float32obj1 = obj1Value.Convert(float32Type).Interface().(float32)
|
||||
}
|
||||
float32obj2, ok := obj2.(float32)
|
||||
if !ok {
|
||||
float32obj2 = obj2Value.Convert(float32Type).Interface().(float32)
|
||||
}
|
||||
if float32obj1 > float32obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if float32obj1 == float32obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if float32obj1 < float32obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Float64:
|
||||
{
|
||||
float64obj1, ok := obj1.(float64)
|
||||
if !ok {
|
||||
float64obj1 = obj1Value.Convert(float64Type).Interface().(float64)
|
||||
}
|
||||
float64obj2, ok := obj2.(float64)
|
||||
if !ok {
|
||||
float64obj2 = obj2Value.Convert(float64Type).Interface().(float64)
|
||||
}
|
||||
if float64obj1 > float64obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if float64obj1 == float64obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if float64obj1 < float64obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.String:
|
||||
{
|
||||
stringobj1, ok := obj1.(string)
|
||||
if !ok {
|
||||
stringobj1 = obj1Value.Convert(stringType).Interface().(string)
|
||||
}
|
||||
stringobj2, ok := obj2.(string)
|
||||
if !ok {
|
||||
stringobj2 = obj2Value.Convert(stringType).Interface().(string)
|
||||
}
|
||||
if stringobj1 > stringobj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if stringobj1 == stringobj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if stringobj1 < stringobj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
// Check for known struct types we can check for compare results.
|
||||
case reflect.Struct:
|
||||
{
|
||||
// All structs enter here. We're not interested in most types.
|
||||
if !canConvert(obj1Value, timeType) {
|
||||
break
|
||||
}
|
||||
|
||||
// time.Time can compared!
|
||||
timeObj1, ok := obj1.(time.Time)
|
||||
if !ok {
|
||||
timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
|
||||
}
|
||||
|
||||
timeObj2, ok := obj2.(time.Time)
|
||||
if !ok {
|
||||
timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
|
||||
}
|
||||
|
||||
return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
|
||||
}
|
||||
case reflect.Slice:
|
||||
{
|
||||
// We only care about the []byte type.
|
||||
if !canConvert(obj1Value, bytesType) {
|
||||
break
|
||||
}
|
||||
|
||||
// []byte can be compared!
|
||||
bytesObj1, ok := obj1.([]byte)
|
||||
if !ok {
|
||||
bytesObj1 = obj1Value.Convert(bytesType).Interface().([]byte)
|
||||
|
||||
}
|
||||
bytesObj2, ok := obj2.([]byte)
|
||||
if !ok {
|
||||
bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte)
|
||||
}
|
||||
|
||||
return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
|
||||
}
|
||||
}
|
||||
|
||||
return compareEqual, false
|
||||
}
|
||||
|
||||
// Greater asserts that the first element is greater than the second.
// It reports whether the assertion succeeded.
//
//    assert.Greater(t, 2, 1)
//    assert.Greater(t, float64(2), float64(1))
//    assert.Greater(t, "b", "a")
func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
	if h, ok := t.(tHelper); ok {
		h.Helper()
	}
	return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
|
||||
|
||||
// GreaterOrEqual asserts that the first element is greater than or equal to the second.
// It reports whether the assertion succeeded.
//
//    assert.GreaterOrEqual(t, 2, 1)
//    assert.GreaterOrEqual(t, 2, 2)
//    assert.GreaterOrEqual(t, "b", "a")
//    assert.GreaterOrEqual(t, "b", "b")
func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
	if h, ok := t.(tHelper); ok {
		h.Helper()
	}
	return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
|
||||
|
||||
// Less asserts that the first element is less than the second.
// It reports whether the assertion succeeded.
//
//    assert.Less(t, 1, 2)
//    assert.Less(t, float64(1), float64(2))
//    assert.Less(t, "a", "b")
func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
	if h, ok := t.(tHelper); ok {
		h.Helper()
	}
	return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
|
||||
|
||||
// LessOrEqual asserts that the first element is less than or equal to the second.
// It reports whether the assertion succeeded.
//
//    assert.LessOrEqual(t, 1, 2)
//    assert.LessOrEqual(t, 2, 2)
//    assert.LessOrEqual(t, "a", "b")
//    assert.LessOrEqual(t, "b", "b")
func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
	if h, ok := t.(tHelper); ok {
		h.Helper()
	}
	return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}
|
||||
|
||||
// Positive asserts that the specified element is positive.
// It reports whether the assertion succeeded.
//
//    assert.Positive(t, 1)
//    assert.Positive(t, 1.23)
func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
	if h, ok := t.(tHelper); ok {
		h.Helper()
	}
	// Compare against the zero value of e's own type.
	// NOTE(review): reflect.TypeOf(e) is nil for an untyped nil argument,
	// which would make reflect.Zero panic — confirm callers never pass nil.
	zero := reflect.Zero(reflect.TypeOf(e))
	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
}
|
||||
|
||||
// Negative asserts that the specified element is negative.
// It reports whether the assertion succeeded.
//
//    assert.Negative(t, -1)
//    assert.Negative(t, -1.23)
func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
	if h, ok := t.(tHelper); ok {
		h.Helper()
	}
	// Compare against the zero value of e's own type.
	// NOTE(review): reflect.TypeOf(e) is nil for an untyped nil argument,
	// which would make reflect.Zero panic — confirm callers never pass nil.
	zero := reflect.Zero(reflect.TypeOf(e))
	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
}
|
||||
|
||||
// compareTwoValues is the shared implementation behind Greater, Less,
// Positive, Negative and friends: it orders e1 against e2 and fails the test
// unless the comparison outcome is one of allowedComparesResults.
// failMessage is a format string receiving e1 and e2.
func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
	if h, ok := t.(tHelper); ok {
		h.Helper()
	}

	// Comparing across kinds (e.g. int vs int64) is rejected up front.
	e1Kind := reflect.ValueOf(e1).Kind()
	e2Kind := reflect.ValueOf(e2).Kind()
	if e1Kind != e2Kind {
		return Fail(t, "Elements should be the same type", msgAndArgs...)
	}

	compareResult, isComparable := compare(e1, e2, e1Kind)
	if !isComparable {
		return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
	}

	if !containsValue(allowedComparesResults, compareResult) {
		return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...)
	}

	return true
}
|
||||
|
||||
func containsValue(values []CompareType, value CompareType) bool {
|
||||
for _, v := range values {
|
||||
if v == value {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
16
vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
generated
vendored
Normal file
16
vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
//go:build go1.17
|
||||
// +build go1.17
|
||||
|
||||
// TODO: once support for Go 1.16 is dropped, this file can be
|
||||
// merged/removed with assertion_compare_go1.17_test.go and
|
||||
// assertion_compare_legacy.go
|
||||
|
||||
package assert
|
||||
|
||||
import "reflect"
|
||||
|
||||
// Wrapper around reflect.Value.CanConvert, for compatibility
// reasons: the method only exists on Go 1.17+, so this file is gated by the
// go1.17 build tag and a stub (always false) is used on older toolchains.
func canConvert(value reflect.Value, to reflect.Type) bool {
	return value.CanConvert(to)
}
|
||||
16
vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
generated
vendored
Normal file
16
vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
//go:build !go1.17
|
||||
// +build !go1.17
|
||||
|
||||
// TODO: once support for Go 1.16 is dropped, this file can be
|
||||
// merged/removed with assertion_compare_go1.17_test.go and
|
||||
// assertion_compare_can_convert.go
|
||||
|
||||
package assert
|
||||
|
||||
import "reflect"
|
||||
|
||||
// Older versions of Go do not have the reflect.Value.CanConvert
// method. This stub always reports false, which disables the time.Time and
// []byte fast paths in compare on pre-1.17 toolchains.
func canConvert(value reflect.Value, to reflect.Type) bool {
	return false
}
|
||||
763
vendor/github.com/stretchr/testify/assert/assertion_format.go
generated
vendored
Normal file
763
vendor/github.com/stretchr/testify/assert/assertion_format.go
generated
vendored
Normal file
@@ -0,0 +1,763 @@
|
||||
/*
|
||||
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
|
||||
* THIS FILE MUST NOT BE EDITED BY HAND
|
||||
*/
|
||||
|
||||
package assert
|
||||
|
||||
import (
|
||||
http "net/http"
|
||||
url "net/url"
|
||||
time "time"
|
||||
)
|
||||
|
||||
// Conditionf uses a Comparison to assert a complex condition.
|
||||
func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Condition(t, comp, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Containsf asserts that the specified string, list(array, slice...) or map contains the
|
||||
// specified substring or element.
|
||||
//
|
||||
// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
|
||||
// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
|
||||
// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
|
||||
func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// DirExistsf checks whether a directory exists in the given path. It also fails
|
||||
// if the path is a file rather a directory or there is an error checking whether it exists.
|
||||
func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return DirExists(t, path, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
|
||||
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
|
||||
// the number of appearances of each of them in both lists should match.
|
||||
//
|
||||
// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
|
||||
func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
|
||||
// a slice or a channel with len == 0.
|
||||
//
|
||||
// assert.Emptyf(t, obj, "error message %s", "formatted")
|
||||
func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Empty(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Equalf asserts that two objects are equal.
|
||||
//
|
||||
// assert.Equalf(t, 123, 123, "error message %s", "formatted")
|
||||
//
|
||||
// Pointer variable equality is determined based on the equality of the
|
||||
// referenced values (as opposed to the memory addresses). Function equality
|
||||
// cannot be determined and will always fail.
|
||||
func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Equal(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
|
||||
// and that it is equal to the provided error.
|
||||
//
|
||||
// actualObj, err := SomeFunction()
|
||||
// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
|
||||
func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// EqualValuesf asserts that two objects are equal or convertable to the same types
|
||||
// and equal.
|
||||
//
|
||||
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
|
||||
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Errorf asserts that a function returned an error (i.e. not `nil`).
|
||||
//
|
||||
// actualObj, err := SomeFunction()
|
||||
// if assert.Errorf(t, err, "error message %s", "formatted") {
|
||||
// assert.Equal(t, expectedErrorf, err)
|
||||
// }
|
||||
func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Error(t, err, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
|
||||
// This is a wrapper for errors.As.
|
||||
func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
|
||||
// and that the error contains the specified substring.
|
||||
//
|
||||
// actualObj, err := SomeFunction()
|
||||
// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
|
||||
func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// ErrorIsf asserts that at least one of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return ErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Eventuallyf asserts that given condition will be met in waitFor time,
|
||||
// periodically checking target function each tick.
|
||||
//
|
||||
// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
|
||||
func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Exactlyf asserts that two objects are equal in value and type.
|
||||
//
|
||||
// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
|
||||
func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Failf reports a failure through
|
||||
func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Fail(t, failureMessage, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// FailNowf fails test
|
||||
func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Falsef asserts that the specified value is false.
|
||||
//
|
||||
// assert.Falsef(t, myBool, "error message %s", "formatted")
|
||||
func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return False(t, value, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// FileExistsf checks whether a file exists in the given path. It also fails if
|
||||
// the path points to a directory or there is an error when trying to check the file.
|
||||
func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return FileExists(t, path, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Greaterf asserts that the first element is greater than the second
|
||||
//
|
||||
// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
|
||||
// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
|
||||
// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
|
||||
func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Greater(t, e1, e2, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// GreaterOrEqualf asserts that the first element is greater than or equal to the second
|
||||
//
|
||||
// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
|
||||
// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
|
||||
// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
|
||||
// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
|
||||
func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return GreaterOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// HTTPBodyContainsf asserts that a specified handler returns a
|
||||
// body that contains a string.
|
||||
//
|
||||
// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// HTTPBodyNotContainsf asserts that a specified handler returns a
|
||||
// body that does not contain a string.
|
||||
//
|
||||
// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// HTTPErrorf asserts that a specified handler returns an error status code.
|
||||
//
|
||||
// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// HTTPRedirectf asserts that a specified handler returns a redirect status code.
|
||||
//
|
||||
// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// HTTPStatusCodef asserts that a specified handler returns a specified status code.
|
||||
//
|
||||
// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return HTTPStatusCode(t, handler, method, url, values, statuscode, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// HTTPSuccessf asserts that a specified handler returns a success status code.
|
||||
//
|
||||
// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Implementsf asserts that an object is implemented by the specified interface.
|
||||
//
|
||||
// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
|
||||
func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// InDeltaf asserts that the two numerals are within delta of each other.
|
||||
//
|
||||
// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
|
||||
func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
|
||||
func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// InDeltaSlicef is the same as InDelta, except it compares two slices.
|
||||
func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// InEpsilonf asserts that expected and actual have a relative error less than epsilon
|
||||
func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
|
||||
func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// IsDecreasingf asserts that the collection is decreasing
|
||||
//
|
||||
// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
|
||||
// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted")
|
||||
// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
|
||||
func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsDecreasing(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// IsIncreasingf asserts that the collection is increasing
|
||||
//
|
||||
// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
|
||||
// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted")
|
||||
// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
|
||||
func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsIncreasing(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// IsNonDecreasingf asserts that the collection is not decreasing
|
||||
//
|
||||
// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
|
||||
// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted")
|
||||
// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
|
||||
func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsNonDecreasing(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// IsNonIncreasingf asserts that the collection is not increasing
|
||||
//
|
||||
// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
|
||||
// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted")
|
||||
// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
|
||||
func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// IsTypef asserts that the specified objects are of the same type.
|
||||
func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// JSONEqf asserts that two JSON strings are equivalent.
|
||||
//
|
||||
// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
|
||||
func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Lenf asserts that the specified object has specific length.
|
||||
// Lenf also fails if the object has a type that len() not accept.
|
||||
//
|
||||
// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
|
||||
func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Len(t, object, length, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Lessf asserts that the first element is less than the second
|
||||
//
|
||||
// assert.Lessf(t, 1, 2, "error message %s", "formatted")
|
||||
// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
|
||||
// assert.Lessf(t, "a", "b", "error message %s", "formatted")
|
||||
func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Less(t, e1, e2, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// LessOrEqualf asserts that the first element is less than or equal to the second
|
||||
//
|
||||
// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
|
||||
// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
|
||||
// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
|
||||
// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
|
||||
func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Negativef asserts that the specified element is negative
|
||||
//
|
||||
// assert.Negativef(t, -1, "error message %s", "formatted")
|
||||
// assert.Negativef(t, -1.23, "error message %s", "formatted")
|
||||
func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Negative(t, e, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Neverf asserts that the given condition doesn't satisfy in waitFor time,
|
||||
// periodically checking the target function each tick.
|
||||
//
|
||||
// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
|
||||
func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Never(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Nilf asserts that the specified object is nil.
|
||||
//
|
||||
// assert.Nilf(t, err, "error message %s", "formatted")
|
||||
func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Nil(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NoDirExistsf checks whether a directory does not exist in the given path.
|
||||
// It fails if the path points to an existing _directory_ only.
|
||||
func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NoDirExists(t, path, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NoErrorf asserts that a function returned no error (i.e. `nil`).
|
||||
//
|
||||
// actualObj, err := SomeFunction()
|
||||
// if assert.NoErrorf(t, err, "error message %s", "formatted") {
|
||||
// assert.Equal(t, expectedObj, actualObj)
|
||||
// }
|
||||
func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NoError(t, err, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NoFileExistsf checks whether a file does not exist in a given path. It fails
|
||||
// if the path points to an existing _file_ only.
|
||||
func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NoFileExists(t, path, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
|
||||
// specified substring or element.
|
||||
//
|
||||
// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
|
||||
// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
|
||||
// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
|
||||
func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
|
||||
// a slice or a channel with len == 0.
|
||||
//
|
||||
// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
|
||||
// assert.Equal(t, "two", obj[1])
|
||||
// }
|
||||
func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotEmpty(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotEqualf asserts that the specified values are NOT equal.
|
||||
//
|
||||
// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
|
||||
//
|
||||
// Pointer variable equality is determined based on the equality of the
|
||||
// referenced values (as opposed to the memory addresses).
|
||||
func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
|
||||
//
|
||||
// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
|
||||
func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotErrorIsf asserts that at none of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotNilf asserts that the specified object is not nil.
|
||||
//
|
||||
// assert.NotNilf(t, err, "error message %s", "formatted")
|
||||
func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotNil(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
|
||||
//
|
||||
// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
|
||||
func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotPanics(t, f, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotRegexpf asserts that a specified regexp does not match a string.
|
||||
//
|
||||
// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
|
||||
// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
|
||||
func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotSamef asserts that two pointers do not reference the same object.
|
||||
//
|
||||
// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
|
||||
//
|
||||
// Both arguments must be pointer variables. Pointer variable sameness is
|
||||
// determined based on the equality of both type and value.
|
||||
func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotSubsetf asserts that the specified list(array, slice...) contains not all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
//
|
||||
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
|
||||
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotZerof asserts that i is not the zero value for its type.
|
||||
func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotZero(t, i, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Panicsf asserts that the code inside the specified PanicTestFunc panics.
|
||||
//
|
||||
// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
|
||||
func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Panics(t, f, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
|
||||
// panics, and that the recovered panic value is an error that satisfies the
|
||||
// EqualError comparison.
|
||||
//
|
||||
// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
|
||||
func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return PanicsWithError(t, errString, f, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
|
||||
// the recovered panic value equals the expected panic value.
|
||||
//
|
||||
// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
|
||||
func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Positivef asserts that the specified element is positive
|
||||
//
|
||||
// assert.Positivef(t, 1, "error message %s", "formatted")
|
||||
// assert.Positivef(t, 1.23, "error message %s", "formatted")
|
||||
func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Positive(t, e, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Regexpf asserts that a specified regexp matches a string.
|
||||
//
|
||||
// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
|
||||
// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
|
||||
func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Regexp(t, rx, str, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Samef asserts that two pointers reference the same object.
|
||||
//
|
||||
// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
|
||||
//
|
||||
// Both arguments must be pointer variables. Pointer variable sameness is
|
||||
// determined based on the equality of both type and value.
|
||||
func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Subsetf asserts that the specified list(array, slice...) contains all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
//
|
||||
// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
|
||||
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Subset(t, list, subset, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Truef asserts that the specified value is true.
|
||||
//
|
||||
// assert.Truef(t, myBool, "error message %s", "formatted")
|
||||
func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return True(t, value, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// WithinDurationf asserts that the two times are within duration delta of each other.
|
||||
//
|
||||
// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
|
||||
func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// WithinRangef asserts that a time is within a time range (inclusive).
|
||||
//
|
||||
// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
|
||||
func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return WithinRange(t, actual, start, end, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// YAMLEqf asserts that two YAML strings are equivalent.
|
||||
func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Zerof asserts that i is the zero value for its type.
|
||||
func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Zero(t, i, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
5
vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
generated
vendored
Normal file
5
vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
{{.CommentFormat}}
|
||||
func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool {
|
||||
if h, ok := t.(tHelper); ok { h.Helper() }
|
||||
return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}})
|
||||
}
|
||||
1514
vendor/github.com/stretchr/testify/assert/assertion_forward.go
generated
vendored
Normal file
1514
vendor/github.com/stretchr/testify/assert/assertion_forward.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
5
vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
generated
vendored
Normal file
5
vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
{{.CommentWithoutT "a"}}
|
||||
func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool {
|
||||
if h, ok := a.t.(tHelper); ok { h.Helper() }
|
||||
return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
|
||||
}
|
||||
81
vendor/github.com/stretchr/testify/assert/assertion_order.go
generated
vendored
Normal file
81
vendor/github.com/stretchr/testify/assert/assertion_order.go
generated
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
package assert
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// isOrdered checks that collection contains orderable elements.
|
||||
func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
|
||||
objKind := reflect.TypeOf(object).Kind()
|
||||
if objKind != reflect.Slice && objKind != reflect.Array {
|
||||
return false
|
||||
}
|
||||
|
||||
objValue := reflect.ValueOf(object)
|
||||
objLen := objValue.Len()
|
||||
|
||||
if objLen <= 1 {
|
||||
return true
|
||||
}
|
||||
|
||||
value := objValue.Index(0)
|
||||
valueInterface := value.Interface()
|
||||
firstValueKind := value.Kind()
|
||||
|
||||
for i := 1; i < objLen; i++ {
|
||||
prevValue := value
|
||||
prevValueInterface := valueInterface
|
||||
|
||||
value = objValue.Index(i)
|
||||
valueInterface = value.Interface()
|
||||
|
||||
compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)
|
||||
|
||||
if !isComparable {
|
||||
return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...)
|
||||
}
|
||||
|
||||
if !containsValue(allowedComparesResults, compareResult) {
|
||||
return Fail(t, fmt.Sprintf(failMessage, prevValue, value), msgAndArgs...)
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// IsIncreasing asserts that the collection is increasing
|
||||
//
|
||||
// assert.IsIncreasing(t, []int{1, 2, 3})
|
||||
// assert.IsIncreasing(t, []float{1, 2})
|
||||
// assert.IsIncreasing(t, []string{"a", "b"})
|
||||
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
|
||||
return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
|
||||
}
|
||||
|
||||
// IsNonIncreasing asserts that the collection is not increasing
|
||||
//
|
||||
// assert.IsNonIncreasing(t, []int{2, 1, 1})
|
||||
// assert.IsNonIncreasing(t, []float{2, 1})
|
||||
// assert.IsNonIncreasing(t, []string{"b", "a"})
|
||||
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
|
||||
return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
|
||||
}
|
||||
|
||||
// IsDecreasing asserts that the collection is decreasing
|
||||
//
|
||||
// assert.IsDecreasing(t, []int{2, 1, 0})
|
||||
// assert.IsDecreasing(t, []float{2, 1})
|
||||
// assert.IsDecreasing(t, []string{"b", "a"})
|
||||
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
|
||||
return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
|
||||
}
|
||||
|
||||
// IsNonDecreasing asserts that the collection is not decreasing
|
||||
//
|
||||
// assert.IsNonDecreasing(t, []int{1, 1, 2})
|
||||
// assert.IsNonDecreasing(t, []float{1, 2})
|
||||
// assert.IsNonDecreasing(t, []string{"a", "b"})
|
||||
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
|
||||
return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
|
||||
}
|
||||
1868
vendor/github.com/stretchr/testify/assert/assertions.go
generated
vendored
Normal file
1868
vendor/github.com/stretchr/testify/assert/assertions.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user