Compare commits

...

28 Commits

Author SHA1 Message Date
Andrey Melnikov
1170b53f61 Merge pull request #964 from Vafilor/feat/remove.links
fix: remove slack link
2021-11-15 12:24:52 -08:00
Andrey Melnikov
0213e1ead6 fix: remove slack link 2021-11-15 12:24:15 -08:00
Andrey Melnikov
9c1430273c Merge pull request #963 from Vafilor/feat/remove.links
feat: remove enterprise support link
2021-11-10 10:02:01 -08:00
Andrey Melnikov
90c45fb6db feat: remove enterprise support link 2021-11-10 09:54:23 -08:00
Andrey Melnikov
c984ff34d5 Merge pull request #961 from Vafilor/feat/remove.community.restrictions
fix: remove namespace creation restriction
2021-10-28 16:12:59 -07:00
Andrey Melnikov
5283b7beb6 fix: add missing manifests and incorrect cvat_1.6.0 version name in metadata 2021-10-28 16:08:31 -07:00
Andrey Melnikov
2cddf4a88a fix: remove namespace creation restriction 2021-10-28 15:50:39 -07:00
Andrey Melnikov
dd3d7f6632 Merge pull request #960 from Vafilor/feat/new.cvat
feat: cvat 1.6.0 workspace migration
2021-10-28 14:47:57 -07:00
Andrey Melnikov
4d1aff5c5b fix: method comments 2021-10-28 14:45:55 -07:00
Andrey Melnikov
719613ecd4 feat: cvat 1.6.0 workspace migration 2021-10-28 14:44:29 -07:00
Andrey Melnikov
09b854a434 Merge pull request #959 from Vafilor/feat/create.namespace.stub
feat: stub out create namespace
2021-10-27 10:09:19 -07:00
Andrey Melnikov
493ca51682 fix: docs for CreateNamespace 2021-10-27 09:31:07 -07:00
Andrey Melnikov
6407c2a7b4 feat: add create namespace code 2021-10-27 09:24:28 -07:00
Andrey Melnikov
62896b2f52 fix: Deny create namespace permission in community edition 2021-10-26 15:47:13 -07:00
Andrey Melnikov
d934163fc8 fix: code formatting and docs 2021-10-26 15:37:21 -07:00
Andrey Melnikov
e991102d85 Merge pull request #958 from Vafilor/feat/cache.values
feat: cache artifactRepositoryType
2021-10-26 15:32:59 -07:00
Andrey Melnikov
467f7f71dd fix: add missing comment 2021-10-26 15:29:12 -07:00
Andrey Melnikov
f1c0f0d31e feat: stub out create namespace and add supporting methods from enterprise version 2021-10-26 15:26:23 -07:00
Andrey Melnikov
11fc055ee3 feat: cache artifactRepositoryType 2021-10-26 15:23:47 -07:00
Andrey Melnikov
d9c79370e9 Merge pull request #957 from Vafilor/feat/update.migrations
feat: updated go migration data to have metadata
2021-10-26 15:19:19 -07:00
Andrey Melnikov
98f78d453a feat: updated go migration data to have metadata to make it easier to get all of the information from one file 2021-10-26 15:14:43 -07:00
Andrey Melnikov
700b3bd512 Merge pull request #955 from Vafilor/feat/add.serving.variables
feat: add onepanel serving url to workspaces
2021-10-07 12:29:05 -07:00
Andrey Melnikov
3abdc54d3c feat: add onepanel serving url to workspaces 2021-10-07 12:24:01 -07:00
Rush Tehrani
f570a710ba Update README.md 2021-09-17 09:40:54 -07:00
Rush Tehrani
c922b708fc Merge pull request #953 from Vafilor/fix/workflow.volumes
fix: workflow volumes don't delete on failed workflow
2021-09-08 12:57:56 -07:00
Andrey Melnikov
fc9669d757 fix: add PodCompletion volume claim gc by default to workflows. This will clean up the volumes if the workflow fails 2021-09-08 12:47:23 -07:00
rushtehrani
8eeb90d3f1 update features image in README 2021-08-19 12:22:57 -07:00
rushtehrani
c25dfce84f Update features image 2021-08-18 11:09:48 -07:00
102 changed files with 10224 additions and 6122 deletions

View File

@@ -12,6 +12,7 @@ FROM golang:1.15.5
COPY --from=builder /go/bin/core . COPY --from=builder /go/bin/core .
COPY --from=builder /go/src/db ./db COPY --from=builder /go/src/db ./db
COPY --from=builder /go/bin/goose . COPY --from=builder /go/bin/goose .
COPY --from=builder /go/src/manifest ./manifest
EXPOSE 8888 EXPOSE 8888
EXPOSE 8887 EXPOSE 8887

View File

@@ -6,7 +6,6 @@
[![sdk](https://img.shields.io/pypi/v/onepanel-sdk?color=01579b&label=sdk)](https://pypi.org/project/onepanel-sdk/) [![sdk](https://img.shields.io/pypi/v/onepanel-sdk?color=01579b&label=sdk)](https://pypi.org/project/onepanel-sdk/)
[![docs](https://img.shields.io/github/v/release/onepanelio/core?color=01579b&label=docs)](https://docs.onepanel.io) [![docs](https://img.shields.io/github/v/release/onepanelio/core?color=01579b&label=docs)](https://docs.onepanel.io)
[![issues](https://img.shields.io/github/issues-raw/onepanelio/core?color=01579b&label=issues)](https://github.com/onepanelio/core/issues) [![issues](https://img.shields.io/github/issues-raw/onepanelio/core?color=01579b&label=issues)](https://github.com/onepanelio/core/issues)
[![chat](https://img.shields.io/badge/support-slack-01579b)](https://join.slack.com/t/onepanel-ce/shared_invite/zt-eyjnwec0-nLaHhjif9Y~gA05KuX6AUg)
[![lfai](https://img.shields.io/badge/link-LFAI-01579b)](https://landscape.lfai.foundation/?selected=onepanel) [![lfai](https://img.shields.io/badge/link-LFAI-01579b)](https://landscape.lfai.foundation/?selected=onepanel)
[![license](https://img.shields.io/github/license/onepanelio/core?color=01579b)](https://opensource.org/licenses/Apache-2.0) [![license](https://img.shields.io/github/license/onepanelio/core?color=01579b)](https://opensource.org/licenses/Apache-2.0)
@@ -21,14 +20,9 @@ https://user-images.githubusercontent.com/1211823/116489376-afc60000-a849-11eb-8
## Quick start ## Quick start
See [quick start guide](https://docs.onepanel.ai/docs/getting-started/quickstart) to get started. See [quick start guide](https://docs.onepanel.ai/docs/getting-started/quickstart) to get started.
## Online demo
For a quick look at some features see this shared, read-only [online demo](https://onepanel.typeform.com/to/kQfDX5Vf?product=github).
## Community ## Community
To submit a feature request, report a bug or documentation issue, please open a GitHub [pull request](https://github.com/onepanelio/core/pulls) or [issue](https://github.com/onepanelio/core/issues). To submit a feature request, report a bug or documentation issue, please open a GitHub [pull request](https://github.com/onepanelio/core/pulls) or [issue](https://github.com/onepanelio/core/issues).
For help, questions, release announcements and contribution discussions, join us on [Slack](https://join.slack.com/t/onepanel-ce/shared_invite/zt-eyjnwec0-nLaHhjif9Y~gA05KuX6AUg).
## Contributing ## Contributing
Onepanel is modular and consists of [multiple repositories](https://docs.onepanel.ai/docs/getting-started/contributing/#project-repositories). Onepanel is modular and consists of [multiple repositories](https://docs.onepanel.ai/docs/getting-started/contributing/#project-repositories).
@@ -43,6 +37,3 @@ We are grateful for the support these communities provide and do our best to con
## License ## License
Onepanel is licensed under [Apache 2.0](https://github.com/onepanelio/core/blob/master/LICENSE). Onepanel is licensed under [Apache 2.0](https://github.com/onepanelio/core/blob/master/LICENSE).
## Enterprise support
Need enterprise features and support? Visit our [website](https://www.onepanel.ai/) for more information.

View File

@@ -3,7 +3,7 @@
"info": { "info": {
"title": "Onepanel", "title": "Onepanel",
"description": "Onepanel API", "description": "Onepanel API",
"version": "1.0.0", "version": "1.0.2",
"contact": { "contact": {
"name": "Onepanel project", "name": "Onepanel project",
"url": "https://github.com/onepanelio/core" "url": "https://github.com/onepanelio/core"
@@ -4184,6 +4184,9 @@
"properties": { "properties": {
"name": { "name": {
"type": "string" "type": "string"
},
"sourceName": {
"type": "string"
} }
} }
}, },

View File

@@ -220,7 +220,8 @@ type Namespace struct {
sizeCache protoimpl.SizeCache sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields unknownFields protoimpl.UnknownFields
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
SourceName string `protobuf:"bytes,2,opt,name=sourceName,proto3" json:"sourceName,omitempty"`
} }
func (x *Namespace) Reset() { func (x *Namespace) Reset() {
@@ -262,6 +263,13 @@ func (x *Namespace) GetName() string {
return "" return ""
} }
func (x *Namespace) GetSourceName() string {
if x != nil {
return x.SourceName
}
return ""
}
var File_namespace_proto protoreflect.FileDescriptor var File_namespace_proto protoreflect.FileDescriptor
var file_namespace_proto_rawDesc = []byte{ var file_namespace_proto_rawDesc = []byte{
@@ -289,9 +297,11 @@ var file_namespace_proto_rawDesc = []byte{
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65,
0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x61, 0x70, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x61, 0x70,
0x69, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x69, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6e, 0x61, 0x6d,
0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x1f, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x3f, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70,
0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xec, 0x01, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75,
0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x32, 0xec, 0x01, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65,
0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x6b, 0x0a, 0x0e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x6b, 0x0a, 0x0e,
0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x1a,
0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,

View File

@@ -40,4 +40,5 @@ message CreateNamespaceRequest {
message Namespace { message Namespace {
string name = 1; string name = 1;
string sourceName = 2;
} }

View File

@@ -0,0 +1,28 @@
package migration
import (
"database/sql"
"github.com/pressly/goose"
"path/filepath"
)
// initialize20211028205201 registers the cvat 1.6.0 workspace template
// migration with goose, guarding against double registration via the
// package-level initializedMigrations set.
func initialize20211028205201() {
	if _, alreadyRegistered := initializedMigrations[20211028205201]; alreadyRegistered {
		return
	}
	goose.AddMigration(Up20211028205201, Down20211028205201)
	initializedMigrations[20211028205201] = true
}
// Up20211028205201 creates the new cvat 1.6.0 workspace template.
// It is executed when the migration is applied. The tx parameter is
// required by goose's migration function signature but is unused here
// because createWorkspaceTemplate manages its own database access.
func Up20211028205201(tx *sql.Tx) error {
	manifestPath := filepath.Join("workspaces", "cvat_1_6_0", "20211028205201.yaml")
	templateName := "CVAT_1.6.0"
	description := "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
	return createWorkspaceTemplate(manifestPath, templateName, description)
}
// Down20211028205201 rolls back the migration by archiving the cvat 1.6.0
// workspace template. The tx parameter is required by goose's migration
// function signature but is unused here.
func Down20211028205201(tx *sql.Tx) error {
	const templateName = "CVAT_1.6.0"
	return archiveWorkspaceTemplate(templateName)
}

View File

@@ -96,6 +96,7 @@ func Initialize() {
initialize20210329194731() initialize20210329194731()
initialize20210414165510() initialize20210414165510()
initialize20210719190719() initialize20210719190719()
initialize20211028205201()
if err := client.DB.Close(); err != nil { if err := client.DB.Close(); err != nil {
log.Printf("[error] closing db %v", err) log.Printf("[error] closing db %v", err)

View File

@@ -3,7 +3,9 @@ package migration
import ( import (
"fmt" "fmt"
v1 "github.com/onepanelio/core/pkg" v1 "github.com/onepanelio/core/pkg"
"github.com/onepanelio/core/pkg/util/data"
uid2 "github.com/onepanelio/core/pkg/util/uid" uid2 "github.com/onepanelio/core/pkg/util/uid"
"path/filepath"
) )
// createWorkspaceTemplate will create the workspace template given by {{templateName}} with the contents // createWorkspaceTemplate will create the workspace template given by {{templateName}} with the contents
@@ -21,7 +23,13 @@ func createWorkspaceTemplate(filename, templateName, description string) error {
return err return err
} }
newManifest, err := readDataFile(filename) filename = filepath.Join("db", "yaml", filename)
manifestFile, err := data.ManifestFileFromFile(filename)
if err != nil {
return err
}
newManifest, err := manifestFile.SpecString()
if err != nil { if err != nil {
return err return err
} }
@@ -97,12 +105,19 @@ func updateWorkspaceTemplateManifest(filename, templateName string) error {
} }
defer client.DB.Close() defer client.DB.Close()
filename = filepath.Join("db", "yaml", filename)
namespaces, err := client.ListOnepanelEnabledNamespaces() namespaces, err := client.ListOnepanelEnabledNamespaces()
if err != nil { if err != nil {
return err return err
} }
newManifest, err := readDataFile(filename) manifest, err := data.ManifestFileFromFile(filename)
if err != nil {
return err
}
newManifest, err := manifest.SpecString()
if err != nil { if err != nil {
return err return err
} }
@@ -145,7 +160,14 @@ func createWorkflowTemplate(filename, templateName string, labels map[string]str
return err return err
} }
manifest, err := readDataFile(filename) filename = filepath.Join("db", "yaml", filename)
manifestFile, err := data.ManifestFileFromFile(filename)
if err != nil {
return err
}
manifest, err := manifestFile.SpecString()
if err != nil { if err != nil {
return err return err
} }
@@ -190,7 +212,14 @@ func updateWorkflowTemplateManifest(filename, templateName string, labels map[st
return err return err
} }
newManifest, err := readDataFile(filename) filename = filepath.Join("db", "yaml", filename)
manifestFile, err := data.ManifestFileFromFile(filename)
if err != nil {
return err
}
newManifest, err := manifestFile.SpecString()
if err != nil { if err != nil {
return err return err
} }

View File

@@ -1,183 +1,194 @@
# source: https://github.com/onepanelio/templates/blob/master/workflows/nni-hyperparameter-tuning/mnist/ metadata:
entrypoint: main name: "Hyperparameter Tuning Example"
arguments: kind: Workflow
parameters: version: 20201225172926
- name: source action: create
value: https://github.com/onepanelio/templates source: "https://github.com/onepanelio/templates/blob/master/workflows/nni-hyperparameter-tuning/mnist/"
- name: revision deprecated: true
value: master labels:
- name: config framework: tensorflow
displayName: Configuration tuner: TPE
required: true "created-by": system
hint: NNI configuration spec:
type: textarea.textarea entrypoint: main
value: |- arguments:
authorName: Onepanel, Inc. parameters:
experimentName: MNIST TF v2.x - name: source
trialConcurrency: 1 value: https://github.com/onepanelio/templates
maxExecDuration: 1h - name: revision
maxTrialNum: 10 value: master
trainingServicePlatform: local - name: config
searchSpacePath: search_space.json displayName: Configuration
useAnnotation: false required: true
tuner: hint: NNI configuration
# gpuIndices: '0' # uncomment and update to the GPU indices to assign this tuner type: textarea.textarea
builtinTunerName: TPE # choices: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner value: |-
classArgs: authorName: Onepanel, Inc.
optimize_mode: maximize # choices: maximize, minimize experimentName: MNIST TF v2.x
trial: trialConcurrency: 1
command: python main.py --output /mnt/output maxExecDuration: 1h
codeDir: . maxTrialNum: 10
# gpuNum: 1 # uncomment and update to number of GPUs trainingServicePlatform: local
- name: search-space searchSpacePath: search_space.json
displayName: Search space configuration useAnnotation: false
required: true tuner:
type: textarea.textarea # gpuIndices: '0' # uncomment and update to the GPU indices to assign this tuner
value: |- builtinTunerName: TPE # choices: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
{ classArgs:
"dropout_rate": { "_type": "uniform", "_value": [0.5, 0.9] }, optimize_mode: maximize # choices: maximize, minimize
"conv_size": { "_type": "choice", "_value": [2, 3, 5, 7] }, trial:
"hidden_size": { "_type": "choice", "_value": [124, 512, 1024] }, command: python main.py --output /mnt/output
"batch_size": { "_type": "choice", "_value": [16, 32] }, codeDir: .
"learning_rate": { "_type": "choice", "_value": [0.0001, 0.001, 0.01, 0.1] }, # gpuNum: 1 # uncomment and update to number of GPUs
"epochs": { "_type": "choice", "_value": [10] } - name: search-space
} displayName: Search space configuration
- displayName: Node pool required: true
hint: Name of node pool or group to run this workflow task type: textarea.textarea
type: select.nodepool value: |-
name: sys-node-pool {
value: {{.DefaultNodePoolOption}} "dropout_rate": { "_type": "uniform", "_value": [0.5, 0.9] },
required: true "conv_size": { "_type": "choice", "_value": [2, 3, 5, 7] },
"hidden_size": { "_type": "choice", "_value": [124, 512, 1024] },
"batch_size": { "_type": "choice", "_value": [16, 32] },
"learning_rate": { "_type": "choice", "_value": [0.0001, 0.001, 0.01, 0.1] },
"epochs": { "_type": "choice", "_value": [10] }
}
- displayName: Node pool
hint: Name of node pool or group to run this workflow task
type: select.nodepool
name: sys-node-pool
value: "{{.DefaultNodePoolOption}}"
required: true
volumeClaimTemplates: volumeClaimTemplates:
- metadata: - metadata:
name: hyperparamtuning-data name: hyperparamtuning-data
spec: spec:
accessModes: [ "ReadWriteOnce" ] accessModes: [ "ReadWriteOnce" ]
resources: resources:
requests: requests:
storage: 20Gi storage: 20Gi
- metadata: - metadata:
name: hyperparamtuning-output name: hyperparamtuning-output
spec: spec:
accessModes: [ "ReadWriteOnce" ] accessModes: [ "ReadWriteOnce" ]
resources: resources:
requests: requests:
storage: 20Gi storage: 20Gi
templates: templates:
- name: main - name: main
dag: dag:
tasks: tasks:
- name: hyperparameter-tuning - name: hyperparameter-tuning
template: hyperparameter-tuning template: hyperparameter-tuning
- name: workflow-metrics-writer - name: workflow-metrics-writer
template: workflow-metrics-writer template: workflow-metrics-writer
dependencies: [hyperparameter-tuning] dependencies: [hyperparameter-tuning]
arguments: arguments:
# Use sys-metrics artifact output from hyperparameter-tuning Task # Use sys-metrics artifact output from hyperparameter-tuning Task
artifacts: artifacts:
- name: best-metrics - name: best-metrics
from: "{{tasks.hyperparameter-tuning.outputs.artifacts.sys-metrics}}" from: "{{tasks.hyperparameter-tuning.outputs.artifacts.sys-metrics}}"
- name: hyperparameter-tuning - name: hyperparameter-tuning
inputs: inputs:
artifacts: artifacts:
- name: src - name: src
git: git:
repo: '{{workflow.parameters.source}}' repo: '{{workflow.parameters.source}}'
revision: '{{workflow.parameters.revision}}' revision: '{{workflow.parameters.revision}}'
path: /mnt/data/src path: /mnt/data/src
- name: config - name: config
path: /mnt/data/src/workflows/hyperparameter-tuning/mnist/config.yaml path: /mnt/data/src/workflows/hyperparameter-tuning/mnist/config.yaml
raw: raw:
data: '{{workflow.parameters.config}}' data: '{{workflow.parameters.config}}'
- name: search-space - name: search-space
path: /mnt/data/src/workflows/hyperparameter-tuning/mnist/search_space.json path: /mnt/data/src/workflows/hyperparameter-tuning/mnist/search_space.json
raw: raw:
data: '{{workflow.parameters.search-space}}' data: '{{workflow.parameters.search-space}}'
outputs: outputs:
artifacts: artifacts:
- name: output - name: output
path: /mnt/output path: /mnt/output
optional: true optional: true
container: container:
image: onepanel/dl:0.17.0 image: onepanel/dl:0.17.0
args:
- --config
- /mnt/data/src/workflows/hyperparameter-tuning/mnist/config.yaml
workingDir: /mnt
volumeMounts:
- name: hyperparamtuning-data
mountPath: /mnt/data
- name: hyperparamtuning-output
mountPath: /mnt/output
nodeSelector:
beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
sidecars:
- name: nni-web-ui
image: 'onepanel/nni-web-ui:0.17.0'
env:
- name: ONEPANEL_INTERACTIVE_SIDECAR
value: 'true'
ports:
- containerPort: 9000
name: nni
- name: tensorboard
image: 'tensorflow/tensorflow:2.3.0'
command:
- sh
- '-c'
env:
- name: ONEPANEL_INTERACTIVE_SIDECAR
value: 'true'
args: args:
# Read logs from /mnt/output/tensorboard - /mnt/output is auto-mounted from volumeMounts - --config
- tensorboard --logdir /mnt/output/tensorboard - /mnt/data/src/workflows/hyperparameter-tuning/mnist/config.yaml
ports: workingDir: /mnt
- containerPort: 6006 volumeMounts:
name: tensorboard - name: hyperparamtuning-data
- name: workflow-metrics-writer mountPath: /mnt/data
inputs: - name: hyperparamtuning-output
artifacts: mountPath: /mnt/output
- name: best-metrics nodeSelector:
path: /tmp/sys-metrics.json beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
script: sidecars:
image: onepanel/python-sdk:v0.16.0 - name: nni-web-ui
command: [python, '-u'] image: 'onepanel/nni-web-ui:0.17.0'
source: | env:
import os - name: ONEPANEL_INTERACTIVE_SIDECAR
import json value: 'true'
ports:
- containerPort: 9000
name: nni
- name: tensorboard
image: 'tensorflow/tensorflow:2.3.0'
command:
- sh
- '-c'
env:
- name: ONEPANEL_INTERACTIVE_SIDECAR
value: 'true'
args:
# Read logs from /mnt/output/tensorboard - /mnt/output is auto-mounted from volumeMounts
- tensorboard --logdir /mnt/output/tensorboard
ports:
- containerPort: 6006
name: tensorboard
- name: workflow-metrics-writer
inputs:
artifacts:
- name: best-metrics
path: /tmp/sys-metrics.json
script:
image: onepanel/python-sdk:v0.16.0
command: [python, '-u']
source: |
import os
import json
import onepanel.core.api import onepanel.core.api
from onepanel.core.api.models.metric import Metric from onepanel.core.api.models.metric import Metric
from onepanel.core.api.rest import ApiException from onepanel.core.api.rest import ApiException
from onepanel.core.api.models import Parameter from onepanel.core.api.models import Parameter
# Load Task A metrics # Load Task A metrics
with open('/tmp/sys-metrics.json') as f: with open('/tmp/sys-metrics.json') as f:
metrics = json.load(f) metrics = json.load(f)
with open('/var/run/secrets/kubernetes.io/serviceaccount/token') as f: with open('/var/run/secrets/kubernetes.io/serviceaccount/token') as f:
token = f.read() token = f.read()
# Configure API authorization # Configure API authorization
configuration = onepanel.core.api.Configuration( configuration = onepanel.core.api.Configuration(
host = os.getenv('ONEPANEL_API_URL'), host = os.getenv('ONEPANEL_API_URL'),
api_key = { api_key = {
'authorization': token 'authorization': token
} }
) )
configuration.api_key_prefix['authorization'] = 'Bearer' configuration.api_key_prefix['authorization'] = 'Bearer'
# Call SDK method to save metrics # Call SDK method to save metrics
with onepanel.core.api.ApiClient(configuration) as api_client: with onepanel.core.api.ApiClient(configuration) as api_client:
api_instance = onepanel.core.api.WorkflowServiceApi(api_client) api_instance = onepanel.core.api.WorkflowServiceApi(api_client)
namespace = '{{workflow.namespace}}' namespace = '{{workflow.namespace}}'
uid = '{{workflow.name}}' uid = '{{workflow.name}}'
body = onepanel.core.api.AddWorkflowExecutionsMetricsRequest() body = onepanel.core.api.AddWorkflowExecutionsMetricsRequest()
body.metrics = metrics body.metrics = metrics
try: try:
api_response = api_instance.add_workflow_execution_metrics(namespace, uid, body) api_response = api_instance.add_workflow_execution_metrics(namespace, uid, body)
print('Metrics added.') print('Metrics added.')
except ApiException as e: except ApiException as e:
print("Exception when calling WorkflowServiceApi->add_workflow_execution_metrics: %s\n" % e) print("Exception when calling WorkflowServiceApi->add_workflow_execution_metrics: %s\n" % e)

View File

@@ -1,194 +1,205 @@
# source: https://github.com/onepanelio/templates/blob/master/workflows/nni-hyperparameter-tuning/mnist/ metadata:
# Workflow Template example for hyperparameter tuning name: "Hyperparameter Tuning Example"
# Documentation: https://docs.onepanel.ai/docs/reference/workflows/hyperparameter-tuning kind: Workflow
# version: 20210118175809
# Only change the fields marked with [CHANGE] action: update
entrypoint: main source: "https://github.com/onepanelio/templates/blob/master/workflows/nni-hyperparameter-tuning/mnist/"
arguments: deprecated: true
parameters: labels:
framework: tensorflow
tuner: TPE
"created-by": system
spec:
# Workflow Template example for hyperparameter tuning
# Documentation: https://docs.onepanel.ai/docs/reference/workflows/hyperparameter-tuning
#
# Only change the fields marked with [CHANGE]
entrypoint: main
arguments:
parameters:
# [CHANGE] Path to your training/model architecture code repository # [CHANGE] Path to your training/model architecture code repository
# Change this value and revision value to your code repository and branch respectively # Change this value and revision value to your code repository and branch respectively
- name: source - name: source
value: https://github.com/onepanelio/templates value: https://github.com/onepanelio/templates
# [CHANGE] Revision is the branch or tag that you want to use # [CHANGE] Revision is the branch or tag that you want to use
# You can change this to any tag or branch name in your repository # You can change this to any tag or branch name in your repository
- name: revision - name: revision
value: v0.18.0 value: v0.18.0
# [CHANGE] Default configuration for the NNI tuner # [CHANGE] Default configuration for the NNI tuner
# See https://docs.onepanel.ai/docs/reference/workflows/hyperparameter-tuning#understanding-the-configurations # See https://docs.onepanel.ai/docs/reference/workflows/hyperparameter-tuning#understanding-the-configurations
- name: config - name: config
displayName: Configuration displayName: Configuration
required: true required: true
hint: NNI configuration hint: NNI configuration
type: textarea.textarea type: textarea.textarea
value: |- value: |-
authorName: Onepanel, Inc. authorName: Onepanel, Inc.
experimentName: MNIST TF v2.x experimentName: MNIST TF v2.x
trialConcurrency: 1 trialConcurrency: 1
maxExecDuration: 1h maxExecDuration: 1h
maxTrialNum: 10 maxTrialNum: 10
trainingServicePlatform: local trainingServicePlatform: local
searchSpacePath: search_space.json searchSpacePath: search_space.json
useAnnotation: false useAnnotation: false
tuner: tuner:
# gpuIndices: '0' # uncomment and update to the GPU indices to assign this tuner # gpuIndices: '0' # uncomment and update to the GPU indices to assign this tuner
builtinTunerName: TPE # choices: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner builtinTunerName: TPE # choices: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
classArgs: classArgs:
optimize_mode: maximize # choices: maximize, minimize optimize_mode: maximize # choices: maximize, minimize
trial: trial:
command: python main.py --output /mnt/output command: python main.py --output /mnt/output
codeDir: . codeDir: .
# gpuNum: 1 # uncomment and update to number of GPUs # gpuNum: 1 # uncomment and update to number of GPUs
# [CHANGE] Search space configuration # [CHANGE] Search space configuration
# Change according to your hyperparameters and ranges # Change according to your hyperparameters and ranges
- name: search-space - name: search-space
displayName: Search space configuration displayName: Search space configuration
required: true required: true
type: textarea.textarea type: textarea.textarea
value: |- value: |-
{ {
"dropout_rate": { "_type": "uniform", "_value": [0.5, 0.9] }, "dropout_rate": { "_type": "uniform", "_value": [0.5, 0.9] },
"conv_size": { "_type": "choice", "_value": [2, 3, 5, 7] }, "conv_size": { "_type": "choice", "_value": [2, 3, 5, 7] },
"hidden_size": { "_type": "choice", "_value": [124, 512, 1024] }, "hidden_size": { "_type": "choice", "_value": [124, 512, 1024] },
"batch_size": { "_type": "choice", "_value": [16, 32] }, "batch_size": { "_type": "choice", "_value": [16, 32] },
"learning_rate": { "_type": "choice", "_value": [0.0001, 0.001, 0.01, 0.1] }, "learning_rate": { "_type": "choice", "_value": [0.0001, 0.001, 0.01, 0.1] },
"epochs": { "_type": "choice", "_value": [10] } "epochs": { "_type": "choice", "_value": [10] }
} }
# Node pool dropdown (Node group in EKS) # Node pool dropdown (Node group in EKS)
# You can add more of these if you have additional tasks that can run on different node pools # You can add more of these if you have additional tasks that can run on different node pools
- displayName: Node pool - displayName: Node pool
hint: Name of node pool or group to run this workflow task hint: Name of node pool or group to run this workflow task
type: select.nodepool type: select.nodepool
name: sys-node-pool name: sys-node-pool
value: {{.DefaultNodePoolOption}} value: "{{.DefaultNodePoolOption}}"
required: true required: true
templates: templates:
- name: main - name: main
dag: dag:
tasks: tasks:
- name: hyperparameter-tuning - name: hyperparameter-tuning
template: hyperparameter-tuning template: hyperparameter-tuning
- name: metrics-writer - name: metrics-writer
template: metrics-writer template: metrics-writer
dependencies: [hyperparameter-tuning] dependencies: [hyperparameter-tuning]
arguments: arguments:
# Use sys-metrics artifact output from hyperparameter-tuning Task # Use sys-metrics artifact output from hyperparameter-tuning Task
# This writes the best metrics to the Workflow # This writes the best metrics to the Workflow
artifacts: artifacts:
- name: sys-metrics - name: sys-metrics
from: "{{tasks.hyperparameter-tuning.outputs.artifacts.sys-metrics}}" from: "{{tasks.hyperparameter-tuning.outputs.artifacts.sys-metrics}}"
- name: hyperparameter-tuning - name: hyperparameter-tuning
inputs: inputs:
artifacts: artifacts:
- name: src - name: src
# Clone the above repository into '/mnt/data/src' # Clone the above repository into '/mnt/data/src'
# See https://docs.onepanel.ai/docs/reference/workflows/artifacts#git for private repositories # See https://docs.onepanel.ai/docs/reference/workflows/artifacts#git for private repositories
git: git:
repo: '{{workflow.parameters.source}}' repo: '{{workflow.parameters.source}}'
revision: '{{workflow.parameters.revision}}' revision: '{{workflow.parameters.revision}}'
path: /mnt/data/src path: /mnt/data/src
# [CHANGE] Path where config.yaml will be generated or already exists # [CHANGE] Path where config.yaml will be generated or already exists
# Update the path below so that config.yaml is written to the same directory as your main.py file # Update the path below so that config.yaml is written to the same directory as your main.py file
# Note that your source code is cloned to /mnt/data/src # Note that your source code is cloned to /mnt/data/src
- name: config - name: config
path: /mnt/data/src/workflows/hyperparameter-tuning/mnist/config.yaml path: /mnt/data/src/workflows/hyperparameter-tuning/mnist/config.yaml
raw: raw:
data: '{{workflow.parameters.config}}' data: '{{workflow.parameters.config}}'
# [CHANGE] Path where search_space.json will be generated or already exists # [CHANGE] Path where search_space.json will be generated or already exists
# Update the path below so that search_space.json is written to the same directory as your main.py file # Update the path below so that search_space.json is written to the same directory as your main.py file
# Note that your source code is cloned to /mnt/data/src # Note that your source code is cloned to /mnt/data/src
- name: search-space - name: search-space
path: /mnt/data/src/workflows/hyperparameter-tuning/mnist/search_space.json path: /mnt/data/src/workflows/hyperparameter-tuning/mnist/search_space.json
raw: raw:
data: '{{workflow.parameters.search-space}}' data: '{{workflow.parameters.search-space}}'
outputs: outputs:
artifacts: artifacts:
- name: output - name: output
path: /mnt/output path: /mnt/output
optional: true optional: true
container: container:
image: onepanel/dl:0.17.0
command:
- sh
- -c
args:
# [CHANGE] Update the config path below to point to config.yaml path as described above
# Note that you can `pip install` additional tools here if necessary
- |
python -u /opt/onepanel/nni/start.py \
--config /mnt/data/src/workflows/hyperparameter-tuning/mnist/config.yaml
workingDir: /mnt
volumeMounts:
- name: hyperparamtuning-data
mountPath: /mnt/data
- name: hyperparamtuning-output
mountPath: /mnt/output
nodeSelector:
{{.NodePoolLabel}}: '{{workflow.parameters.sys-node-pool}}'
sidecars:
- name: nni-web-ui
image: onepanel/nni-web-ui:0.17.0
env:
- name: ONEPANEL_INTERACTIVE_SIDECAR
value: 'true'
ports:
- containerPort: 9000
name: nni
- name: tensorboard
image: onepanel/dl:0.17.0 image: onepanel/dl:0.17.0
command: command:
- sh - sh
- '-c' - -c
env:
- name: ONEPANEL_INTERACTIVE_SIDECAR
value: 'true'
args: args:
# Read logs from /mnt/output/tensorboard - /mnt/output is auto-mounted from volumeMounts # [CHANGE] Update the config path below to point to config.yaml path as described above
- tensorboard --logdir /mnt/output/tensorboard # Note that you can `pip install` additional tools here if necessary
ports: - |
- containerPort: 6006 python -u /opt/onepanel/nni/start.py \
name: tensorboard --config /mnt/data/src/workflows/hyperparameter-tuning/mnist/config.yaml
# Use the metrics-writer tasks to write best metrics to Workflow workingDir: /mnt
- name: metrics-writer volumeMounts:
inputs: - name: hyperparamtuning-data
artifacts: mountPath: /mnt/data
- name: sys-metrics - name: hyperparamtuning-output
path: /tmp/sys-metrics.json mountPath: /mnt/output
- git: nodeSelector:
repo: https://github.com/onepanelio/templates.git "{{.NodePoolLabel}}": '{{workflow.parameters.sys-node-pool}}'
revision: v0.18.0 sidecars:
name: src - name: nni-web-ui
path: /mnt/src image: onepanel/nni-web-ui:0.17.0
container: env:
image: onepanel/python-sdk:v0.16.0 - name: ONEPANEL_INTERACTIVE_SIDECAR
command: value: 'true'
- python ports:
- -u - containerPort: 9000
args: name: nni
- /mnt/src/tasks/metrics-writer/main.py - name: tensorboard
- --from_file=/tmp/sys-metrics.json image: onepanel/dl:0.17.0
command:
- sh
- '-c'
env:
- name: ONEPANEL_INTERACTIVE_SIDECAR
value: 'true'
args:
# Read logs from /mnt/output/tensorboard - /mnt/output is auto-mounted from volumeMounts
- tensorboard --logdir /mnt/output/tensorboard
ports:
- containerPort: 6006
name: tensorboard
# Use the metrics-writer tasks to write best metrics to Workflow
- name: metrics-writer
inputs:
artifacts:
- name: sys-metrics
path: /tmp/sys-metrics.json
- git:
repo: https://github.com/onepanelio/templates.git
revision: v0.18.0
name: src
path: /mnt/src
container:
image: onepanel/python-sdk:v0.16.0
command:
- python
- -u
args:
- /mnt/src/tasks/metrics-writer/main.py
- --from_file=/tmp/sys-metrics.json
# [CHANGE] Volumes that will mount to /mnt/data (annotated data) and /mnt/output (models, checkpoints, logs) # [CHANGE] Volumes that will mount to /mnt/data (annotated data) and /mnt/output (models, checkpoints, logs)
# Update this depending on your annotation data, model, checkpoint, logs, etc. sizes # Update this depending on your annotation data, model, checkpoint, logs, etc. sizes
# Example values: 250Mi, 500Gi, 1Ti # Example values: 250Mi, 500Gi, 1Ti
volumeClaimTemplates: volumeClaimTemplates:
- metadata: - metadata:
name: hyperparamtuning-data name: hyperparamtuning-data
spec: spec:
accessModes: [ "ReadWriteOnce" ] accessModes: [ "ReadWriteOnce" ]
resources: resources:
requests: requests:
storage: 20Gi storage: 20Gi
- metadata: - metadata:
name: hyperparamtuning-output name: hyperparamtuning-output
spec: spec:
accessModes: [ "ReadWriteOnce" ] accessModes: [ "ReadWriteOnce" ]
resources: resources:
requests: requests:
storage: 20Gi storage: 20Gi

View File

@@ -0,0 +1,197 @@
metadata:
name: "MaskRCNN Training"
kind: Workflow
version: 20200812104328
action: create
labels:
"used-by": "cvat"
"created-by": "system"
spec:
arguments:
parameters:
- name: source
value: https://github.com/onepanelio/Mask_RCNN.git
displayName: Model source code
type: hidden
visibility: private
- name: sys-annotation-path
value: annotation-dump/sample_dataset
hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
displayName: Dataset path
visibility: private
- name: sys-output-path
value: workflow-data/output/sample_output
hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
displayName: Workflow output path
visibility: private
- name: sys-finetune-checkpoint
value: ''
    hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint to show here. Leave empty if this is the first time you're training this model.
displayName: Checkpoint path
visibility: public
- name: sys-num-classes
displayName: Number of classes
    hint: Number of classes (i.e. in CVAT tasks) + 1 for background
value: '81'
visibility: private
- name: extras
displayName: Hyperparameters
visibility: public
type: textarea.textarea
value: |-
stage-1-epochs=1 # Epochs for network heads
stage-2-epochs=2 # Epochs for finetune layers
stage-3-epochs=3 # Epochs for all layers
hint: "Please refer to our <a href='https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_annotation_model#arguments-optional' target='_blank'>documentation</a> for more information on parameters."
- name: dump-format
type: select.select
value: cvat_coco
displayName: CVAT dump format
visibility: public
options:
- name: 'MS COCO'
value: 'cvat_coco'
- name: 'TF Detection API'
value: 'cvat_tfrecord'
- name: tf-image
visibility: public
value: tensorflow/tensorflow:1.13.1-py3
type: select.select
displayName: Select TensorFlow image
hint: Select the GPU image if you are running on a GPU node pool
options:
- name: 'TensorFlow 1.13.1 CPU Image'
value: 'tensorflow/tensorflow:1.13.1-py3'
- name: 'TensorFlow 1.13.1 GPU Image'
value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- displayName: Node pool
hint: Name of node pool or group to run this workflow task
type: select.select
visibility: public
name: sys-node-pool
value: Standard_D4s_v3
required: true
options:
- name: 'CPU: 2, RAM: 8GB'
value: Standard_D2s_v3
- name: 'CPU: 4, RAM: 16GB'
value: Standard_D4s_v3
- name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
value: Standard_NC6
entrypoint: main
templates:
- dag:
tasks:
- name: train-model
template: tensorflow
# Uncomment the lines below if you want to send Slack notifications
# - arguments:
# artifacts:
# - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
# name: metrics
# parameters:
# - name: status
# value: '{{tasks.train-model.status}}'
# dependencies:
# - train-model
# name: notify-in-slack
# template: slack-notify-success
name: main
- container:
args:
- |
apt-get update \
&& apt-get install -y git wget libglib2.0-0 libsm6 libxext6 libxrender-dev \
&& pip install -r requirements.txt \
&& pip install boto3 pyyaml google-cloud-storage \
&& git clone https://github.com/waleedka/coco \
&& cd coco/PythonAPI \
&& python setup.py build_ext install \
&& rm -rf build \
&& cd ../../ \
&& wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5 \
&& python setup.py install && ls \
&& python samples/coco/cvat.py train --dataset=/mnt/data/datasets \
--model=workflow_maskrcnn \
--extras="{{workflow.parameters.extras}}" \
--ref_model_path="{{workflow.parameters.sys-finetune-checkpoint}}" \
--num_classes="{{workflow.parameters.sys-num-classes}}" \
&& cd /mnt/src/ \
&& python prepare_dataset.py /mnt/data/datasets/annotations/instances_default.json
command:
- sh
- -c
image: '{{workflow.parameters.tf-image}}'
volumeMounts:
- mountPath: /mnt/data
name: data
- mountPath: /mnt/output
name: output
workingDir: /mnt/src
nodeSelector:
beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
inputs:
artifacts:
- name: data
path: /mnt/data/datasets/
"{{.ArtifactRepositoryType}}":
key: '{{workflow.namespace}}/{{workflow.parameters.sys-annotation-path}}'
- git:
repo: '{{workflow.parameters.source}}'
revision: "no-boto"
name: src
path: /mnt/src
name: tensorflow
outputs:
artifacts:
- name: model
optional: true
path: /mnt/output
"{{.ArtifactRepositoryType}}":
key: '{{workflow.namespace}}/{{workflow.parameters.sys-output-path}}'
# Uncomment the lines below if you want to send Slack notifications
#- container:
# args:
# - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
# SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
  #      SLACK_MESSAGE=$(cat /tmp/metrics.json) ./slack-notify
# command:
# - sh
# - -c
# image: technosophos/slack-notify
# inputs:
# artifacts:
# - name: metrics
# optional: true
# path: /tmp/metrics.json
# parameters:
# - name: status
# name: slack-notify-success
volumeClaimTemplates:
- metadata:
creationTimestamp: null
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 200Gi
- metadata:
creationTimestamp: null
name: output
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 200Gi

View File

@@ -0,0 +1,191 @@
metadata:
name: "MaskRCNN Training"
kind: Workflow
version: 20200824095513
action: update
labels:
"used-by": "cvat"
"created-by": "system"
spec:
arguments:
parameters:
- name: source
value: https://github.com/onepanelio/Mask_RCNN.git
displayName: Model source code
type: hidden
visibility: private
- name: cvat-annotation-path
value: annotation-dump/sample_dataset
hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
displayName: Dataset path
visibility: private
- name: cvat-output-path
value: workflow-data/output/sample_output
hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
displayName: Workflow output path
visibility: private
- name: cvat-finetune-checkpoint
value: ''
    hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint to show here. Leave empty if this is the first time you're training this model.
displayName: Checkpoint path
visibility: public
- name: cvat-num-classes
displayName: Number of classes
    hint: Number of classes (i.e. in CVAT tasks) + 1 for background
value: '81'
visibility: private
- name: hyperparameters
displayName: Hyperparameters
visibility: public
type: textarea.textarea
value: |-
stage-1-epochs=1 # Epochs for network heads
stage-2-epochs=2 # Epochs for finetune layers
stage-3-epochs=3 # Epochs for all layers
hint: "Please refer to our <a href='https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_annotation_model#arguments-optional' target='_blank'>documentation</a> for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
- name: dump-format
value: cvat_coco
displayName: CVAT dump format
visibility: public
- name: tf-image
visibility: public
value: tensorflow/tensorflow:1.13.1-py3
type: select.select
displayName: Select TensorFlow image
hint: Select the GPU image if you are running on a GPU node pool
options:
- name: 'TensorFlow 1.13.1 CPU Image'
value: 'tensorflow/tensorflow:1.13.1-py3'
- name: 'TensorFlow 1.13.1 GPU Image'
value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- displayName: Node pool
hint: Name of node pool or group to run this workflow task
type: select.select
visibility: public
name: sys-node-pool
value: Standard_D4s_v3
required: true
options:
- name: 'CPU: 2, RAM: 8GB'
value: Standard_D2s_v3
- name: 'CPU: 4, RAM: 16GB'
value: Standard_D4s_v3
- name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
value: Standard_NC6
entrypoint: main
templates:
- dag:
tasks:
- name: train-model
template: tensorflow
# Uncomment the lines below if you want to send Slack notifications
# - arguments:
# artifacts:
# - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
# name: metrics
# parameters:
# - name: status
# value: '{{tasks.train-model.status}}'
# dependencies:
# - train-model
# name: notify-in-slack
# template: slack-notify-success
name: main
- container:
args:
- |
apt-get update \
&& apt-get install -y git wget libglib2.0-0 libsm6 libxext6 libxrender-dev \
&& pip install -r requirements.txt \
&& pip install boto3 pyyaml google-cloud-storage \
&& git clone https://github.com/waleedka/coco \
&& cd coco/PythonAPI \
&& python setup.py build_ext install \
&& rm -rf build \
&& cd ../../ \
&& wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5 \
&& python setup.py install && ls \
&& python samples/coco/cvat.py train --dataset=/mnt/data/datasets \
--model=workflow_maskrcnn \
--extras="{{workflow.parameters.hyperparameters}}" \
--ref_model_path="{{workflow.parameters.cvat-finetune-checkpoint}}" \
--num_classes="{{workflow.parameters.cvat-num-classes}}" \
&& cd /mnt/src/ \
&& python prepare_dataset.py /mnt/data/datasets/annotations/instances_default.json
command:
- sh
- -c
image: '{{workflow.parameters.tf-image}}'
volumeMounts:
- mountPath: /mnt/data
name: data
- mountPath: /mnt/output
name: output
workingDir: /mnt/src
nodeSelector:
beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
inputs:
artifacts:
- name: data
path: /mnt/data/datasets/
"{{.ArtifactRepositoryType}}":
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
- git:
repo: '{{workflow.parameters.source}}'
revision: "no-boto"
name: src
path: /mnt/src
name: tensorflow
outputs:
artifacts:
- name: model
optional: true
path: /mnt/output
"{{.ArtifactRepositoryType}}":
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
# Uncomment the lines below if you want to send Slack notifications
#- container:
# args:
# - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
# SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
  #      SLACK_MESSAGE=$(cat /tmp/metrics.json) ./slack-notify
# command:
# - sh
# - -c
# image: technosophos/slack-notify
# inputs:
# artifacts:
# - name: metrics
# optional: true
# path: /tmp/metrics.json
# parameters:
# - name: status
# name: slack-notify-success
volumeClaimTemplates:
- metadata:
creationTimestamp: null
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 200Gi
- metadata:
creationTimestamp: null
name: output
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 200Gi

View File

@@ -1,190 +1,199 @@
entrypoint: main metadata:
arguments: name: "MaskRCNN Training"
parameters: kind: Workflow
- name: source version: 20201115145814
value: https://github.com/onepanelio/Mask_RCNN.git action: update
displayName: Model source code labels:
type: hidden "used-by": "cvat"
visibility: private "created-by": "system"
spec:
entrypoint: main
arguments:
parameters:
- name: source
value: https://github.com/onepanelio/Mask_RCNN.git
displayName: Model source code
type: hidden
visibility: private
- name: cvat-annotation-path - name: cvat-annotation-path
value: annotation-dump/sample_dataset value: annotation-dump/sample_dataset
hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated. hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
displayName: Dataset path displayName: Dataset path
visibility: private visibility: private
- name: cvat-output-path - name: cvat-output-path
value: workflow-data/output/sample_output value: workflow-data/output/sample_output
hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated. hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
displayName: Workflow output path displayName: Workflow output path
visibility: private visibility: private
- name: cvat-finetune-checkpoint - name: cvat-finetune-checkpoint
value: '' value: ''
hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model. hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
displayName: Checkpoint path displayName: Checkpoint path
visibility: public visibility: public
- name: cvat-num-classes - name: cvat-num-classes
displayName: Number of classes displayName: Number of classes
hint: Number of classes (i.e in CVAT taks) + 1 for background hint: Number of classes (i.e in CVAT taks) + 1 for background
value: '81' value: '81'
visibility: private visibility: private
- name: hyperparameters - name: hyperparameters
displayName: Hyperparameters displayName: Hyperparameters
visibility: public visibility: public
type: textarea.textarea type: textarea.textarea
value: |- value: |-
stage-1-epochs=1 # Epochs for network heads stage-1-epochs=1 # Epochs for network heads
stage-2-epochs=2 # Epochs for finetune layers stage-2-epochs=2 # Epochs for finetune layers
stage-3-epochs=3 # Epochs for all layers stage-3-epochs=3 # Epochs for all layers
hint: "Please refer to our <a href='https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_annotation_model#arguments-optional' target='_blank'>documentation</a> for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow." hint: "Please refer to our <a href='https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_annotation_model#arguments-optional' target='_blank'>documentation</a> for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
- name: dump-format - name: dump-format
value: cvat_coco value: cvat_coco
displayName: CVAT dump format displayName: CVAT dump format
visibility: public visibility: public
- name: tf-image - name: tf-image
visibility: public visibility: public
value: tensorflow/tensorflow:1.13.1-py3 value: tensorflow/tensorflow:1.13.1-py3
type: select.select type: select.select
displayName: Select TensorFlow image displayName: Select TensorFlow image
hint: Select the GPU image if you are running on a GPU node pool hint: Select the GPU image if you are running on a GPU node pool
options: options:
- name: 'TensorFlow 1.13.1 CPU Image' - name: 'TensorFlow 1.13.1 CPU Image'
value: 'tensorflow/tensorflow:1.13.1-py3' value: 'tensorflow/tensorflow:1.13.1-py3'
- name: 'TensorFlow 1.13.1 GPU Image' - name: 'TensorFlow 1.13.1 GPU Image'
value: 'tensorflow/tensorflow:1.13.1-gpu-py3' value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- displayName: Node pool - displayName: Node pool
hint: Name of node pool or group to run this workflow task hint: Name of node pool or group to run this workflow task
type: select.select type: select.select
visibility: public visibility: public
name: sys-node-pool name: sys-node-pool
value: Standard_D4s_v3 value: Standard_D4s_v3
required: true required: true
options: options:
- name: 'CPU: 2, RAM: 8GB' - name: 'CPU: 2, RAM: 8GB'
value: Standard_D2s_v3 value: Standard_D2s_v3
- name: 'CPU: 4, RAM: 16GB' - name: 'CPU: 4, RAM: 16GB'
value: Standard_D4s_v3 value: Standard_D4s_v3
- name: 'GPU: 1xK80, CPU: 6, RAM: 56GB' - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
value: Standard_NC6 value: Standard_NC6
templates: templates:
- name: main - name: main
dag: dag:
tasks: tasks:
- name: train-model - name: train-model
template: tensorflow template: tensorflow
# Uncomment the lines below if you want to send Slack notifications # Uncomment the lines below if you want to send Slack notifications
# - arguments: # - arguments:
# artifacts: # artifacts:
# - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}' # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
# name: metrics # name: metrics
# parameters: # parameters:
# - name: status # - name: status
# value: '{{tasks.train-model.status}}' # value: '{{tasks.train-model.status}}'
# dependencies: # dependencies:
# - train-model # - train-model
# name: notify-in-slack # name: notify-in-slack
# template: slack-notify-success # template: slack-notify-success
- name: tensorflow - name: tensorflow
container: container:
args: args:
- | - |
apt-get update \ apt-get update \
&& apt-get install -y git wget libglib2.0-0 libsm6 libxext6 libxrender-dev \ && apt-get install -y git wget libglib2.0-0 libsm6 libxext6 libxrender-dev \
&& pip install -r requirements.txt \ && pip install -r requirements.txt \
&& pip install boto3 pyyaml google-cloud-storage \ && pip install boto3 pyyaml google-cloud-storage \
&& git clone https://github.com/waleedka/coco \ && git clone https://github.com/waleedka/coco \
&& cd coco/PythonAPI \ && cd coco/PythonAPI \
&& python setup.py build_ext install \ && python setup.py build_ext install \
&& rm -rf build \ && rm -rf build \
&& cd ../../ \ && cd ../../ \
&& wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5 \ && wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5 \
&& python setup.py install && ls \ && python setup.py install && ls \
&& python samples/coco/cvat.py train --dataset=/mnt/data/datasets \ && python samples/coco/cvat.py train --dataset=/mnt/data/datasets \
--model=workflow_maskrcnn \ --model=workflow_maskrcnn \
--extras="{{workflow.parameters.hyperparameters}}" \ --extras="{{workflow.parameters.hyperparameters}}" \
--ref_model_path="{{workflow.parameters.cvat-finetune-checkpoint}}" \ --ref_model_path="{{workflow.parameters.cvat-finetune-checkpoint}}" \
--num_classes="{{workflow.parameters.cvat-num-classes}}" \ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
&& cd /mnt/src/ \ && cd /mnt/src/ \
&& python prepare_dataset.py /mnt/data/datasets/annotations/instances_default.json && python prepare_dataset.py /mnt/data/datasets/annotations/instances_default.json
command: command:
- sh - sh
- -c - -c
image: '{{workflow.parameters.tf-image}}' image: '{{workflow.parameters.tf-image}}'
volumeMounts: volumeMounts:
- mountPath: /mnt/data - mountPath: /mnt/data
name: data name: data
- mountPath: /mnt/output - mountPath: /mnt/output
name: output name: output
workingDir: /mnt/src workingDir: /mnt/src
nodeSelector: nodeSelector:
beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}' beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
sidecars: sidecars:
- name: tensorboard - name: tensorboard
image: tensorflow/tensorflow:2.3.0 image: tensorflow/tensorflow:2.3.0
command: [sh, -c] command: [sh, -c]
tty: true tty: true
args: ["tensorboard --logdir /mnt/output/"] args: ["tensorboard --logdir /mnt/output/"]
ports: ports:
- containerPort: 6006 - containerPort: 6006
name: tensorboard name: tensorboard
inputs: inputs:
artifacts: artifacts:
- name: data - name: data
path: /mnt/data/datasets/ path: /mnt/data/datasets/
{{.ArtifactRepositoryType}}: "{{.ArtifactRepositoryType}}":
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}' key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
- git: - git:
repo: '{{workflow.parameters.source}}' repo: '{{workflow.parameters.source}}'
revision: "no-boto" revision: "no-boto"
name: src name: src
path: /mnt/src path: /mnt/src
outputs: outputs:
artifacts: artifacts:
- name: model - name: model
optional: true optional: true
path: /mnt/output path: /mnt/output
{{.ArtifactRepositoryType}}: "{{.ArtifactRepositoryType}}":
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}' key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
# Uncomment the lines below if you want to send Slack notifications # Uncomment the lines below if you want to send Slack notifications
#- container: #- container:
# args: # args:
# - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}" # - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
# SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd # SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
# SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify # SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify
# command: # command:
# - sh # - sh
# - -c # - -c
# image: technosophos/slack-notify # image: technosophos/slack-notify
# inputs: # inputs:
# artifacts: # artifacts:
# - name: metrics # - name: metrics
# optional: true # optional: true
# path: /tmp/metrics.json # path: /tmp/metrics.json
# parameters: # parameters:
# - name: status # - name: status
# name: slack-notify-success # name: slack-notify-success
volumeClaimTemplates: volumeClaimTemplates:
- metadata: - metadata:
creationTimestamp: null creationTimestamp: null
name: data name: data
spec: spec:
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
requests: requests:
storage: 200Gi storage: 200Gi
- metadata: - metadata:
creationTimestamp: null creationTimestamp: null
name: output name: output
spec: spec:
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
requests: requests:
storage: 200Gi storage: 200Gi

View File

@@ -1,192 +1,201 @@
entrypoint: main metadata:
arguments: name: "MaskRCNN Training"
parameters: kind: Workflow
- name: source version: 20201208155115
value: https://github.com/onepanelio/Mask_RCNN.git action: update
displayName: Model source code labels:
type: hidden "used-by": "cvat"
visibility: private "created-by": "system"
spec:
entrypoint: main
arguments:
parameters:
- name: source
value: https://github.com/onepanelio/Mask_RCNN.git
displayName: Model source code
type: hidden
visibility: private
- name: cvat-annotation-path - name: cvat-annotation-path
value: annotation-dump/sample_dataset value: annotation-dump/sample_dataset
hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated. hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
displayName: Dataset path displayName: Dataset path
visibility: private visibility: private
- name: cvat-output-path - name: cvat-output-path
value: workflow-data/output/sample_output value: workflow-data/output/sample_output
hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated. hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
displayName: Workflow output path displayName: Workflow output path
visibility: private visibility: private
- name: cvat-finetune-checkpoint - name: cvat-finetune-checkpoint
value: '' value: ''
hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model. hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
displayName: Checkpoint path displayName: Checkpoint path
visibility: public visibility: public
- name: cvat-num-classes - name: cvat-num-classes
displayName: Number of classes displayName: Number of classes
hint: Number of classes (i.e in CVAT taks) + 1 for background hint: Number of classes (i.e in CVAT taks) + 1 for background
value: '81' value: '81'
visibility: private visibility: private
- name: hyperparameters - name: hyperparameters
displayName: Hyperparameters displayName: Hyperparameters
visibility: public visibility: public
type: textarea.textarea type: textarea.textarea
value: |- value: |-
stage-1-epochs=1 # Epochs for network heads stage-1-epochs=1 # Epochs for network heads
stage-2-epochs=2 # Epochs for finetune layers stage-2-epochs=2 # Epochs for finetune layers
stage-3-epochs=3 # Epochs for all layers stage-3-epochs=3 # Epochs for all layers
hint: "Please refer to our <a href='https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_annotation_model#arguments-optional' target='_blank'>documentation</a> for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow." hint: "Please refer to our <a href='https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_annotation_model#arguments-optional' target='_blank'>documentation</a> for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
- name: dump-format - name: dump-format
value: cvat_coco value: cvat_coco
displayName: CVAT dump format displayName: CVAT dump format
visibility: public visibility: public
- name: tf-image - name: tf-image
visibility: public visibility: public
value: tensorflow/tensorflow:1.13.1-py3 value: tensorflow/tensorflow:1.13.1-py3
type: select.select type: select.select
displayName: Select TensorFlow image displayName: Select TensorFlow image
hint: Select the GPU image if you are running on a GPU node pool hint: Select the GPU image if you are running on a GPU node pool
options: options:
- name: 'TensorFlow 1.13.1 CPU Image' - name: 'TensorFlow 1.13.1 CPU Image'
value: 'tensorflow/tensorflow:1.13.1-py3' value: 'tensorflow/tensorflow:1.13.1-py3'
- name: 'TensorFlow 1.13.1 GPU Image' - name: 'TensorFlow 1.13.1 GPU Image'
value: 'tensorflow/tensorflow:1.13.1-gpu-py3' value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- displayName: Node pool - displayName: Node pool
hint: Name of node pool or group to run this workflow task hint: Name of node pool or group to run this workflow task
type: select.select type: select.select
visibility: public visibility: public
name: sys-node-pool name: sys-node-pool
value: Standard_D4s_v3 value: Standard_D4s_v3
required: true required: true
options: options:
- name: 'CPU: 2, RAM: 8GB' - name: 'CPU: 2, RAM: 8GB'
value: Standard_D2s_v3 value: Standard_D2s_v3
- name: 'CPU: 4, RAM: 16GB' - name: 'CPU: 4, RAM: 16GB'
value: Standard_D4s_v3 value: Standard_D4s_v3
- name: 'GPU: 1xK80, CPU: 6, RAM: 56GB' - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
value: Standard_NC6 value: Standard_NC6
templates: templates:
- name: main - name: main
dag: dag:
tasks: tasks:
- name: train-model - name: train-model
template: tensorflow template: tensorflow
# Uncomment the lines below if you want to send Slack notifications # Uncomment the lines below if you want to send Slack notifications
# - arguments: # - arguments:
# artifacts: # artifacts:
# - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}' # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
# name: metrics # name: metrics
# parameters: # parameters:
# - name: status # - name: status
# value: '{{tasks.train-model.status}}' # value: '{{tasks.train-model.status}}'
# dependencies: # dependencies:
# - train-model # - train-model
# name: notify-in-slack # name: notify-in-slack
# template: slack-notify-success # template: slack-notify-success
- name: tensorflow - name: tensorflow
container: container:
args: args:
- | - |
apt-get update \ apt-get update \
&& apt-get install -y git wget libglib2.0-0 libsm6 libxext6 libxrender-dev \ && apt-get install -y git wget libglib2.0-0 libsm6 libxext6 libxrender-dev \
&& pip install -r requirements.txt \ && pip install -r requirements.txt \
&& pip install boto3 pyyaml google-cloud-storage \ && pip install boto3 pyyaml google-cloud-storage \
&& git clone https://github.com/waleedka/coco \ && git clone https://github.com/waleedka/coco \
&& cd coco/PythonAPI \ && cd coco/PythonAPI \
&& python setup.py build_ext install \ && python setup.py build_ext install \
&& rm -rf build \ && rm -rf build \
&& cd ../../ \ && cd ../../ \
&& wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5 \ && wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5 \
&& python setup.py install && ls \ && python setup.py install && ls \
&& python samples/coco/cvat.py train --dataset=/mnt/data/datasets \ && python samples/coco/cvat.py train --dataset=/mnt/data/datasets \
--model=workflow_maskrcnn \ --model=workflow_maskrcnn \
--extras="{{workflow.parameters.hyperparameters}}" \ --extras="{{workflow.parameters.hyperparameters}}" \
--ref_model_path="{{workflow.parameters.cvat-finetune-checkpoint}}" \ --ref_model_path="{{workflow.parameters.cvat-finetune-checkpoint}}" \
--num_classes="{{workflow.parameters.cvat-num-classes}}" \ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
&& cd /mnt/src/ \ && cd /mnt/src/ \
&& python prepare_dataset.py /mnt/data/datasets/annotations/instances_default.json && python prepare_dataset.py /mnt/data/datasets/annotations/instances_default.json
command: command:
- sh - sh
- -c - -c
image: '{{workflow.parameters.tf-image}}' image: '{{workflow.parameters.tf-image}}'
volumeMounts: volumeMounts:
- mountPath: /mnt/data - mountPath: /mnt/data
name: data name: data
- mountPath: /mnt/output - mountPath: /mnt/output
name: output name: output
workingDir: /mnt/src workingDir: /mnt/src
nodeSelector: nodeSelector:
beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}' beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
sidecars: sidecars:
- name: tensorboard - name: tensorboard
image: tensorflow/tensorflow:2.3.0 image: tensorflow/tensorflow:2.3.0
command: [sh, -c] command: [sh, -c]
env: env:
- name: ONEPANEL_INTERACTIVE_SIDECAR - name: ONEPANEL_INTERACTIVE_SIDECAR
value: 'true' value: 'true'
args: ["tensorboard --logdir /mnt/output/"] args: ["tensorboard --logdir /mnt/output/"]
ports: ports:
- containerPort: 6006 - containerPort: 6006
name: tensorboard name: tensorboard
inputs: inputs:
artifacts: artifacts:
- name: data - name: data
path: /mnt/data/datasets/ path: /mnt/data/datasets/
{{.ArtifactRepositoryType}}: "{{.ArtifactRepositoryType}}":
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}' key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
- git: - git:
repo: '{{workflow.parameters.source}}' repo: '{{workflow.parameters.source}}'
revision: "no-boto" revision: "no-boto"
name: src name: src
path: /mnt/src path: /mnt/src
outputs: outputs:
artifacts: artifacts:
- name: model - name: model
optional: true optional: true
path: /mnt/output path: /mnt/output
{{.ArtifactRepositoryType}}: "{{.ArtifactRepositoryType}}":
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}' key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
# Uncomment the lines below if you want to send Slack notifications # Uncomment the lines below if you want to send Slack notifications
#- container: #- container:
# args: # args:
# - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}" # - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
# SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd # SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
# SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify # SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify
# command: # command:
# - sh # - sh
# - -c # - -c
# image: technosophos/slack-notify # image: technosophos/slack-notify
# inputs: # inputs:
# artifacts: # artifacts:
# - name: metrics # - name: metrics
# optional: true # optional: true
# path: /tmp/metrics.json # path: /tmp/metrics.json
# parameters: # parameters:
# - name: status # - name: status
# name: slack-notify-success # name: slack-notify-success
volumeClaimTemplates: volumeClaimTemplates:
- metadata: - metadata:
creationTimestamp: null creationTimestamp: null
name: data name: data
spec: spec:
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
requests: requests:
storage: 200Gi storage: 200Gi
- metadata: - metadata:
creationTimestamp: null creationTimestamp: null
name: output name: output
spec: spec:
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
requests: requests:
storage: 200Gi storage: 200Gi

View File

@@ -1,149 +1,158 @@
# source: https://github.com/onepanelio/templates/blob/master/workflows/maskrcnn-training/ metadata:
arguments: name: "MaskRCNN Training"
parameters: kind: Workflow
- name: cvat-annotation-path version: 20201221195937
value: annotation-dump/sample_dataset action: update
hint: Path to annotated data in default object storage. In CVAT, this parameter will be pre-populated. source: "https://github.com/onepanelio/templates/blob/master/workflows/maskrcnn-training/"
displayName: Dataset path labels:
visibility: internal "used-by": "cvat"
"created-by": "system"
spec:
arguments:
parameters:
- name: cvat-annotation-path
value: annotation-dump/sample_dataset
hint: Path to annotated data in default object storage. In CVAT, this parameter will be pre-populated.
displayName: Dataset path
visibility: internal
- name: cvat-output-path - name: cvat-output-path
value: workflow-data/output/sample_output value: workflow-data/output/sample_output
hint: Path to store output artifacts in default object storage. In CVAT, this parameter will be pre-populated. hint: Path to store output artifacts in default object storage. In CVAT, this parameter will be pre-populated.
displayName: Workflow output path displayName: Workflow output path
visibility: internal visibility: internal
- name: cvat-finetune-checkpoint - name: cvat-finetune-checkpoint
value: '' value: ''
hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model. hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
displayName: Checkpoint path displayName: Checkpoint path
visibility: public visibility: public
- name: cvat-num-classes - name: cvat-num-classes
displayName: Number of classes displayName: Number of classes
hint: Number of classes + 1 for background. In CVAT, this parameter will be pre-populated. hint: Number of classes + 1 for background. In CVAT, this parameter will be pre-populated.
value: '11' value: '11'
visibility: internal visibility: internal
- name: hyperparameters - name: hyperparameters
displayName: Hyperparameters displayName: Hyperparameters
visibility: public visibility: public
type: textarea.textarea type: textarea.textarea
value: |- value: |-
stage-1-epochs=1 # Epochs for network heads stage-1-epochs=1 # Epochs for network heads
stage-2-epochs=2 # Epochs for finetune layers stage-2-epochs=2 # Epochs for finetune layers
stage-3-epochs=3 # Epochs for all layers stage-3-epochs=3 # Epochs for all layers
hint: "See <a href='https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_annotation_model#maskrcnn-hyperparameters' target='_blank'>documentation</a> for more information on parameters." hint: "See <a href='https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_annotation_model#maskrcnn-hyperparameters' target='_blank'>documentation</a> for more information on parameters."
- name: dump-format - name: dump-format
value: cvat_coco value: cvat_coco
displayName: CVAT dump format displayName: CVAT dump format
visibility: public visibility: public
- name: tf-image - name: tf-image
visibility: public visibility: public
value: tensorflow/tensorflow:1.13.1-py3 value: tensorflow/tensorflow:1.13.1-py3
type: select.select type: select.select
displayName: Select TensorFlow image displayName: Select TensorFlow image
hint: Select the GPU image if you are running on a GPU node pool hint: Select the GPU image if you are running on a GPU node pool
options: options:
- name: 'TensorFlow 1.13.1 CPU Image' - name: 'TensorFlow 1.13.1 CPU Image'
value: 'tensorflow/tensorflow:1.13.1-py3' value: 'tensorflow/tensorflow:1.13.1-py3'
- name: 'TensorFlow 1.13.1 GPU Image' - name: 'TensorFlow 1.13.1 GPU Image'
value: 'tensorflow/tensorflow:1.13.1-gpu-py3' value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- displayName: Node pool - displayName: Node pool
hint: Name of node pool or group to run this workflow task hint: Name of node pool or group to run this workflow task
type: select.nodepool type: select.nodepool
visibility: public visibility: public
name: sys-node-pool name: sys-node-pool
value: {{.DefaultNodePoolOption}} value: "{{.DefaultNodePoolOption}}"
required: true required: true
entrypoint: main entrypoint: main
templates: templates:
- dag: - dag:
tasks: tasks:
- name: train-model - name: train-model
template: tensorflow template: tensorflow
name: main name: main
- container: - container:
args: args:
- | - |
apt-get update \ apt-get update \
&& apt-get install -y git wget libglib2.0-0 libsm6 libxext6 libxrender-dev \ && apt-get install -y git wget libglib2.0-0 libsm6 libxext6 libxrender-dev \
&& pip install -r requirements.txt \ && pip install -r requirements.txt \
&& pip install boto3 pyyaml google-cloud-storage \ && pip install boto3 pyyaml google-cloud-storage \
&& git clone https://github.com/waleedka/coco \ && git clone https://github.com/waleedka/coco \
&& cd coco/PythonAPI \ && cd coco/PythonAPI \
&& python setup.py build_ext install \ && python setup.py build_ext install \
&& rm -rf build \ && rm -rf build \
&& cd ../../ \ && cd ../../ \
&& wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5 \ && wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5 \
&& python setup.py install && ls \ && python setup.py install && ls \
&& python samples/coco/cvat.py train --dataset=/mnt/data/datasets \ && python samples/coco/cvat.py train --dataset=/mnt/data/datasets \
--model=workflow_maskrcnn \ --model=workflow_maskrcnn \
--extras="{{workflow.parameters.hyperparameters}}" \ --extras="{{workflow.parameters.hyperparameters}}" \
--ref_model_path="{{workflow.parameters.cvat-finetune-checkpoint}}" \ --ref_model_path="{{workflow.parameters.cvat-finetune-checkpoint}}" \
--num_classes="{{workflow.parameters.cvat-num-classes}}" \ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
&& cd /mnt/src/ \ && cd /mnt/src/ \
&& python prepare_dataset.py /mnt/data/datasets/annotations/instances_default.json && python prepare_dataset.py /mnt/data/datasets/annotations/instances_default.json
command: command:
- sh - sh
- -c - -c
image: '{{workflow.parameters.tf-image}}' image: '{{workflow.parameters.tf-image}}'
volumeMounts: volumeMounts:
- mountPath: /mnt/data - mountPath: /mnt/data
name: data name: data
- mountPath: /mnt/output - mountPath: /mnt/output
name: output name: output
workingDir: /mnt/src workingDir: /mnt/src
sidecars: sidecars:
- name: tensorboard - name: tensorboard
image: tensorflow/tensorflow:2.3.0 image: tensorflow/tensorflow:2.3.0
command: [ sh, -c ] command: [ sh, -c ]
env: env:
- name: ONEPANEL_INTERACTIVE_SIDECAR - name: ONEPANEL_INTERACTIVE_SIDECAR
value: 'true' value: 'true'
args: [ "tensorboard --logdir /mnt/output/" ] args: [ "tensorboard --logdir /mnt/output/" ]
ports: ports:
- containerPort: 6006 - containerPort: 6006
name: tensorboard name: tensorboard
nodeSelector: nodeSelector:
beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}' beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
inputs: inputs:
artifacts: artifacts:
- name: data - name: data
path: /mnt/data/datasets/ path: /mnt/data/datasets/
s3: s3:
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}' key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
- git: - git:
repo: 'https://github.com/onepanelio/Mask_RCNN.git' repo: 'https://github.com/onepanelio/Mask_RCNN.git'
revision: 'no-boto' revision: 'no-boto'
name: src name: src
path: /mnt/src path: /mnt/src
name: tensorflow name: tensorflow
outputs: outputs:
artifacts: artifacts:
- name: model - name: model
optional: true optional: true
path: /mnt/output path: /mnt/output
s3: s3:
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}' key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
volumeClaimTemplates: volumeClaimTemplates:
- metadata: - metadata:
name: data name: data
spec: spec:
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
requests: requests:
storage: 200Gi storage: 200Gi
- metadata: - metadata:
name: output name: output
spec: spec:
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
requests: requests:
storage: 200Gi storage: 200Gi

View File

@@ -1,208 +1,217 @@
# source: https://github.com/onepanelio/templates/blob/master/workflows/maskrcnn-training/ metadata:
arguments: name: "MaskRCNN Training"
parameters: kind: Workflow
- name: cvat-annotation-path version: 20210118175809
value: 'artifacts/{{workflow.namespace}}/annotations/' action: update
hint: Path to annotated data (COCO format) in default object storage. In CVAT, this parameter will be pre-populated. source: "https://github.com/onepanelio/templates/blob/master/workflows/maskrcnn-training/"
displayName: Dataset path labels:
visibility: internal "used-by": "cvat"
"created-by": "system"
spec:
arguments:
parameters:
- name: cvat-annotation-path
value: 'artifacts/{{workflow.namespace}}/annotations/'
hint: Path to annotated data (COCO format) in default object storage. In CVAT, this parameter will be pre-populated.
displayName: Dataset path
visibility: internal
- name: val-split - name: val-split
value: 10 value: 10
displayName: Validation split size displayName: Validation split size
type: input.number type: input.number
visibility: public visibility: public
hint: Enter validation set size in percentage of full dataset. (0 - 100) hint: Enter validation set size in percentage of full dataset. (0 - 100)
- name: num-augmentation-cycles - name: num-augmentation-cycles
value: 1 value: 1
displayName: Number of augmentation cycles displayName: Number of augmentation cycles
type: input.number type: input.number
visibility: public visibility: public
hint: Number of augmentation cycles, zero means no data augmentation hint: Number of augmentation cycles, zero means no data augmentation
- name: preprocessing-parameters - name: preprocessing-parameters
value: |- value: |-
RandomBrightnessContrast: RandomBrightnessContrast:
p: 0.2 p: 0.2
GaussianBlur: GaussianBlur:
p: 0.3 p: 0.3
GaussNoise: GaussNoise:
p: 0.4 p: 0.4
HorizontalFlip: HorizontalFlip:
p: 0.5 p: 0.5
VerticalFlip: VerticalFlip:
p: 0.3 p: 0.3
displayName: Preprocessing parameters displayName: Preprocessing parameters
visibility: public visibility: public
type: textarea.textarea type: textarea.textarea
hint: 'See <a href="https://albumentations.ai/docs/api_reference/augmentations/transforms/" target="_blank">documentation</a> for more information on parameters.' hint: 'See <a href="https://albumentations.ai/docs/api_reference/augmentations/transforms/" target="_blank">documentation</a> for more information on parameters.'
- name: cvat-num-classes - name: cvat-num-classes
displayName: Number of classes displayName: Number of classes
hint: Number of classes. In CVAT, this parameter will be pre-populated. hint: Number of classes. In CVAT, this parameter will be pre-populated.
value: '10' value: '10'
visibility: internal visibility: internal
- name: hyperparameters - name: hyperparameters
displayName: Hyperparameters displayName: Hyperparameters
visibility: public visibility: public
type: textarea.textarea type: textarea.textarea
value: |- value: |-
stage_1_epochs: 1 # Epochs for network heads stage_1_epochs: 1 # Epochs for network heads
stage_2_epochs: 1 # Epochs for finetune layers stage_2_epochs: 1 # Epochs for finetune layers
stage_3_epochs: 1 # Epochs for all layers stage_3_epochs: 1 # Epochs for all layers
num_steps: 1000 # Num steps per epoch num_steps: 1000 # Num steps per epoch
hint: 'See <a href="https://docs.onepanel.ai/docs/reference/workflows/training#maskrcnn-hyperparameters" target="_blank">documentation</a> for more information on parameters.' hint: 'See <a href="https://docs.onepanel.ai/docs/reference/workflows/training#maskrcnn-hyperparameters" target="_blank">documentation</a> for more information on parameters.'
- name: dump-format - name: dump-format
value: cvat_coco value: cvat_coco
displayName: CVAT dump format displayName: CVAT dump format
visibility: private visibility: private
- name: cvat-finetune-checkpoint - name: cvat-finetune-checkpoint
value: '' value: ''
hint: Path to the last fine-tune checkpoint for this model in default object storage. Leave empty if this is the first time you're training this model. hint: Path to the last fine-tune checkpoint for this model in default object storage. Leave empty if this is the first time you're training this model.
displayName: Checkpoint path displayName: Checkpoint path
visibility: public visibility: public
- displayName: Node pool - displayName: Node pool
hint: Name of node pool or group to run this workflow task hint: Name of node pool or group to run this workflow task
type: select.nodepool type: select.nodepool
visibility: public visibility: public
name: sys-node-pool name: sys-node-pool
value: {{.DefaultNodePoolOption}} value: "{{.DefaultNodePoolOption}}"
required: true required: true
entrypoint: main entrypoint: main
templates: templates:
- dag: - dag:
tasks: tasks:
- name: preprocessing - name: preprocessing
template: preprocessing template: preprocessing
- name: train-model - name: train-model
template: tensorflow template: tensorflow
dependencies: [preprocessing] dependencies: [preprocessing]
arguments: arguments:
artifacts: artifacts:
- name: data - name: data
from: "{{tasks.preprocessing.outputs.artifacts.processed-data}}" from: "{{tasks.preprocessing.outputs.artifacts.processed-data}}"
name: main name: main
- container: - container:
args: args:
- | - |
pip install pycocotools scikit-image==0.16.2 && \ pip install pycocotools scikit-image==0.16.2 && \
cd /mnt/src/train/workflows/maskrcnn-training && \ cd /mnt/src/train/workflows/maskrcnn-training && \
python -u main.py train --dataset=/mnt/data/datasets/train_set/ \ python -u main.py train --dataset=/mnt/data/datasets/train_set/ \
--model=workflow_maskrcnn \ --model=workflow_maskrcnn \
--extras="{{workflow.parameters.hyperparameters}}" \ --extras="{{workflow.parameters.hyperparameters}}" \
--ref_model_path="{{workflow.parameters.cvat-finetune-checkpoint}}" \ --ref_model_path="{{workflow.parameters.cvat-finetune-checkpoint}}" \
--num_classes="{{workflow.parameters.cvat-num-classes}}" \ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
--val_dataset=/mnt/data/datasets/eval_set/ \ --val_dataset=/mnt/data/datasets/eval_set/ \
--use_validation=True --use_validation=True
command: command:
- sh - sh
- -c - -c
image: onepanel/dl:v0.20.0
volumeMounts:
- mountPath: /mnt/data
name: processed-data
- mountPath: /mnt/output
name: output
workingDir: /mnt/src
sidecars:
- name: tensorboard
image: onepanel/dl:v0.20.0 image: onepanel/dl:v0.20.0
command: [ sh, -c ] volumeMounts:
env: - mountPath: /mnt/data
- name: ONEPANEL_INTERACTIVE_SIDECAR name: processed-data
value: 'true' - mountPath: /mnt/output
args: [ "tensorboard --logdir /mnt/output/tensorboard" ] name: output
ports: workingDir: /mnt/src
- containerPort: 6006 sidecars:
name: tensorboard - name: tensorboard
nodeSelector: image: onepanel/dl:v0.20.0
{{.NodePoolLabel}}: '{{workflow.parameters.sys-node-pool}}' command: [ sh, -c ]
inputs: env:
artifacts: - name: ONEPANEL_INTERACTIVE_SIDECAR
- name: data value: 'true'
path: /mnt/data/datasets/ args: [ "tensorboard --logdir /mnt/output/tensorboard" ]
- name: models ports:
path: /mnt/data/models/ - containerPort: 6006
optional: true name: tensorboard
s3: nodeSelector:
key: '{{workflow.parameters.cvat-finetune-checkpoint}}' "{{.NodePoolLabel}}": '{{workflow.parameters.sys-node-pool}}'
- git: inputs:
repo: https://github.com/onepanelio/templates.git artifacts:
revision: v0.18.0 - name: data
name: src path: /mnt/data/datasets/
path: /mnt/src/train - name: models
name: tensorflow path: /mnt/data/models/
outputs: optional: true
artifacts: s3:
- name: model key: '{{workflow.parameters.cvat-finetune-checkpoint}}'
optional: true - git:
path: /mnt/output repo: https://github.com/onepanelio/templates.git
- container: revision: v0.18.0
args: name: src
- | path: /mnt/src/train
pip install pycocotools && \ name: tensorflow
cd /mnt/src/preprocessing/workflows/albumentations-preprocessing && \ outputs:
python -u main.py \ artifacts:
--data_aug_params="{{workflow.parameters.preprocessing-parameters}}" \ - name: model
--val_split={{workflow.parameters.val-split}} \ optional: true
--aug_steps={{workflow.parameters.num-augmentation-cycles}} path: /mnt/output
command: - container:
- sh args:
- -c - |
image: onepanel/dl:v0.20.0 pip install pycocotools && \
volumeMounts: cd /mnt/src/preprocessing/workflows/albumentations-preprocessing && \
- mountPath: /mnt/data python -u main.py \
name: data --data_aug_params="{{workflow.parameters.preprocessing-parameters}}" \
- mountPath: /mnt/output --val_split={{workflow.parameters.val-split}} \
name: processed-data --aug_steps={{workflow.parameters.num-augmentation-cycles}}
workingDir: /mnt/src command:
nodeSelector: - sh
{{.NodePoolLabel}}: '{{workflow.parameters.sys-node-pool}}' - -c
inputs: image: onepanel/dl:v0.20.0
artifacts: volumeMounts:
- name: data - mountPath: /mnt/data
path: /mnt/data/datasets/ name: data
s3: - mountPath: /mnt/output
key: '{{workflow.parameters.cvat-annotation-path}}' name: processed-data
- git: workingDir: /mnt/src
repo: https://github.com/onepanelio/templates.git nodeSelector:
revision: v0.18.0 "{{.NodePoolLabel}}": '{{workflow.parameters.sys-node-pool}}'
name: src inputs:
path: /mnt/src/preprocessing artifacts:
name: preprocessing - name: data
outputs: path: /mnt/data/datasets/
artifacts: s3:
- name: processed-data key: '{{workflow.parameters.cvat-annotation-path}}'
optional: true - git:
path: /mnt/output repo: https://github.com/onepanelio/templates.git
volumeClaimTemplates: revision: v0.18.0
- metadata: name: src
name: data path: /mnt/src/preprocessing
spec: name: preprocessing
accessModes: outputs:
- ReadWriteOnce artifacts:
resources: - name: processed-data
requests: optional: true
storage: 200Gi path: /mnt/output
- metadata: volumeClaimTemplates:
name: processed-data - metadata:
spec: name: data
accessModes: spec:
- ReadWriteOnce accessModes:
resources: - ReadWriteOnce
requests: resources:
storage: 200Gi requests:
- metadata: storage: 200Gi
name: output - metadata:
spec: name: processed-data
accessModes: spec:
- ReadWriteOnce accessModes:
resources: - ReadWriteOnce
requests: resources:
storage: 200Gi requests:
storage: 200Gi
- metadata:
name: output
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 200Gi

View File

@@ -1,75 +1,84 @@
entrypoint: main metadata:
arguments: name: "PyTorch Training"
parameters: kind: Workflow
- name: source version: 20200605090509
value: https://github.com/onepanelio/pytorch-examples.git action: create
- name: command labels:
value: "python mnist/main.py --epochs=1" "created-by": "system"
volumeClaimTemplates: framework: pytorch
- metadata: spec:
name: data entrypoint: main
spec: arguments:
accessModes: [ "ReadWriteOnce" ] parameters:
resources: - name: source
requests: value: https://github.com/onepanelio/pytorch-examples.git
storage: 2Gi - name: command
- metadata: value: "python mnist/main.py --epochs=1"
name: output volumeClaimTemplates:
spec: - metadata:
accessModes: [ "ReadWriteOnce" ] name: data
resources: spec:
requests: accessModes: [ "ReadWriteOnce" ]
storage: 2Gi resources:
templates: requests:
- name: main storage: 2Gi
dag: - metadata:
tasks: name: output
- name: train-model spec:
template: pytorch accessModes: [ "ReadWriteOnce" ]
# Uncomment section below to send metrics to Slack resources:
# - name: notify-in-slack requests:
# dependencies: [train-model] storage: 2Gi
# template: slack-notify-success templates:
# arguments: - name: main
# parameters: dag:
# - name: status tasks:
# value: "{{tasks.train-model.status}}" - name: train-model
# artifacts: template: pytorch
# - name: metrics # Uncomment section below to send metrics to Slack
# from: "{{tasks.train-model.outputs.artifacts.sys-metrics}}" # - name: notify-in-slack
- name: pytorch # dependencies: [train-model]
inputs: # template: slack-notify-success
artifacts: # arguments:
- name: src # parameters:
path: /mnt/src # - name: status
git: # value: "{{tasks.train-model.status}}"
repo: "{{workflow.parameters.source}}" # artifacts:
outputs: # - name: metrics
artifacts: # from: "{{tasks.train-model.outputs.artifacts.sys-metrics}}"
- name: model - name: pytorch
path: /mnt/output inputs:
optional: true artifacts:
archive: - name: src
none: {} path: /mnt/src
container: git:
image: pytorch/pytorch:latest repo: "{{workflow.parameters.source}}"
command: [sh,-c] outputs:
args: ["{{workflow.parameters.command}}"] artifacts:
workingDir: /mnt/src - name: model
volumeMounts: path: /mnt/output
- name: data optional: true
mountPath: /mnt/data archive:
- name: output none: {}
mountPath: /mnt/output container:
- name: slack-notify-success image: pytorch/pytorch:latest
container: command: [sh,-c]
image: technosophos/slack-notify args: ["{{workflow.parameters.command}}"]
command: [sh,-c] workingDir: /mnt/src
args: ['SLACK_USERNAME=Worker SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify'] volumeMounts:
inputs: - name: data
parameters: mountPath: /mnt/data
- name: status - name: output
artifacts: mountPath: /mnt/output
- name: metrics - name: slack-notify-success
path: /tmp/metrics.json container:
optional: true image: technosophos/slack-notify
command: [sh,-c]
args: ['SLACK_USERNAME=Worker SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify']
inputs:
parameters:
- name: status
artifacts:
- name: metrics
path: /tmp/metrics.json
optional: true

View File

@@ -1,207 +1,216 @@
# source: https://github.com/onepanelio/templates/blob/master/workflows/pytorch-mnist-training/ metadata:
arguments: name: "PyTorch Training"
parameters: kind: Workflow
- name: epochs version: 20201221194344
value: '10' action: update
- displayName: Node pool source: "https://github.com/onepanelio/templates/blob/master/workflows/pytorch-mnist-training/"
hint: Name of node pool or group to run this workflow task labels:
type: select.nodepool "created-by": "system"
name: sys-node-pool framework: pytorch
value: {{.DefaultNodePoolOption}} spec:
visibility: public arguments:
required: true parameters:
entrypoint: main - name: epochs
templates: value: '10'
- name: main - displayName: Node pool
dag: hint: Name of node pool or group to run this workflow task
tasks: type: select.nodepool
- name: train-model name: sys-node-pool
template: train-model value: "{{.DefaultNodePoolOption}}"
- name: train-model visibility: public
# Indicates that we want to push files in /mnt/output to object storage required: true
outputs: entrypoint: main
artifacts: templates:
- name: output - name: main
path: /mnt/output dag:
optional: true tasks:
script: - name: train-model
image: onepanel/dl:0.17.0 template: train-model
command: - name: train-model
- python # Indicates that we want to push files in /mnt/output to object storage
- '-u' outputs:
source: | artifacts:
import json - name: output
import torch path: /mnt/output
import torch.nn as nn optional: true
import torch.nn.functional as F script:
import torch.optim as optim image: onepanel/dl:0.17.0
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(model, device, train_loader, optimizer, epoch, batch_size, writer):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % 10 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
writer.add_scalar('training loss', loss.item(), epoch)
def test(model, device, test_loader, epoch, writer):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
loss = test_loss / len(test_loader.dataset)
accuracy = correct / len(test_loader.dataset)
print('\nTest set: Average loss: {}, Accuracy: {}\n'.format(
loss, accuracy))
# Store metrics for this task
metrics = [
{'name': 'accuracy', 'value': accuracy},
{'name': 'loss', 'value': loss}
]
with open('/tmp/sys-metrics.json', 'w') as f:
json.dump(metrics, f)
def main(params):
writer = SummaryWriter(log_dir='/mnt/output/tensorboard')
use_cuda = torch.cuda.is_available()
torch.manual_seed(params['seed'])
device = torch.device('cuda' if use_cuda else 'cpu')
train_kwargs = {'batch_size': params['batch_size']}
test_kwargs = {'batch_size': params['test_batch_size']}
if use_cuda:
cuda_kwargs = {'num_workers': 1,
'pin_memory': True,
'shuffle': True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
dataset1 = datasets.MNIST('/mnt/data', train=True, download=True,
transform=transform)
dataset2 = datasets.MNIST('/mnt/data', train=False,
transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=params['lr'])
scheduler = StepLR(optimizer, step_size=1, gamma=params['gamma'])
for epoch in range(1, params['epochs'] + 1):
train(model, device, train_loader, optimizer, epoch, params['batch_size'], writer)
test(model, device, test_loader, epoch, writer)
scheduler.step()
# Save model
torch.save(model.state_dict(), '/mnt/output/model.pt')
writer.close()
if __name__ == '__main__':
params = {
'seed': 1,
'batch_size': 64,
'test_batch_size': 1000,
'epochs': {{workflow.parameters.epochs}},
'lr': 0.001,
'gamma': 0.7,
}
main(params)
volumeMounts:
# TensorBoard sidecar will automatically mount these volumes
# The `data` volume is mounted for saving datasets
# The `output` volume is mounted to save model output and share TensorBoard logs
- name: data
mountPath: /mnt/data
- name: output
mountPath: /mnt/output
nodeSelector:
beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
sidecars:
- name: tensorboard
image: tensorflow/tensorflow:2.3.0
command: command:
- sh - python
- '-c' - '-u'
env: source: |
- name: ONEPANEL_INTERACTIVE_SIDECAR import json
value: 'true' import torch
args: import torch.nn as nn
# Read logs from /mnt/output - this directory is auto-mounted from volumeMounts import torch.nn.functional as F
- tensorboard --logdir /mnt/output/tensorboard import torch.optim as optim
ports: from torchvision import datasets, transforms
- containerPort: 6006 from torch.optim.lr_scheduler import StepLR
name: tensorboard from torch.utils.tensorboard import SummaryWriter
volumeClaimTemplates:
# Provision volumes for storing data and output
- metadata: class Net(nn.Module):
name: data def __init__(self):
spec: super(Net, self).__init__()
accessModes: [ "ReadWriteOnce" ] self.conv1 = nn.Conv2d(1, 32, 3, 1)
resources: self.conv2 = nn.Conv2d(32, 64, 3, 1)
requests: self.dropout1 = nn.Dropout(0.25)
storage: 2Gi self.dropout2 = nn.Dropout(0.5)
- metadata: self.fc1 = nn.Linear(9216, 128)
name: output self.fc2 = nn.Linear(128, 10)
spec:
accessModes: [ "ReadWriteOnce" ] def forward(self, x):
resources: x = self.conv1(x)
requests: x = F.relu(x)
storage: 2Gi x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(model, device, train_loader, optimizer, epoch, batch_size, writer):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % 10 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
writer.add_scalar('training loss', loss.item(), epoch)
def test(model, device, test_loader, epoch, writer):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
loss = test_loss / len(test_loader.dataset)
accuracy = correct / len(test_loader.dataset)
print('\nTest set: Average loss: {}, Accuracy: {}\n'.format(
loss, accuracy))
# Store metrics for this task
metrics = [
{'name': 'accuracy', 'value': accuracy},
{'name': 'loss', 'value': loss}
]
with open('/tmp/sys-metrics.json', 'w') as f:
json.dump(metrics, f)
def main(params):
writer = SummaryWriter(log_dir='/mnt/output/tensorboard')
use_cuda = torch.cuda.is_available()
torch.manual_seed(params['seed'])
device = torch.device('cuda' if use_cuda else 'cpu')
train_kwargs = {'batch_size': params['batch_size']}
test_kwargs = {'batch_size': params['test_batch_size']}
if use_cuda:
cuda_kwargs = {'num_workers': 1,
'pin_memory': True,
'shuffle': True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
dataset1 = datasets.MNIST('/mnt/data', train=True, download=True,
transform=transform)
dataset2 = datasets.MNIST('/mnt/data', train=False,
transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=params['lr'])
scheduler = StepLR(optimizer, step_size=1, gamma=params['gamma'])
for epoch in range(1, params['epochs'] + 1):
train(model, device, train_loader, optimizer, epoch, params['batch_size'], writer)
test(model, device, test_loader, epoch, writer)
scheduler.step()
# Save model
torch.save(model.state_dict(), '/mnt/output/model.pt')
writer.close()
if __name__ == '__main__':
params = {
'seed': 1,
'batch_size': 64,
'test_batch_size': 1000,
'epochs': {{workflow.parameters.epochs}},
'lr': 0.001,
'gamma': 0.7,
}
main(params)
volumeMounts:
# TensorBoard sidecar will automatically mount these volumes
# The `data` volume is mounted for saving datasets
# The `output` volume is mounted to save model output and share TensorBoard logs
- name: data
mountPath: /mnt/data
- name: output
mountPath: /mnt/output
nodeSelector:
beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
sidecars:
- name: tensorboard
image: tensorflow/tensorflow:2.3.0
command:
- sh
- '-c'
env:
- name: ONEPANEL_INTERACTIVE_SIDECAR
value: 'true'
args:
# Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
- tensorboard --logdir /mnt/output/tensorboard
ports:
- containerPort: 6006
name: tensorboard
volumeClaimTemplates:
# Provision volumes for storing data and output
- metadata:
name: data
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 2Gi
- metadata:
name: output
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 2Gi

View File

@@ -1,207 +1,216 @@
# source: https://github.com/onepanelio/templates/blob/master/workflows/pytorch-mnist-training/ metadata:
arguments: name: "PyTorch Training"
parameters: kind: Workflow
- name: epochs version: 20210118175809
value: '10' action: update
- displayName: Node pool source: "https://github.com/onepanelio/templates/blob/master/workflows/pytorch-mnist-training/"
hint: Name of node pool or group to run this workflow task labels:
type: select.nodepool "created-by": "system"
name: sys-node-pool framework: pytorch
value: {{.DefaultNodePoolOption}} spec:
visibility: public arguments:
required: true parameters:
entrypoint: main - name: epochs
templates: value: '10'
- name: main - displayName: Node pool
dag: hint: Name of node pool or group to run this workflow task
tasks: type: select.nodepool
- name: train-model name: sys-node-pool
template: train-model value: "{{.DefaultNodePoolOption}}"
- name: train-model visibility: public
# Indicates that we want to push files in /mnt/output to object storage required: true
outputs: entrypoint: main
artifacts: templates:
- name: output - name: main
path: /mnt/output dag:
optional: true tasks:
script: - name: train-model
image: onepanel/dl:0.17.0 template: train-model
command: - name: train-model
- python # Indicates that we want to push files in /mnt/output to object storage
- '-u' outputs:
source: | artifacts:
import json - name: output
import torch path: /mnt/output
import torch.nn as nn optional: true
import torch.nn.functional as F script:
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(model, device, train_loader, optimizer, epoch, batch_size, writer):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % 10 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
writer.add_scalar('training loss', loss.item(), epoch)
def test(model, device, test_loader, epoch, writer):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
loss = test_loss / len(test_loader.dataset)
accuracy = correct / len(test_loader.dataset)
print('\nTest set: Average loss: {}, Accuracy: {}\n'.format(
loss, accuracy))
# Store metrics for this task
metrics = [
{'name': 'accuracy', 'value': accuracy},
{'name': 'loss', 'value': loss}
]
with open('/tmp/sys-metrics.json', 'w') as f:
json.dump(metrics, f)
def main(params):
writer = SummaryWriter(log_dir='/mnt/output/tensorboard')
use_cuda = torch.cuda.is_available()
torch.manual_seed(params['seed'])
device = torch.device('cuda' if use_cuda else 'cpu')
train_kwargs = {'batch_size': params['batch_size']}
test_kwargs = {'batch_size': params['test_batch_size']}
if use_cuda:
cuda_kwargs = {'num_workers': 1,
'pin_memory': True,
'shuffle': True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
dataset1 = datasets.MNIST('/mnt/data', train=True, download=True,
transform=transform)
dataset2 = datasets.MNIST('/mnt/data', train=False,
transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=params['lr'])
scheduler = StepLR(optimizer, step_size=1, gamma=params['gamma'])
for epoch in range(1, params['epochs'] + 1):
train(model, device, train_loader, optimizer, epoch, params['batch_size'], writer)
test(model, device, test_loader, epoch, writer)
scheduler.step()
# Save model
torch.save(model.state_dict(), '/mnt/output/model.pt')
writer.close()
if __name__ == '__main__':
params = {
'seed': 1,
'batch_size': 64,
'test_batch_size': 1000,
'epochs': {{workflow.parameters.epochs}},
'lr': 0.001,
'gamma': 0.7,
}
main(params)
volumeMounts:
# TensorBoard sidecar will automatically mount these volumes
# The `data` volume is mounted for saving datasets
# The `output` volume is mounted to save model output and share TensorBoard logs
- name: data
mountPath: /mnt/data
- name: output
mountPath: /mnt/output
nodeSelector:
{{.NodePoolLabel}}: '{{workflow.parameters.sys-node-pool}}'
sidecars:
- name: tensorboard
image: onepanel/dl:0.17.0 image: onepanel/dl:0.17.0
command: command:
- sh - python
- '-c' - '-u'
env: source: |
- name: ONEPANEL_INTERACTIVE_SIDECAR import json
value: 'true' import torch
args: import torch.nn as nn
# Read logs from /mnt/output - this directory is auto-mounted from volumeMounts import torch.nn.functional as F
- tensorboard --logdir /mnt/output/tensorboard import torch.optim as optim
ports: from torchvision import datasets, transforms
- containerPort: 6006 from torch.optim.lr_scheduler import StepLR
name: tensorboard from torch.utils.tensorboard import SummaryWriter
volumeClaimTemplates:
# Provision volumes for storing data and output
- metadata: class Net(nn.Module):
name: data def __init__(self):
spec: super(Net, self).__init__()
accessModes: [ "ReadWriteOnce" ] self.conv1 = nn.Conv2d(1, 32, 3, 1)
resources: self.conv2 = nn.Conv2d(32, 64, 3, 1)
requests: self.dropout1 = nn.Dropout(0.25)
storage: 2Gi self.dropout2 = nn.Dropout(0.5)
- metadata: self.fc1 = nn.Linear(9216, 128)
name: output self.fc2 = nn.Linear(128, 10)
spec:
accessModes: [ "ReadWriteOnce" ] def forward(self, x):
resources: x = self.conv1(x)
requests: x = F.relu(x)
storage: 2Gi x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(model, device, train_loader, optimizer, epoch, batch_size, writer):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % 10 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
writer.add_scalar('training loss', loss.item(), epoch)
def test(model, device, test_loader, epoch, writer):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
loss = test_loss / len(test_loader.dataset)
accuracy = correct / len(test_loader.dataset)
print('\nTest set: Average loss: {}, Accuracy: {}\n'.format(
loss, accuracy))
# Store metrics for this task
metrics = [
{'name': 'accuracy', 'value': accuracy},
{'name': 'loss', 'value': loss}
]
with open('/tmp/sys-metrics.json', 'w') as f:
json.dump(metrics, f)
def main(params):
writer = SummaryWriter(log_dir='/mnt/output/tensorboard')
use_cuda = torch.cuda.is_available()
torch.manual_seed(params['seed'])
device = torch.device('cuda' if use_cuda else 'cpu')
train_kwargs = {'batch_size': params['batch_size']}
test_kwargs = {'batch_size': params['test_batch_size']}
if use_cuda:
cuda_kwargs = {'num_workers': 1,
'pin_memory': True,
'shuffle': True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
dataset1 = datasets.MNIST('/mnt/data', train=True, download=True,
transform=transform)
dataset2 = datasets.MNIST('/mnt/data', train=False,
transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=params['lr'])
scheduler = StepLR(optimizer, step_size=1, gamma=params['gamma'])
for epoch in range(1, params['epochs'] + 1):
train(model, device, train_loader, optimizer, epoch, params['batch_size'], writer)
test(model, device, test_loader, epoch, writer)
scheduler.step()
# Save model
torch.save(model.state_dict(), '/mnt/output/model.pt')
writer.close()
if __name__ == '__main__':
params = {
'seed': 1,
'batch_size': 64,
'test_batch_size': 1000,
'epochs': {{workflow.parameters.epochs}},
'lr': 0.001,
'gamma': 0.7,
}
main(params)
volumeMounts:
# TensorBoard sidecar will automatically mount these volumes
# The `data` volume is mounted for saving datasets
# The `output` volume is mounted to save model output and share TensorBoard logs
- name: data
mountPath: /mnt/data
- name: output
mountPath: /mnt/output
nodeSelector:
"{{.NodePoolLabel}}": '{{workflow.parameters.sys-node-pool}}'
sidecars:
- name: tensorboard
image: onepanel/dl:0.17.0
command:
- sh
- '-c'
env:
- name: ONEPANEL_INTERACTIVE_SIDECAR
value: 'true'
args:
# Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
- tensorboard --logdir /mnt/output/tensorboard
ports:
- containerPort: 6006
name: tensorboard
volumeClaimTemplates:
# Provision volumes for storing data and output
- metadata:
name: data
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 2Gi
- metadata:
name: output
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 2Gi

View File

@@ -1,207 +1,216 @@
# source: https://github.com/onepanelio/templates/blob/master/workflows/pytorch-mnist-training/ metadata:
arguments: name: "PyTorch Training"
parameters: kind: Workflow
- name: epochs version: 20210323175655
value: '10' action: update
- displayName: Node pool source: "https://github.com/onepanelio/templates/blob/master/workflows/pytorch-mnist-training/"
hint: Name of node pool or group to run this workflow task labels:
type: select.nodepool "created-by": "system"
name: sys-node-pool framework: pytorch
value: {{.DefaultNodePoolOption}} spec:
visibility: public arguments:
required: true parameters:
entrypoint: main - name: epochs
templates: value: '10'
- name: main - displayName: Node pool
dag: hint: Name of node pool or group to run this workflow task
tasks: type: select.nodepool
- name: train-model name: sys-node-pool
template: train-model value: "{{.DefaultNodePoolOption}}"
- name: train-model visibility: public
# Indicates that we want to push files in /mnt/output to object storage required: true
outputs: entrypoint: main
artifacts: templates:
- name: output - name: main
path: /mnt/output dag:
optional: true tasks:
script: - name: train-model
image: onepanel/dl:v0.20.0 template: train-model
command: - name: train-model
- python # Indicates that we want to push files in /mnt/output to object storage
- '-u' outputs:
source: | artifacts:
import json - name: output
import torch path: /mnt/output
import torch.nn as nn optional: true
import torch.nn.functional as F script:
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(model, device, train_loader, optimizer, epoch, batch_size, writer):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % 10 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
writer.add_scalar('training loss', loss.item(), epoch)
def test(model, device, test_loader, epoch, writer):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
loss = test_loss / len(test_loader.dataset)
accuracy = correct / len(test_loader.dataset)
print('\nTest set: Average loss: {}, Accuracy: {}\n'.format(
loss, accuracy))
# Store metrics for this task
metrics = [
{'name': 'accuracy', 'value': accuracy},
{'name': 'loss', 'value': loss}
]
with open('/mnt/tmp/sys-metrics.json', 'w') as f:
json.dump(metrics, f)
def main(params):
writer = SummaryWriter(log_dir='/mnt/output/tensorboard')
use_cuda = torch.cuda.is_available()
torch.manual_seed(params['seed'])
device = torch.device('cuda' if use_cuda else 'cpu')
train_kwargs = {'batch_size': params['batch_size']}
test_kwargs = {'batch_size': params['test_batch_size']}
if use_cuda:
cuda_kwargs = {'num_workers': 1,
'pin_memory': True,
'shuffle': True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
dataset1 = datasets.MNIST('/mnt/data', train=True, download=True,
transform=transform)
dataset2 = datasets.MNIST('/mnt/data', train=False,
transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=params['lr'])
scheduler = StepLR(optimizer, step_size=1, gamma=params['gamma'])
for epoch in range(1, params['epochs'] + 1):
train(model, device, train_loader, optimizer, epoch, params['batch_size'], writer)
test(model, device, test_loader, epoch, writer)
scheduler.step()
# Save model
torch.save(model.state_dict(), '/mnt/output/model.pt')
writer.close()
if __name__ == '__main__':
params = {
'seed': 1,
'batch_size': 64,
'test_batch_size': 1000,
'epochs': {{workflow.parameters.epochs}},
'lr': 0.001,
'gamma': 0.7,
}
main(params)
volumeMounts:
# TensorBoard sidecar will automatically mount these volumes
# The `data` volume is mounted for saving datasets
# The `output` volume is mounted to save model output and share TensorBoard logs
- name: data
mountPath: /mnt/data
- name: output
mountPath: /mnt/output
nodeSelector:
{{.NodePoolLabel}}: '{{workflow.parameters.sys-node-pool}}'
sidecars:
- name: tensorboard
image: onepanel/dl:v0.20.0 image: onepanel/dl:v0.20.0
command: command:
- sh - python
- '-c' - '-u'
env: source: |
- name: ONEPANEL_INTERACTIVE_SIDECAR import json
value: 'true' import torch
args: import torch.nn as nn
# Read logs from /mnt/output - this directory is auto-mounted from volumeMounts import torch.nn.functional as F
- tensorboard --logdir /mnt/output/tensorboard import torch.optim as optim
ports: from torchvision import datasets, transforms
- containerPort: 6006 from torch.optim.lr_scheduler import StepLR
name: tensorboard from torch.utils.tensorboard import SummaryWriter
volumeClaimTemplates:
# Provision volumes for storing data and output
- metadata: class Net(nn.Module):
name: data def __init__(self):
spec: super(Net, self).__init__()
accessModes: [ "ReadWriteOnce" ] self.conv1 = nn.Conv2d(1, 32, 3, 1)
resources: self.conv2 = nn.Conv2d(32, 64, 3, 1)
requests: self.dropout1 = nn.Dropout(0.25)
storage: 2Gi self.dropout2 = nn.Dropout(0.5)
- metadata: self.fc1 = nn.Linear(9216, 128)
name: output self.fc2 = nn.Linear(128, 10)
spec:
accessModes: [ "ReadWriteOnce" ] def forward(self, x):
resources: x = self.conv1(x)
requests: x = F.relu(x)
storage: 2Gi x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(model, device, train_loader, optimizer, epoch, batch_size, writer):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % 10 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
writer.add_scalar('training loss', loss.item(), epoch)
def test(model, device, test_loader, epoch, writer):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
loss = test_loss / len(test_loader.dataset)
accuracy = correct / len(test_loader.dataset)
print('\nTest set: Average loss: {}, Accuracy: {}\n'.format(
loss, accuracy))
# Store metrics for this task
metrics = [
{'name': 'accuracy', 'value': accuracy},
{'name': 'loss', 'value': loss}
]
with open('/mnt/tmp/sys-metrics.json', 'w') as f:
json.dump(metrics, f)
def main(params):
writer = SummaryWriter(log_dir='/mnt/output/tensorboard')
use_cuda = torch.cuda.is_available()
torch.manual_seed(params['seed'])
device = torch.device('cuda' if use_cuda else 'cpu')
train_kwargs = {'batch_size': params['batch_size']}
test_kwargs = {'batch_size': params['test_batch_size']}
if use_cuda:
cuda_kwargs = {'num_workers': 1,
'pin_memory': True,
'shuffle': True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
dataset1 = datasets.MNIST('/mnt/data', train=True, download=True,
transform=transform)
dataset2 = datasets.MNIST('/mnt/data', train=False,
transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=params['lr'])
scheduler = StepLR(optimizer, step_size=1, gamma=params['gamma'])
for epoch in range(1, params['epochs'] + 1):
train(model, device, train_loader, optimizer, epoch, params['batch_size'], writer)
test(model, device, test_loader, epoch, writer)
scheduler.step()
# Save model
torch.save(model.state_dict(), '/mnt/output/model.pt')
writer.close()
if __name__ == '__main__':
params = {
'seed': 1,
'batch_size': 64,
'test_batch_size': 1000,
'epochs': {{workflow.parameters.epochs}},
'lr': 0.001,
'gamma': 0.7,
}
main(params)
volumeMounts:
# TensorBoard sidecar will automatically mount these volumes
# The `data` volume is mounted for saving datasets
# The `output` volume is mounted to save model output and share TensorBoard logs
- name: data
mountPath: /mnt/data
- name: output
mountPath: /mnt/output
nodeSelector:
"{{.NodePoolLabel}}": '{{workflow.parameters.sys-node-pool}}'
sidecars:
- name: tensorboard
image: onepanel/dl:v0.20.0
command:
- sh
- '-c'
env:
- name: ONEPANEL_INTERACTIVE_SIDECAR
value: 'true'
args:
# Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
- tensorboard --logdir /mnt/output/tensorboard
ports:
- containerPort: 6006
name: tensorboard
volumeClaimTemplates:
# Provision volumes for storing data and output
- metadata:
name: data
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 2Gi
- metadata:
name: output
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 2Gi

View File

@@ -1,76 +1,85 @@
# source: https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/template.yaml metadata:
entrypoint: main name: "TensorFlow Training"
arguments: kind: Workflow
parameters: version: 20200605090535
- name: source action: create
value: https://github.com/onepanelio/tensorflow-examples.git source: "https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/template.yaml"
- name: command labels:
value: "python mnist/main.py --epochs=5" "created-by": "system"
volumeClaimTemplates: framework: tensorflow
- metadata: spec:
name: data entrypoint: main
spec: arguments:
accessModes: [ "ReadWriteOnce" ] parameters:
resources: - name: source
requests: value: https://github.com/onepanelio/tensorflow-examples.git
storage: 2Gi - name: command
- metadata: value: "python mnist/main.py --epochs=5"
name: output volumeClaimTemplates:
spec: - metadata:
accessModes: [ "ReadWriteOnce" ] name: data
resources: spec:
requests: accessModes: [ "ReadWriteOnce" ]
storage: 2Gi resources:
templates: requests:
- name: main storage: 2Gi
dag: - metadata:
tasks: name: output
- name: train-model spec:
template: pytorch accessModes: [ "ReadWriteOnce" ]
# Uncomment section below to send metrics to Slack resources:
# - name: notify-in-slack requests:
# dependencies: [train-model] storage: 2Gi
# template: slack-notify-success templates:
# arguments: - name: main
# parameters: dag:
# - name: status tasks:
# value: "{{tasks.train-model.status}}" - name: train-model
# artifacts: template: pytorch
# - name: metrics # Uncomment section below to send metrics to Slack
# from: "{{tasks.train-model.outputs.artifacts.sys-metrics}}" # - name: notify-in-slack
- name: pytorch # dependencies: [train-model]
inputs: # template: slack-notify-success
artifacts: # arguments:
- name: src # parameters:
path: /mnt/src # - name: status
git: # value: "{{tasks.train-model.status}}"
repo: "{{workflow.parameters.source}}" # artifacts:
outputs: # - name: metrics
artifacts: # from: "{{tasks.train-model.outputs.artifacts.sys-metrics}}"
- name: model - name: pytorch
path: /mnt/output inputs:
optional: true artifacts:
archive: - name: src
none: {} path: /mnt/src
container: git:
image: tensorflow/tensorflow:latest repo: "{{workflow.parameters.source}}"
command: [sh,-c] outputs:
args: ["{{workflow.parameters.command}}"] artifacts:
workingDir: /mnt/src - name: model
volumeMounts: path: /mnt/output
- name: data optional: true
mountPath: /mnt/data archive:
- name: output none: {}
mountPath: /mnt/output container:
- name: slack-notify-success image: tensorflow/tensorflow:latest
container: command: [sh,-c]
image: technosophos/slack-notify args: ["{{workflow.parameters.command}}"]
command: [sh,-c] workingDir: /mnt/src
args: ['SLACK_USERNAME=Worker SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify'] volumeMounts:
inputs: - name: data
parameters: mountPath: /mnt/data
- name: status - name: output
artifacts: mountPath: /mnt/output
- name: metrics - name: slack-notify-success
path: /tmp/metrics.json container:
optional: true image: technosophos/slack-notify
command: [sh,-c]
args: ['SLACK_USERNAME=Worker SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify']
inputs:
parameters:
- name: status
artifacts:
- name: metrics
path: /tmp/metrics.json
optional: true

View File

@@ -1,71 +1,80 @@
# source: https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/template.yaml metadata:
arguments: name: "TensorFlow Training"
parameters: kind: Workflow
- name: epochs version: 20201209124226
value: '10' action: update
entrypoint: main source: "https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/template.yaml"
templates: labels:
- name: main "created-by": "system"
dag: framework: tensorflow
tasks: spec:
- name: train-model arguments:
template: tf-dense parameters:
- name: tf-dense - name: epochs
script: value: '10'
image: tensorflow/tensorflow:2.3.0 entrypoint: main
command: templates:
- python - name: main
- '-u' dag:
source: | tasks:
import tensorflow as tf - name: train-model
import datetime template: tf-dense
mnist = tf.keras.datasets.mnist - name: tf-dense
(x_train, y_train),(x_test, y_test) = mnist.load_data() script:
x_train, x_test = x_train / 255.0, x_test / 255.0 image: tensorflow/tensorflow:2.3.0
def create_model():
return tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax')
])
model = create_model()
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Write logs to /mnt/output
log_dir = "/mnt/output/logs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
history = model.fit(x=x_train,
y=y_train,
epochs={{workflow.parameters.epochs}},
validation_data=(x_test, y_test),
callbacks=[tensorboard_callback])
volumeMounts:
# TensorBoard sidecar will automatically mount this volume
- name: output
mountPath: /mnt/output
sidecars:
- name: tensorboard
image: 'tensorflow/tensorflow:2.3.0'
command: command:
- sh - python
- '-c' - '-u'
env: source: |
- name: ONEPANEL_INTERACTIVE_SIDECAR import tensorflow as tf
value: 'true' import datetime
args: mnist = tf.keras.datasets.mnist
# Read logs from /mnt/output - this directory is auto-mounted from volumeMounts (x_train, y_train),(x_test, y_test) = mnist.load_data()
- tensorboard --logdir /mnt/output/ x_train, x_test = x_train / 255.0, x_test / 255.0
ports: def create_model():
- containerPort: 6006 return tf.keras.models.Sequential([
name: tensorboard tf.keras.layers.Flatten(input_shape=(28, 28)),
volumeClaimTemplates: tf.keras.layers.Dense(512, activation='relu'),
# Provision a volume that can be shared between main container and TensorBoard side car tf.keras.layers.Dropout(0.2),
- metadata: tf.keras.layers.Dense(10, activation='softmax')
name: output ])
spec: model = create_model()
accessModes: [ "ReadWriteOnce" ] model.compile(optimizer='adam',
resources: loss='sparse_categorical_crossentropy',
requests: metrics=['accuracy'])
storage: 2Gi # Write logs to /mnt/output
log_dir = "/mnt/output/logs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
history = model.fit(x=x_train,
y=y_train,
epochs={{workflow.parameters.epochs}},
validation_data=(x_test, y_test),
callbacks=[tensorboard_callback])
volumeMounts:
# TensorBoard sidecar will automatically mount this volume
- name: output
mountPath: /mnt/output
sidecars:
- name: tensorboard
image: 'tensorflow/tensorflow:2.3.0'
command:
- sh
- '-c'
env:
- name: ONEPANEL_INTERACTIVE_SIDECAR
value: 'true'
args:
# Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
- tensorboard --logdir /mnt/output/
ports:
- containerPort: 6006
name: tensorboard
volumeClaimTemplates:
# Provision a volume that can be shared between main container and TensorBoard side car
- metadata:
name: output
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 2Gi

View File

@@ -1,118 +1,127 @@
# source: https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/ metadata:
arguments: name: "TensorFlow Training"
parameters: kind: Workflow
- name: epochs version: 20201223062947
value: '10' action: update
- displayName: Node pool source: "https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/"
hint: Name of node pool or group to run this workflow task labels:
type: select.nodepool "created-by": "system"
name: sys-node-pool framework: tensorflow
value: {{.DefaultNodePoolOption}} spec:
visibility: public arguments:
required: true parameters:
entrypoint: main - name: epochs
templates: value: '10'
- name: main - displayName: Node pool
dag: hint: Name of node pool or group to run this workflow task
tasks: type: select.nodepool
- name: train-model name: sys-node-pool
template: train-model value: "{{.DefaultNodePoolOption}}"
- name: train-model visibility: public
# Indicates that we want to push files in /mnt/output to object storage required: true
outputs: entrypoint: main
artifacts: templates:
- name: output - name: main
path: /mnt/output dag:
optional: true tasks:
script: - name: train-model
image: onepanel/dl:0.17.0 template: train-model
command: - name: train-model
- python # Indicates that we want to push files in /mnt/output to object storage
- '-u' outputs:
source: | artifacts:
import json - name: output
import tensorflow as tf path: /mnt/output
optional: true
mnist = tf.keras.datasets.mnist script:
image: onepanel/dl:0.17.0
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train = x_train[..., tf.newaxis]
x_test = x_test[..., tf.newaxis]
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(filters=32, kernel_size=5, activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=2),
tf.keras.layers.Conv2D(filters=64, kernel_size=5, activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=124, activation='relu'),
tf.keras.layers.Dropout(rate=0.75),
tf.keras.layers.Dense(units=10, activation='softmax')
])
model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Write TensorBoard logs to /mnt/output
log_dir = '/mnt/output/tensorboard/'
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model.fit(x=x_train,
y=y_train,
epochs={{workflow.parameters.epochs}},
validation_data=(x_test, y_test),
callbacks=[tensorboard_callback])
# Store metrics for this task
loss, accuracy = model.evaluate(x_test, y_test)
metrics = [
{'name': 'accuracy', 'value': accuracy},
{'name': 'loss', 'value': loss}
]
with open('/tmp/sys-metrics.json', 'w') as f:
json.dump(metrics, f)
# Save model
model.save('/mnt/output/model.h5')
volumeMounts:
# TensorBoard sidecar will automatically mount these volumes
# The `data` volume is mounted to support Keras datasets
# The `output` volume is mounted to save model output and share TensorBoard logs
- name: data
mountPath: /home/root/.keras/datasets
- name: output
mountPath: /mnt/output
nodeSelector:
beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
sidecars:
- name: tensorboard
image: tensorflow/tensorflow:2.3.0
command: command:
- sh - python
- '-c' - '-u'
env: source: |
- name: ONEPANEL_INTERACTIVE_SIDECAR import json
value: 'true' import tensorflow as tf
args:
# Read logs from /mnt/output - this directory is auto-mounted from volumeMounts mnist = tf.keras.datasets.mnist
- tensorboard --logdir /mnt/output/tensorboard
ports: (x_train, y_train),(x_test, y_test) = mnist.load_data()
- containerPort: 6006 x_train, x_test = x_train / 255.0, x_test / 255.0
name: tensorboard x_train = x_train[..., tf.newaxis]
volumeClaimTemplates: x_test = x_test[..., tf.newaxis]
# Provision volumes for storing data and output
- metadata: model = tf.keras.Sequential([
name: data tf.keras.layers.Conv2D(filters=32, kernel_size=5, activation='relu'),
spec: tf.keras.layers.MaxPool2D(pool_size=2),
accessModes: [ "ReadWriteOnce" ] tf.keras.layers.Conv2D(filters=64, kernel_size=5, activation='relu'),
resources: tf.keras.layers.MaxPool2D(pool_size=2),
requests: tf.keras.layers.Flatten(),
storage: 2Gi tf.keras.layers.Dense(units=124, activation='relu'),
- metadata: tf.keras.layers.Dropout(rate=0.75),
name: output tf.keras.layers.Dense(units=10, activation='softmax')
spec: ])
accessModes: [ "ReadWriteOnce" ] model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001),
resources: loss='sparse_categorical_crossentropy',
requests: metrics=['accuracy'])
storage: 2Gi
# Write TensorBoard logs to /mnt/output
log_dir = '/mnt/output/tensorboard/'
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model.fit(x=x_train,
y=y_train,
epochs={{workflow.parameters.epochs}},
validation_data=(x_test, y_test),
callbacks=[tensorboard_callback])
# Store metrics for this task
loss, accuracy = model.evaluate(x_test, y_test)
metrics = [
{'name': 'accuracy', 'value': accuracy},
{'name': 'loss', 'value': loss}
]
with open('/tmp/sys-metrics.json', 'w') as f:
json.dump(metrics, f)
# Save model
model.save('/mnt/output/model.h5')
volumeMounts:
# TensorBoard sidecar will automatically mount these volumes
# The `data` volume is mounted to support Keras datasets
# The `output` volume is mounted to save model output and share TensorBoard logs
- name: data
mountPath: /home/root/.keras/datasets
- name: output
mountPath: /mnt/output
nodeSelector:
beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
sidecars:
- name: tensorboard
image: tensorflow/tensorflow:2.3.0
command:
- sh
- '-c'
env:
- name: ONEPANEL_INTERACTIVE_SIDECAR
value: 'true'
args:
# Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
- tensorboard --logdir /mnt/output/tensorboard
ports:
- containerPort: 6006
name: tensorboard
volumeClaimTemplates:
# Provision volumes for storing data and output
- metadata:
name: data
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 2Gi
- metadata:
name: output
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 2Gi

View File

@@ -1,118 +1,127 @@
# source: https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/ metadata:
arguments: name: "TensorFlow Training"
parameters: kind: Workflow
- name: epochs version: 20210118175809
value: '10' action: update
- displayName: Node pool source: "https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/"
hint: Name of node pool or group to run this workflow task labels:
type: select.nodepool "created-by": "system"
name: sys-node-pool framework: tensorflow
value: {{.DefaultNodePoolOption}} spec:
visibility: public arguments:
required: true parameters:
entrypoint: main - name: epochs
templates: value: '10'
- name: main - displayName: Node pool
dag: hint: Name of node pool or group to run this workflow task
tasks: type: select.nodepool
- name: train-model name: sys-node-pool
template: train-model value: "{{.DefaultNodePoolOption}}"
- name: train-model visibility: public
# Indicates that we want to push files in /mnt/output to object storage required: true
outputs: entrypoint: main
artifacts: templates:
- name: output - name: main
path: /mnt/output dag:
optional: true tasks:
script: - name: train-model
image: onepanel/dl:0.17.0 template: train-model
command: - name: train-model
- python # Indicates that we want to push files in /mnt/output to object storage
- '-u' outputs:
source: | artifacts:
import json - name: output
import tensorflow as tf path: /mnt/output
optional: true
mnist = tf.keras.datasets.mnist script:
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train = x_train[..., tf.newaxis]
x_test = x_test[..., tf.newaxis]
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(filters=32, kernel_size=5, activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=2),
tf.keras.layers.Conv2D(filters=64, kernel_size=5, activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=124, activation='relu'),
tf.keras.layers.Dropout(rate=0.75),
tf.keras.layers.Dense(units=10, activation='softmax')
])
model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Write TensorBoard logs to /mnt/output
log_dir = '/mnt/output/tensorboard/'
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model.fit(x=x_train,
y=y_train,
epochs={{workflow.parameters.epochs}},
validation_data=(x_test, y_test),
callbacks=[tensorboard_callback])
# Store metrics for this task
loss, accuracy = model.evaluate(x_test, y_test)
metrics = [
{'name': 'accuracy', 'value': accuracy},
{'name': 'loss', 'value': loss}
]
with open('/tmp/sys-metrics.json', 'w') as f:
json.dump(metrics, f)
# Save model
model.save('/mnt/output/model.h5')
volumeMounts:
# TensorBoard sidecar will automatically mount these volumes
# The `data` volume is mounted to support Keras datasets
# The `output` volume is mounted to save model output and share TensorBoard logs
- name: data
mountPath: /home/root/.keras/datasets
- name: output
mountPath: /mnt/output
nodeSelector:
{{.NodePoolLabel}}: '{{workflow.parameters.sys-node-pool}}'
sidecars:
- name: tensorboard
image: onepanel/dl:0.17.0 image: onepanel/dl:0.17.0
command: command:
- sh - python
- '-c' - '-u'
env: source: |
- name: ONEPANEL_INTERACTIVE_SIDECAR import json
value: 'true' import tensorflow as tf
args:
# Read logs from /mnt/output - this directory is auto-mounted from volumeMounts mnist = tf.keras.datasets.mnist
- tensorboard --logdir /mnt/output/tensorboard
ports: (x_train, y_train),(x_test, y_test) = mnist.load_data()
- containerPort: 6006 x_train, x_test = x_train / 255.0, x_test / 255.0
name: tensorboard x_train = x_train[..., tf.newaxis]
volumeClaimTemplates: x_test = x_test[..., tf.newaxis]
# Provision volumes for storing data and output
- metadata: model = tf.keras.Sequential([
name: data tf.keras.layers.Conv2D(filters=32, kernel_size=5, activation='relu'),
spec: tf.keras.layers.MaxPool2D(pool_size=2),
accessModes: [ "ReadWriteOnce" ] tf.keras.layers.Conv2D(filters=64, kernel_size=5, activation='relu'),
resources: tf.keras.layers.MaxPool2D(pool_size=2),
requests: tf.keras.layers.Flatten(),
storage: 2Gi tf.keras.layers.Dense(units=124, activation='relu'),
- metadata: tf.keras.layers.Dropout(rate=0.75),
name: output tf.keras.layers.Dense(units=10, activation='softmax')
spec: ])
accessModes: [ "ReadWriteOnce" ] model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001),
resources: loss='sparse_categorical_crossentropy',
requests: metrics=['accuracy'])
storage: 2Gi
# Write TensorBoard logs to /mnt/output
log_dir = '/mnt/output/tensorboard/'
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model.fit(x=x_train,
y=y_train,
epochs={{workflow.parameters.epochs}},
validation_data=(x_test, y_test),
callbacks=[tensorboard_callback])
# Store metrics for this task
loss, accuracy = model.evaluate(x_test, y_test)
metrics = [
{'name': 'accuracy', 'value': accuracy},
{'name': 'loss', 'value': loss}
]
with open('/tmp/sys-metrics.json', 'w') as f:
json.dump(metrics, f)
# Save model
model.save('/mnt/output/model.h5')
volumeMounts:
# TensorBoard sidecar will automatically mount these volumes
# The `data` volume is mounted to support Keras datasets
# The `output` volume is mounted to save model output and share TensorBoard logs
- name: data
mountPath: /home/root/.keras/datasets
- name: output
mountPath: /mnt/output
nodeSelector:
"{{.NodePoolLabel}}": '{{workflow.parameters.sys-node-pool}}'
sidecars:
- name: tensorboard
image: onepanel/dl:0.17.0
command:
- sh
- '-c'
env:
- name: ONEPANEL_INTERACTIVE_SIDECAR
value: 'true'
args:
# Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
- tensorboard --logdir /mnt/output/tensorboard
ports:
- containerPort: 6006
name: tensorboard
volumeClaimTemplates:
# Provision volumes for storing data and output
- metadata:
name: data
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 2Gi
- metadata:
name: output
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 2Gi

View File

@@ -1,118 +1,127 @@
# source: https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/ metadata:
arguments: name: "TensorFlow Training"
parameters: kind: Workflow
- name: epochs version: 20210323175655
value: '10' action: update
- displayName: Node pool source: "https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/"
hint: Name of node pool or group to run this workflow task labels:
type: select.nodepool "created-by": "system"
name: sys-node-pool framework: tensorflow
value: {{.DefaultNodePoolOption}} spec:
visibility: public arguments:
required: true parameters:
entrypoint: main - name: epochs
templates: value: '10'
- name: main - displayName: Node pool
dag: hint: Name of node pool or group to run this workflow task
tasks: type: select.nodepool
- name: train-model name: sys-node-pool
template: train-model value: "{{.DefaultNodePoolOption}}"
- name: train-model visibility: public
# Indicates that we want to push files in /mnt/output to object storage required: true
outputs: entrypoint: main
artifacts: templates:
- name: output - name: main
path: /mnt/output dag:
optional: true tasks:
script: - name: train-model
image: onepanel/dl:v0.20.0 template: train-model
command: - name: train-model
- python # Indicates that we want to push files in /mnt/output to object storage
- '-u' outputs:
source: | artifacts:
import json - name: output
import tensorflow as tf path: /mnt/output
optional: true
mnist = tf.keras.datasets.mnist script:
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train = x_train[..., tf.newaxis]
x_test = x_test[..., tf.newaxis]
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(filters=32, kernel_size=5, activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=2),
tf.keras.layers.Conv2D(filters=64, kernel_size=5, activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=124, activation='relu'),
tf.keras.layers.Dropout(rate=0.75),
tf.keras.layers.Dense(units=10, activation='softmax')
])
model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Write TensorBoard logs to /mnt/output
log_dir = '/mnt/output/tensorboard/'
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model.fit(x=x_train,
y=y_train,
epochs={{workflow.parameters.epochs}},
validation_data=(x_test, y_test),
callbacks=[tensorboard_callback])
# Store metrics for this task
loss, accuracy = model.evaluate(x_test, y_test)
metrics = [
{'name': 'accuracy', 'value': accuracy},
{'name': 'loss', 'value': loss}
]
with open('/mnt/tmp/sys-metrics.json', 'w') as f:
json.dump(metrics, f)
# Save model
model.save('/mnt/output/model.h5')
volumeMounts:
# TensorBoard sidecar will automatically mount these volumes
# The `data` volume is mounted to support Keras datasets
# The `output` volume is mounted to save model output and share TensorBoard logs
- name: data
mountPath: /home/root/.keras/datasets
- name: output
mountPath: /mnt/output
nodeSelector:
{{.NodePoolLabel}}: '{{workflow.parameters.sys-node-pool}}'
sidecars:
- name: tensorboard
image: onepanel/dl:v0.20.0 image: onepanel/dl:v0.20.0
command: command:
- sh - python
- '-c' - '-u'
env: source: |
- name: ONEPANEL_INTERACTIVE_SIDECAR import json
value: 'true' import tensorflow as tf
args:
# Read logs from /mnt/output - this directory is auto-mounted from volumeMounts mnist = tf.keras.datasets.mnist
- tensorboard --logdir /mnt/output/tensorboard
ports: (x_train, y_train),(x_test, y_test) = mnist.load_data()
- containerPort: 6006 x_train, x_test = x_train / 255.0, x_test / 255.0
name: tensorboard x_train = x_train[..., tf.newaxis]
volumeClaimTemplates: x_test = x_test[..., tf.newaxis]
# Provision volumes for storing data and output
- metadata: model = tf.keras.Sequential([
name: data tf.keras.layers.Conv2D(filters=32, kernel_size=5, activation='relu'),
spec: tf.keras.layers.MaxPool2D(pool_size=2),
accessModes: [ "ReadWriteOnce" ] tf.keras.layers.Conv2D(filters=64, kernel_size=5, activation='relu'),
resources: tf.keras.layers.MaxPool2D(pool_size=2),
requests: tf.keras.layers.Flatten(),
storage: 2Gi tf.keras.layers.Dense(units=124, activation='relu'),
- metadata: tf.keras.layers.Dropout(rate=0.75),
name: output tf.keras.layers.Dense(units=10, activation='softmax')
spec: ])
accessModes: [ "ReadWriteOnce" ] model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001),
resources: loss='sparse_categorical_crossentropy',
requests: metrics=['accuracy'])
storage: 2Gi
# Write TensorBoard logs to /mnt/output
log_dir = '/mnt/output/tensorboard/'
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model.fit(x=x_train,
y=y_train,
epochs={{workflow.parameters.epochs}},
validation_data=(x_test, y_test),
callbacks=[tensorboard_callback])
# Store metrics for this task
loss, accuracy = model.evaluate(x_test, y_test)
metrics = [
{'name': 'accuracy', 'value': accuracy},
{'name': 'loss', 'value': loss}
]
with open('/mnt/tmp/sys-metrics.json', 'w') as f:
json.dump(metrics, f)
# Save model
model.save('/mnt/output/model.h5')
volumeMounts:
# TensorBoard sidecar will automatically mount these volumes
# The `data` volume is mounted to support Keras datasets
# The `output` volume is mounted to save model output and share TensorBoard logs
- name: data
mountPath: /home/root/.keras/datasets
- name: output
mountPath: /mnt/output
nodeSelector:
"{{.NodePoolLabel}}": '{{workflow.parameters.sys-node-pool}}'
sidecars:
- name: tensorboard
image: onepanel/dl:v0.20.0
command:
- sh
- '-c'
env:
- name: ONEPANEL_INTERACTIVE_SIDECAR
value: 'true'
args:
# Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
- tensorboard --logdir /mnt/output/tensorboard
ports:
- containerPort: 6006
name: tensorboard
volumeClaimTemplates:
# Provision volumes for storing data and output
- metadata:
name: data
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 2Gi
- metadata:
name: output
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 2Gi

View File

@@ -0,0 +1,221 @@
# Onepanel workflow-template migration manifest: registers ("action: create")
# version 20200812104328 of the "TF Object Detection Training" template.
# The "used-by: cvat" label ties this template to the CVAT annotation tool.
# NOTE(review): two templating layers appear to be in play — '{{workflow.parameters.*}}'
# is substituted by Argo at run time, while "{{.ArtifactRepositoryType}}" and the
# backtick splice inside the shell script below look like Go text/template
# placeholders filled in before the YAML is applied; confirm against the loader.
metadata:
  name: "TF Object Detection Training"
  kind: Workflow
  version: 20200812104328
  action: create
  source: "https://github.com/onepanelio/templates/blob/master/workflows/tf-object-detection-training/"
  labels:
    "created-by": "system"
    "used-by": "cvat"
spec:
  arguments:
    # Parameters surfaced to the user when launching the workflow.
    # NOTE(review): 'type: hidden' / 'visibility: private' presumably hide a
    # field from the launch UI — confirm against the UI renderer.
    parameters:
      - name: source
        value: https://github.com/tensorflow/models.git
        displayName: Model source code
        type: hidden
        visibility: private
      - name: trainingsource
        value: https://github.com/onepanelio/cvat-training.git
        type: hidden
        visibility: private
      - name: revision
        value: v1.13.0
        type: hidden
        visibility: private
      - name: sys-annotation-path
        value: annotation-dump/sample_dataset
        displayName: Dataset path
        hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
      - name: sys-output-path
        value: workflow-data/output/sample_output
        hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
        displayName: Workflow output path
        visibility: private
      - name: ref-model
        value: frcnn-res50-coco
        displayName: Model
        hint: TF Detection API's model to use for training.
        type: select.select
        visibility: public
        options:
          - name: 'Faster RCNN-ResNet 101-COCO'
            value: frcnn-res101-coco
          - name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
            value: frcnn-res101-low
          - name: 'Faster RCNN-ResNet 50-COCO'
            value: frcnn-res50-coco
          - name: 'Faster RCNN-NAS-COCO'
            value: frcnn-nas-coco
          - name: 'SSD MobileNet V1-COCO'
            value: ssd-mobilenet-v1-coco2
          - name: 'SSD MobileNet V2-COCO'
            value: ssd-mobilenet-v2-coco
          - name: 'SSDLite MobileNet-COCO'
            value: ssdlite-mobilenet-coco
      - name: extras
        value: |-
          epochs=1000
        displayName: Hyperparameters
        visibility: public
        type: textarea.textarea
        hint: "Please refer to our <a href='https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_annotation_model#arguments-optional' target='_blank'>documentation</a> for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
      - name: sys-finetune-checkpoint
        value: ''
        hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
        displayName: Checkpoint path
        visibility: public
      - name: sys-num-classes
        value: '81'
        hint: Number of classes
        displayName: Number of classes
        visibility: private
      - name: tf-image
        value: tensorflow/tensorflow:1.13.1-py3
        type: select.select
        displayName: Select TensorFlow image
        visibility: public
        hint: Select the GPU image if you are running on a GPU node pool
        options:
          - name: 'TensorFlow 1.13.1 CPU Image'
            value: 'tensorflow/tensorflow:1.13.1-py3'
          - name: 'TensorFlow 1.13.1 GPU Image'
            value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
      # Node-pool choices are hard-coded machine sizes in this version
      # (the names appear to be Azure VM sizes); later template versions in
      # this repo replace them with a configurable node-pool selector.
      - displayName: Node pool
        hint: Name of node pool or group to run this workflow task
        type: select.select
        name: sys-node-pool
        value: Standard_D4s_v3
        visibility: public
        required: true
        options:
          - name: 'CPU: 2, RAM: 8GB'
            value: Standard_D2s_v3
          - name: 'CPU: 4, RAM: 16GB'
            value: Standard_D4s_v3
          - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
            value: Standard_NC6
      - name: dump-format
        value: cvat_tfrecord
        visibility: public
  entrypoint: main
  templates:
    # Entry DAG: a single training task (Slack notification step kept
    # commented out for users to enable).
    - dag:
        tasks:
          - name: train-model
            template: tensorflow
        # Uncomment the lines below if you want to send Slack notifications
        # - arguments:
        #     artifacts:
        #       - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
        #         name: metrics
        #     parameters:
        #       - name: status
        #         value: '{{tasks.train-model.status}}'
        #   dependencies:
        #     - train-model
        #   name: notify-in-slack
        #   template: slack-notify-success
      name: main
    # Training step: installs the TF Object Detection API dependencies,
    # extends PYTHONPATH with the research/slim dirs, then runs
    # convert_workflow.py with the user-supplied model/hyperparameters.
    - container:
        args:
          - |
            apt-get update && \
            apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
            pip install pillow lxml Cython contextlib2 jupyter matplotlib numpy scipy boto3 pycocotools pyyaml google-cloud-storage && \
            cd /mnt/src/tf/research && \
            export PYTHONPATH=$PYTHONPATH:` + "`pwd`:`pwd`/slim" + ` && \
            cd /mnt/src/train && \
            python convert_workflow.py \
              --extras="{{workflow.parameters.extras}}" \
              --model="{{workflow.parameters.ref-model}}" \
              --num_classes="{{workflow.parameters.sys-num-classes}}" \
              --sys_finetune_checkpoint={{workflow.parameters.sys-finetune-checkpoint}}
        command:
          - sh
          - -c
        image: '{{workflow.parameters.tf-image}}'
        volumeMounts:
          - mountPath: /mnt/data
            name: data
          - mountPath: /mnt/output
            name: output
        workingDir: /mnt/src
      # 'beta.kubernetes.io/instance-type' is the legacy instance-type label;
      # later versions of this template (elsewhere in this file) switch to a
      # configurable "{{.NodePoolLabel}}".
      nodeSelector:
        beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
      inputs:
        artifacts:
          # Annotated dataset and optional fine-tune checkpoint are pulled
          # from the default artifact repository (type injected via template).
          - name: data
            path: /mnt/data/datasets/
            "{{.ArtifactRepositoryType}}":
              key: '{{workflow.namespace}}/{{workflow.parameters.sys-annotation-path}}'
          - name: models
            path: /mnt/data/models/
            optional: true
            "{{.ArtifactRepositoryType}}":
              key: '{{workflow.namespace}}/{{workflow.parameters.sys-finetune-checkpoint}}'
          # Source checkouts: TF models repo and the CVAT training glue code.
          - git:
              repo: '{{workflow.parameters.source}}'
              revision: '{{workflow.parameters.revision}}'
            name: src
            path: /mnt/src/tf
          - git:
              repo: '{{workflow.parameters.trainingsource}}'
              revision: 'optional-artifacts'
            name: tsrc
            path: /mnt/src/train
      name: tensorflow
      outputs:
        artifacts:
          # Trained model pushed back to object storage under the user-chosen
          # output path.
          - name: model
            optional: true
            path: /mnt/output
            "{{.ArtifactRepositoryType}}":
              key: '{{workflow.namespace}}/{{workflow.parameters.sys-output-path}}'
    # Uncomment the lines below if you want to send Slack notifications
    #- container:
    #    args:
    #      - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
    #        SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
    #        SLACK_MESSAGE=$(cat /tmp/metrics.json) ./slack-notify
    #    command:
    #      - sh
    #      - -c
    #    image: technosophos/slack-notify
    #    inputs:
    #      artifacts:
    #        - name: metrics
    #          optional: true
    #          path: /tmp/metrics.json
    #      parameters:
    #        - name: status
    #    name: slack-notify-success
  # Scratch volumes (200Gi each) for dataset/model inputs and training output.
  volumeClaimTemplates:
    - metadata:
        creationTimestamp: null
        name: data
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 200Gi
    - metadata:
        creationTimestamp: null
        name: output
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 200Gi

View File

@@ -0,0 +1,222 @@
metadata:
name: "TF Object Detection Training"
kind: Workflow
version: 20200824101019
action: update
source: "https://github.com/onepanelio/templates/blob/master/workflows/tf-object-detection-training/"
labels:
"created-by": "system"
"used-by": "cvat"
spec:
arguments:
parameters:
- name: source
value: https://github.com/tensorflow/models.git
displayName: Model source code
type: hidden
visibility: private
- name: trainingsource
value: https://github.com/onepanelio/cvat-training.git
type: hidden
visibility: private
- name: revision
value: v1.13.0
type: hidden
visibility: private
- name: cvat-annotation-path
value: annotation-dump/sample_dataset
displayName: Dataset path
hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
visibility: private
- name: cvat-output-path
value: workflow-data/output/sample_output
hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
displayName: Workflow output path
visibility: private
- name: cvat-model
value: frcnn-res50-coco
displayName: Model
hint: TF Detection API's model to use for training.
type: select.select
visibility: public
options:
- name: 'Faster RCNN-ResNet 101-COCO'
value: frcnn-res101-coco
- name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
value: frcnn-res101-low
- name: 'Faster RCNN-ResNet 50-COCO'
value: frcnn-res50-coco
- name: 'Faster RCNN-NAS-COCO'
value: frcnn-nas-coco
- name: 'SSD MobileNet V1-COCO'
value: ssd-mobilenet-v1-coco2
- name: 'SSD MobileNet V2-COCO'
value: ssd-mobilenet-v2-coco
- name: 'SSDLite MobileNet-COCO'
value: ssdlite-mobilenet-coco
- name: hyperparameters
value: |-
num-steps=10000
displayName: Hyperparameters
visibility: public
type: textarea.textarea
hint: "Please refer to our <a href='https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_annotation_model#arguments-optional' target='_blank'>documentation</a> for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
- name: cvat-finetune-checkpoint
value: ''
hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
displayName: Checkpoint path
visibility: public
- name: cvat-num-classes
value: '81'
hint: Number of classes
displayName: Number of classes
visibility: private
- name: tf-image
value: tensorflow/tensorflow:1.13.1-py3
type: select.select
displayName: Select TensorFlow image
visibility: public
hint: Select the GPU image if you are running on a GPU node pool
options:
- name: 'TensorFlow 1.13.1 CPU Image'
value: 'tensorflow/tensorflow:1.13.1-py3'
- name: 'TensorFlow 1.13.1 GPU Image'
value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- displayName: Node pool
hint: Name of node pool or group to run this workflow task
type: select.select
name: sys-node-pool
value: Standard_D4s_v3
visibility: public
required: true
options:
- name: 'CPU: 2, RAM: 8GB'
value: Standard_D2s_v3
- name: 'CPU: 4, RAM: 16GB'
value: Standard_D4s_v3
- name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
value: Standard_NC6
- name: dump-format
value: cvat_tfrecord
visibility: public
entrypoint: main
templates:
- dag:
tasks:
- name: train-model
template: tensorflow
# Uncomment the lines below if you want to send Slack notifications
# - arguments:
# artifacts:
# - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
# name: metrics
# parameters:
# - name: status
# value: '{{tasks.train-model.status}}'
# dependencies:
# - train-model
# name: notify-in-slack
# template: slack-notify-success
name: main
- container:
args:
- |
apt-get update && \
apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
pip install pillow lxml Cython contextlib2 jupyter matplotlib numpy scipy boto3 pycocotools pyyaml google-cloud-storage && \
cd /mnt/src/tf/research && \
export PYTHONPATH=$PYTHONPATH:` + "`pwd`:`pwd`" + `/slim && \
cd /mnt/src/train && \
python convert_workflow.py \
--extras="{{workflow.parameters.hyperparameters}}" \
--model="{{workflow.parameters.cvat-model}}" \
--num_classes="{{workflow.parameters.cvat-num-classes}}" \
--sys_finetune_checkpoint={{workflow.parameters.cvat-finetune-checkpoint}}
command:
- sh
- -c
image: '{{workflow.parameters.tf-image}}'
volumeMounts:
- mountPath: /mnt/data
name: data
- mountPath: /mnt/output
name: output
workingDir: /mnt/src
nodeSelector:
beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
inputs:
artifacts:
- name: data
path: /mnt/data/datasets/
"{{.ArtifactRepositoryType}}":
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
- name: models
path: /mnt/data/models/
optional: true
"{{.ArtifactRepositoryType}}":
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-finetune-checkpoint}}'
- git:
repo: '{{workflow.parameters.source}}'
revision: '{{workflow.parameters.revision}}'
name: src
path: /mnt/src/tf
- git:
repo: '{{workflow.parameters.trainingsource}}'
revision: 'optional-artifacts'
name: tsrc
path: /mnt/src/train
name: tensorflow
outputs:
artifacts:
- name: model
optional: true
path: /mnt/output
"{{.ArtifactRepositoryType}}":
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
# Uncomment the lines below if you want to send Slack notifications
#- container:
# args:
# - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
# SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
# SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify
# command:
# - sh
# - -c
# image: technosophos/slack-notify
# inputs:
# artifacts:
# - name: metrics
# optional: true
# path: /tmp/metrics.json
# parameters:
# - name: status
# name: slack-notify-success
volumeClaimTemplates:
- metadata:
creationTimestamp: null
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 200Gi
- metadata:
creationTimestamp: null
name: output
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 200Gi

View File

@@ -1,221 +1,231 @@
entrypoint: main metadata:
arguments: name: "TF Object Detection Training"
parameters: kind: Workflow
- name: source version: 20201115134934
value: https://github.com/tensorflow/models.git action: update
displayName: Model source code source: "https://github.com/onepanelio/templates/blob/master/workflows/tf-object-detection-training/"
type: hidden labels:
visibility: private "created-by": "system"
"used-by": "cvat"
spec:
entrypoint: main
arguments:
parameters:
- name: source
value: https://github.com/tensorflow/models.git
displayName: Model source code
type: hidden
visibility: private
- name: trainingsource - name: trainingsource
value: https://github.com/onepanelio/cvat-training.git value: https://github.com/onepanelio/cvat-training.git
type: hidden type: hidden
visibility: private visibility: private
- name: revision - name: revision
value: v1.13.0 value: v1.13.0
type: hidden type: hidden
visibility: private visibility: private
- name: cvat-annotation-path - name: cvat-annotation-path
value: annotation-dump/sample_dataset value: annotation-dump/sample_dataset
displayName: Dataset path displayName: Dataset path
hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated. hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
visibility: private visibility: private
- name: cvat-output-path - name: cvat-output-path
value: workflow-data/output/sample_output value: workflow-data/output/sample_output
hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated. hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
displayName: Workflow output path displayName: Workflow output path
visibility: private visibility: private
- name: cvat-model - name: cvat-model
value: frcnn-res50-coco value: frcnn-res50-coco
displayName: Model displayName: Model
hint: TF Detection API's model to use for training. hint: TF Detection API's model to use for training.
type: select.select type: select.select
visibility: public visibility: public
options: options:
- name: 'Faster RCNN-ResNet 101-COCO' - name: 'Faster RCNN-ResNet 101-COCO'
value: frcnn-res101-coco value: frcnn-res101-coco
- name: 'Faster RCNN-ResNet 101-Low Proposal-COCO' - name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
value: frcnn-res101-low value: frcnn-res101-low
- name: 'Faster RCNN-ResNet 50-COCO' - name: 'Faster RCNN-ResNet 50-COCO'
value: frcnn-res50-coco value: frcnn-res50-coco
- name: 'Faster RCNN-NAS-COCO' - name: 'Faster RCNN-NAS-COCO'
value: frcnn-nas-coco value: frcnn-nas-coco
- name: 'SSD MobileNet V1-COCO' - name: 'SSD MobileNet V1-COCO'
value: ssd-mobilenet-v1-coco2 value: ssd-mobilenet-v1-coco2
- name: 'SSD MobileNet V2-COCO' - name: 'SSD MobileNet V2-COCO'
value: ssd-mobilenet-v2-coco value: ssd-mobilenet-v2-coco
- name: 'SSDLite MobileNet-COCO' - name: 'SSDLite MobileNet-COCO'
value: ssdlite-mobilenet-coco value: ssdlite-mobilenet-coco
- name: hyperparameters - name: hyperparameters
value: |- value: |-
num-steps=10000 num-steps=10000
displayName: Hyperparameters displayName: Hyperparameters
visibility: public visibility: public
type: textarea.textarea type: textarea.textarea
hint: "Please refer to our <a href='https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_annotation_model#arguments-optional' target='_blank'>documentation</a> for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow." hint: "Please refer to our <a href='https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_annotation_model#arguments-optional' target='_blank'>documentation</a> for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
- name: cvat-finetune-checkpoint - name: cvat-finetune-checkpoint
value: '' value: ''
hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model. hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
displayName: Checkpoint path displayName: Checkpoint path
visibility: public visibility: public
- name: cvat-num-classes - name: cvat-num-classes
value: '81' value: '81'
hint: Number of classes hint: Number of classes
displayName: Number of classes displayName: Number of classes
visibility: private visibility: private
- name: tf-image - name: tf-image
value: tensorflow/tensorflow:1.13.1-py3 value: tensorflow/tensorflow:1.13.1-py3
type: select.select type: select.select
displayName: Select TensorFlow image displayName: Select TensorFlow image
visibility: public visibility: public
hint: Select the GPU image if you are running on a GPU node pool hint: Select the GPU image if you are running on a GPU node pool
options: options:
- name: 'TensorFlow 1.13.1 CPU Image' - name: 'TensorFlow 1.13.1 CPU Image'
value: 'tensorflow/tensorflow:1.13.1-py3' value: 'tensorflow/tensorflow:1.13.1-py3'
- name: 'TensorFlow 1.13.1 GPU Image' - name: 'TensorFlow 1.13.1 GPU Image'
value: 'tensorflow/tensorflow:1.13.1-gpu-py3' value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- displayName: Node pool - displayName: Node pool
hint: Name of node pool or group to run this workflow task hint: Name of node pool or group to run this workflow task
type: select.select type: select.select
name: sys-node-pool name: sys-node-pool
value: Standard_D4s_v3 value: Standard_D4s_v3
visibility: public visibility: public
required: true required: true
options: options:
- name: 'CPU: 2, RAM: 8GB' - name: 'CPU: 2, RAM: 8GB'
value: Standard_D2s_v3 value: Standard_D2s_v3
- name: 'CPU: 4, RAM: 16GB' - name: 'CPU: 4, RAM: 16GB'
value: Standard_D4s_v3 value: Standard_D4s_v3
- name: 'GPU: 1xK80, CPU: 6, RAM: 56GB' - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
value: Standard_NC6 value: Standard_NC6
- name: dump-format - name: dump-format
value: cvat_tfrecord value: cvat_tfrecord
visibility: public visibility: public
templates: templates:
- name: main - name: main
dag: dag:
tasks: tasks:
- name: train-model - name: train-model
template: tensorflow template: tensorflow
# Uncomment the lines below if you want to send Slack notifications # Uncomment the lines below if you want to send Slack notifications
# - arguments: # - arguments:
# artifacts: # artifacts:
# - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}' # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
# name: metrics # name: metrics
# parameters: # parameters:
# - name: status # - name: status
# value: '{{tasks.train-model.status}}' # value: '{{tasks.train-model.status}}'
# dependencies: # dependencies:
# - train-model # - train-model
# name: notify-in-slack # name: notify-in-slack
# template: slack-notify-success # template: slack-notify-success
- name: tensorflow - name: tensorflow
container: container:
args: args:
- | - |
apt-get update && \ apt-get update && \
apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \ apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
pip install pillow lxml Cython contextlib2 jupyter matplotlib numpy scipy boto3 pycocotools pyyaml google-cloud-storage && \ pip install pillow lxml Cython contextlib2 jupyter matplotlib numpy scipy boto3 pycocotools pyyaml google-cloud-storage && \
cd /mnt/src/tf/research && \ cd /mnt/src/tf/research && \
export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \ export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \
cd /mnt/src/train && \ cd /mnt/src/train && \
python convert_workflow.py \ python convert_workflow.py \
--extras="{{workflow.parameters.hyperparameters}}" \ --extras="{{workflow.parameters.hyperparameters}}" \
--model="{{workflow.parameters.cvat-model}}" \ --model="{{workflow.parameters.cvat-model}}" \
--num_classes="{{workflow.parameters.cvat-num-classes}}" \ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
--sys_finetune_checkpoint={{workflow.parameters.cvat-finetune-checkpoint}} --sys_finetune_checkpoint={{workflow.parameters.cvat-finetune-checkpoint}}
command: command:
- sh - sh
- -c - -c
image: '{{workflow.parameters.tf-image}}' image: '{{workflow.parameters.tf-image}}'
volumeMounts: volumeMounts:
- mountPath: /mnt/data - mountPath: /mnt/data
name: data name: data
- mountPath: /mnt/output - mountPath: /mnt/output
name: output name: output
workingDir: /mnt/src workingDir: /mnt/src
nodeSelector: nodeSelector:
beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}' beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
sidecars: sidecars:
- name: tensorboard - name: tensorboard
image: tensorflow/tensorflow:2.3.0 image: tensorflow/tensorflow:2.3.0
command: [sh, -c] command: [sh, -c]
tty: true tty: true
args: ["tensorboard --logdir /mnt/output/"] args: ["tensorboard --logdir /mnt/output/"]
ports: ports:
- containerPort: 6006 - containerPort: 6006
name: tensorboard name: tensorboard
inputs: inputs:
artifacts: artifacts:
- name: data - name: data
path: /mnt/data/datasets/ path: /mnt/data/datasets/
{{.ArtifactRepositoryType}}: "{{.ArtifactRepositoryType}}":
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}' key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
- name: models - name: models
path: /mnt/data/models/ path: /mnt/data/models/
optional: true optional: true
{{.ArtifactRepositoryType}}: "{{.ArtifactRepositoryType}}":
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-finetune-checkpoint}}' key: '{{workflow.namespace}}/{{workflow.parameters.cvat-finetune-checkpoint}}'
- git: - git:
repo: '{{workflow.parameters.source}}' repo: '{{workflow.parameters.source}}'
revision: '{{workflow.parameters.revision}}' revision: '{{workflow.parameters.revision}}'
name: src name: src
path: /mnt/src/tf path: /mnt/src/tf
- git: - git:
repo: '{{workflow.parameters.trainingsource}}' repo: '{{workflow.parameters.trainingsource}}'
revision: 'optional-artifacts' revision: 'optional-artifacts'
name: tsrc name: tsrc
path: /mnt/src/train path: /mnt/src/train
outputs: outputs:
artifacts: artifacts:
- name: model - name: model
optional: true optional: true
path: /mnt/output path: /mnt/output
{{.ArtifactRepositoryType}}: "{{.ArtifactRepositoryType}}":
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}' key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
# Uncomment the lines below if you want to send Slack notifications # Uncomment the lines below if you want to send Slack notifications
#- container: #- container:
# args: # args:
# - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}" # - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
# SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd # SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
# SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify # SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify
# command: # command:
# - sh # - sh
# - -c # - -c
# image: technosophos/slack-notify # image: technosophos/slack-notify
# inputs: # inputs:
# artifacts: # artifacts:
# - name: metrics # - name: metrics
# optional: true # optional: true
# path: /tmp/metrics.json # path: /tmp/metrics.json
# parameters: # parameters:
# - name: status # - name: status
# name: slack-notify-success # name: slack-notify-success
volumeClaimTemplates: volumeClaimTemplates:
- metadata: - metadata:
creationTimestamp: null creationTimestamp: null
name: data name: data
spec: spec:
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
requests: requests:
storage: 200Gi storage: 200Gi
- metadata: - metadata:
creationTimestamp: null creationTimestamp: null
name: output name: output
spec: spec:
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
requests: requests:
storage: 200Gi storage: 200Gi

View File

@@ -1,221 +1,231 @@
entrypoint: main metadata:
arguments: name: "TF Object Detection Training"
parameters: kind: Workflow
- name: source version: 20201130130433
value: https://github.com/tensorflow/models.git action: update
displayName: Model source code source: "https://github.com/onepanelio/templates/blob/master/workflows/tf-object-detection-training/"
type: hidden labels:
visibility: private "created-by": "system"
"used-by": "cvat"
spec:
entrypoint: main
arguments:
parameters:
- name: source
value: https://github.com/tensorflow/models.git
displayName: Model source code
type: hidden
visibility: private
- name: trainingsource - name: trainingsource
value: https://github.com/onepanelio/cvat-training.git value: https://github.com/onepanelio/cvat-training.git
type: hidden type: hidden
visibility: private visibility: private
- name: revision - name: revision
value: v1.13.0 value: v1.13.0
type: hidden type: hidden
visibility: private visibility: private
- name: cvat-annotation-path - name: cvat-annotation-path
value: annotation-dump/sample_dataset value: annotation-dump/sample_dataset
displayName: Dataset path displayName: Dataset path
hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated. hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
visibility: private visibility: private
- name: cvat-output-path - name: cvat-output-path
value: workflow-data/output/sample_output value: workflow-data/output/sample_output
hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated. hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
displayName: Workflow output path displayName: Workflow output path
visibility: private visibility: private
- name: cvat-model - name: cvat-model
value: frcnn-res50-coco value: frcnn-res50-coco
displayName: Model displayName: Model
hint: TF Detection API's model to use for training. hint: TF Detection API's model to use for training.
type: select.select type: select.select
visibility: public visibility: public
options: options:
- name: 'Faster RCNN-ResNet 101-COCO' - name: 'Faster RCNN-ResNet 101-COCO'
value: frcnn-res101-coco value: frcnn-res101-coco
- name: 'Faster RCNN-ResNet 101-Low Proposal-COCO' - name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
value: frcnn-res101-low value: frcnn-res101-low
- name: 'Faster RCNN-ResNet 50-COCO' - name: 'Faster RCNN-ResNet 50-COCO'
value: frcnn-res50-coco value: frcnn-res50-coco
- name: 'Faster RCNN-NAS-COCO' - name: 'Faster RCNN-NAS-COCO'
value: frcnn-nas-coco value: frcnn-nas-coco
- name: 'SSD MobileNet V1-COCO' - name: 'SSD MobileNet V1-COCO'
value: ssd-mobilenet-v1-coco2 value: ssd-mobilenet-v1-coco2
- name: 'SSD MobileNet V2-COCO' - name: 'SSD MobileNet V2-COCO'
value: ssd-mobilenet-v2-coco value: ssd-mobilenet-v2-coco
- name: 'SSDLite MobileNet-COCO' - name: 'SSDLite MobileNet-COCO'
value: ssdlite-mobilenet-coco value: ssdlite-mobilenet-coco
- name: hyperparameters - name: hyperparameters
value: |- value: |-
num-steps=10000 num-steps=10000
displayName: Hyperparameters displayName: Hyperparameters
visibility: public visibility: public
type: textarea.textarea type: textarea.textarea
hint: "Please refer to our <a href='https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_annotation_model#arguments-optional' target='_blank'>documentation</a> for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow." hint: "Please refer to our <a href='https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_annotation_model#arguments-optional' target='_blank'>documentation</a> for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
- name: cvat-finetune-checkpoint - name: cvat-finetune-checkpoint
value: '' value: ''
hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model. hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
displayName: Checkpoint path displayName: Checkpoint path
visibility: public visibility: public
- name: cvat-num-classes - name: cvat-num-classes
value: '81' value: '81'
hint: Number of classes hint: Number of classes
displayName: Number of classes displayName: Number of classes
visibility: private visibility: private
- name: tf-image - name: tf-image
value: tensorflow/tensorflow:1.13.1-py3 value: tensorflow/tensorflow:1.13.1-py3
type: select.select type: select.select
displayName: Select TensorFlow image displayName: Select TensorFlow image
visibility: public visibility: public
hint: Select the GPU image if you are running on a GPU node pool hint: Select the GPU image if you are running on a GPU node pool
options: options:
- name: 'TensorFlow 1.13.1 CPU Image' - name: 'TensorFlow 1.13.1 CPU Image'
value: 'tensorflow/tensorflow:1.13.1-py3' value: 'tensorflow/tensorflow:1.13.1-py3'
- name: 'TensorFlow 1.13.1 GPU Image' - name: 'TensorFlow 1.13.1 GPU Image'
value: 'tensorflow/tensorflow:1.13.1-gpu-py3' value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- displayName: Node pool - displayName: Node pool
hint: Name of node pool or group to run this workflow task hint: Name of node pool or group to run this workflow task
type: select.select type: select.select
name: sys-node-pool name: sys-node-pool
value: Standard_D4s_v3 value: Standard_D4s_v3
visibility: public visibility: public
required: true required: true
options: options:
- name: 'CPU: 2, RAM: 8GB' - name: 'CPU: 2, RAM: 8GB'
value: Standard_D2s_v3 value: Standard_D2s_v3
- name: 'CPU: 4, RAM: 16GB' - name: 'CPU: 4, RAM: 16GB'
value: Standard_D4s_v3 value: Standard_D4s_v3
- name: 'GPU: 1xK80, CPU: 6, RAM: 56GB' - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
value: Standard_NC6 value: Standard_NC6
- name: dump-format - name: dump-format
value: cvat_tfrecord value: cvat_tfrecord
visibility: public visibility: public
templates: templates:
- name: main - name: main
dag: dag:
tasks: tasks:
- name: train-model - name: train-model
template: tensorflow template: tensorflow
# Uncomment the lines below if you want to send Slack notifications # Uncomment the lines below if you want to send Slack notifications
# - arguments: # - arguments:
# artifacts: # artifacts:
# - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}' # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
# name: metrics # name: metrics
# parameters: # parameters:
# - name: status # - name: status
# value: '{{tasks.train-model.status}}' # value: '{{tasks.train-model.status}}'
# dependencies: # dependencies:
# - train-model # - train-model
# name: notify-in-slack # name: notify-in-slack
# template: slack-notify-success # template: slack-notify-success
- name: tensorflow - name: tensorflow
container: container:
args: args:
- | - |
apt-get update && \ apt-get update && \
apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \ apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
pip install pillow lxml Cython contextlib2 jupyter matplotlib numpy scipy boto3 pycocotools pyyaml google-cloud-storage && \ pip install pillow lxml Cython contextlib2 jupyter matplotlib numpy scipy boto3 pycocotools pyyaml google-cloud-storage && \
cd /mnt/src/tf/research && \ cd /mnt/src/tf/research && \
export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \ export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \
cd /mnt/src/train && \ cd /mnt/src/train && \
python convert_workflow.py \ python convert_workflow.py \
--extras="{{workflow.parameters.hyperparameters}}" \ --extras="{{workflow.parameters.hyperparameters}}" \
--model="{{workflow.parameters.cvat-model}}" \ --model="{{workflow.parameters.cvat-model}}" \
--num_classes="{{workflow.parameters.cvat-num-classes}}" \ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
--sys_finetune_checkpoint={{workflow.parameters.cvat-finetune-checkpoint}} --sys_finetune_checkpoint={{workflow.parameters.cvat-finetune-checkpoint}}
command: command:
- sh - sh
- -c - -c
image: '{{workflow.parameters.tf-image}}' image: '{{workflow.parameters.tf-image}}'
volumeMounts: volumeMounts:
- mountPath: /mnt/data - mountPath: /mnt/data
name: data name: data
- mountPath: /mnt/output - mountPath: /mnt/output
name: output name: output
workingDir: /mnt/src workingDir: /mnt/src
nodeSelector: nodeSelector:
beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}' beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
sidecars: sidecars:
- name: tensorboard - name: tensorboard
image: tensorflow/tensorflow:2.3.0 image: tensorflow/tensorflow:2.3.0
command: [sh, -c] command: [sh, -c]
tty: true tty: true
args: ["tensorboard --logdir /mnt/output/"] args: ["tensorboard --logdir /mnt/output/"]
ports: ports:
- containerPort: 6006 - containerPort: 6006
name: tensorboard name: tensorboard
inputs: inputs:
artifacts: artifacts:
- name: data - name: data
path: /mnt/data/datasets/ path: /mnt/data/datasets/
{{.ArtifactRepositoryType}}: "{{.ArtifactRepositoryType}}":
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}' key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
- name: models - name: models
path: /mnt/data/models/ path: /mnt/data/models/
optional: true optional: true
{{.ArtifactRepositoryType}}: "{{.ArtifactRepositoryType}}":
key: '{{workflow.parameters.cvat-finetune-checkpoint}}' key: '{{workflow.parameters.cvat-finetune-checkpoint}}'
- git: - git:
repo: '{{workflow.parameters.source}}' repo: '{{workflow.parameters.source}}'
revision: '{{workflow.parameters.revision}}' revision: '{{workflow.parameters.revision}}'
name: src name: src
path: /mnt/src/tf path: /mnt/src/tf
- git: - git:
repo: '{{workflow.parameters.trainingsource}}' repo: '{{workflow.parameters.trainingsource}}'
revision: 'optional-artifacts' revision: 'optional-artifacts'
name: tsrc name: tsrc
path: /mnt/src/train path: /mnt/src/train
outputs: outputs:
artifacts: artifacts:
- name: model - name: model
optional: true optional: true
path: /mnt/output path: /mnt/output
{{.ArtifactRepositoryType}}: "{{.ArtifactRepositoryType}}":
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}' key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
# Uncomment the lines below if you want to send Slack notifications # Uncomment the lines below if you want to send Slack notifications
#- container: #- container:
# args: # args:
# - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}" # - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
# SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd # SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
# SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify # SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify
# command: # command:
# - sh # - sh
# - -c # - -c
# image: technosophos/slack-notify # image: technosophos/slack-notify
# inputs: # inputs:
# artifacts: # artifacts:
# - name: metrics # - name: metrics
# optional: true # optional: true
# path: /tmp/metrics.json # path: /tmp/metrics.json
# parameters: # parameters:
# - name: status # - name: status
# name: slack-notify-success # name: slack-notify-success
volumeClaimTemplates: volumeClaimTemplates:
- metadata: - metadata:
creationTimestamp: null creationTimestamp: null
name: data name: data
spec: spec:
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
requests: requests:
storage: 200Gi storage: 200Gi
- metadata: - metadata:
creationTimestamp: null creationTimestamp: null
name: output name: output
spec: spec:
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
requests: requests:
storage: 200Gi storage: 200Gi

View File

@@ -1,223 +1,233 @@
entrypoint: main metadata:
arguments: name: "TF Object Detection Training"
parameters: kind: Workflow
- name: source version: 20201208155115
value: https://github.com/tensorflow/models.git action: update
displayName: Model source code source: "https://github.com/onepanelio/templates/blob/master/workflows/tf-object-detection-training/"
type: hidden labels:
visibility: private "created-by": "system"
"used-by": "cvat"
spec:
entrypoint: main
arguments:
parameters:
- name: source
value: https://github.com/tensorflow/models.git
displayName: Model source code
type: hidden
visibility: private
- name: trainingsource - name: trainingsource
value: https://github.com/onepanelio/cvat-training.git value: https://github.com/onepanelio/cvat-training.git
type: hidden type: hidden
visibility: private visibility: private
- name: revision - name: revision
value: v1.13.0 value: v1.13.0
type: hidden type: hidden
visibility: private visibility: private
- name: cvat-annotation-path - name: cvat-annotation-path
value: annotation-dump/sample_dataset value: annotation-dump/sample_dataset
displayName: Dataset path displayName: Dataset path
hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated. hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
visibility: private visibility: private
- name: cvat-output-path - name: cvat-output-path
value: workflow-data/output/sample_output value: workflow-data/output/sample_output
hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated. hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
displayName: Workflow output path displayName: Workflow output path
visibility: private visibility: private
- name: cvat-model - name: cvat-model
value: frcnn-res50-coco value: frcnn-res50-coco
displayName: Model displayName: Model
hint: TF Detection API's model to use for training. hint: TF Detection API's model to use for training.
type: select.select type: select.select
visibility: public visibility: public
options: options:
- name: 'Faster RCNN-ResNet 101-COCO' - name: 'Faster RCNN-ResNet 101-COCO'
value: frcnn-res101-coco value: frcnn-res101-coco
- name: 'Faster RCNN-ResNet 101-Low Proposal-COCO' - name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
value: frcnn-res101-low value: frcnn-res101-low
- name: 'Faster RCNN-ResNet 50-COCO' - name: 'Faster RCNN-ResNet 50-COCO'
value: frcnn-res50-coco value: frcnn-res50-coco
- name: 'Faster RCNN-NAS-COCO' - name: 'Faster RCNN-NAS-COCO'
value: frcnn-nas-coco value: frcnn-nas-coco
- name: 'SSD MobileNet V1-COCO' - name: 'SSD MobileNet V1-COCO'
value: ssd-mobilenet-v1-coco2 value: ssd-mobilenet-v1-coco2
- name: 'SSD MobileNet V2-COCO' - name: 'SSD MobileNet V2-COCO'
value: ssd-mobilenet-v2-coco value: ssd-mobilenet-v2-coco
- name: 'SSDLite MobileNet-COCO' - name: 'SSDLite MobileNet-COCO'
value: ssdlite-mobilenet-coco value: ssdlite-mobilenet-coco
- name: hyperparameters - name: hyperparameters
value: |- value: |-
num-steps=10000 num-steps=10000
displayName: Hyperparameters displayName: Hyperparameters
visibility: public visibility: public
type: textarea.textarea type: textarea.textarea
hint: "Please refer to our <a href='https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_annotation_model#arguments-optional' target='_blank'>documentation</a> for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow." hint: "Please refer to our <a href='https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_annotation_model#arguments-optional' target='_blank'>documentation</a> for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
- name: cvat-finetune-checkpoint - name: cvat-finetune-checkpoint
value: '' value: ''
hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model. hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
displayName: Checkpoint path displayName: Checkpoint path
visibility: public visibility: public
- name: cvat-num-classes - name: cvat-num-classes
value: '81' value: '81'
hint: Number of classes hint: Number of classes
displayName: Number of classes displayName: Number of classes
visibility: private visibility: private
- name: tf-image - name: tf-image
value: tensorflow/tensorflow:1.13.1-py3 value: tensorflow/tensorflow:1.13.1-py3
type: select.select type: select.select
displayName: Select TensorFlow image displayName: Select TensorFlow image
visibility: public visibility: public
hint: Select the GPU image if you are running on a GPU node pool hint: Select the GPU image if you are running on a GPU node pool
options: options:
- name: 'TensorFlow 1.13.1 CPU Image' - name: 'TensorFlow 1.13.1 CPU Image'
value: 'tensorflow/tensorflow:1.13.1-py3' value: 'tensorflow/tensorflow:1.13.1-py3'
- name: 'TensorFlow 1.13.1 GPU Image' - name: 'TensorFlow 1.13.1 GPU Image'
value: 'tensorflow/tensorflow:1.13.1-gpu-py3' value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- displayName: Node pool - displayName: Node pool
hint: Name of node pool or group to run this workflow task hint: Name of node pool or group to run this workflow task
type: select.select type: select.select
name: sys-node-pool name: sys-node-pool
value: Standard_D4s_v3 value: Standard_D4s_v3
visibility: public visibility: public
required: true required: true
options: options:
- name: 'CPU: 2, RAM: 8GB' - name: 'CPU: 2, RAM: 8GB'
value: Standard_D2s_v3 value: Standard_D2s_v3
- name: 'CPU: 4, RAM: 16GB' - name: 'CPU: 4, RAM: 16GB'
value: Standard_D4s_v3 value: Standard_D4s_v3
- name: 'GPU: 1xK80, CPU: 6, RAM: 56GB' - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
value: Standard_NC6 value: Standard_NC6
- name: dump-format - name: dump-format
value: cvat_tfrecord value: cvat_tfrecord
visibility: public visibility: public
templates: templates:
- name: main - name: main
dag: dag:
tasks: tasks:
- name: train-model - name: train-model
template: tensorflow template: tensorflow
# Uncomment the lines below if you want to send Slack notifications # Uncomment the lines below if you want to send Slack notifications
# - arguments: # - arguments:
# artifacts: # artifacts:
# - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}' # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
# name: metrics # name: metrics
# parameters: # parameters:
# - name: status # - name: status
# value: '{{tasks.train-model.status}}' # value: '{{tasks.train-model.status}}'
# dependencies: # dependencies:
# - train-model # - train-model
# name: notify-in-slack # name: notify-in-slack
# template: slack-notify-success # template: slack-notify-success
- name: tensorflow - name: tensorflow
container: container:
args: args:
- | - |
apt-get update && \ apt-get update && \
apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \ apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
pip install pillow lxml Cython contextlib2 jupyter matplotlib numpy scipy boto3 pycocotools pyyaml google-cloud-storage && \ pip install pillow lxml Cython contextlib2 jupyter matplotlib numpy scipy boto3 pycocotools pyyaml google-cloud-storage && \
cd /mnt/src/tf/research && \ cd /mnt/src/tf/research && \
export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \ export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \
cd /mnt/src/train && \ cd /mnt/src/train && \
python convert_workflow.py \ python convert_workflow.py \
--extras="{{workflow.parameters.hyperparameters}}" \ --extras="{{workflow.parameters.hyperparameters}}" \
--model="{{workflow.parameters.cvat-model}}" \ --model="{{workflow.parameters.cvat-model}}" \
--num_classes="{{workflow.parameters.cvat-num-classes}}" \ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
--sys_finetune_checkpoint={{workflow.parameters.cvat-finetune-checkpoint}} --sys_finetune_checkpoint={{workflow.parameters.cvat-finetune-checkpoint}}
command: command:
- sh - sh
- -c - -c
image: '{{workflow.parameters.tf-image}}' image: '{{workflow.parameters.tf-image}}'
volumeMounts: volumeMounts:
- mountPath: /mnt/data - mountPath: /mnt/data
name: data name: data
- mountPath: /mnt/output - mountPath: /mnt/output
name: output name: output
workingDir: /mnt/src workingDir: /mnt/src
nodeSelector: nodeSelector:
beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}' beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
sidecars: sidecars:
- name: tensorboard - name: tensorboard
image: tensorflow/tensorflow:2.3.0 image: tensorflow/tensorflow:2.3.0
command: [sh, -c] command: [sh, -c]
env: env:
- name: ONEPANEL_INTERACTIVE_SIDECAR - name: ONEPANEL_INTERACTIVE_SIDECAR
value: 'true' value: 'true'
args: ["tensorboard --logdir /mnt/output/"] args: ["tensorboard --logdir /mnt/output/"]
ports: ports:
- containerPort: 6006 - containerPort: 6006
name: tensorboard name: tensorboard
inputs: inputs:
artifacts: artifacts:
- name: data - name: data
path: /mnt/data/datasets/ path: /mnt/data/datasets/
{{.ArtifactRepositoryType}}: "{{.ArtifactRepositoryType}}":
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}' key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
- name: models - name: models
path: /mnt/data/models/ path: /mnt/data/models/
optional: true optional: true
{{.ArtifactRepositoryType}}: "{{.ArtifactRepositoryType}}":
key: '{{workflow.parameters.cvat-finetune-checkpoint}}' key: '{{workflow.parameters.cvat-finetune-checkpoint}}'
- git: - git:
repo: '{{workflow.parameters.source}}' repo: '{{workflow.parameters.source}}'
revision: '{{workflow.parameters.revision}}' revision: '{{workflow.parameters.revision}}'
name: src name: src
path: /mnt/src/tf path: /mnt/src/tf
- git: - git:
repo: '{{workflow.parameters.trainingsource}}' repo: '{{workflow.parameters.trainingsource}}'
revision: 'optional-artifacts' revision: 'optional-artifacts'
name: tsrc name: tsrc
path: /mnt/src/train path: /mnt/src/train
outputs: outputs:
artifacts: artifacts:
- name: model - name: model
optional: true optional: true
path: /mnt/output path: /mnt/output
{{.ArtifactRepositoryType}}: "{{.ArtifactRepositoryType}}":
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}' key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
# Uncomment the lines below if you want to send Slack notifications # Uncomment the lines below if you want to send Slack notifications
#- container: #- container:
# args: # args:
# - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}" # - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
# SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd # SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
# SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify # SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify
# command: # command:
# - sh # - sh
# - -c # - -c
# image: technosophos/slack-notify # image: technosophos/slack-notify
# inputs: # inputs:
# artifacts: # artifacts:
# - name: metrics # - name: metrics
# optional: true # optional: true
# path: /tmp/metrics.json # path: /tmp/metrics.json
# parameters: # parameters:
# - name: status # - name: status
# name: slack-notify-success # name: slack-notify-success
volumeClaimTemplates: volumeClaimTemplates:
- metadata: - metadata:
creationTimestamp: null creationTimestamp: null
name: data name: data
spec: spec:
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
requests: requests:
storage: 200Gi storage: 200Gi
- metadata: - metadata:
creationTimestamp: null creationTimestamp: null
name: output name: output
spec: spec:
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
requests: requests:
storage: 200Gi storage: 200Gi

View File

@@ -1,165 +1,174 @@
# source: https://github.com/onepanelio/templates/blob/master/workflows/tf-object-detection-training/ metadata:
arguments: name: "TF Object Detection Training"
parameters: kind: Workflow
- name: cvat-annotation-path version: 20201223202929
value: annotation-dump/sample_dataset action: update
displayName: Dataset path source: "https://github.com/onepanelio/templates/blob/master/workflows/tf-object-detection-training/"
hint: Path to annotated data (TFRecord format) in default object storage. In CVAT, this parameter will be pre-populated. labels:
visibility: internal "created-by": "system"
"used-by": "cvat"
spec:
arguments:
parameters:
- name: cvat-annotation-path
value: annotation-dump/sample_dataset
displayName: Dataset path
hint: Path to annotated data (TFRecord format) in default object storage. In CVAT, this parameter will be pre-populated.
visibility: internal
- name: cvat-output-path - name: cvat-output-path
value: workflow-data/output/sample_output value: workflow-data/output/sample_output
hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated. hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
displayName: Workflow output path displayName: Workflow output path
visibility: internal visibility: internal
- name: cvat-model - name: cvat-model
value: frcnn-res50-coco value: frcnn-res50-coco
displayName: Model displayName: Model
hint: TF Detection API's model to use for training. hint: TF Detection API's model to use for training.
type: select.select type: select.select
visibility: public visibility: public
options: options:
- name: 'Faster RCNN-ResNet 101-COCO' - name: 'Faster RCNN-ResNet 101-COCO'
value: frcnn-res101-coco value: frcnn-res101-coco
- name: 'Faster RCNN-ResNet 101-Low Proposal-COCO' - name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
value: frcnn-res101-low value: frcnn-res101-low
- name: 'Faster RCNN-ResNet 50-COCO' - name: 'Faster RCNN-ResNet 50-COCO'
value: frcnn-res50-coco value: frcnn-res50-coco
- name: 'Faster RCNN-NAS-COCO' - name: 'Faster RCNN-NAS-COCO'
value: frcnn-nas-coco value: frcnn-nas-coco
- name: 'SSD MobileNet V1-COCO' - name: 'SSD MobileNet V1-COCO'
value: ssd-mobilenet-v1-coco2 value: ssd-mobilenet-v1-coco2
- name: 'SSD MobileNet V2-COCO' - name: 'SSD MobileNet V2-COCO'
value: ssd-mobilenet-v2-coco value: ssd-mobilenet-v2-coco
- name: 'SSDLite MobileNet-COCO' - name: 'SSDLite MobileNet-COCO'
value: ssdlite-mobilenet-coco value: ssdlite-mobilenet-coco
- name: hyperparameters - name: hyperparameters
value: |- value: |-
num-steps=10000 num-steps=10000
displayName: Hyperparameters displayName: Hyperparameters
visibility: public visibility: public
type: textarea.textarea type: textarea.textarea
hint: 'See <a href="https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_annotation_model/#tfod-hyperparameters" target="_blank">documentation</a> for more information on parameters.' hint: 'See <a href="https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_annotation_model/#tfod-hyperparameters" target="_blank">documentation</a> for more information on parameters.'
- name: cvat-finetune-checkpoint - name: cvat-finetune-checkpoint
value: '' value: ''
hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model. hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
displayName: Checkpoint path displayName: Checkpoint path
visibility: public visibility: public
- name: cvat-num-classes - name: cvat-num-classes
value: '10' value: '10'
hint: Number of classes. In CVAT, this parameter will be pre-populated. hint: Number of classes. In CVAT, this parameter will be pre-populated.
displayName: Number of classes displayName: Number of classes
visibility: internal visibility: internal
- name: tf-image - name: tf-image
value: tensorflow/tensorflow:1.13.1-py3 value: tensorflow/tensorflow:1.13.1-py3
type: select.select type: select.select
displayName: Select TensorFlow image displayName: Select TensorFlow image
visibility: public visibility: public
hint: Select the GPU image if you are running on a GPU node pool hint: Select the GPU image if you are running on a GPU node pool
options: options:
- name: 'TensorFlow 1.13.1 CPU Image' - name: 'TensorFlow 1.13.1 CPU Image'
value: 'tensorflow/tensorflow:1.13.1-py3' value: 'tensorflow/tensorflow:1.13.1-py3'
- name: 'TensorFlow 1.13.1 GPU Image' - name: 'TensorFlow 1.13.1 GPU Image'
value: 'tensorflow/tensorflow:1.13.1-gpu-py3' value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- name: dump-format - name: dump-format
value: cvat_tfrecord value: cvat_tfrecord
visibility: public visibility: public
- displayName: Node pool - displayName: Node pool
hint: Name of node pool or group to run this workflow task hint: Name of node pool or group to run this workflow task
type: select.nodepool type: select.nodepool
name: sys-node-pool name: sys-node-pool
value: {{.DefaultNodePoolOption}} value: "{{.DefaultNodePoolOption}}"
visibility: public visibility: public
required: true required: true
entrypoint: main entrypoint: main
templates: templates:
- dag: - dag:
tasks: tasks:
- name: train-model - name: train-model
template: tensorflow template: tensorflow
name: main name: main
- container: - container:
args: args:
- | - |
apt-get update && \ apt-get update && \
apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \ apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
pip install pillow lxml Cython contextlib2 matplotlib numpy scipy pycocotools pyyaml test-generator && \ pip install pillow lxml Cython contextlib2 matplotlib numpy scipy pycocotools pyyaml test-generator && \
cd /mnt/src/tf/research && \ cd /mnt/src/tf/research && \
export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \ export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \
mkdir -p /mnt/src/protoc && \ mkdir -p /mnt/src/protoc && \
wget -P /mnt/src/protoc https://github.com/protocolbuffers/protobuf/releases/download/v3.10.1/protoc-3.10.1-linux-x86_64.zip && \ wget -P /mnt/src/protoc https://github.com/protocolbuffers/protobuf/releases/download/v3.10.1/protoc-3.10.1-linux-x86_64.zip && \
cd /mnt/src/protoc/ && \ cd /mnt/src/protoc/ && \
unzip protoc-3.10.1-linux-x86_64.zip && \ unzip protoc-3.10.1-linux-x86_64.zip && \
cd /mnt/src/tf/research/ && \ cd /mnt/src/tf/research/ && \
/mnt/src/protoc/bin/protoc object_detection/protos/*.proto --python_out=. && \ /mnt/src/protoc/bin/protoc object_detection/protos/*.proto --python_out=. && \
cd /mnt/src/train/workflows/tf-object-detection-training && \ cd /mnt/src/train/workflows/tf-object-detection-training && \
python main.py \ python main.py \
--extras="{{workflow.parameters.hyperparameters}}" \ --extras="{{workflow.parameters.hyperparameters}}" \
--model="{{workflow.parameters.cvat-model}}" \ --model="{{workflow.parameters.cvat-model}}" \
--num_classes="{{workflow.parameters.cvat-num-classes}}" \ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
--sys_finetune_checkpoint={{workflow.parameters.cvat-finetune-checkpoint}} --sys_finetune_checkpoint={{workflow.parameters.cvat-finetune-checkpoint}}
command: command:
- sh - sh
- -c - -c
image: '{{workflow.parameters.tf-image}}' image: '{{workflow.parameters.tf-image}}'
volumeMounts: volumeMounts:
- mountPath: /mnt/data - mountPath: /mnt/data
name: data name: data
- mountPath: /mnt/output - mountPath: /mnt/output
name: output name: output
workingDir: /mnt/src workingDir: /mnt/src
nodeSelector: nodeSelector:
beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}' beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
inputs: inputs:
artifacts: artifacts:
- name: data - name: data
path: /mnt/data/datasets/ path: /mnt/data/datasets/
s3: s3:
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}' key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
- name: models - name: models
path: /mnt/data/models/ path: /mnt/data/models/
optional: true optional: true
s3: s3:
key: '{{workflow.parameters.cvat-finetune-checkpoint}}' key: '{{workflow.parameters.cvat-finetune-checkpoint}}'
- git: - git:
repo: https://github.com/tensorflow/models.git repo: https://github.com/tensorflow/models.git
revision: v1.13.0 revision: v1.13.0
name: src name: src
path: /mnt/src/tf path: /mnt/src/tf
- git: - git:
repo: https://github.com/onepanelio/templates.git repo: https://github.com/onepanelio/templates.git
name: tsrc name: tsrc
path: /mnt/src/train path: /mnt/src/train
name: tensorflow name: tensorflow
outputs: outputs:
artifacts: artifacts:
- name: model - name: model
optional: true optional: true
path: /mnt/output path: /mnt/output
s3: s3:
key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}' key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
volumeClaimTemplates: volumeClaimTemplates:
- metadata: - metadata:
name: data name: data
spec: spec:
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
requests: requests:
storage: 200Gi storage: 200Gi
- metadata: - metadata:
name: output name: output
spec: spec:
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
requests: requests:
storage: 200Gi storage: 200Gi

View File

@@ -1,260 +1,269 @@
# source: https://github.com/onepanelio/templates/blob/master/workflows/tf-object-detection-training/ metadata:
arguments: name: "TF Object Detection Training"
parameters: kind: Workflow
- name: cvat-annotation-path version: 20210118175809
value: 'artifacts/{{workflow.namespace}}/annotations/' action: update
hint: Path to annotated data (COCO format) in default object storage. In CVAT, this parameter will be pre-populated. source: "https://github.com/onepanelio/templates/blob/master/workflows/tf-object-detection-training/"
displayName: Dataset path labels:
visibility: internal "created-by": "system"
"used-by": "cvat"
spec:
arguments:
parameters:
- name: cvat-annotation-path
value: 'artifacts/{{workflow.namespace}}/annotations/'
hint: Path to annotated data (COCO format) in default object storage. In CVAT, this parameter will be pre-populated.
displayName: Dataset path
visibility: internal
- name: val-split - name: val-split
value: 10 value: 10
displayName: Validation split size displayName: Validation split size
type: input.number type: input.number
visibility: public visibility: public
hint: Enter validation set size in percentage of full dataset. (0 - 100) hint: Enter validation set size in percentage of full dataset. (0 - 100)
- name: num-augmentation-cycles - name: num-augmentation-cycles
value: 1 value: 1
displayName: Number of augmentation cycles displayName: Number of augmentation cycles
type: input.number type: input.number
visibility: public visibility: public
hint: Number of augmentation cycles, zero means no data augmentation hint: Number of augmentation cycles, zero means no data augmentation
- name: preprocessing-parameters - name: preprocessing-parameters
value: |- value: |-
RandomBrightnessContrast: RandomBrightnessContrast:
p: 0.2 p: 0.2
GaussianBlur: GaussianBlur:
p: 0.3 p: 0.3
GaussNoise: GaussNoise:
p: 0.4 p: 0.4
HorizontalFlip: HorizontalFlip:
p: 0.5 p: 0.5
VerticalFlip: VerticalFlip:
p: 0.3 p: 0.3
displayName: Preprocessing parameters displayName: Preprocessing parameters
visibility: public visibility: public
type: textarea.textarea type: textarea.textarea
hint: 'See <a href="https://albumentations.ai/docs/api_reference/augmentations/transforms/" target="_blank">documentation</a> for more information on parameters.' hint: 'See <a href="https://albumentations.ai/docs/api_reference/augmentations/transforms/" target="_blank">documentation</a> for more information on parameters.'
- name: cvat-model - name: cvat-model
value: frcnn-res50-coco value: frcnn-res50-coco
displayName: Model displayName: Model
hint: TF Detection API's model to use for training. hint: TF Detection API's model to use for training.
type: select.select type: select.select
visibility: public visibility: public
options: options:
- name: 'Faster RCNN-ResNet 101-COCO' - name: 'Faster RCNN-ResNet 101-COCO'
value: frcnn-res101-coco value: frcnn-res101-coco
- name: 'Faster RCNN-ResNet 101-Low Proposal-COCO' - name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
value: frcnn-res101-low value: frcnn-res101-low
- name: 'Faster RCNN-ResNet 50-COCO' - name: 'Faster RCNN-ResNet 50-COCO'
value: frcnn-res50-coco value: frcnn-res50-coco
- name: 'Faster RCNN-NAS-COCO' - name: 'Faster RCNN-NAS-COCO'
value: frcnn-nas-coco value: frcnn-nas-coco
- name: 'SSD MobileNet V1-COCO' - name: 'SSD MobileNet V1-COCO'
value: ssd-mobilenet-v1-coco2 value: ssd-mobilenet-v1-coco2
- name: 'SSD MobileNet V2-COCO' - name: 'SSD MobileNet V2-COCO'
value: ssd-mobilenet-v2-coco value: ssd-mobilenet-v2-coco
- name: 'SSDLite MobileNet-COCO' - name: 'SSDLite MobileNet-COCO'
value: ssdlite-mobilenet-coco value: ssdlite-mobilenet-coco
- name: cvat-num-classes - name: cvat-num-classes
value: '10' value: '10'
hint: Number of classes. In CVAT, this parameter will be pre-populated. hint: Number of classes. In CVAT, this parameter will be pre-populated.
displayName: Number of classes displayName: Number of classes
visibility: internal visibility: internal
- name: hyperparameters - name: hyperparameters
value: |- value: |-
num_steps: 10000 num_steps: 10000
displayName: Hyperparameters displayName: Hyperparameters
visibility: public visibility: public
type: textarea.textarea type: textarea.textarea
hint: 'See <a href="https://docs.onepanel.ai/docs/reference/workflows/training#tfod-hyperparameters" target="_blank">documentation</a> for more information on parameters.' hint: 'See <a href="https://docs.onepanel.ai/docs/reference/workflows/training#tfod-hyperparameters" target="_blank">documentation</a> for more information on parameters.'
- name: dump-format - name: dump-format
value: cvat_coco value: cvat_coco
displayName: CVAT dump format displayName: CVAT dump format
visibility: private visibility: private
- name: cvat-finetune-checkpoint - name: cvat-finetune-checkpoint
value: '' value: ''
hint: Path to the last fine-tune checkpoint for this model in default object storage. Leave empty if this is the first time you're training this model. hint: Path to the last fine-tune checkpoint for this model in default object storage. Leave empty if this is the first time you're training this model.
displayName: Checkpoint path displayName: Checkpoint path
visibility: public visibility: public
- name: tf-image - name: tf-image
value: tensorflow/tensorflow:1.13.1-py3 value: tensorflow/tensorflow:1.13.1-py3
type: select.select type: select.select
displayName: Select TensorFlow image displayName: Select TensorFlow image
visibility: public visibility: public
hint: Select the GPU image if you are running on a GPU node pool hint: Select the GPU image if you are running on a GPU node pool
options: options:
- name: 'TensorFlow 1.13.1 CPU Image' - name: 'TensorFlow 1.13.1 CPU Image'
value: 'tensorflow/tensorflow:1.13.1-py3' value: 'tensorflow/tensorflow:1.13.1-py3'
- name: 'TensorFlow 1.13.1 GPU Image' - name: 'TensorFlow 1.13.1 GPU Image'
value: 'tensorflow/tensorflow:1.13.1-gpu-py3' value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- displayName: Node pool - displayName: Node pool
hint: Name of node pool or group to run this workflow task hint: Name of node pool or group to run this workflow task
type: select.nodepool type: select.nodepool
name: sys-node-pool name: sys-node-pool
value: {{.DefaultNodePoolOption}} value: "{{.DefaultNodePoolOption}}"
visibility: public visibility: public
required: true required: true
entrypoint: main entrypoint: main
templates: templates:
- dag: - dag:
tasks: tasks:
- name: preprocessing - name: preprocessing
template: preprocessing template: preprocessing
- name: train-model - name: train-model
template: tensorflow template: tensorflow
dependencies: [preprocessing] dependencies: [preprocessing]
arguments: arguments:
artifacts: artifacts:
- name: data - name: data
from: "{{tasks.preprocessing.outputs.artifacts.processed-data}}" from: "{{tasks.preprocessing.outputs.artifacts.processed-data}}"
name: main name: main
- container: - container:
args: args:
- | - |
apt-get update && \ apt-get update && \
apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \ apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
pip install --upgrade pip && \ pip install --upgrade pip && \
pip install pillow lxml Cython contextlib2 matplotlib numpy scipy pycocotools pyyaml test-generator && \ pip install pillow lxml Cython contextlib2 matplotlib numpy scipy pycocotools pyyaml test-generator && \
cd /mnt/src/tf/research && \ cd /mnt/src/tf/research && \
export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \ export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \
mkdir -p /mnt/src/protoc && \ mkdir -p /mnt/src/protoc && \
wget -P /mnt/src/protoc https://github.com/protocolbuffers/protobuf/releases/download/v3.10.1/protoc-3.10.1-linux-x86_64.zip && \ wget -P /mnt/src/protoc https://github.com/protocolbuffers/protobuf/releases/download/v3.10.1/protoc-3.10.1-linux-x86_64.zip && \
cd /mnt/src/protoc/ && \ cd /mnt/src/protoc/ && \
unzip protoc-3.10.1-linux-x86_64.zip && \ unzip protoc-3.10.1-linux-x86_64.zip && \
cd /mnt/src/tf/research/ && \ cd /mnt/src/tf/research/ && \
/mnt/src/protoc/bin/protoc object_detection/protos/*.proto --python_out=. && \ /mnt/src/protoc/bin/protoc object_detection/protos/*.proto --python_out=. && \
cd /mnt/src/train/workflows/tf-object-detection-training && \ cd /mnt/src/train/workflows/tf-object-detection-training && \
python main.py \ python main.py \
--extras="{{workflow.parameters.hyperparameters}}" \ --extras="{{workflow.parameters.hyperparameters}}" \
--model="{{workflow.parameters.cvat-model}}" \ --model="{{workflow.parameters.cvat-model}}" \
--num_classes="{{workflow.parameters.cvat-num-classes}}" \ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
--sys_finetune_checkpoint="{{workflow.parameters.cvat-finetune-checkpoint}}" \ --sys_finetune_checkpoint="{{workflow.parameters.cvat-finetune-checkpoint}}" \
--from_preprocessing=True --from_preprocessing=True
command:
- sh
- -c
image: '{{workflow.parameters.tf-image}}'
volumeMounts:
- mountPath: /mnt/data
name: processed-data
- mountPath: /mnt/output
name: output
workingDir: /mnt/src
nodeSelector:
{{.NodePoolLabel}}: '{{workflow.parameters.sys-node-pool}}'
inputs:
artifacts:
- name: data
path: /mnt/data/datasets/
- name: models
path: /mnt/data/models/
optional: true
s3:
key: '{{workflow.parameters.cvat-finetune-checkpoint}}'
- git:
repo: https://github.com/tensorflow/models.git
revision: v1.13.0
name: src
path: /mnt/src/tf
- git:
repo: https://github.com/onepanelio/templates.git
revision: v0.18.0
name: tsrc
path: /mnt/src/train
name: tensorflow
outputs:
artifacts:
- name: model
optional: true
path: /mnt/output
sidecars:
- name: tensorboard
image: '{{workflow.parameters.tf-image}}'
command: command:
- sh - sh
- '-c' - -c
env: image: '{{workflow.parameters.tf-image}}'
- name: ONEPANEL_INTERACTIVE_SIDECAR volumeMounts:
value: 'true' - mountPath: /mnt/data
name: processed-data
- mountPath: /mnt/output
name: output
workingDir: /mnt/src
nodeSelector:
"{{.NodePoolLabel}}": '{{workflow.parameters.sys-node-pool}}'
inputs:
artifacts:
- name: data
path: /mnt/data/datasets/
- name: models
path: /mnt/data/models/
optional: true
s3:
key: '{{workflow.parameters.cvat-finetune-checkpoint}}'
- git:
repo: https://github.com/tensorflow/models.git
revision: v1.13.0
name: src
path: /mnt/src/tf
- git:
repo: https://github.com/onepanelio/templates.git
revision: v0.18.0
name: tsrc
path: /mnt/src/train
name: tensorflow
outputs:
artifacts:
- name: model
optional: true
path: /mnt/output
sidecars:
- name: tensorboard
image: '{{workflow.parameters.tf-image}}'
command:
- sh
- '-c'
env:
- name: ONEPANEL_INTERACTIVE_SIDECAR
value: 'true'
args:
# Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
- tensorboard --logdir /mnt/output/checkpoints/
ports:
- containerPort: 6006
name: tensorboard
- container:
args: args:
# Read logs from /mnt/output - this directory is auto-mounted from volumeMounts - |
- tensorboard --logdir /mnt/output/checkpoints/ pip install --upgrade pip &&\
ports: pip install opencv-python albumentations tqdm pyyaml pycocotools && \
- containerPort: 6006 cd /mnt/src/preprocessing/workflows/albumentations-preprocessing && \
name: tensorboard python -u main.py \
- container: --data_aug_params="{{workflow.parameters.preprocessing-parameters}}" \
args: --format="tfrecord" \
- | --val_split={{workflow.parameters.val-split}} \
pip install --upgrade pip &&\ --aug_steps={{workflow.parameters.num-augmentation-cycles}}
pip install opencv-python albumentations tqdm pyyaml pycocotools && \ command:
cd /mnt/src/preprocessing/workflows/albumentations-preprocessing && \ - sh
python -u main.py \ - -c
--data_aug_params="{{workflow.parameters.preprocessing-parameters}}" \ image: '{{workflow.parameters.tf-image}}'
--format="tfrecord" \ volumeMounts:
--val_split={{workflow.parameters.val-split}} \ - mountPath: /mnt/data
--aug_steps={{workflow.parameters.num-augmentation-cycles}} name: data
command: - mountPath: /mnt/output
- sh name: processed-data
- -c workingDir: /mnt/src
image: '{{workflow.parameters.tf-image}}' nodeSelector:
volumeMounts: "{{.NodePoolLabel}}": '{{workflow.parameters.sys-node-pool}}'
- mountPath: /mnt/data inputs:
name: data artifacts:
- mountPath: /mnt/output - name: data
name: processed-data path: /mnt/data/datasets/
workingDir: /mnt/src s3:
nodeSelector: key: '{{workflow.parameters.cvat-annotation-path}}'
{{.NodePoolLabel}}: '{{workflow.parameters.sys-node-pool}}' - git:
inputs: repo: https://github.com/onepanelio/templates.git
artifacts: revision: v0.18.0
- name: data name: src
path: /mnt/data/datasets/ path: /mnt/src/preprocessing
s3: name: preprocessing
key: '{{workflow.parameters.cvat-annotation-path}}' outputs:
- git: artifacts:
repo: https://github.com/onepanelio/templates.git - name: processed-data
revision: v0.18.0 optional: true
name: src path: /mnt/output
path: /mnt/src/preprocessing volumeClaimTemplates:
name: preprocessing - metadata:
outputs: name: data
artifacts: spec:
- name: processed-data accessModes:
optional: true - ReadWriteOnce
path: /mnt/output resources:
volumeClaimTemplates: requests:
- metadata: storage: 200Gi
name: data - metadata:
spec: name: processed-data
accessModes: spec:
- ReadWriteOnce accessModes:
resources: - ReadWriteOnce
requests: resources:
storage: 200Gi requests:
- metadata: storage: 200Gi
name: processed-data - metadata:
spec: name: output
accessModes: spec:
- ReadWriteOnce accessModes:
resources: - ReadWriteOnce
requests: resources:
storage: 200Gi requests:
- metadata: storage: 200Gi
name: output
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 200Gi

View File

@@ -0,0 +1,105 @@
metadata:
name: CVAT
kind: Workspace
version: 20200528140124
action: create
description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
spec:
# Docker containers that are part of the Workspace
containers:
- name: cvat-db
image: postgres:10-alpine
env:
- name: POSTGRES_USER
value: root
- name: POSTGRES_DB
value: cvat
- name: POSTGRES_HOST_AUTH_METHOD
value: trust
- name: PGDATA
value: /var/lib/psql/data
ports:
- containerPort: 5432
name: tcp
volumeMounts:
- name: db
mountPath: /var/lib/psql
- name: cvat-redis
image: redis:4.0-alpine
ports:
- containerPort: 6379
name: tcp
- name: cvat
image: onepanel/cvat:v0.7.0
env:
- name: DJANGO_MODWSGI_EXTRA_ARGS
value: ""
- name: ALLOWED_HOSTS
value: '*'
- name: CVAT_REDIS_HOST
value: localhost
- name: CVAT_POSTGRES_HOST
value: localhost
- name: CVAT_SHARE_URL
value: /home/django/data
ports:
- containerPort: 8080
name: http
volumeMounts:
- name: data
mountPath: /home/django/data
- name: keys
mountPath: /home/django/keys
- name: logs
mountPath: /home/django/logs
- name: models
mountPath: /home/django/models
- name: cvat-ui
image: onepanel/cvat-ui:v0.7.0
ports:
- containerPort: 80
name: http
ports:
- name: cvat-ui
port: 80
protocol: TCP
targetPort: 80
- name: cvat
port: 8080
protocol: TCP
targetPort: 8080
routes:
- match:
- uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- queryParams:
id:
regex: \d+.*
route:
- destination:
port:
number: 8080
- match:
- uri:
prefix: /
route:
- destination:
port:
number: 80
# DAG Workflow to be executed once a Workspace action completes
# postExecutionWorkflow:
# entrypoint: main
# templates:
# - name: main
# dag:
# tasks:
# - name: slack-notify
# template: slack-notify
# - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -0,0 +1,116 @@
metadata:
name: CVAT
kind: Workspace
version: 20200626113635
action: update
description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
spec:
# Docker containers that are part of the Workspace
containers:
- name: cvat-db
image: postgres:10-alpine
env:
- name: POSTGRES_USER
value: root
- name: POSTGRES_DB
value: cvat
- name: POSTGRES_HOST_AUTH_METHOD
value: trust
- name: PGDATA
value: /var/lib/psql/data
ports:
- containerPort: 5432
name: tcp
volumeMounts:
- name: db
mountPath: /var/lib/psql
- name: cvat-redis
image: redis:4.0-alpine
ports:
- containerPort: 6379
name: tcp
- name: cvat
image: onepanel/cvat:v0.7.6
env:
- name: DJANGO_MODWSGI_EXTRA_ARGS
value: ""
- name: ALLOWED_HOSTS
value: '*'
- name: CVAT_REDIS_HOST
value: localhost
- name: CVAT_POSTGRES_HOST
value: localhost
- name: CVAT_SHARE_URL
value: /home/django/data
ports:
- containerPort: 8080
name: http
volumeMounts:
- name: data
mountPath: /home/django/data
- name: keys
mountPath: /home/django/keys
- name: logs
mountPath: /home/django/logs
- name: models
mountPath: /home/django/models
- name: share
mountPath: /home/django/share
- name: cvat-ui
image: onepanel/cvat-ui:v0.7.5
ports:
- containerPort: 80
name: http
- name: filesyncer
image: onepanel/filesyncer:v0.0.4
command: ['python3', 'main.py']
volumeMounts:
- name: share
mountPath: /mnt/share
ports:
- name: cvat-ui
port: 80
protocol: TCP
targetPort: 80
- name: cvat
port: 8080
protocol: TCP
targetPort: 8080
routes:
- match:
- uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- queryParams:
id:
regex: \d+.*
route:
- destination:
port:
number: 8080
timeout: 600s
- match:
- uri:
prefix: /
route:
- destination:
port:
number: 80
timeout: 600s
# DAG Workflow to be executed once a Workspace action completes (optional)
# Uncomment the lines below if you want to send Slack notifications
#postExecutionWorkflow:
# entrypoint: main
# templates:
# - name: main
# dag:
# tasks:
# - name: slack-notify
# template: slack-notify
# - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -0,0 +1,118 @@
metadata:
name: CVAT
kind: Workspace
version: 20200704151301
action: update
description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
spec:
# Docker containers that are part of the Workspace
containers:
- name: cvat-db
image: postgres:10-alpine
env:
- name: POSTGRES_USER
value: root
- name: POSTGRES_DB
value: cvat
- name: POSTGRES_HOST_AUTH_METHOD
value: trust
- name: PGDATA
value: /var/lib/psql/data
ports:
- containerPort: 5432
name: tcp
volumeMounts:
- name: db
mountPath: /var/lib/psql
- name: cvat-redis
image: redis:4.0-alpine
ports:
- containerPort: 6379
name: tcp
- name: cvat
image: onepanel/cvat:v0.7.10-stable
env:
- name: DJANGO_MODWSGI_EXTRA_ARGS
value: ""
- name: ALLOWED_HOSTS
value: '*'
- name: CVAT_REDIS_HOST
value: localhost
- name: CVAT_POSTGRES_HOST
value: localhost
- name: CVAT_SHARE_URL
value: /home/django/data
ports:
- containerPort: 8080
name: http
volumeMounts:
- name: data
mountPath: /home/django/data
- name: keys
mountPath: /home/django/keys
- name: logs
mountPath: /home/django/logs
- name: models
mountPath: /home/django/models
- name: share
mountPath: /home/django/share
- name: cvat-ui
image: onepanel/cvat-ui:v0.7.10-stable
ports:
- containerPort: 80
name: http
# Uncomment following lines to enable S3 FileSyncer
# Refer to https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_quick_guide#setting-up-environment-variables
#- name: filesyncer
# image: onepanel/filesyncer:v0.0.4
# command: ['python3', 'main.py']
# volumeMounts:
# - name: share
# mountPath: /mnt/share
ports:
- name: cvat-ui
port: 80
protocol: TCP
targetPort: 80
- name: cvat
port: 8080
protocol: TCP
targetPort: 8080
routes:
- match:
- uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- queryParams:
id:
regex: \d+.*
route:
- destination:
port:
number: 8080
timeout: 600s
- match:
- uri:
prefix: /
route:
- destination:
port:
number: 80
timeout: 600s
# DAG Workflow to be executed once a Workspace action completes (optional)
# Uncomment the lines below if you want to send Slack notifications
#postExecutionWorkflow:
# entrypoint: main
# templates:
# - name: main
# dag:
# tasks:
# - name: slack-notify
# template: slack-notify
# - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -0,0 +1,135 @@
metadata:
name: CVAT
kind: Workspace
version: 20200724220450
action: update
description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
spec:
# Workspace arguments
arguments:
parameters:
- name: storage-prefix
displayName: Directory in default object storage
value: data
hint: Location of data and models in default object storage, will continuously sync to '/mnt/share'
containers:
- name: cvat-db
image: postgres:10-alpine
env:
- name: POSTGRES_USER
value: root
- name: POSTGRES_DB
value: cvat
- name: POSTGRES_HOST_AUTH_METHOD
value: trust
- name: PGDATA
value: /var/lib/psql/data
ports:
- containerPort: 5432
name: tcp
volumeMounts:
- name: db
mountPath: /var/lib/psql
- name: cvat-redis
image: redis:4.0-alpine
ports:
- containerPort: 6379
name: tcp
- name: cvat
image: onepanel/cvat:v0.7.10-stable
env:
- name: DJANGO_MODWSGI_EXTRA_ARGS
value: ""
- name: ALLOWED_HOSTS
value: '*'
- name: CVAT_REDIS_HOST
value: localhost
- name: CVAT_POSTGRES_HOST
value: localhost
- name: CVAT_SHARE_URL
value: /home/django/data
ports:
- containerPort: 8080
name: http
volumeMounts:
- name: data
mountPath: /home/django/data
- name: keys
mountPath: /home/django/keys
- name: logs
mountPath: /home/django/logs
- name: models
mountPath: /home/django/models
- name: share
mountPath: /home/django/share
- name: sys-namespace-config
mountPath: /etc/onepanel
readOnly: true
- name: cvat-ui
image: onepanel/cvat-ui:v0.7.10-stable
ports:
- containerPort: 80
name: http
# You can add multiple FileSyncer sidecar containers if needed
- name: filesyncer
image: "onepanel/filesyncer:{{.ArtifactRepositoryType}}"
args:
- download
env:
- name: FS_PATH
value: /mnt/share
- name: FS_PREFIX
value: '{{workspace.parameters.storage-prefix}}'
volumeMounts:
- name: share
mountPath: /mnt/share
- name: sys-namespace-config
mountPath: /etc/onepanel
readOnly: true
ports:
- name: cvat-ui
port: 80
protocol: TCP
targetPort: 80
- name: cvat
port: 8080
protocol: TCP
targetPort: 8080
routes:
- match:
- uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- queryParams:
id:
regex: \d+.*
route:
- destination:
port:
number: 8080
timeout: 600s
- match:
- uri:
prefix: /
route:
- destination:
port:
number: 80
timeout: 600s
# DAG Workflow to be executed once a Workspace action completes (optional)
# Uncomment the lines below if you want to send Slack notifications
#postExecutionWorkflow:
# entrypoint: main
# templates:
# - name: main
# dag:
# tasks:
# - name: slack-notify
# template: slack-notify
# - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -0,0 +1,144 @@
metadata:
name: CVAT
kind: Workspace
version: 20200812113316
action: update
description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
spec:
# Workspace arguments
arguments:
parameters:
- name: sync-directory
displayName: Directory to sync raw input and training output
value: workflow-data
hint: Location to sync raw input, models and checkpoints from default object storage. Note that this will be relative to the current namespace.
containers:
- name: cvat-db
image: postgres:10-alpine
env:
- name: POSTGRES_USER
value: root
- name: POSTGRES_DB
value: cvat
- name: POSTGRES_HOST_AUTH_METHOD
value: trust
- name: PGDATA
value: /var/lib/psql/data
ports:
- containerPort: 5432
name: tcp
volumeMounts:
- name: db
mountPath: /var/lib/psql
- name: cvat-redis
image: redis:4.0-alpine
ports:
- containerPort: 6379
name: tcp
- name: cvat
image: onepanel/cvat:0.12.0_cvat.1.0.0-beta.2-cuda
env:
- name: DJANGO_MODWSGI_EXTRA_ARGS
value: ""
- name: ALLOWED_HOSTS
value: '*'
- name: CVAT_REDIS_HOST
value: localhost
- name: CVAT_POSTGRES_HOST
value: localhost
- name: CVAT_SHARE_URL
value: /home/django/data
- name: ONEPANEL_SYNC_DIRECTORY
value: '{{workspace.parameters.sync-directory}}'
- name: NVIDIA_VISIBLE_DEVICES
value: all
- name: NVIDIA_DRIVER_CAPABILITIES
value: compute,utility
- name: NVIDIA_REQUIRE_CUDA
value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
ports:
- containerPort: 8080
name: http
volumeMounts:
- name: data
mountPath: /home/django/data
- name: keys
mountPath: /home/django/keys
- name: logs
mountPath: /home/django/logs
- name: models
mountPath: /home/django/models
- name: share
mountPath: /home/django/share
- name: sys-namespace-config
mountPath: /etc/onepanel
readOnly: true
- name: cvat-ui
image: onepanel/cvat-ui:0.12.0_cvat.1.0.0-beta.2
ports:
- containerPort: 80
name: http
# You can add multiple FileSyncer sidecar containers if needed
- name: filesyncer
image: "onepanel/filesyncer:{{.ArtifactRepositoryType}}"
imagePullPolicy: Always
args:
- download
env:
- name: FS_PATH
value: /mnt/share
- name: FS_PREFIX
value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
volumeMounts:
- name: share
mountPath: /mnt/share
- name: sys-namespace-config
mountPath: /etc/onepanel
readOnly: true
ports:
- name: cvat-ui
port: 80
protocol: TCP
targetPort: 80
- name: cvat
port: 8080
protocol: TCP
targetPort: 8080
routes:
- match:
- uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- queryParams:
id:
regex: \d+.*
route:
- destination:
port:
number: 8080
timeout: 600s
- match:
- uri:
prefix: /
route:
- destination:
port:
number: 80
timeout: 600s
# DAG Workflow to be executed once a Workspace action completes (optional)
# Uncomment the lines below if you want to send Slack notifications
#postExecutionWorkflow:
# entrypoint: main
# templates:
# - name: main
# dag:
# tasks:
# - name: slack-notify
# template: slack-notify
# - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -0,0 +1,144 @@
metadata:
name: CVAT
kind: Workspace
version: 20200824101905
action: update
description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
spec:
# Workspace arguments
arguments:
parameters:
- name: sync-directory
displayName: Directory to sync raw input and training output
value: workflow-data
hint: Location to sync raw input, models and checkpoints from default object storage. Note that this will be relative to the current namespace.
containers:
- name: cvat-db
image: postgres:10-alpine
env:
- name: POSTGRES_USER
value: root
- name: POSTGRES_DB
value: cvat
- name: POSTGRES_HOST_AUTH_METHOD
value: trust
- name: PGDATA
value: /var/lib/psql/data
ports:
- containerPort: 5432
name: tcp
volumeMounts:
- name: db
mountPath: /var/lib/psql
- name: cvat-redis
image: redis:4.0-alpine
ports:
- containerPort: 6379
name: tcp
- name: cvat
image: onepanel/cvat:0.12.0-rc.6_cvat.1.0.0
env:
- name: DJANGO_MODWSGI_EXTRA_ARGS
value: ""
- name: ALLOWED_HOSTS
value: '*'
- name: CVAT_REDIS_HOST
value: localhost
- name: CVAT_POSTGRES_HOST
value: localhost
- name: CVAT_SHARE_URL
value: /home/django/data
- name: ONEPANEL_SYNC_DIRECTORY
value: '{{workspace.parameters.sync-directory}}'
- name: NVIDIA_VISIBLE_DEVICES
value: all
- name: NVIDIA_DRIVER_CAPABILITIES
value: compute,utility
- name: NVIDIA_REQUIRE_CUDA
value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
ports:
- containerPort: 8080
name: http
volumeMounts:
- name: data
mountPath: /home/django/data
- name: keys
mountPath: /home/django/keys
- name: logs
mountPath: /home/django/logs
- name: models
mountPath: /home/django/models
- name: share
mountPath: /home/django/share
- name: sys-namespace-config
mountPath: /etc/onepanel
readOnly: true
- name: cvat-ui
image: onepanel/cvat-ui:0.12.0-rc.1_cvat.1.0.0
ports:
- containerPort: 80
name: http
# You can add multiple FileSyncer sidecar containers if needed
- name: filesyncer
image: "onepanel/filesyncer:{{.ArtifactRepositoryType}}"
imagePullPolicy: Always
args:
- download
env:
- name: FS_PATH
value: /mnt/share
- name: FS_PREFIX
value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
volumeMounts:
- name: share
mountPath: /mnt/share
- name: sys-namespace-config
mountPath: /etc/onepanel
readOnly: true
ports:
- name: cvat-ui
port: 80
protocol: TCP
targetPort: 80
- name: cvat
port: 8080
protocol: TCP
targetPort: 8080
routes:
- match:
- uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- queryParams:
id:
regex: \d+.*
route:
- destination:
port:
number: 8080
timeout: 600s
- match:
- uri:
prefix: /
route:
- destination:
port:
number: 80
timeout: 600s
# DAG Workflow to be executed once a Workspace action completes (optional)
# Uncomment the lines below if you want to send Slack notifications
#postExecutionWorkflow:
# entrypoint: main
# templates:
# - name: main
# dag:
# tasks:
# - name: slack-notify
# template: slack-notify
# - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -0,0 +1,144 @@
metadata:
name: CVAT
kind: Workspace
version: 20200825154403
action: update
description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
spec:
# Workspace arguments
arguments:
parameters:
- name: sync-directory
displayName: Directory to sync raw input and training output
value: workflow-data
hint: Location to sync raw input, models and checkpoints from default object storage. Note that this will be relative to the current namespace.
containers:
- name: cvat-db
image: postgres:10-alpine
env:
- name: POSTGRES_USER
value: root
- name: POSTGRES_DB
value: cvat
- name: POSTGRES_HOST_AUTH_METHOD
value: trust
- name: PGDATA
value: /var/lib/psql/data
ports:
- containerPort: 5432
name: tcp
volumeMounts:
- name: db
mountPath: /var/lib/psql
- name: cvat-redis
image: redis:4.0-alpine
ports:
- containerPort: 6379
name: tcp
- name: cvat
image: onepanel/cvat:0.12.0_cvat.1.0.0
env:
- name: DJANGO_MODWSGI_EXTRA_ARGS
value: ""
- name: ALLOWED_HOSTS
value: '*'
- name: CVAT_REDIS_HOST
value: localhost
- name: CVAT_POSTGRES_HOST
value: localhost
- name: CVAT_SHARE_URL
value: /home/django/data
- name: ONEPANEL_SYNC_DIRECTORY
value: '{{workspace.parameters.sync-directory}}'
- name: NVIDIA_VISIBLE_DEVICES
value: all
- name: NVIDIA_DRIVER_CAPABILITIES
value: compute,utility
- name: NVIDIA_REQUIRE_CUDA
value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
ports:
- containerPort: 8080
name: http
volumeMounts:
- name: data
mountPath: /home/django/data
- name: keys
mountPath: /home/django/keys
- name: logs
mountPath: /home/django/logs
- name: models
mountPath: /home/django/models
- name: share
mountPath: /home/django/share
- name: sys-namespace-config
mountPath: /etc/onepanel
readOnly: true
- name: cvat-ui
image: onepanel/cvat-ui:0.12.0_cvat.1.0.0
ports:
- containerPort: 80
name: http
# You can add multiple FileSyncer sidecar containers if needed
- name: filesyncer
image: "onepanel/filesyncer:{{.ArtifactRepositoryType}}"
imagePullPolicy: Always
args:
- download
env:
- name: FS_PATH
value: /mnt/share
- name: FS_PREFIX
value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
volumeMounts:
- name: share
mountPath: /mnt/share
- name: sys-namespace-config
mountPath: /etc/onepanel
readOnly: true
ports:
- name: cvat-ui
port: 80
protocol: TCP
targetPort: 80
- name: cvat
port: 8080
protocol: TCP
targetPort: 8080
routes:
- match:
- uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- queryParams:
id:
regex: \d+.*
route:
- destination:
port:
number: 8080
timeout: 600s
- match:
- uri:
prefix: /
route:
- destination:
port:
number: 80
timeout: 600s
# DAG Workflow to be executed once a Workspace action completes (optional)
# Uncomment the lines below if you want to send Slack notifications
#postExecutionWorkflow:
# entrypoint: main
# templates:
# - name: main
# dag:
# tasks:
# - name: slack-notify
# template: slack-notify
# - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -0,0 +1,156 @@
metadata:
name: CVAT
kind: Workspace
version: 20200826185926
action: update
description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
spec:
# Workspace arguments
arguments:
parameters:
- name: sync-directory
displayName: Directory to sync raw input and training output
value: workflow-data
hint: Location to sync raw input, models and checkpoints from default object storage. Note that this will be relative to the current namespace.
containers:
- name: cvat-db
image: postgres:10-alpine
env:
- name: POSTGRES_USER
value: root
- name: POSTGRES_DB
value: cvat
- name: POSTGRES_HOST_AUTH_METHOD
value: trust
- name: PGDATA
value: /var/lib/psql/data
ports:
- containerPort: 5432
name: tcp
volumeMounts:
- name: db
mountPath: /var/lib/psql
- name: cvat-redis
image: redis:4.0-alpine
ports:
- containerPort: 6379
name: tcp
- name: cvat
image: onepanel/cvat:0.12.0_cvat.1.0.0
env:
- name: DJANGO_MODWSGI_EXTRA_ARGS
value: ""
- name: ALLOWED_HOSTS
value: '*'
- name: CVAT_REDIS_HOST
value: localhost
- name: CVAT_POSTGRES_HOST
value: localhost
- name: CVAT_SHARE_URL
value: /home/django/data
- name: ONEPANEL_SYNC_DIRECTORY
value: '{{workspace.parameters.sync-directory}}'
- name: NVIDIA_VISIBLE_DEVICES
value: all
- name: NVIDIA_DRIVER_CAPABILITIES
value: compute,utility
- name: NVIDIA_REQUIRE_CUDA
value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
ports:
- containerPort: 8080
name: http
volumeMounts:
- name: data
mountPath: /home/django/data
- name: keys
mountPath: /home/django/keys
- name: logs
mountPath: /home/django/logs
- name: models
mountPath: /home/django/models
- name: share
mountPath: /home/django/share
- name: sys-namespace-config
mountPath: /etc/onepanel
readOnly: true
- name: cvat-ui
image: onepanel/cvat-ui:0.12.0_cvat.1.0.0
ports:
- containerPort: 80
name: http
# You can add multiple FileSyncer sidecar containers if needed
- name: filesyncer
image: "onepanel/filesyncer:{{.ArtifactRepositoryType}}"
imagePullPolicy: Always
args:
- download
- -server-prefix=/sys/filesyncer
env:
- name: FS_PATH
value: /mnt/share
- name: FS_PREFIX
value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
volumeMounts:
- name: share
mountPath: /mnt/share
- name: sys-namespace-config
mountPath: /etc/onepanel
readOnly: true
ports:
- name: cvat-ui
port: 80
protocol: TCP
targetPort: 80
- name: cvat
port: 8080
protocol: TCP
targetPort: 8080
- name: fs
port: 8888
protocol: TCP
targetPort: 8888
routes:
- match:
- uri:
prefix: /sys/filesyncer
route:
- destination:
port:
number: 8888
- match:
- uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- queryParams:
id:
regex: \d+.*
route:
- destination:
port:
number: 8080
timeout: 600s
- match:
- uri:
prefix: /
route:
- destination:
port:
number: 80
timeout: 600s
# DAG Workflow to be executed once a Workspace action completes (optional)
# Uncomment the lines below if you want to send Slack notifications
#postExecutionWorkflow:
# entrypoint: main
# templates:
# - name: main
# dag:
# tasks:
# - name: slack-notify
# template: slack-notify
# - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -0,0 +1,154 @@
metadata:
name: CVAT
kind: Workspace
version: 20201001070806
action: update
description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
spec:
# Workspace arguments
arguments:
parameters:
- name: sync-directory
displayName: Directory to sync raw input and training output
value: workflow-data
hint: Location to sync raw input, models and checkpoints from default object storage. Note that this will be relative to the current namespace.
containers:
- name: cvat-db
image: postgres:10-alpine
env:
- name: POSTGRES_USER
value: root
- name: POSTGRES_DB
value: cvat
- name: POSTGRES_HOST_AUTH_METHOD
value: trust
- name: PGDATA
value: /var/lib/psql/data
ports:
- containerPort: 5432
name: tcp
volumeMounts:
- name: db
mountPath: /var/lib/psql
- name: cvat-redis
image: redis:4.0-alpine
ports:
- containerPort: 6379
name: tcp
- name: cvat
image: onepanel/cvat:0.12.1_cvat.1.0.0
env:
- name: DJANGO_MODWSGI_EXTRA_ARGS
value: ""
- name: ALLOWED_HOSTS
value: '*'
- name: CVAT_REDIS_HOST
value: localhost
- name: CVAT_POSTGRES_HOST
value: localhost
- name: CVAT_SHARE_URL
value: /home/django/data
- name: ONEPANEL_SYNC_DIRECTORY
value: '{{workspace.parameters.sync-directory}}'
- name: NVIDIA_VISIBLE_DEVICES
value: all
- name: NVIDIA_DRIVER_CAPABILITIES
value: compute,utility
- name: NVIDIA_REQUIRE_CUDA
value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
ports:
- containerPort: 8080
name: http
volumeMounts:
- name: data
mountPath: /home/django/data
- name: keys
mountPath: /home/django/keys
- name: logs
mountPath: /home/django/logs
- name: models
mountPath: /home/django/models
- name: share
mountPath: /home/django/share
- name: sys-namespace-config
mountPath: /etc/onepanel
readOnly: true
- name: cvat-ui
image: onepanel/cvat-ui:0.12.1_cvat.1.0.0
ports:
- containerPort: 80
name: http
# You can add multiple FileSyncer sidecar containers if needed
- name: filesyncer
image: "onepanel/filesyncer:{{.ArtifactRepositoryType}}"
imagePullPolicy: Always
args:
- download
- -server-prefix=/sys/filesyncer
env:
- name: FS_PATH
value: /mnt/share
- name: FS_PREFIX
value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
volumeMounts:
- name: share
mountPath: /mnt/share
- name: sys-namespace-config
mountPath: /etc/onepanel
readOnly: true
ports:
- name: cvat-ui
port: 80
protocol: TCP
targetPort: 80
- name: cvat
port: 8080
protocol: TCP
targetPort: 8080
- name: fs
port: 8888
protocol: TCP
targetPort: 8888
routes:
- match:
- uri:
prefix: /sys/filesyncer
route:
- destination:
port:
number: 8888
- match:
- uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- queryParams:
id:
regex: \d+.*
route:
- destination:
port:
number: 8080
- match:
- uri:
prefix: /
route:
- destination:
port:
number: 80
# DAG Workflow to be executed once a Workspace action completes (optional)
# Uncomment the lines below if you want to send Slack notifications
#postExecutionWorkflow:
# entrypoint: main
# templates:
# - name: main
# dag:
# tasks:
# - name: slack-notify
# template: slack-notify
# - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -1,147 +1,154 @@
metadata:
name: CVAT
kind: Workspace
version: 20201016170415
action: update
description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
spec:
# Workspace arguments
arguments:
parameters:
- name: sync-directory
displayName: Directory to sync raw input and training output
value: workflow-data
hint: Location to sync raw input, models and checkpoints from default object storage. Note that this will be relative to the current namespace.
containers:
- name: cvat-db
image: postgres:10-alpine
env:
- name: POSTGRES_USER
value: root
- name: POSTGRES_DB
value: cvat
- name: POSTGRES_HOST_AUTH_METHOD
value: trust
- name: PGDATA
value: /var/lib/psql/data
ports:
- containerPort: 5432
name: tcp
volumeMounts:
- name: db
mountPath: /var/lib/psql
- name: cvat-redis
image: redis:4.0-alpine
ports:
- containerPort: 6379
name: tcp
- name: cvat
image: onepanel/cvat:0.14.0_cvat.1.0.0
env:
- name: DJANGO_MODWSGI_EXTRA_ARGS
value: ""
- name: ALLOWED_HOSTS
value: '*'
- name: CVAT_REDIS_HOST
value: localhost
- name: CVAT_POSTGRES_HOST
value: localhost
- name: CVAT_SHARE_URL
value: /home/django/data
- name: ONEPANEL_SYNC_DIRECTORY
value: '{{workspace.parameters.sync-directory}}'
- name: NVIDIA_VISIBLE_DEVICES
value: all
- name: NVIDIA_DRIVER_CAPABILITIES
value: compute,utility
- name: NVIDIA_REQUIRE_CUDA
value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
ports:
- containerPort: 8080
name: http
volumeMounts:
- name: data
mountPath: /home/django/data
- name: keys
mountPath: /home/django/keys
- name: logs
mountPath: /home/django/logs
- name: models
mountPath: /home/django/models
- name: share
mountPath: /home/django/share
- name: sys-namespace-config
mountPath: /etc/onepanel
readOnly: true
- name: cvat-ui
image: onepanel/cvat-ui:0.14.0_cvat.1.0.0
ports:
- containerPort: 80
name: http
# You can add multiple FileSyncer sidecar containers if needed
- name: filesyncer
image: "onepanel/filesyncer:{{.ArtifactRepositoryType}}"
imagePullPolicy: Always
args:
- download
- -server-prefix=/sys/filesyncer
env:
- name: FS_PATH
value: /mnt/share
- name: FS_PREFIX
value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
volumeMounts:
- name: share
mountPath: /mnt/share
- name: sys-namespace-config
mountPath: /etc/onepanel
readOnly: true
ports:
- name: cvat-ui
port: 80
protocol: TCP
targetPort: 80
- name: cvat
port: 8080
protocol: TCP
targetPort: 8080
- name: fs
port: 8888
protocol: TCP
targetPort: 8888
routes:
- match:
- uri:
prefix: /sys/filesyncer
route:
- destination:
port:
number: 8888
- match:
- uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- queryParams:
id:
regex: \d+.*
route:
- destination:
port:
number: 8080
- match:
- uri:
prefix: /
route:
- destination:
port:
number: 80
# DAG Workflow to be executed once a Workspace action completes (optional)
# Uncomment the lines below if you want to send Slack notifications
#postExecutionWorkflow:
#  entrypoint: main
#  templates:
#  - name: main
#    dag:
#      tasks:
#      - name: slack-notify
#        template: slack-notify
#  - name: slack-notify
#    container:
#      image: technosophos/slack-notify
#      args:
#      - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
#      command:
#      - sh
#      - -c

View File

@@ -1,159 +1,166 @@
# Workspace arguments metadata:
arguments: name: CVAT
parameters: kind: Workspace
- name: sync-directory version: 20201102104048
displayName: Directory to sync raw input and training output action: update
value: workflow-data description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'. spec:
containers: # Workspace arguments
- name: cvat-db arguments:
image: postgres:10-alpine parameters:
env: - name: sync-directory
- name: POSTGRES_USER displayName: Directory to sync raw input and training output
value: root value: workflow-data
- name: POSTGRES_DB hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'.
value: cvat containers:
- name: POSTGRES_HOST_AUTH_METHOD - name: cvat-db
value: trust image: postgres:10-alpine
- name: PGDATA env:
value: /var/lib/psql/data - name: POSTGRES_USER
ports: value: root
- containerPort: 5432 - name: POSTGRES_DB
name: tcp value: cvat
volumeMounts: - name: POSTGRES_HOST_AUTH_METHOD
- name: db value: trust
mountPath: /var/lib/psql - name: PGDATA
- name: cvat-redis value: /var/lib/psql/data
image: redis:4.0-alpine ports:
ports: - containerPort: 5432
- containerPort: 6379 name: tcp
name: tcp volumeMounts:
- name: cvat - name: db
image: onepanel/cvat:0.15.0_cvat.1.0.0 mountPath: /var/lib/psql
env: - name: cvat-redis
- name: DJANGO_MODWSGI_EXTRA_ARGS image: redis:4.0-alpine
value: "" ports:
- name: ALLOWED_HOSTS - containerPort: 6379
value: '*' name: tcp
- name: CVAT_REDIS_HOST - name: cvat
value: localhost image: onepanel/cvat:0.15.0_cvat.1.0.0
- name: CVAT_POSTGRES_HOST env:
value: localhost - name: DJANGO_MODWSGI_EXTRA_ARGS
- name: CVAT_SHARE_URL value: ""
value: /cvat/data - name: ALLOWED_HOSTS
- name: CVAT_SHARE_DIR value: '*'
value: /share - name: CVAT_REDIS_HOST
- name: CVAT_KEYS_DIR value: localhost
value: /cvat/keys - name: CVAT_POSTGRES_HOST
- name: CVAT_DATA_DIR value: localhost
value: /cvat/data - name: CVAT_SHARE_URL
- name: CVAT_MODELS_DIR value: /cvat/data
value: /cvat/models - name: CVAT_SHARE_DIR
- name: CVAT_LOGS_DIR value: /share
value: /cvat/logs - name: CVAT_KEYS_DIR
- name: ONEPANEL_SYNC_DIRECTORY value: /cvat/keys
value: '{{workspace.parameters.sync-directory}}' - name: CVAT_DATA_DIR
- name: NVIDIA_VISIBLE_DEVICES value: /cvat/data
value: all - name: CVAT_MODELS_DIR
- name: NVIDIA_DRIVER_CAPABILITIES value: /cvat/models
value: compute,utility - name: CVAT_LOGS_DIR
- name: NVIDIA_REQUIRE_CUDA value: /cvat/logs
value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411" - name: ONEPANEL_SYNC_DIRECTORY
ports: value: '{{workspace.parameters.sync-directory}}'
- containerPort: 8080 - name: NVIDIA_VISIBLE_DEVICES
name: http value: all
volumeMounts: - name: NVIDIA_DRIVER_CAPABILITIES
- name: cvat-data value: compute,utility
mountPath: /cvat - name: NVIDIA_REQUIRE_CUDA
- name: share value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
mountPath: /share ports:
- name: sys-namespace-config - containerPort: 8080
mountPath: /etc/onepanel name: http
readOnly: true volumeMounts:
- name: cvat-ui - name: cvat-data
image: onepanel/cvat-ui:0.15.0_cvat.1.0.0 mountPath: /cvat
ports: - name: share
- containerPort: 80 mountPath: /share
name: http - name: sys-namespace-config
# You can add multiple FileSyncer sidecar containers if needed mountPath: /etc/onepanel
- name: filesyncer readOnly: true
image: onepanel/filesyncer:s3 - name: cvat-ui
imagePullPolicy: Always image: onepanel/cvat-ui:0.15.0_cvat.1.0.0
args: ports:
- download - containerPort: 80
- -server-prefix=/sys/filesyncer name: http
env: # You can add multiple FileSyncer sidecar containers if needed
- name: FS_PATH - name: filesyncer
value: /mnt/share image: onepanel/filesyncer:s3
- name: FS_PREFIX imagePullPolicy: Always
value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}' args:
volumeMounts: - download
- name: share - -server-prefix=/sys/filesyncer
mountPath: /mnt/share env:
- name: sys-namespace-config - name: FS_PATH
mountPath: /etc/onepanel value: /mnt/share
readOnly: true - name: FS_PREFIX
ports: value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
- name: cvat-ui volumeMounts:
port: 80 - name: share
protocol: TCP mountPath: /mnt/share
targetPort: 80 - name: sys-namespace-config
- name: cvat mountPath: /etc/onepanel
port: 8080 readOnly: true
protocol: TCP ports:
targetPort: 8080 - name: cvat-ui
- name: fs port: 80
port: 8888 protocol: TCP
protocol: TCP targetPort: 80
targetPort: 8888 - name: cvat
routes: port: 8080
- match: protocol: TCP
- uri: targetPort: 8080
prefix: /sys/filesyncer - name: fs
route: port: 8888
- destination: protocol: TCP
port: targetPort: 8888
number: 8888 routes:
- match: - match:
- uri: - uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.* prefix: /sys/filesyncer
- queryParams: route:
id: - destination:
regex: \d+.* port:
route: number: 8888
- destination: - match:
port: - uri:
number: 8080 regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- match: - queryParams:
- uri: id:
prefix: / regex: \d+.*
route: route:
- destination: - destination:
port: port:
number: 80 number: 8080
volumeClaimTemplates: - match:
- metadata: - uri:
name: db prefix: /
spec: route:
accessModes: ["ReadWriteOnce"] - destination:
resources: port:
requests: number: 80
storage: 20Gi volumeClaimTemplates:
# DAG Workflow to be executed once a Workspace action completes (optional) - metadata:
# Uncomment the lines below if you want to send Slack notifications name: db
#postExecutionWorkflow: spec:
# entrypoint: main accessModes: ["ReadWriteOnce"]
# templates: resources:
# - name: main requests:
# dag: storage: 20Gi
# tasks: # DAG Workflow to be executed once a Workspace action completes (optional)
# - name: slack-notify # Uncomment the lines below if you want to send Slack notifications
# template: slack-notify #postExecutionWorkflow:
# - name: slack-notify # entrypoint: main
# container: # templates:
# image: technosophos/slack-notify # - name: main
# args: # dag:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify # tasks:
# command: # - name: slack-notify
# - sh # template: slack-notify
# - -c # - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -1,159 +1,166 @@
# Workspace arguments metadata:
arguments: name: CVAT
parameters: kind: Workspace
- name: sync-directory version: 20201113094916
displayName: Directory to sync raw input and training output action: update
value: workflow-data description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'. spec:
containers: # Workspace arguments
- name: cvat-db arguments:
image: postgres:10-alpine parameters:
env: - name: sync-directory
- name: POSTGRES_USER displayName: Directory to sync raw input and training output
value: root value: workflow-data
- name: POSTGRES_DB hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'.
value: cvat containers:
- name: POSTGRES_HOST_AUTH_METHOD - name: cvat-db
value: trust image: postgres:10-alpine
- name: PGDATA env:
value: /var/lib/psql/data - name: POSTGRES_USER
ports: value: root
- containerPort: 5432 - name: POSTGRES_DB
name: tcp value: cvat
volumeMounts: - name: POSTGRES_HOST_AUTH_METHOD
- name: db value: trust
mountPath: /var/lib/psql - name: PGDATA
- name: cvat-redis value: /var/lib/psql/data
image: redis:4.0-alpine ports:
ports: - containerPort: 5432
- containerPort: 6379 name: tcp
name: tcp volumeMounts:
- name: cvat - name: db
image: onepanel/cvat:0.16.0_cvat.1.0.0 mountPath: /var/lib/psql
env: - name: cvat-redis
- name: DJANGO_MODWSGI_EXTRA_ARGS image: redis:4.0-alpine
value: "" ports:
- name: ALLOWED_HOSTS - containerPort: 6379
value: '*' name: tcp
- name: CVAT_REDIS_HOST - name: cvat
value: localhost image: onepanel/cvat:0.16.0_cvat.1.0.0
- name: CVAT_POSTGRES_HOST env:
value: localhost - name: DJANGO_MODWSGI_EXTRA_ARGS
- name: CVAT_SHARE_URL value: ""
value: /cvat/data - name: ALLOWED_HOSTS
- name: CVAT_SHARE_DIR value: '*'
value: /share - name: CVAT_REDIS_HOST
- name: CVAT_KEYS_DIR value: localhost
value: /cvat/keys - name: CVAT_POSTGRES_HOST
- name: CVAT_DATA_DIR value: localhost
value: /cvat/data - name: CVAT_SHARE_URL
- name: CVAT_MODELS_DIR value: /cvat/data
value: /cvat/models - name: CVAT_SHARE_DIR
- name: CVAT_LOGS_DIR value: /share
value: /cvat/logs - name: CVAT_KEYS_DIR
- name: ONEPANEL_SYNC_DIRECTORY value: /cvat/keys
value: '{{workspace.parameters.sync-directory}}' - name: CVAT_DATA_DIR
- name: NVIDIA_VISIBLE_DEVICES value: /cvat/data
value: all - name: CVAT_MODELS_DIR
- name: NVIDIA_DRIVER_CAPABILITIES value: /cvat/models
value: compute,utility - name: CVAT_LOGS_DIR
- name: NVIDIA_REQUIRE_CUDA value: /cvat/logs
value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411" - name: ONEPANEL_SYNC_DIRECTORY
ports: value: '{{workspace.parameters.sync-directory}}'
- containerPort: 8080 - name: NVIDIA_VISIBLE_DEVICES
name: http value: all
volumeMounts: - name: NVIDIA_DRIVER_CAPABILITIES
- name: cvat-data value: compute,utility
mountPath: /cvat - name: NVIDIA_REQUIRE_CUDA
- name: share value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
mountPath: /share ports:
- name: sys-namespace-config - containerPort: 8080
mountPath: /etc/onepanel name: http
readOnly: true volumeMounts:
- name: cvat-ui - name: cvat-data
image: onepanel/cvat-ui:0.16.0_cvat.1.0.0 mountPath: /cvat
ports: - name: share
- containerPort: 80 mountPath: /share
name: http - name: sys-namespace-config
# You can add multiple FileSyncer sidecar containers if needed mountPath: /etc/onepanel
- name: filesyncer readOnly: true
image: onepanel/filesyncer:s3 - name: cvat-ui
imagePullPolicy: Always image: onepanel/cvat-ui:0.16.0_cvat.1.0.0
args: ports:
- download - containerPort: 80
- -server-prefix=/sys/filesyncer name: http
env: # You can add multiple FileSyncer sidecar containers if needed
- name: FS_PATH - name: filesyncer
value: /mnt/share image: onepanel/filesyncer:s3
- name: FS_PREFIX imagePullPolicy: Always
value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}' args:
volumeMounts: - download
- name: share - -server-prefix=/sys/filesyncer
mountPath: /mnt/share env:
- name: sys-namespace-config - name: FS_PATH
mountPath: /etc/onepanel value: /mnt/share
readOnly: true - name: FS_PREFIX
ports: value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
- name: cvat-ui volumeMounts:
port: 80 - name: share
protocol: TCP mountPath: /mnt/share
targetPort: 80 - name: sys-namespace-config
- name: cvat mountPath: /etc/onepanel
port: 8080 readOnly: true
protocol: TCP ports:
targetPort: 8080 - name: cvat-ui
- name: fs port: 80
port: 8888 protocol: TCP
protocol: TCP targetPort: 80
targetPort: 8888 - name: cvat
routes: port: 8080
- match: protocol: TCP
- uri: targetPort: 8080
prefix: /sys/filesyncer - name: fs
route: port: 8888
- destination: protocol: TCP
port: targetPort: 8888
number: 8888 routes:
- match: - match:
- uri: - uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.* prefix: /sys/filesyncer
- queryParams: route:
id: - destination:
regex: \d+.* port:
route: number: 8888
- destination: - match:
port: - uri:
number: 8080 regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- match: - queryParams:
- uri: id:
prefix: / regex: \d+.*
route: route:
- destination: - destination:
port: port:
number: 80 number: 8080
volumeClaimTemplates: - match:
- metadata: - uri:
name: db prefix: /
spec: route:
accessModes: ["ReadWriteOnce"] - destination:
resources: port:
requests: number: 80
storage: 20Gi volumeClaimTemplates:
# DAG Workflow to be executed once a Workspace action completes (optional) - metadata:
# Uncomment the lines below if you want to send Slack notifications name: db
#postExecutionWorkflow: spec:
# entrypoint: main accessModes: ["ReadWriteOnce"]
# templates: resources:
# - name: main requests:
# dag: storage: 20Gi
# tasks: # DAG Workflow to be executed once a Workspace action completes (optional)
# - name: slack-notify # Uncomment the lines below if you want to send Slack notifications
# template: slack-notify #postExecutionWorkflow:
# - name: slack-notify # entrypoint: main
# container: # templates:
# image: technosophos/slack-notify # - name: main
# args: # dag:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify # tasks:
# command: # - name: slack-notify
# - sh # template: slack-notify
# - -c # - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -1,161 +1,168 @@
# Workspace arguments metadata:
arguments: name: CVAT
parameters: kind: Workspace
- name: sync-directory version: 20201115133046
displayName: Directory to sync raw input and training output action: update
value: workflow-data description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'. spec:
containers: # Workspace arguments
- name: cvat-db arguments:
image: postgres:10-alpine parameters:
env: - name: sync-directory
- name: POSTGRES_USER displayName: Directory to sync raw input and training output
value: root value: workflow-data
- name: POSTGRES_DB hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'.
value: cvat containers:
- name: POSTGRES_HOST_AUTH_METHOD - name: cvat-db
value: trust image: postgres:10-alpine
- name: PGDATA env:
value: /var/lib/psql/data - name: POSTGRES_USER
ports: value: root
- containerPort: 5432 - name: POSTGRES_DB
name: tcp value: cvat
volumeMounts: - name: POSTGRES_HOST_AUTH_METHOD
- name: db value: trust
mountPath: /var/lib/psql - name: PGDATA
- name: cvat-redis value: /var/lib/psql/data
image: redis:4.0-alpine ports:
ports: - containerPort: 5432
- containerPort: 6379 name: tcp
name: tcp volumeMounts:
- name: cvat - name: db
image: onepanel/cvat:0.16.0_cvat.1.0.0 mountPath: /var/lib/psql
env: - name: cvat-redis
- name: DJANGO_MODWSGI_EXTRA_ARGS image: redis:4.0-alpine
value: "" ports:
- name: ALLOWED_HOSTS - containerPort: 6379
value: '*' name: tcp
- name: CVAT_REDIS_HOST - name: cvat
value: localhost image: onepanel/cvat:0.16.0_cvat.1.0.0
- name: CVAT_POSTGRES_HOST env:
value: localhost - name: DJANGO_MODWSGI_EXTRA_ARGS
- name: CVAT_SHARE_URL value: ""
value: /cvat/data - name: ALLOWED_HOSTS
- name: CVAT_SHARE_DIR value: '*'
value: /share - name: CVAT_REDIS_HOST
- name: CVAT_DATA_DIR value: localhost
value: /cvat/data - name: CVAT_POSTGRES_HOST
- name: CVAT_MEDIA_DATA_DIR value: localhost
value: /cvat/data/data - name: CVAT_SHARE_URL
- name: CVAT_KEYS_DIR value: /cvat/data
value: /cvat/data/keys - name: CVAT_SHARE_DIR
- name: CVAT_MODELS_DIR value: /share
value: /cvat/data/models - name: CVAT_DATA_DIR
- name: CVAT_LOGS_DIR value: /cvat/data
value: /cvat/logs - name: CVAT_MEDIA_DATA_DIR
- name: ONEPANEL_SYNC_DIRECTORY value: /cvat/data/data
value: '{{workspace.parameters.sync-directory}}' - name: CVAT_KEYS_DIR
- name: NVIDIA_VISIBLE_DEVICES value: /cvat/data/keys
value: all - name: CVAT_MODELS_DIR
- name: NVIDIA_DRIVER_CAPABILITIES value: /cvat/data/models
value: compute,utility - name: CVAT_LOGS_DIR
- name: NVIDIA_REQUIRE_CUDA value: /cvat/logs
value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411" - name: ONEPANEL_SYNC_DIRECTORY
ports: value: '{{workspace.parameters.sync-directory}}'
- containerPort: 8080 - name: NVIDIA_VISIBLE_DEVICES
name: http value: all
volumeMounts: - name: NVIDIA_DRIVER_CAPABILITIES
- name: cvat-data value: compute,utility
mountPath: /cvat - name: NVIDIA_REQUIRE_CUDA
- name: share value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
mountPath: /share ports:
- name: sys-namespace-config - containerPort: 8080
mountPath: /etc/onepanel name: http
readOnly: true volumeMounts:
- name: cvat-ui - name: cvat-data
image: onepanel/cvat-ui:0.16.0_cvat.1.0.0 mountPath: /cvat
ports: - name: share
- containerPort: 80 mountPath: /share
name: http - name: sys-namespace-config
# You can add multiple FileSyncer sidecar containers if needed mountPath: /etc/onepanel
- name: filesyncer readOnly: true
image: onepanel/filesyncer:s3 - name: cvat-ui
imagePullPolicy: Always image: onepanel/cvat-ui:0.16.0_cvat.1.0.0
args: ports:
- download - containerPort: 80
- -server-prefix=/sys/filesyncer name: http
env: # You can add multiple FileSyncer sidecar containers if needed
- name: FS_PATH - name: filesyncer
value: /mnt/share image: onepanel/filesyncer:s3
- name: FS_PREFIX imagePullPolicy: Always
value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}' args:
volumeMounts: - download
- name: share - -server-prefix=/sys/filesyncer
mountPath: /mnt/share env:
- name: sys-namespace-config - name: FS_PATH
mountPath: /etc/onepanel value: /mnt/share
readOnly: true - name: FS_PREFIX
ports: value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
- name: cvat-ui volumeMounts:
port: 80 - name: share
protocol: TCP mountPath: /mnt/share
targetPort: 80 - name: sys-namespace-config
- name: cvat mountPath: /etc/onepanel
port: 8080 readOnly: true
protocol: TCP ports:
targetPort: 8080 - name: cvat-ui
- name: fs port: 80
port: 8888 protocol: TCP
protocol: TCP targetPort: 80
targetPort: 8888 - name: cvat
routes: port: 8080
- match: protocol: TCP
- uri: targetPort: 8080
prefix: /sys/filesyncer - name: fs
route: port: 8888
- destination: protocol: TCP
port: targetPort: 8888
number: 8888 routes:
- match: - match:
- uri: - uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.* prefix: /sys/filesyncer
- queryParams: route:
id: - destination:
regex: \d+.* port:
route: number: 8888
- destination: - match:
port: - uri:
number: 8080 regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- match: - queryParams:
- uri: id:
prefix: / regex: \d+.*
route: route:
- destination: - destination:
port: port:
number: 80 number: 8080
volumeClaimTemplates: - match:
- metadata: - uri:
name: db prefix: /
spec: route:
accessModes: ["ReadWriteOnce"] - destination:
resources: port:
requests: number: 80
storage: 20Gi volumeClaimTemplates:
# DAG Workflow to be executed once a Workspace action completes (optional) - metadata:
# Uncomment the lines below if you want to send Slack notifications name: db
#postExecutionWorkflow: spec:
# entrypoint: main accessModes: ["ReadWriteOnce"]
# templates: resources:
# - name: main requests:
# dag: storage: 20Gi
# tasks: # DAG Workflow to be executed once a Workspace action completes (optional)
# - name: slack-notify # Uncomment the lines below if you want to send Slack notifications
# template: slack-notify #postExecutionWorkflow:
# - name: slack-notify # entrypoint: main
# container: # templates:
# image: technosophos/slack-notify # - name: main
# args: # dag:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify # tasks:
# command: # - name: slack-notify
# - sh # template: slack-notify
# - -c # - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -1,163 +1,170 @@
# Workspace arguments metadata:
arguments: name: CVAT
parameters: kind: Workspace
- name: sync-directory version: 20201211161117
displayName: Directory to sync raw input and training output action: update
value: workflow-data description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'. spec:
containers: # Workspace arguments
- name: cvat-db arguments:
image: postgres:10-alpine parameters:
env: - name: sync-directory
- name: POSTGRES_USER displayName: Directory to sync raw input and training output
value: root value: workflow-data
- name: POSTGRES_DB hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'.
value: cvat containers:
- name: POSTGRES_HOST_AUTH_METHOD - name: cvat-db
value: trust image: postgres:10-alpine
- name: PGDATA env:
value: /var/lib/psql/data - name: POSTGRES_USER
ports: value: root
- containerPort: 5432 - name: POSTGRES_DB
name: tcp value: cvat
volumeMounts: - name: POSTGRES_HOST_AUTH_METHOD
- name: db value: trust
mountPath: /var/lib/psql - name: PGDATA
- name: cvat-redis value: /var/lib/psql/data
image: redis:4.0-alpine ports:
ports: - containerPort: 5432
- containerPort: 6379 name: tcp
name: tcp volumeMounts:
- name: cvat - name: db
image: onepanel/cvat:0.16.0_cvat.1.0.0 mountPath: /var/lib/psql
env: - name: cvat-redis
- name: DJANGO_MODWSGI_EXTRA_ARGS image: redis:4.0-alpine
value: "" ports:
- name: ALLOWED_HOSTS - containerPort: 6379
value: '*' name: tcp
- name: CVAT_REDIS_HOST - name: cvat
value: localhost image: onepanel/cvat:0.16.0_cvat.1.0.0
- name: CVAT_POSTGRES_HOST env:
value: localhost - name: DJANGO_MODWSGI_EXTRA_ARGS
- name: CVAT_SHARE_URL value: ""
value: /cvat/data - name: ALLOWED_HOSTS
- name: CVAT_SHARE_DIR value: '*'
value: /share - name: CVAT_REDIS_HOST
- name: CVAT_DATA_DIR value: localhost
value: /cvat/data - name: CVAT_POSTGRES_HOST
- name: CVAT_MEDIA_DATA_DIR value: localhost
value: /cvat/data/data - name: CVAT_SHARE_URL
- name: CVAT_KEYS_DIR value: /cvat/data
value: /cvat/data/keys - name: CVAT_SHARE_DIR
- name: CVAT_MODELS_DIR value: /share
value: /cvat/data/models - name: CVAT_DATA_DIR
- name: CVAT_LOGS_DIR value: /cvat/data
value: /cvat/logs - name: CVAT_MEDIA_DATA_DIR
- name: ONEPANEL_SYNC_DIRECTORY value: /cvat/data/data
value: '{{workspace.parameters.sync-directory}}' - name: CVAT_KEYS_DIR
- name: NVIDIA_VISIBLE_DEVICES value: /cvat/data/keys
value: all - name: CVAT_MODELS_DIR
- name: NVIDIA_DRIVER_CAPABILITIES value: /cvat/data/models
value: compute,utility - name: CVAT_LOGS_DIR
- name: NVIDIA_REQUIRE_CUDA value: /cvat/logs
value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411" - name: ONEPANEL_SYNC_DIRECTORY
- name: ONEPANEL_MAIN_CONTAINER value: '{{workspace.parameters.sync-directory}}'
value: 'true' - name: NVIDIA_VISIBLE_DEVICES
ports: value: all
- containerPort: 8080 - name: NVIDIA_DRIVER_CAPABILITIES
name: http value: compute,utility
volumeMounts: - name: NVIDIA_REQUIRE_CUDA
- name: cvat-data value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
mountPath: /cvat - name: ONEPANEL_MAIN_CONTAINER
- name: share value: 'true'
mountPath: /share ports:
- name: sys-namespace-config - containerPort: 8080
mountPath: /etc/onepanel name: http
readOnly: true volumeMounts:
- name: cvat-ui - name: cvat-data
image: onepanel/cvat-ui:0.16.0_cvat.1.0.0 mountPath: /cvat
ports: - name: share
- containerPort: 80 mountPath: /share
name: http - name: sys-namespace-config
# You can add multiple FileSyncer sidecar containers if needed mountPath: /etc/onepanel
- name: filesyncer readOnly: true
image: onepanel/filesyncer:s3 - name: cvat-ui
imagePullPolicy: Always image: onepanel/cvat-ui:0.16.0_cvat.1.0.0
args: ports:
- download - containerPort: 80
- -server-prefix=/sys/filesyncer name: http
env: # You can add multiple FileSyncer sidecar containers if needed
- name: FS_PATH - name: filesyncer
value: /mnt/share image: onepanel/filesyncer:s3
- name: FS_PREFIX imagePullPolicy: Always
value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}' args:
volumeMounts: - download
- name: share - -server-prefix=/sys/filesyncer
mountPath: /mnt/share env:
- name: sys-namespace-config - name: FS_PATH
mountPath: /etc/onepanel value: /mnt/share
readOnly: true - name: FS_PREFIX
ports: value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
- name: cvat-ui volumeMounts:
port: 80 - name: share
protocol: TCP mountPath: /mnt/share
targetPort: 80 - name: sys-namespace-config
- name: cvat mountPath: /etc/onepanel
port: 8080 readOnly: true
protocol: TCP ports:
targetPort: 8080 - name: cvat-ui
- name: fs port: 80
port: 8888 protocol: TCP
protocol: TCP targetPort: 80
targetPort: 8888 - name: cvat
routes: port: 8080
- match: protocol: TCP
- uri: targetPort: 8080
prefix: /sys/filesyncer - name: fs
route: port: 8888
- destination: protocol: TCP
port: targetPort: 8888
number: 8888 routes:
- match: - match:
- uri: - uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.* prefix: /sys/filesyncer
- queryParams: route:
id: - destination:
regex: \d+.* port:
route: number: 8888
- destination: - match:
port: - uri:
number: 8080 regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- match: - queryParams:
- uri: id:
prefix: / regex: \d+.*
route: route:
- destination: - destination:
port: port:
number: 80 number: 8080
volumeClaimTemplates: - match:
- metadata: - uri:
name: db prefix: /
spec: route:
accessModes: ["ReadWriteOnce"] - destination:
resources: port:
requests: number: 80
storage: 20Gi volumeClaimTemplates:
# DAG Workflow to be executed once a Workspace action completes (optional) - metadata:
# Uncomment the lines below if you want to send Slack notifications name: db
#postExecutionWorkflow: spec:
# entrypoint: main accessModes: ["ReadWriteOnce"]
# templates: resources:
# - name: main requests:
# dag: storage: 20Gi
# tasks: # DAG Workflow to be executed once a Workspace action completes (optional)
# - name: slack-notify # Uncomment the lines below if you want to send Slack notifications
# template: slack-notify #postExecutionWorkflow:
# - name: slack-notify # entrypoint: main
# container: # templates:
# image: technosophos/slack-notify # - name: main
# args: # dag:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify # tasks:
# command: # - name: slack-notify
# - sh # template: slack-notify
# - -c # - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -1,163 +1,170 @@
# Workspace arguments metadata:
arguments: name: CVAT
parameters: kind: Workspace
- name: sync-directory version: 20210107094725
displayName: Directory to sync raw input and training output action: update
value: workflow-data description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'. spec:
containers: # Workspace arguments
- name: cvat-db arguments:
image: postgres:10-alpine parameters:
env: - name: sync-directory
- name: POSTGRES_USER displayName: Directory to sync raw input and training output
value: root value: workflow-data
- name: POSTGRES_DB hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'.
value: cvat containers:
- name: POSTGRES_HOST_AUTH_METHOD - name: cvat-db
value: trust image: postgres:10-alpine
- name: PGDATA env:
value: /var/lib/psql/data - name: POSTGRES_USER
ports: value: root
- containerPort: 5432 - name: POSTGRES_DB
name: tcp value: cvat
volumeMounts: - name: POSTGRES_HOST_AUTH_METHOD
- name: db value: trust
mountPath: /var/lib/psql - name: PGDATA
- name: cvat-redis value: /var/lib/psql/data
image: redis:4.0-alpine ports:
ports: - containerPort: 5432
- containerPort: 6379 name: tcp
name: tcp volumeMounts:
- name: cvat - name: db
image: onepanel/cvat:0.17.0_cvat.1.0.0 mountPath: /var/lib/psql
env: - name: cvat-redis
- name: DJANGO_MODWSGI_EXTRA_ARGS image: redis:4.0-alpine
value: "" ports:
- name: ALLOWED_HOSTS - containerPort: 6379
value: '*' name: tcp
- name: CVAT_REDIS_HOST - name: cvat
value: localhost image: onepanel/cvat:0.17.0_cvat.1.0.0
- name: CVAT_POSTGRES_HOST env:
value: localhost - name: DJANGO_MODWSGI_EXTRA_ARGS
- name: CVAT_SHARE_URL value: ""
value: /cvat/data - name: ALLOWED_HOSTS
- name: CVAT_SHARE_DIR value: '*'
value: /share - name: CVAT_REDIS_HOST
- name: CVAT_DATA_DIR value: localhost
value: /cvat/data - name: CVAT_POSTGRES_HOST
- name: CVAT_MEDIA_DATA_DIR value: localhost
value: /cvat/data/data - name: CVAT_SHARE_URL
- name: CVAT_KEYS_DIR value: /cvat/data
value: /cvat/data/keys - name: CVAT_SHARE_DIR
- name: CVAT_MODELS_DIR value: /share
value: /cvat/data/models - name: CVAT_DATA_DIR
- name: CVAT_LOGS_DIR value: /cvat/data
value: /cvat/logs - name: CVAT_MEDIA_DATA_DIR
- name: ONEPANEL_SYNC_DIRECTORY value: /cvat/data/data
value: '{{workspace.parameters.sync-directory}}' - name: CVAT_KEYS_DIR
- name: NVIDIA_VISIBLE_DEVICES value: /cvat/data/keys
value: all - name: CVAT_MODELS_DIR
- name: NVIDIA_DRIVER_CAPABILITIES value: /cvat/data/models
value: compute,utility - name: CVAT_LOGS_DIR
- name: NVIDIA_REQUIRE_CUDA value: /cvat/logs
value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411" - name: ONEPANEL_SYNC_DIRECTORY
- name: ONEPANEL_MAIN_CONTAINER value: '{{workspace.parameters.sync-directory}}'
value: 'true' - name: NVIDIA_VISIBLE_DEVICES
ports: value: all
- containerPort: 8080 - name: NVIDIA_DRIVER_CAPABILITIES
name: http value: compute,utility
volumeMounts: - name: NVIDIA_REQUIRE_CUDA
- name: cvat-data value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
mountPath: /cvat - name: ONEPANEL_MAIN_CONTAINER
- name: share value: 'true'
mountPath: /share ports:
- name: sys-namespace-config - containerPort: 8080
mountPath: /etc/onepanel name: http
readOnly: true volumeMounts:
- name: cvat-ui - name: cvat-data
image: onepanel/cvat-ui:0.17.0_cvat.1.0.0 mountPath: /cvat
ports: - name: share
- containerPort: 80 mountPath: /share
name: http - name: sys-namespace-config
# You can add multiple FileSyncer sidecar containers if needed mountPath: /etc/onepanel
- name: filesyncer readOnly: true
image: onepanel/filesyncer:0.17.0 - name: cvat-ui
imagePullPolicy: Always image: onepanel/cvat-ui:0.17.0_cvat.1.0.0
args: ports:
- download - containerPort: 80
- -server-prefix=/sys/filesyncer name: http
env: # You can add multiple FileSyncer sidecar containers if needed
- name: FS_PATH - name: filesyncer
value: /mnt/share image: onepanel/filesyncer:0.17.0
- name: FS_PREFIX imagePullPolicy: Always
value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}' args:
volumeMounts: - download
- name: share - -server-prefix=/sys/filesyncer
mountPath: /mnt/share env:
- name: sys-namespace-config - name: FS_PATH
mountPath: /etc/onepanel value: /mnt/share
readOnly: true - name: FS_PREFIX
ports: value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
- name: cvat-ui volumeMounts:
port: 80 - name: share
protocol: TCP mountPath: /mnt/share
targetPort: 80 - name: sys-namespace-config
- name: cvat mountPath: /etc/onepanel
port: 8080 readOnly: true
protocol: TCP ports:
targetPort: 8080 - name: cvat-ui
- name: fs port: 80
port: 8888 protocol: TCP
protocol: TCP targetPort: 80
targetPort: 8888 - name: cvat
routes: port: 8080
- match: protocol: TCP
- uri: targetPort: 8080
prefix: /sys/filesyncer - name: fs
route: port: 8888
- destination: protocol: TCP
port: targetPort: 8888
number: 8888 routes:
- match: - match:
- uri: - uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.* prefix: /sys/filesyncer
- queryParams: route:
id: - destination:
regex: \d+.* port:
route: number: 8888
- destination: - match:
port: - uri:
number: 8080 regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- match: - queryParams:
- uri: id:
prefix: / regex: \d+.*
route: route:
- destination: - destination:
port: port:
number: 80 number: 8080
volumeClaimTemplates: - match:
- metadata: - uri:
name: db prefix: /
spec: route:
accessModes: ["ReadWriteOnce"] - destination:
resources: port:
requests: number: 80
storage: 20Gi volumeClaimTemplates:
# DAG Workflow to be executed once a Workspace action completes (optional) - metadata:
# Uncomment the lines below if you want to send Slack notifications name: db
#postExecutionWorkflow: spec:
# entrypoint: main accessModes: ["ReadWriteOnce"]
# templates: resources:
# - name: main requests:
# dag: storage: 20Gi
# tasks: # DAG Workflow to be executed once a Workspace action completes (optional)
# - name: slack-notify # Uncomment the lines below if you want to send Slack notifications
# template: slack-notify #postExecutionWorkflow:
# - name: slack-notify # entrypoint: main
# container: # templates:
# image: technosophos/slack-notify # - name: main
# args: # dag:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify # tasks:
# command: # - name: slack-notify
# - sh # template: slack-notify
# - -c # - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -1,134 +1,141 @@
containers: metadata:
- name: cvat-db name: CVAT
image: postgres:10-alpine kind: Workspace
env: version: 20210129134326
- name: POSTGRES_USER action: update
value: root description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
- name: POSTGRES_DB spec:
value: cvat containers:
- name: POSTGRES_HOST_AUTH_METHOD - name: cvat-db
value: trust image: postgres:10-alpine
- name: PGDATA env:
value: /var/lib/psql/data - name: POSTGRES_USER
ports: value: root
- containerPort: 5432 - name: POSTGRES_DB
name: tcp value: cvat
volumeMounts: - name: POSTGRES_HOST_AUTH_METHOD
- name: db value: trust
mountPath: /var/lib/psql - name: PGDATA
- name: cvat-redis value: /var/lib/psql/data
image: redis:4.0-alpine ports:
ports: - containerPort: 5432
- containerPort: 6379 name: tcp
name: tcp volumeMounts:
- name: cvat - name: db
image: onepanel/cvat:v0.18.0_cvat.1.0.0 mountPath: /var/lib/psql
env: - name: cvat-redis
- name: DJANGO_MODWSGI_EXTRA_ARGS image: redis:4.0-alpine
value: "" ports:
- name: ALLOWED_HOSTS - containerPort: 6379
value: '*' name: tcp
- name: CVAT_REDIS_HOST - name: cvat
value: localhost image: onepanel/cvat:v0.18.0_cvat.1.0.0
- name: CVAT_POSTGRES_HOST env:
value: localhost - name: DJANGO_MODWSGI_EXTRA_ARGS
- name: CVAT_SHARE_URL value: ""
value: /cvat/data - name: ALLOWED_HOSTS
- name: CVAT_SHARE_DIR value: '*'
value: /share - name: CVAT_REDIS_HOST
- name: CVAT_DATA_DIR value: localhost
value: /cvat/data - name: CVAT_POSTGRES_HOST
- name: CVAT_MEDIA_DATA_DIR value: localhost
value: /cvat/data/data - name: CVAT_SHARE_URL
- name: CVAT_KEYS_DIR value: /cvat/data
value: /cvat/data/keys - name: CVAT_SHARE_DIR
- name: CVAT_MODELS_DIR value: /share
value: /cvat/data/models - name: CVAT_DATA_DIR
- name: CVAT_LOGS_DIR value: /cvat/data
value: /cvat/logs - name: CVAT_MEDIA_DATA_DIR
- name: CVAT_ANNOTATIONS_OBJECT_STORAGE_PREFIX value: /cvat/data/data
value: 'artifacts/$(ONEPANEL_RESOURCE_NAMESPACE)/annotations/' - name: CVAT_KEYS_DIR
- name: CVAT_ONEPANEL_WORKFLOWS_LABEL value: /cvat/data/keys
value: 'key=used-by,value=cvat' - name: CVAT_MODELS_DIR
- name: NVIDIA_VISIBLE_DEVICES value: /cvat/data/models
value: all - name: CVAT_LOGS_DIR
- name: NVIDIA_DRIVER_CAPABILITIES value: /cvat/logs
value: compute,utility - name: CVAT_ANNOTATIONS_OBJECT_STORAGE_PREFIX
- name: NVIDIA_REQUIRE_CUDA value: 'artifacts/$(ONEPANEL_RESOURCE_NAMESPACE)/annotations/'
value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411" - name: CVAT_ONEPANEL_WORKFLOWS_LABEL
- name: ONEPANEL_MAIN_CONTAINER value: 'key=used-by,value=cvat'
value: 'true' - name: NVIDIA_VISIBLE_DEVICES
ports: value: all
- containerPort: 8080 - name: NVIDIA_DRIVER_CAPABILITIES
name: http value: compute,utility
volumeMounts: - name: NVIDIA_REQUIRE_CUDA
- name: cvat-data value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
mountPath: /cvat - name: ONEPANEL_MAIN_CONTAINER
- name: share value: 'true'
mountPath: /share ports:
- name: sys-namespace-config - containerPort: 8080
mountPath: /etc/onepanel name: http
readOnly: true volumeMounts:
- name: cvat-ui - name: cvat-data
image: onepanel/cvat-ui:v0.18.0_cvat.1.0.0 mountPath: /cvat
ports: - name: share
- containerPort: 80 mountPath: /share
name: http - name: sys-namespace-config
- name: sys-filesyncer mountPath: /etc/onepanel
image: onepanel/filesyncer:v0.18.0 readOnly: true
imagePullPolicy: Always - name: cvat-ui
args: image: onepanel/cvat-ui:v0.18.0_cvat.1.0.0
- server ports:
- -server-prefix=/sys/filesyncer - containerPort: 80
volumeMounts: name: http
- name: share - name: sys-filesyncer
mountPath: /share image: onepanel/filesyncer:v0.18.0
- name: sys-namespace-config imagePullPolicy: Always
mountPath: /etc/onepanel args:
readOnly: true - server
ports: - -server-prefix=/sys/filesyncer
- name: cvat-ui volumeMounts:
port: 80 - name: share
protocol: TCP mountPath: /share
targetPort: 80 - name: sys-namespace-config
- name: cvat mountPath: /etc/onepanel
port: 8080 readOnly: true
protocol: TCP ports:
targetPort: 8080 - name: cvat-ui
- name: fs port: 80
port: 8888 protocol: TCP
protocol: TCP targetPort: 80
targetPort: 8888 - name: cvat
routes: port: 8080
- match: protocol: TCP
- uri: targetPort: 8080
prefix: /sys/filesyncer - name: fs
route: port: 8888
- destination: protocol: TCP
port: targetPort: 8888
number: 8888 routes:
- match: - match:
- uri: - uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.* prefix: /sys/filesyncer
- queryParams: route:
id: - destination:
regex: \d+.* port:
route: number: 8888
- destination: - match:
port: - uri:
number: 8080 regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- match: - queryParams:
- uri: id:
prefix: / regex: \d+.*
route: route:
- destination: - destination:
port: port:
number: 80 number: 8080
volumeClaimTemplates: - match:
- metadata: - uri:
name: db prefix: /
spec: route:
accessModes: ["ReadWriteOnce"] - destination:
resources: port:
requests: number: 80
storage: 20Gi volumeClaimTemplates:
- metadata:
name: db
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 20Gi

View File

@@ -1,134 +1,141 @@
containers: metadata:
- name: cvat-db name: CVAT
image: postgres:10-alpine kind: Workspace
env: version: 20210224180017
- name: POSTGRES_USER action: update
value: root description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
- name: POSTGRES_DB spec:
value: cvat containers:
- name: POSTGRES_HOST_AUTH_METHOD - name: cvat-db
value: trust image: postgres:10-alpine
- name: PGDATA env:
value: /var/lib/psql/data - name: POSTGRES_USER
ports: value: root
- containerPort: 5432 - name: POSTGRES_DB
name: tcp value: cvat
volumeMounts: - name: POSTGRES_HOST_AUTH_METHOD
- name: db value: trust
mountPath: /var/lib/psql - name: PGDATA
- name: cvat-redis value: /var/lib/psql/data
image: redis:4.0-alpine ports:
ports: - containerPort: 5432
- containerPort: 6379 name: tcp
name: tcp volumeMounts:
- name: cvat - name: db
image: onepanel/cvat:v0.19.0_cvat.1.0.0 mountPath: /var/lib/psql
env: - name: cvat-redis
- name: DJANGO_MODWSGI_EXTRA_ARGS image: redis:4.0-alpine
value: "" ports:
- name: ALLOWED_HOSTS - containerPort: 6379
value: '*' name: tcp
- name: CVAT_REDIS_HOST - name: cvat
value: localhost image: onepanel/cvat:v0.19.0_cvat.1.0.0
- name: CVAT_POSTGRES_HOST env:
value: localhost - name: DJANGO_MODWSGI_EXTRA_ARGS
- name: CVAT_SHARE_URL value: ""
value: /cvat/data - name: ALLOWED_HOSTS
- name: CVAT_SHARE_DIR value: '*'
value: /share - name: CVAT_REDIS_HOST
- name: CVAT_DATA_DIR value: localhost
value: /cvat/data - name: CVAT_POSTGRES_HOST
- name: CVAT_MEDIA_DATA_DIR value: localhost
value: /cvat/data/data - name: CVAT_SHARE_URL
- name: CVAT_KEYS_DIR value: /cvat/data
value: /cvat/data/keys - name: CVAT_SHARE_DIR
- name: CVAT_MODELS_DIR value: /share
value: /cvat/data/models - name: CVAT_DATA_DIR
- name: CVAT_LOGS_DIR value: /cvat/data
value: /cvat/logs - name: CVAT_MEDIA_DATA_DIR
- name: CVAT_ANNOTATIONS_OBJECT_STORAGE_PREFIX value: /cvat/data/data
value: 'artifacts/$(ONEPANEL_RESOURCE_NAMESPACE)/annotations/' - name: CVAT_KEYS_DIR
- name: CVAT_ONEPANEL_WORKFLOWS_LABEL value: /cvat/data/keys
value: 'key=used-by,value=cvat' - name: CVAT_MODELS_DIR
- name: NVIDIA_VISIBLE_DEVICES value: /cvat/data/models
value: all - name: CVAT_LOGS_DIR
- name: NVIDIA_DRIVER_CAPABILITIES value: /cvat/logs
value: compute,utility - name: CVAT_ANNOTATIONS_OBJECT_STORAGE_PREFIX
- name: NVIDIA_REQUIRE_CUDA value: 'artifacts/$(ONEPANEL_RESOURCE_NAMESPACE)/annotations/'
value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411" - name: CVAT_ONEPANEL_WORKFLOWS_LABEL
- name: ONEPANEL_MAIN_CONTAINER value: 'key=used-by,value=cvat'
value: 'true' - name: NVIDIA_VISIBLE_DEVICES
ports: value: all
- containerPort: 8080 - name: NVIDIA_DRIVER_CAPABILITIES
name: http value: compute,utility
volumeMounts: - name: NVIDIA_REQUIRE_CUDA
- name: cvat-data value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
mountPath: /cvat - name: ONEPANEL_MAIN_CONTAINER
- name: share value: 'true'
mountPath: /share ports:
- name: sys-namespace-config - containerPort: 8080
mountPath: /etc/onepanel name: http
readOnly: true volumeMounts:
- name: cvat-ui - name: cvat-data
image: onepanel/cvat-ui:v0.19.0_cvat.1.0.0 mountPath: /cvat
ports: - name: share
- containerPort: 80 mountPath: /share
name: http - name: sys-namespace-config
- name: sys-filesyncer mountPath: /etc/onepanel
image: onepanel/filesyncer:v0.19.0 readOnly: true
imagePullPolicy: Always - name: cvat-ui
args: image: onepanel/cvat-ui:v0.19.0_cvat.1.0.0
- server ports:
- -server-prefix=/sys/filesyncer - containerPort: 80
volumeMounts: name: http
- name: share - name: sys-filesyncer
mountPath: /share image: onepanel/filesyncer:v0.19.0
- name: sys-namespace-config imagePullPolicy: Always
mountPath: /etc/onepanel args:
readOnly: true - server
ports: - -server-prefix=/sys/filesyncer
- name: cvat-ui volumeMounts:
port: 80 - name: share
protocol: TCP mountPath: /share
targetPort: 80 - name: sys-namespace-config
- name: cvat mountPath: /etc/onepanel
port: 8080 readOnly: true
protocol: TCP ports:
targetPort: 8080 - name: cvat-ui
- name: fs port: 80
port: 8888 protocol: TCP
protocol: TCP targetPort: 80
targetPort: 8888 - name: cvat
routes: port: 8080
- match: protocol: TCP
- uri: targetPort: 8080
prefix: /sys/filesyncer - name: fs
route: port: 8888
- destination: protocol: TCP
port: targetPort: 8888
number: 8888 routes:
- match: - match:
- uri: - uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.* prefix: /sys/filesyncer
- queryParams: route:
id: - destination:
regex: \d+.* port:
route: number: 8888
- destination: - match:
port: - uri:
number: 8080 regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- match: - queryParams:
- uri: id:
prefix: / regex: \d+.*
route: route:
- destination: - destination:
port: port:
number: 80 number: 8080
volumeClaimTemplates: - match:
- metadata: - uri:
name: db prefix: /
spec: route:
accessModes: ["ReadWriteOnce"] - destination:
resources: port:
requests: number: 80
storage: 20Gi volumeClaimTemplates:
- metadata:
name: db
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 20Gi

View File

@@ -1,134 +1,141 @@
containers: metadata:
- name: cvat-db name: CVAT
image: postgres:10-alpine kind: Workspace
env: version: 20210323175655
- name: POSTGRES_USER action: update
value: root description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
- name: POSTGRES_DB spec:
value: cvat containers:
- name: POSTGRES_HOST_AUTH_METHOD - name: cvat-db
value: trust image: postgres:10-alpine
- name: PGDATA env:
value: /var/lib/psql/data - name: POSTGRES_USER
ports: value: root
- containerPort: 5432 - name: POSTGRES_DB
name: tcp value: cvat
volumeMounts: - name: POSTGRES_HOST_AUTH_METHOD
- name: db value: trust
mountPath: /var/lib/psql - name: PGDATA
- name: cvat-redis value: /var/lib/psql/data
image: redis:4.0-alpine ports:
ports: - containerPort: 5432
- containerPort: 6379 name: tcp
name: tcp volumeMounts:
- name: cvat - name: db
image: onepanel/cvat:v0.19.0_cvat.1.0.0 mountPath: /var/lib/psql
env: - name: cvat-redis
- name: DJANGO_MODWSGI_EXTRA_ARGS image: redis:4.0-alpine
value: "" ports:
- name: ALLOWED_HOSTS - containerPort: 6379
value: '*' name: tcp
- name: CVAT_REDIS_HOST - name: cvat
value: localhost image: onepanel/cvat:v0.19.0_cvat.1.0.0
- name: CVAT_POSTGRES_HOST env:
value: localhost - name: DJANGO_MODWSGI_EXTRA_ARGS
- name: CVAT_SHARE_URL value: ""
value: /cvat/data - name: ALLOWED_HOSTS
- name: CVAT_SHARE_DIR value: '*'
value: /share - name: CVAT_REDIS_HOST
- name: CVAT_DATA_DIR value: localhost
value: /cvat/data - name: CVAT_POSTGRES_HOST
- name: CVAT_MEDIA_DATA_DIR value: localhost
value: /cvat/data/data - name: CVAT_SHARE_URL
- name: CVAT_KEYS_DIR value: /cvat/data
value: /cvat/data/keys - name: CVAT_SHARE_DIR
- name: CVAT_MODELS_DIR value: /share
value: /cvat/data/models - name: CVAT_DATA_DIR
- name: CVAT_LOGS_DIR value: /cvat/data
value: /cvat/logs - name: CVAT_MEDIA_DATA_DIR
- name: CVAT_ANNOTATIONS_OBJECT_STORAGE_PREFIX value: /cvat/data/data
value: 'artifacts/$(ONEPANEL_RESOURCE_NAMESPACE)/annotations/' - name: CVAT_KEYS_DIR
- name: CVAT_ONEPANEL_WORKFLOWS_LABEL value: /cvat/data/keys
value: 'key=used-by,value=cvat' - name: CVAT_MODELS_DIR
- name: NVIDIA_VISIBLE_DEVICES value: /cvat/data/models
value: all - name: CVAT_LOGS_DIR
- name: NVIDIA_DRIVER_CAPABILITIES value: /cvat/logs
value: compute,utility - name: CVAT_ANNOTATIONS_OBJECT_STORAGE_PREFIX
- name: NVIDIA_REQUIRE_CUDA value: 'artifacts/$(ONEPANEL_RESOURCE_NAMESPACE)/annotations/'
value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411" - name: CVAT_ONEPANEL_WORKFLOWS_LABEL
- name: ONEPANEL_MAIN_CONTAINER value: 'key=used-by,value=cvat'
value: 'true' - name: NVIDIA_VISIBLE_DEVICES
ports: value: all
- containerPort: 8080 - name: NVIDIA_DRIVER_CAPABILITIES
name: http value: compute,utility
volumeMounts: - name: NVIDIA_REQUIRE_CUDA
- name: cvat-data value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
mountPath: /cvat - name: ONEPANEL_MAIN_CONTAINER
- name: share value: 'true'
mountPath: /share ports:
- name: sys-namespace-config - containerPort: 8080
mountPath: /etc/onepanel name: http
readOnly: true volumeMounts:
- name: cvat-ui - name: cvat-data
image: onepanel/cvat-ui:v0.19.0_cvat.1.0.0 mountPath: /cvat
ports: - name: share
- containerPort: 80 mountPath: /share
name: http - name: sys-namespace-config
- name: sys-filesyncer mountPath: /etc/onepanel
image: onepanel/filesyncer:v0.20.0 readOnly: true
imagePullPolicy: Always - name: cvat-ui
args: image: onepanel/cvat-ui:v0.19.0_cvat.1.0.0
- server ports:
- -server-prefix=/sys/filesyncer - containerPort: 80
volumeMounts: name: http
- name: share - name: sys-filesyncer
mountPath: /share image: onepanel/filesyncer:v0.20.0
- name: sys-namespace-config imagePullPolicy: Always
mountPath: /etc/onepanel args:
readOnly: true - server
ports: - -server-prefix=/sys/filesyncer
- name: cvat-ui volumeMounts:
port: 80 - name: share
protocol: TCP mountPath: /share
targetPort: 80 - name: sys-namespace-config
- name: cvat mountPath: /etc/onepanel
port: 8080 readOnly: true
protocol: TCP ports:
targetPort: 8080 - name: cvat-ui
- name: fs port: 80
port: 8888 protocol: TCP
protocol: TCP targetPort: 80
targetPort: 8888 - name: cvat
routes: port: 8080
- match: protocol: TCP
- uri: targetPort: 8080
prefix: /sys/filesyncer - name: fs
route: port: 8888
- destination: protocol: TCP
port: targetPort: 8888
number: 8888 routes:
- match: - match:
- uri: - uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.* prefix: /sys/filesyncer
- queryParams: route:
id: - destination:
regex: \d+.* port:
route: number: 8888
- destination: - match:
port: - uri:
number: 8080 regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- match: - queryParams:
- uri: id:
prefix: / regex: \d+.*
route: route:
- destination: - destination:
port: port:
number: 80 number: 8080
volumeClaimTemplates: - match:
- metadata: - uri:
name: db prefix: /
spec: route:
accessModes: ["ReadWriteOnce"] - destination:
resources: port:
requests: number: 80
storage: 20Gi volumeClaimTemplates:
- metadata:
name: db
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 20Gi

View File

@@ -1,134 +1,141 @@
containers: metadata:
- name: cvat-db name: CVAT
image: postgres:10-alpine kind: Workspace
env: version: 20210719190719
- name: POSTGRES_USER action: update
value: root description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
- name: POSTGRES_DB spec:
value: cvat containers:
- name: POSTGRES_HOST_AUTH_METHOD - name: cvat-db
value: trust image: postgres:10-alpine
- name: PGDATA env:
value: /var/lib/psql/data - name: POSTGRES_USER
ports: value: root
- containerPort: 5432 - name: POSTGRES_DB
name: tcp value: cvat
volumeMounts: - name: POSTGRES_HOST_AUTH_METHOD
- name: db value: trust
mountPath: /var/lib/psql - name: PGDATA
- name: cvat-redis value: /var/lib/psql/data
image: redis:4.0-alpine ports:
ports: - containerPort: 5432
- containerPort: 6379 name: tcp
name: tcp volumeMounts:
- name: cvat - name: db
image: onepanel/cvat:v0.19.0_cvat.1.0.0 mountPath: /var/lib/psql
env: - name: cvat-redis
- name: DJANGO_MODWSGI_EXTRA_ARGS image: redis:4.0-alpine
value: "" ports:
- name: ALLOWED_HOSTS - containerPort: 6379
value: '*' name: tcp
- name: CVAT_REDIS_HOST - name: cvat
value: localhost image: onepanel/cvat:v0.19.0_cvat.1.0.0
- name: CVAT_POSTGRES_HOST env:
value: localhost - name: DJANGO_MODWSGI_EXTRA_ARGS
- name: CVAT_SHARE_URL value: ""
value: /cvat/data - name: ALLOWED_HOSTS
- name: CVAT_SHARE_DIR value: '*'
value: /share - name: CVAT_REDIS_HOST
- name: CVAT_DATA_DIR value: localhost
value: /cvat/data - name: CVAT_POSTGRES_HOST
- name: CVAT_MEDIA_DATA_DIR value: localhost
value: /cvat/data/data - name: CVAT_SHARE_URL
- name: CVAT_KEYS_DIR value: /cvat/data
value: /cvat/data/keys - name: CVAT_SHARE_DIR
- name: CVAT_MODELS_DIR value: /share
value: /cvat/data/models - name: CVAT_DATA_DIR
- name: CVAT_LOGS_DIR value: /cvat/data
value: /cvat/logs - name: CVAT_MEDIA_DATA_DIR
- name: CVAT_ANNOTATIONS_OBJECT_STORAGE_PREFIX value: /cvat/data/data
value: 'artifacts/$(ONEPANEL_RESOURCE_NAMESPACE)/annotations/' - name: CVAT_KEYS_DIR
- name: CVAT_ONEPANEL_WORKFLOWS_LABEL value: /cvat/data/keys
value: 'key=used-by,value=cvat' - name: CVAT_MODELS_DIR
- name: NVIDIA_VISIBLE_DEVICES value: /cvat/data/models
value: all - name: CVAT_LOGS_DIR
- name: NVIDIA_DRIVER_CAPABILITIES value: /cvat/logs
value: compute,utility - name: CVAT_ANNOTATIONS_OBJECT_STORAGE_PREFIX
- name: NVIDIA_REQUIRE_CUDA value: 'artifacts/$(ONEPANEL_RESOURCE_NAMESPACE)/annotations/'
value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411" - name: CVAT_ONEPANEL_WORKFLOWS_LABEL
- name: ONEPANEL_MAIN_CONTAINER value: 'key=used-by,value=cvat'
value: 'true' - name: NVIDIA_VISIBLE_DEVICES
ports: value: all
- containerPort: 8080 - name: NVIDIA_DRIVER_CAPABILITIES
name: http value: compute,utility
volumeMounts: - name: NVIDIA_REQUIRE_CUDA
- name: cvat-data value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
mountPath: /cvat - name: ONEPANEL_MAIN_CONTAINER
- name: share value: 'true'
mountPath: /share ports:
- name: sys-namespace-config - containerPort: 8080
mountPath: /etc/onepanel name: http
readOnly: true volumeMounts:
- name: cvat-ui - name: cvat-data
image: onepanel/cvat-ui:v0.19.0_cvat.1.0.0 mountPath: /cvat
ports: - name: share
- containerPort: 80 mountPath: /share
name: http - name: sys-namespace-config
- name: sys-filesyncer mountPath: /etc/onepanel
image: onepanel/filesyncer:v1.0.0 readOnly: true
imagePullPolicy: Always - name: cvat-ui
args: image: onepanel/cvat-ui:v0.19.0_cvat.1.0.0
- server ports:
- -server-prefix=/sys/filesyncer - containerPort: 80
volumeMounts: name: http
- name: share - name: sys-filesyncer
mountPath: /share image: onepanel/filesyncer:v1.0.0
- name: sys-namespace-config imagePullPolicy: Always
mountPath: /etc/onepanel args:
readOnly: true - server
ports: - -server-prefix=/sys/filesyncer
- name: cvat-ui volumeMounts:
port: 80 - name: share
protocol: TCP mountPath: /share
targetPort: 80 - name: sys-namespace-config
- name: cvat mountPath: /etc/onepanel
port: 8080 readOnly: true
protocol: TCP ports:
targetPort: 8080 - name: cvat-ui
- name: fs port: 80
port: 8888 protocol: TCP
protocol: TCP targetPort: 80
targetPort: 8888 - name: cvat
routes: port: 8080
- match: protocol: TCP
- uri: targetPort: 8080
prefix: /sys/filesyncer - name: fs
route: port: 8888
- destination: protocol: TCP
port: targetPort: 8888
number: 8888 routes:
- match: - match:
- uri: - uri:
regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.* prefix: /sys/filesyncer
- queryParams: route:
id: - destination:
regex: \d+.* port:
route: number: 8888
- destination: - match:
port: - uri:
number: 8080 regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- match: - queryParams:
- uri: id:
prefix: / regex: \d+.*
route: route:
- destination: - destination:
port: port:
number: 80 number: 8080
volumeClaimTemplates: - match:
- metadata: - uri:
name: db prefix: /
spec: route:
accessModes: ["ReadWriteOnce"] - destination:
resources: port:
requests: number: 80
storage: 20Gi volumeClaimTemplates:
- metadata:
name: db
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 20Gi

View File

@@ -0,0 +1,143 @@
metadata:
  name: CVAT_1.6.0
  kind: Workspace
  version: 20211028205201
  action: create
  description: "Powerful and efficient Computer Vision Annotation Tool (CVAT 1.6.0)"
spec:
  # Docker containers that are part of the Workspace
  containers:
  # PostgreSQL database used by CVAT for task/job metadata
  - name: cvat-db
    image: postgres:10-alpine
    env:
    - name: POSTGRES_USER
      value: root
    - name: POSTGRES_DB
      value: cvat
    - name: POSTGRES_HOST_AUTH_METHOD
      value: trust
    - name: PGDATA
      value: /var/lib/psql/data
    ports:
    - containerPort: 5432
      name: tcp
    volumeMounts:
    - name: db
      mountPath: /var/lib/psql
  # Redis instance used by CVAT for queuing background jobs
  - name: cvat-redis
    image: redis:4.0-alpine
    ports:
    - containerPort: 6379
      name: tcp
  # CVAT backend server
  - name: cvat
    image: onepanel/cvat:v1.0.2_cvat.1.6.0
    env:
    - name: DJANGO_MODWSGI_EXTRA_ARGS
      value: ""
    - name: ALLOWED_HOSTS
      value: '*'
    - name: CVAT_REDIS_HOST
      value: localhost
    - name: CVAT_POSTGRES_HOST
      value: localhost
    - name: CVAT_SHARE_URL
      value: /cvat/data
    - name: CVAT_SHARE_DIR
      value: /share
    - name: CVAT_DATA_DIR
      value: /cvat/data
    - name: CVAT_MEDIA_DATA_DIR
      value: /cvat/data/data
    - name: CVAT_KEYS_DIR
      value: /cvat/data/keys
    - name: CVAT_MODELS_DIR
      value: /cvat/data/models
    - name: CVAT_LOGS_DIR
      value: /cvat/logs
    - name: CVAT_ANNOTATIONS_OBJECT_STORAGE_PREFIX
      value: 'artifacts/$(ONEPANEL_RESOURCE_NAMESPACE)/annotations/'
    - name: CVAT_ONEPANEL_WORKFLOWS_LABEL
      value: 'key=used-by,value=cvat'
    - name: NVIDIA_VISIBLE_DEVICES
      value: all
    - name: NVIDIA_DRIVER_CAPABILITIES
      value: compute,utility
    - name: NVIDIA_REQUIRE_CUDA
      value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
    - name: ONEPANEL_MAIN_CONTAINER
      value: 'true'
    - name: CVAT_SERVERLESS
      # Quoted: Kubernetes env var values must be strings; an unquoted
      # YAML `True` is a boolean and fails to unmarshal into EnvVar.value.
      value: "True"
    ports:
    - containerPort: 8080
      name: http
    volumeMounts:
    - name: cvat-data
      mountPath: /cvat
    - name: share
      mountPath: /share
    - name: sys-namespace-config
      mountPath: /etc/onepanel
      readOnly: true
  # CVAT web UI
  - name: cvat-ui
    image: onepanel/cvat-ui:v1.0.2_cvat.1.6.0
    ports:
    - containerPort: 80
      name: http
  # File syncer sidecar that syncs the share directory with object storage
  - name: sys-filesyncer
    image: onepanel/filesyncer:v1.0.0
    imagePullPolicy: Always
    args:
    - server
    - -server-prefix=/sys/filesyncer
    volumeMounts:
    - name: share
      mountPath: /share
    - name: sys-namespace-config
      mountPath: /etc/onepanel
      readOnly: true
  # Ports that need to be exposed
  ports:
  - name: cvat-ui
    port: 80
    protocol: TCP
    targetPort: 80
  - name: cvat
    port: 8080
    protocol: TCP
    targetPort: 8080
  - name: fs
    port: 8888
    protocol: TCP
    targetPort: 8888
  # Routes that map request paths to the ports above
  routes:
  - match:
    - uri:
        prefix: /sys/filesyncer
    route:
    - destination:
        port:
          number: 8888
  # API/static/admin traffic goes to the CVAT backend (8080)
  - match:
    - uri:
        regex: \/?api.*|\/?git.*|\/?tensorflow.*|\/?onepanelio.*|\/?tracking.*|\/?auto_annotation.*|\/?analytics.*|\/?static.*|\/?admin.*|\/?documentation.*|\/?dextr.*|\/?reid.*|\/?django-rq.*
    - queryParams:
        id:
          regex: \d+.*
    route:
    - destination:
        port:
          number: 8080
  # Everything else goes to the UI (80)
  - match:
    - uri:
        prefix: /
    route:
    - destination:
        port:
          number: 80
  # Persistent volume for the PostgreSQL data directory
  volumeClaimTemplates:
  - metadata:
      name: db
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 20Gi

View File

@@ -0,0 +1,64 @@
metadata:
  name: JupyterLab
  kind: Workspace
  version: 20200525160514
  action: create
  description: "Interactive development environment for notebooks"
spec:
  # Docker containers that are part of the Workspace
  containers:
  - name: jupyterlab-tensorflow
    image: jupyter/tensorflow-notebook
    command: [start.sh, jupyter]
    env:
    # Tornado settings passed through to --LabApp.tornado_settings below;
    # the Content-Security-Policy header allows the lab UI to be iframed.
    - name: tornado
      value: "{ 'headers': { 'Content-Security-Policy': \"frame-ancestors * 'self'\" } }"
    # Launch JupyterLab with auth disabled (token='') — access control is
    # handled by the surrounding platform, not by Jupyter itself.
    args:
    - lab
    - --LabApp.token=''
    - --LabApp.allow_remote_access=True
    - --LabApp.allow_origin="*"
    - --LabApp.disable_check_xsrf=True
    - --LabApp.trust_xheaders=True
    - --LabApp.tornado_settings=$(tornado)
    - --notebook-dir='/data'
    ports:
    - containerPort: 8888
      name: jupyterlab
    # Volumes to be mounted in this container
    # Onepanel will automatically create these volumes and mount them to the container
    volumeMounts:
    - name: data
      mountPath: /data
  # Ports that need to be exposed
  ports:
  - name: jupyterlab
    port: 80
    protocol: TCP
    targetPort: 8888
  # Routes that will map to ports
  routes:
  - match:
    - uri:
        prefix: /
    route:
    - destination:
        port:
          number: 80
  # DAG Workflow to be executed once a Workspace action completes
  # postExecutionWorkflow:
  #   entrypoint: main
  #   templates:
  #   - name: main
  #     dag:
  #        tasks:
  #        - name: slack-notify
  #          template: slack-notify
  #   -  name: slack-notify
  #      container:
  #        image: technosophos/slack-notify
  #        args:
  #        - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
  #        command:
  #        - sh
  #        - -c

View File

@@ -0,0 +1,65 @@
metadata:
  name: JupyterLab
  kind: Workspace
  version: 20200821162630
  action: update
  description: "Interactive development environment for notebooks"
spec:
  # Docker containers that are part of the Workspace
  containers:
  - name: jupyterlab-tensorflow
    image: onepanel/jupyterlab:1.0.1
    # All launch options are folded into the command string in this version.
    command: ["/bin/bash", "-c", "start.sh jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * \'self\'\"}}' --notebook-dir='/data' --allow-root"]
    env:
    - name: tornado
      value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ \'self'\"}}'"
    # NOTE(review): args is intentionally empty here — presumably to null out
    # the args list from the previous template version, since every option now
    # lives in `command` above. Confirm the template engine treats empty as
    # "clear" rather than "keep previous".
    args:
    ports:
    - containerPort: 8888
      name: jupyterlab
    - containerPort: 6006
      name: tensorboard
    volumeMounts:
    - name: data
      mountPath: /data
  # Ports that need to be exposed
  ports:
  - name: jupyterlab
    port: 80
    protocol: TCP
    targetPort: 8888
  - name: tensorboard
    port: 6006
    protocol: TCP
    targetPort: 6006
  # Routes that will map to ports
  routes:
  - match:
    - uri:
        prefix: /tensorboard
    route:
    - destination:
        port:
          number: 6006
  - match:
    - uri:
        prefix: / #jupyter runs at the default route
    route:
    - destination:
        port:
          number: 80
  # DAG Workflow to be executed once a Workspace action completes (optional)
  #postExecutionWorkflow:
  #  entrypoint: main
  #  templates:
  #  - name: main
  #    dag:
  #       tasks:
  #       - name: slack-notify
  #         template: slack-notify
  #  -  name: slack-notify
  #     container:
  #       image: technosophos/slack-notify
  #       args:
  #       - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
  #       command:
  #       - sh
  #       - -c

View File

@@ -1,58 +1,65 @@
# Docker containers that are part of the Workspace metadata:
containers: name: JupyterLab
- name: jupyterlab-tensorflow kind: Workspace
image: onepanel/jupyterlab:1.0.1 version: 20200929153931
command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * \'self\'\"}}' --notebook-dir='/data' --allow-root"] action: update
env: description: "Interactive development environment for notebooks"
- name: tornado spec:
value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ \'self'\"}}'" # Docker containers that are part of the Workspace
args: containers:
ports: - name: jupyterlab-tensorflow
- containerPort: 8888 image: onepanel/jupyterlab:1.0.1
name: jupyterlab command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * \'self\'\"}}' --notebook-dir='/data' --allow-root"]
- containerPort: 6006 env:
name: tensorboard - name: tornado
volumeMounts: value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ \'self'\"}}'"
- name: data args:
mountPath: /data ports:
ports: - containerPort: 8888
- name: jupyterlab name: jupyterlab
port: 80 - containerPort: 6006
protocol: TCP name: tensorboard
targetPort: 8888 volumeMounts:
- name: tensorboard - name: data
port: 6006 mountPath: /data
protocol: TCP ports:
targetPort: 6006 - name: jupyterlab
routes: port: 80
- match: protocol: TCP
- uri: targetPort: 8888
prefix: /tensorboard - name: tensorboard
route: port: 6006
- destination: protocol: TCP
port: targetPort: 6006
number: 6006 routes:
- match: - match:
- uri: - uri:
prefix: / #jupyter runs at the default route prefix: /tensorboard
route: route:
- destination: - destination:
port: port:
number: 80 number: 6006
# DAG Workflow to be executed once a Workspace action completes (optional) - match:
#postExecutionWorkflow: - uri:
# entrypoint: main prefix: / #jupyter runs at the default route
# templates: route:
# - name: main - destination:
# dag: port:
# tasks: number: 80
# - name: slack-notify # DAG Workflow to be executed once a Workspace action completes (optional)
# template: slack-notify #postExecutionWorkflow:
# - name: slack-notify # entrypoint: main
# container: # templates:
# image: technosophos/slack-notify # - name: main
# args: # dag:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify # tasks:
# command: # - name: slack-notify
# - sh # template: slack-notify
# - -c # - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -1,77 +1,84 @@
# Docker containers that are part of the Workspace metadata:
containers: name: JupyterLab
- name: jupyterlab kind: Workspace
image: onepanel/jupyterlab:1.0.1 version: 20201028145442
command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * \'self\'\"}}' --notebook-dir='/data' --allow-root"] action: update
env: description: "Interactive development environment for notebooks"
- name: tornado spec:
value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ \'self'\"}}'" # Docker containers that are part of the Workspace
ports: containers:
- containerPort: 8888 - name: jupyterlab
name: jupyterlab image: onepanel/jupyterlab:1.0.1
- containerPort: 6006 command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * \'self\'\"}}' --notebook-dir='/data' --allow-root"]
name: tensorboard env:
volumeMounts: - name: tornado
- name: data value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ \'self'\"}}'"
mountPath: /data ports:
lifecycle: - containerPort: 8888
postStart: name: jupyterlab
exec: - containerPort: 6006
command: name: tensorboard
- /bin/sh volumeMounts:
- -c - name: data
- > mountPath: /data
condayml="/data/.environment.yml"; lifecycle:
jupytertxt="/data/.jupexported.txt"; postStart:
if [ -f "$condayml" ]; then conda env update -f $condayml; fi; exec:
if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi; command:
preStop: - /bin/sh
exec: - -c
command: - >
- /bin/sh condayml="/data/.environment.yml";
- -c jupytertxt="/data/.jupexported.txt";
- > if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
conda env export > /data/.environment.yml -n base; if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
jupyter labextension list 1>/dev/null 2> /data/.jup.txt; preStop:
cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt; exec:
ports: command:
- name: jupyterlab - /bin/sh
port: 80 - -c
protocol: TCP - >
targetPort: 8888 conda env export > /data/.environment.yml -n base;
- name: tensorboard jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
port: 6006 cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
protocol: TCP ports:
targetPort: 6006 - name: jupyterlab
routes: port: 80
- match: protocol: TCP
- uri: targetPort: 8888
prefix: /tensorboard - name: tensorboard
route: port: 6006
- destination: protocol: TCP
port: targetPort: 6006
number: 6006 routes:
- match: - match:
- uri: - uri:
prefix: / #jupyter runs at the default route prefix: /tensorboard
route: route:
- destination: - destination:
port: port:
number: 80 number: 6006
# DAG Workflow to be executed once a Workspace action completes (optional) - match:
#postExecutionWorkflow: - uri:
# entrypoint: main prefix: / #jupyter runs at the default route
# templates: route:
# - name: main - destination:
# dag: port:
# tasks: number: 80
# - name: slack-notify # DAG Workflow to be executed once a Workspace action completes (optional)
# template: slack-notify #postExecutionWorkflow:
# - name: slack-notify # entrypoint: main
# container: # templates:
# image: technosophos/slack-notify # - name: main
# args: # dag:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify # tasks:
# command: # - name: slack-notify
# - sh # template: slack-notify
# - -c # - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -1,79 +1,86 @@
# Docker containers that are part of the Workspace metadata:
containers: name: JupyterLab
- name: jupyterlab kind: Workspace
image: onepanel/jupyterlab:1.0.1 version: 20201031165106
command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * \'self\'\"}}' --notebook-dir='/data' --allow-root"] action: update
env: description: "Interactive development environment for notebooks"
- name: tornado spec:
value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ \'self'\"}}'" # Docker containers that are part of the Workspace
- name: TENSORBOARD_PROXY_URL containers:
value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard' - name: jupyterlab
ports: image: onepanel/jupyterlab:1.0.1
- containerPort: 8888 command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * \'self\'\"}}' --notebook-dir='/data' --allow-root"]
name: jupyterlab env:
- containerPort: 6006 - name: tornado
name: tensorboard value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ \'self'\"}}'"
volumeMounts: - name: TENSORBOARD_PROXY_URL
- name: data value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
mountPath: /data ports:
lifecycle: - containerPort: 8888
postStart: name: jupyterlab
exec: - containerPort: 6006
command: name: tensorboard
- /bin/sh volumeMounts:
- -c - name: data
- > mountPath: /data
condayml="/data/.environment.yml"; lifecycle:
jupytertxt="/data/.jupexported.txt"; postStart:
if [ -f "$condayml" ]; then conda env update -f $condayml; fi; exec:
if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi; command:
preStop: - /bin/sh
exec: - -c
command: - >
- /bin/sh condayml="/data/.environment.yml";
- -c jupytertxt="/data/.jupexported.txt";
- > if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
conda env export > /data/.environment.yml -n base; if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
jupyter labextension list 1>/dev/null 2> /data/.jup.txt; preStop:
cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt; exec:
ports: command:
- name: jupyterlab - /bin/sh
port: 80 - -c
protocol: TCP - >
targetPort: 8888 conda env export > /data/.environment.yml -n base;
- name: tensorboard jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
port: 6006 cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
protocol: TCP ports:
targetPort: 6006 - name: jupyterlab
routes: port: 80
- match: protocol: TCP
- uri: targetPort: 8888
prefix: /tensorboard - name: tensorboard
route: port: 6006
- destination: protocol: TCP
port: targetPort: 6006
number: 6006 routes:
- match: - match:
- uri: - uri:
prefix: / #jupyter runs at the default route prefix: /tensorboard
route: route:
- destination: - destination:
port: port:
number: 80 number: 6006
# DAG Workflow to be executed once a Workspace action completes (optional) - match:
#postExecutionWorkflow: - uri:
# entrypoint: main prefix: / #jupyter runs at the default route
# templates: route:
# - name: main - destination:
# dag: port:
# tasks: number: 80
# - name: slack-notify # DAG Workflow to be executed once a Workspace action completes (optional)
# template: slack-notify #postExecutionWorkflow:
# - name: slack-notify # entrypoint: main
# container: # templates:
# image: technosophos/slack-notify # - name: main
# args: # dag:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify # tasks:
# command: # - name: slack-notify
# - sh # template: slack-notify
# - -c # - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -1,80 +1,87 @@
# Docker containers that are part of the Workspace metadata:
containers: name: JupyterLab
- name: jupyterlab kind: Workspace
image: onepanel/jupyterlab:1.0.1 version: 20201214133458
command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * \'self\'\"}}' --notebook-dir='/data' --allow-root"] action: update
workingDir: /data description: "Interactive development environment for notebooks"
env: spec:
- name: tornado # Docker containers that are part of the Workspace
value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ \'self'\"}}'" containers:
- name: TENSORBOARD_PROXY_URL - name: jupyterlab
value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard' image: onepanel/jupyterlab:1.0.1
ports: command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * \'self\'\"}}' --notebook-dir='/data' --allow-root"]
- containerPort: 8888 workingDir: /data
name: jupyterlab env:
- containerPort: 6006 - name: tornado
name: tensorboard value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ \'self'\"}}'"
volumeMounts: - name: TENSORBOARD_PROXY_URL
- name: data value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
mountPath: /data ports:
lifecycle: - containerPort: 8888
postStart: name: jupyterlab
exec: - containerPort: 6006
command: name: tensorboard
- /bin/sh volumeMounts:
- -c - name: data
- > mountPath: /data
condayml="/data/.environment.yml"; lifecycle:
jupytertxt="/data/.jupexported.txt"; postStart:
if [ -f "$condayml" ]; then conda env update -f $condayml; fi; exec:
if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi; command:
preStop: - /bin/sh
exec: - -c
command: - >
- /bin/sh condayml="/data/.environment.yml";
- -c jupytertxt="/data/.jupexported.txt";
- > if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
conda env export > /data/.environment.yml -n base; if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
jupyter labextension list 1>/dev/null 2> /data/.jup.txt; preStop:
cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt; exec:
ports: command:
- name: jupyterlab - /bin/sh
port: 80 - -c
protocol: TCP - >
targetPort: 8888 conda env export > /data/.environment.yml -n base;
- name: tensorboard jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
port: 6006 cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
protocol: TCP ports:
targetPort: 6006 - name: jupyterlab
routes: port: 80
- match: protocol: TCP
- uri: targetPort: 8888
prefix: /tensorboard - name: tensorboard
route: port: 6006
- destination: protocol: TCP
port: targetPort: 6006
number: 6006 routes:
- match: - match:
- uri: - uri:
prefix: / #jupyter runs at the default route prefix: /tensorboard
route: route:
- destination: - destination:
port: port:
number: 80 number: 6006
# DAG Workflow to be executed once a Workspace action completes (optional) - match:
#postExecutionWorkflow: - uri:
# entrypoint: main prefix: / #jupyter runs at the default route
# templates: route:
# - name: main - destination:
# dag: port:
# tasks: number: 80
# - name: slack-notify # DAG Workflow to be executed once a Workspace action completes (optional)
# template: slack-notify #postExecutionWorkflow:
# - name: slack-notify # entrypoint: main
# container: # templates:
# image: technosophos/slack-notify # - name: main
# args: # dag:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify # tasks:
# command: # - name: slack-notify
# - sh # template: slack-notify
# - -c # - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -1,93 +1,100 @@
# Docker containers that are part of the Workspace metadata:
containers: name: JupyterLab
- name: jupyterlab kind: Workspace
image: onepanel/dl:0.17.0 version: 20201229205644
command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"] action: update
workingDir: /data description: "Interactive development environment for notebooks"
env: spec:
- name: tornado # Docker containers that are part of the Workspace
value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'" containers:
- name: TENSORBOARD_PROXY_URL - name: jupyterlab
value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard' image: onepanel/dl:0.17.0
ports: command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"]
- containerPort: 8888 workingDir: /data
name: jupyterlab env:
- containerPort: 6006 - name: tornado
name: tensorboard value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'"
- containerPort: 8080 - name: TENSORBOARD_PROXY_URL
name: nni value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
volumeMounts: ports:
- name: data - containerPort: 8888
mountPath: /data name: jupyterlab
lifecycle: - containerPort: 6006
postStart: name: tensorboard
exec: - containerPort: 8080
command: name: nni
- /bin/sh volumeMounts:
- -c - name: data
- > mountPath: /data
condayml="/data/.environment.yml"; lifecycle:
jupytertxt="/data/.jupexported.txt"; postStart:
if [ -f "$condayml" ]; then conda env update -f $condayml; fi; exec:
if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi; command:
preStop: - /bin/sh
exec: - -c
command: - >
- /bin/sh condayml="/data/.environment.yml";
- -c jupytertxt="/data/.jupexported.txt";
- > if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
conda env export > /data/.environment.yml -n base; if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
jupyter labextension list 1>/dev/null 2> /data/.jup.txt; preStop:
cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt; exec:
ports: command:
- name: jupyterlab - /bin/sh
port: 80 - -c
protocol: TCP - >
targetPort: 8888 conda env export > /data/.environment.yml -n base;
- name: tensorboard jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
port: 6006 cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
protocol: TCP ports:
targetPort: 6006 - name: jupyterlab
- name: nni port: 80
port: 8080 protocol: TCP
protocol: TCP targetPort: 8888
targetPort: 8080 - name: tensorboard
routes: port: 6006
- match: protocol: TCP
- uri: targetPort: 6006
prefix: /tensorboard - name: nni
route: port: 8080
- destination: protocol: TCP
port: targetPort: 8080
number: 6006 routes:
- match: - match:
- uri: - uri:
prefix: /nni prefix: /tensorboard
route: route:
- destination: - destination:
port: port:
number: 8080 number: 6006
- match: - match:
- uri: - uri:
prefix: / #jupyter runs at the default route prefix: /nni
route: route:
- destination: - destination:
port: port:
number: 80 number: 8080
# DAG Workflow to be executed once a Workspace action completes (optional) - match:
#postExecutionWorkflow: - uri:
# entrypoint: main prefix: / #jupyter runs at the default route
# templates: route:
# - name: main - destination:
# dag: port:
# tasks: number: 80
# - name: slack-notify # DAG Workflow to be executed once a Workspace action completes (optional)
# template: slack-notify #postExecutionWorkflow:
# - name: slack-notify # entrypoint: main
# container: # templates:
# image: technosophos/slack-notify # - name: main
# args: # dag:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify # tasks:
# command: # - name: slack-notify
# - sh # template: slack-notify
# - -c # - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -1,101 +1,108 @@
containers: metadata:
- name: jupyterlab name: JupyterLab
image: onepanel/dl:0.17.0 kind: Workspace
command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"] version: 20210129142057
workingDir: /data action: update
env: description: "Interactive development environment for notebooks"
- name: tornado spec:
value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'" containers:
- name: TENSORBOARD_PROXY_URL - name: jupyterlab
value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard' image: onepanel/dl:0.17.0
- name: ONEPANEL_MAIN_CONTAINER command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"]
value: 'true' workingDir: /data
ports: env:
- containerPort: 8888 - name: tornado
name: jupyterlab value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'"
- containerPort: 6006 - name: TENSORBOARD_PROXY_URL
name: tensorboard value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
- containerPort: 8080 - name: ONEPANEL_MAIN_CONTAINER
name: nni value: 'true'
volumeMounts: ports:
- name: data - containerPort: 8888
mountPath: /data name: jupyterlab
lifecycle: - containerPort: 6006
postStart: name: tensorboard
exec: - containerPort: 8080
command: name: nni
- /bin/sh volumeMounts:
- -c - name: data
- > mountPath: /data
condayml="/data/.environment.yml"; lifecycle:
jupytertxt="/data/.jupexported.txt"; postStart:
if [ -f "$condayml" ]; then conda env update -f $condayml; fi; exec:
if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi; command:
preStop: - /bin/sh
exec: - -c
command: - >
- /bin/sh condayml="/data/.environment.yml";
- -c jupytertxt="/data/.jupexported.txt";
- > if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
conda env export > /data/.environment.yml -n base; if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
jupyter labextension list 1>/dev/null 2> /data/.jup.txt; preStop:
cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt; exec:
- name: sys-filesyncer command:
image: onepanel/filesyncer:v0.18.0 - /bin/sh
imagePullPolicy: Always - -c
args: - >
- server conda env export > /data/.environment.yml -n base;
- -host=localhost:8889 jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
- -server-prefix=/sys/filesyncer cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
volumeMounts: - name: sys-filesyncer
- name: data image: onepanel/filesyncer:v0.18.0
mountPath: /data imagePullPolicy: Always
- name: sys-namespace-config args:
mountPath: /etc/onepanel - server
readOnly: true - -host=localhost:8889
ports: - -server-prefix=/sys/filesyncer
- name: jupyterlab volumeMounts:
port: 80 - name: data
protocol: TCP mountPath: /data
targetPort: 8888 - name: sys-namespace-config
- name: tensorboard mountPath: /etc/onepanel
port: 6006 readOnly: true
protocol: TCP ports:
targetPort: 6006 - name: jupyterlab
- name: nni port: 80
port: 8080 protocol: TCP
protocol: TCP targetPort: 8888
targetPort: 8080 - name: tensorboard
- name: fs port: 6006
port: 8889 protocol: TCP
protocol: TCP targetPort: 6006
targetPort: 8889 - name: nni
routes: port: 8080
- match: protocol: TCP
- uri: targetPort: 8080
prefix: /sys/filesyncer - name: fs
route: port: 8889
- destination: protocol: TCP
port: targetPort: 8889
number: 8889 routes:
- match: - match:
- uri: - uri:
prefix: /tensorboard prefix: /sys/filesyncer
route: route:
- destination: - destination:
port: port:
number: 6006 number: 8889
- match: - match:
- uri: - uri:
prefix: /nni prefix: /tensorboard
route: route:
- destination: - destination:
port: port:
number: 8080 number: 6006
- match: - match:
- uri: - uri:
prefix: / prefix: /nni
route: route:
- destination: - destination:
port: port:
number: 80 number: 8080
- match:
- uri:
prefix: /
route:
- destination:
port:
number: 80

View File

@@ -1,101 +1,108 @@
containers: metadata:
- name: jupyterlab name: JupyterLab
image: onepanel/dl:0.17.0 kind: Workspace
command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"] version: 20210224180017
workingDir: /data action: update
env: description: "Interactive development environment for notebooks"
- name: tornado spec:
value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'" containers:
- name: TENSORBOARD_PROXY_URL - name: jupyterlab
value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard' image: onepanel/dl:0.17.0
- name: ONEPANEL_MAIN_CONTAINER command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"]
value: 'true' workingDir: /data
ports: env:
- containerPort: 8888 - name: tornado
name: jupyterlab value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'"
- containerPort: 6006 - name: TENSORBOARD_PROXY_URL
name: tensorboard value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
- containerPort: 8080 - name: ONEPANEL_MAIN_CONTAINER
name: nni value: 'true'
volumeMounts: ports:
- name: data - containerPort: 8888
mountPath: /data name: jupyterlab
lifecycle: - containerPort: 6006
postStart: name: tensorboard
exec: - containerPort: 8080
command: name: nni
- /bin/sh volumeMounts:
- -c - name: data
- > mountPath: /data
condayml="/data/.environment.yml"; lifecycle:
jupytertxt="/data/.jupexported.txt"; postStart:
if [ -f "$condayml" ]; then conda env update -f $condayml; fi; exec:
if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi; command:
preStop: - /bin/sh
exec: - -c
command: - >
- /bin/sh condayml="/data/.environment.yml";
- -c jupytertxt="/data/.jupexported.txt";
- > if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
conda env export > /data/.environment.yml -n base; if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
jupyter labextension list 1>/dev/null 2> /data/.jup.txt; preStop:
cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt; exec:
- name: sys-filesyncer command:
image: onepanel/filesyncer:v0.19.0 - /bin/sh
imagePullPolicy: Always - -c
args: - >
- server conda env export > /data/.environment.yml -n base;
- -host=localhost:8889 jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
- -server-prefix=/sys/filesyncer cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
volumeMounts: - name: sys-filesyncer
- name: data image: onepanel/filesyncer:v0.19.0
mountPath: /data imagePullPolicy: Always
- name: sys-namespace-config args:
mountPath: /etc/onepanel - server
readOnly: true - -host=localhost:8889
ports: - -server-prefix=/sys/filesyncer
- name: jupyterlab volumeMounts:
port: 80 - name: data
protocol: TCP mountPath: /data
targetPort: 8888 - name: sys-namespace-config
- name: tensorboard mountPath: /etc/onepanel
port: 6006 readOnly: true
protocol: TCP ports:
targetPort: 6006 - name: jupyterlab
- name: nni port: 80
port: 8080 protocol: TCP
protocol: TCP targetPort: 8888
targetPort: 8080 - name: tensorboard
- name: fs port: 6006
port: 8889 protocol: TCP
protocol: TCP targetPort: 6006
targetPort: 8889 - name: nni
routes: port: 8080
- match: protocol: TCP
- uri: targetPort: 8080
prefix: /sys/filesyncer - name: fs
route: port: 8889
- destination: protocol: TCP
port: targetPort: 8889
number: 8889 routes:
- match: - match:
- uri: - uri:
prefix: /tensorboard prefix: /sys/filesyncer
route: route:
- destination: - destination:
port: port:
number: 6006 number: 8889
- match: - match:
- uri: - uri:
prefix: /nni prefix: /tensorboard
route: route:
- destination: - destination:
port: port:
number: 8080 number: 6006
- match: - match:
- uri: - uri:
prefix: / prefix: /nni
route: route:
- destination: - destination:
port: port:
number: 80 number: 8080
- match:
- uri:
prefix: /
route:
- destination:
port:
number: 80

View File

@@ -1,101 +1,108 @@
containers: metadata:
- name: jupyterlab name: JupyterLab
image: onepanel/dl:v0.20.0 kind: Workspace
command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"] version: 20210323175655
workingDir: /data action: update
env: description: "Interactive development environment for notebooks"
- name: tornado spec:
value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'" containers:
- name: TENSORBOARD_PROXY_URL - name: jupyterlab
value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard' image: onepanel/dl:v0.20.0
- name: ONEPANEL_MAIN_CONTAINER command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"]
value: 'true' workingDir: /data
ports: env:
- containerPort: 8888 - name: tornado
name: jupyterlab value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'"
- containerPort: 6006 - name: TENSORBOARD_PROXY_URL
name: tensorboard value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
- containerPort: 8080 - name: ONEPANEL_MAIN_CONTAINER
name: nni value: 'true'
volumeMounts: ports:
- name: data - containerPort: 8888
mountPath: /data name: jupyterlab
lifecycle: - containerPort: 6006
postStart: name: tensorboard
exec: - containerPort: 8080
command: name: nni
- /bin/sh volumeMounts:
- -c - name: data
- > mountPath: /data
condayml="/data/.environment.yml"; lifecycle:
jupytertxt="/data/.jupexported.txt"; postStart:
if [ -f "$condayml" ]; then conda env update -f $condayml; fi; exec:
if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi; command:
preStop: - /bin/sh
exec: - -c
command: - >
- /bin/sh condayml="/data/.environment.yml";
- -c jupytertxt="/data/.jupexported.txt";
- > if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
conda env export > /data/.environment.yml -n base; if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
jupyter labextension list 1>/dev/null 2> /data/.jup.txt; preStop:
cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt; exec:
- name: sys-filesyncer command:
image: onepanel/filesyncer:v0.20.0 - /bin/sh
imagePullPolicy: Always - -c
args: - >
- server conda env export > /data/.environment.yml -n base;
- -host=localhost:8889 jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
- -server-prefix=/sys/filesyncer cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
volumeMounts: - name: sys-filesyncer
- name: data image: onepanel/filesyncer:v0.20.0
mountPath: /data imagePullPolicy: Always
- name: sys-namespace-config args:
mountPath: /etc/onepanel - server
readOnly: true - -host=localhost:8889
ports: - -server-prefix=/sys/filesyncer
- name: jupyterlab volumeMounts:
port: 80 - name: data
protocol: TCP mountPath: /data
targetPort: 8888 - name: sys-namespace-config
- name: tensorboard mountPath: /etc/onepanel
port: 6006 readOnly: true
protocol: TCP ports:
targetPort: 6006 - name: jupyterlab
- name: nni port: 80
port: 8080 protocol: TCP
protocol: TCP targetPort: 8888
targetPort: 8080 - name: tensorboard
- name: fs port: 6006
port: 8889 protocol: TCP
protocol: TCP targetPort: 6006
targetPort: 8889 - name: nni
routes: port: 8080
- match: protocol: TCP
- uri: targetPort: 8080
prefix: /sys/filesyncer - name: fs
route: port: 8889
- destination: protocol: TCP
port: targetPort: 8889
number: 8889 routes:
- match: - match:
- uri: - uri:
prefix: /tensorboard prefix: /sys/filesyncer
route: route:
- destination: - destination:
port: port:
number: 6006 number: 8889
- match: - match:
- uri: - uri:
prefix: /nni prefix: /tensorboard
route: route:
- destination: - destination:
port: port:
number: 8080 number: 6006
- match: - match:
- uri: - uri:
prefix: / prefix: /nni
route: route:
- destination: - destination:
port: port:
number: 80 number: 8080
- match:
- uri:
prefix: /
route:
- destination:
port:
number: 80

View File

@@ -1,101 +1,108 @@
containers: metadata:
- name: jupyterlab name: JupyterLab
image: onepanel/dl:v0.20.0 kind: Workspace
command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"] version: 20210719190719
workingDir: /data action: update
env: description: "Interactive development environment for notebooks"
- name: tornado spec:
value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'" containers:
- name: TENSORBOARD_PROXY_URL - name: jupyterlab
value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard' image: onepanel/dl:v0.20.0
- name: ONEPANEL_MAIN_CONTAINER command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"]
value: 'true' workingDir: /data
ports: env:
- containerPort: 8888 - name: tornado
name: jupyterlab value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'"
- containerPort: 6006 - name: TENSORBOARD_PROXY_URL
name: tensorboard value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
- containerPort: 8080 - name: ONEPANEL_MAIN_CONTAINER
name: nni value: 'true'
volumeMounts: ports:
- name: data - containerPort: 8888
mountPath: /data name: jupyterlab
lifecycle: - containerPort: 6006
postStart: name: tensorboard
exec: - containerPort: 8080
command: name: nni
- /bin/sh volumeMounts:
- -c - name: data
- > mountPath: /data
condayml="/data/.environment.yml"; lifecycle:
jupytertxt="/data/.jupexported.txt"; postStart:
if [ -f "$condayml" ]; then conda env update -f $condayml; fi; exec:
if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi; command:
preStop: - /bin/sh
exec: - -c
command: - >
- /bin/sh condayml="/data/.environment.yml";
- -c jupytertxt="/data/.jupexported.txt";
- > if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
conda env export > /data/.environment.yml -n base; if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
jupyter labextension list 1>/dev/null 2> /data/.jup.txt; preStop:
cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt; exec:
- name: sys-filesyncer command:
image: onepanel/filesyncer:v1.0.0 - /bin/sh
imagePullPolicy: Always - -c
args: - >
- server conda env export > /data/.environment.yml -n base;
- -host=localhost:8889 jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
- -server-prefix=/sys/filesyncer cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
volumeMounts: - name: sys-filesyncer
- name: data image: onepanel/filesyncer:v1.0.0
mountPath: /data imagePullPolicy: Always
- name: sys-namespace-config args:
mountPath: /etc/onepanel - server
readOnly: true - -host=localhost:8889
ports: - -server-prefix=/sys/filesyncer
- name: jupyterlab volumeMounts:
port: 80 - name: data
protocol: TCP mountPath: /data
targetPort: 8888 - name: sys-namespace-config
- name: tensorboard mountPath: /etc/onepanel
port: 6006 readOnly: true
protocol: TCP ports:
targetPort: 6006 - name: jupyterlab
- name: nni port: 80
port: 8080 protocol: TCP
protocol: TCP targetPort: 8888
targetPort: 8080 - name: tensorboard
- name: fs port: 6006
port: 8889 protocol: TCP
protocol: TCP targetPort: 6006
targetPort: 8889 - name: nni
routes: port: 8080
- match: protocol: TCP
- uri: targetPort: 8080
prefix: /sys/filesyncer - name: fs
route: port: 8889
- destination: protocol: TCP
port: targetPort: 8889
number: 8889 routes:
- match: - match:
- uri: - uri:
prefix: /tensorboard prefix: /sys/filesyncer
route: route:
- destination: - destination:
port: port:
number: 6006 number: 8889
- match: - match:
- uri: - uri:
prefix: /nni prefix: /tensorboard
route: route:
- destination: - destination:
port: port:
number: 8080 number: 6006
- match: - match:
- uri: - uri:
prefix: / prefix: /nni
route: route:
- destination: - destination:
port: port:
number: 80 number: 8080
- match:
- uri:
prefix: /
route:
- destination:
port:
number: 80

View File

@@ -1,57 +1,64 @@
arguments: metadata:
parameters: name: "Deep Learning Desktop"
# parameter screen-resolution allows users to select screen resolution kind: Workspace
- name: screen-resolution version: 20210414165510
value: 1680x1050 action: create
type: select.select description: "Deep learning desktop with VNC"
displayName: Screen Resolution spec:
options: arguments:
- name: 1280x1024 parameters:
value: 1280x1024 # parameter screen-resolution allows users to select screen resolution
- name: 1680x1050 - name: screen-resolution
value: 1680x1050 value: 1680x1050
- name: 2880x1800 type: select.select
value: 2880x1800 displayName: Screen Resolution
containers: options:
- name: ubuntu - name: 1280x1024
image: onepanel/vnc:dl-vnc value: 1280x1024
env: - name: 1680x1050
- name: VNC_PASSWORDLESS value: 1680x1050
value: true - name: 2880x1800
- name: VNC_RESOLUTION value: 2880x1800
value: '{{workflow.parameters.screen-resolution}}' containers:
ports: - name: ubuntu
- containerPort: 6901 image: onepanel/vnc:dl-vnc
name: vnc env:
volumeMounts: - name: VNC_PASSWORDLESS
- name: data value: true
mountPath: /data - name: VNC_RESOLUTION
ports: value: '{{workflow.parameters.screen-resolution}}'
- name: vnc ports:
port: 80 - containerPort: 6901
protocol: TCP name: vnc
targetPort: 6901 volumeMounts:
routes: - name: data
- match: mountPath: /data
- uri: ports:
prefix: / - name: vnc
route: port: 80
- destination: protocol: TCP
port: targetPort: 6901
number: 80 routes:
# DAG Workflow to be executed once a Workspace action completes (optional) - match:
#postExecutionWorkflow: - uri:
# entrypoint: main prefix: /
# templates: route:
# - name: main - destination:
# dag: port:
# tasks: number: 80
# - name: slack-notify # DAG Workflow to be executed once a Workspace action completes (optional)
# template: slack-notify #postExecutionWorkflow:
# - name: slack-notify # entrypoint: main
# container: # templates:
# image: technosophos/slack-notify # - name: main
# args: # dag:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify # tasks:
# command: # - name: slack-notify
# - sh # template: slack-notify
# - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh

View File

@@ -1,81 +1,88 @@
arguments: metadata:
parameters: name: "Deep Learning Desktop"
# parameter screen-resolution allows users to select screen resolution kind: Workspace
- name: screen-resolution version: 20210719190719
value: 1680x1050 action: update
type: select.select description: "Deep learning desktop with VNC"
displayName: Screen Resolution spec:
options: arguments:
- name: 1280x1024 parameters:
value: 1280x1024 # parameter screen-resolution allows users to select screen resolution
- name: 1680x1050 - name: screen-resolution
value: 1680x1050 value: 1680x1050
- name: 2880x1800 type: select.select
value: 2880x1800 displayName: Screen Resolution
containers: options:
- name: ubuntu - name: 1280x1024
image: onepanel/vnc:dl-vnc value: 1280x1024
env: - name: 1680x1050
- name: VNC_PASSWORDLESS value: 1680x1050
value: true - name: 2880x1800
- name: VNC_RESOLUTION value: 2880x1800
value: '{{workflow.parameters.screen-resolution}}' containers:
ports: - name: ubuntu
- containerPort: 6901 image: onepanel/vnc:dl-vnc
name: vnc env:
volumeMounts: - name: VNC_PASSWORDLESS
- name: data value: true
mountPath: /data - name: VNC_RESOLUTION
- name: sys-filesyncer value: '{{workflow.parameters.screen-resolution}}'
image: onepanel/filesyncer:v1.0.0 ports:
imagePullPolicy: Always - containerPort: 6901
args: name: vnc
- server volumeMounts:
- -host=localhost:8889 - name: data
- -server-prefix=/sys/filesyncer mountPath: /data
volumeMounts: - name: sys-filesyncer
- name: data image: onepanel/filesyncer:v1.0.0
mountPath: /data imagePullPolicy: Always
- name: sys-namespace-config args:
mountPath: /etc/onepanel - server
readOnly: true - -host=localhost:8889
ports: - -server-prefix=/sys/filesyncer
- name: vnc volumeMounts:
port: 80 - name: data
protocol: TCP mountPath: /data
targetPort: 6901 - name: sys-namespace-config
- name: fs mountPath: /etc/onepanel
port: 8889 readOnly: true
protocol: TCP ports:
targetPort: 8889 - name: vnc
routes: port: 80
- match: protocol: TCP
- uri: targetPort: 6901
prefix: /sys/filesyncer - name: fs
route: port: 8889
- destination: protocol: TCP
port: targetPort: 8889
number: 8889 routes:
- match: - match:
- uri: - uri:
prefix: / prefix: /sys/filesyncer
route: route:
- destination: - destination:
port: port:
number: 80 number: 8889
# DAG Workflow to be executed once a Workspace action completes (optional) - match:
#postExecutionWorkflow: - uri:
# entrypoint: main prefix: /
# templates: route:
# - name: main - destination:
# dag: port:
# tasks: number: 80
# - name: slack-notify # DAG Workflow to be executed once a Workspace action completes (optional)
# template: slack-notify #postExecutionWorkflow:
# - name: slack-notify # entrypoint: main
# container: # templates:
# image: technosophos/slack-notify # - name: main
# args: # dag:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify # tasks:
# command: # - name: slack-notify
# - sh # template: slack-notify
# - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh

View File

@@ -1,41 +1,48 @@
# Docker containers that are part of the Workspace metadata:
containers: name: "Visual Studio Code"
- name: vscode kind: Workspace
image: onepanel/vscode:1.0.0 version: 20200929144301
command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."] action: create
ports: description: "Open source code editor"
- containerPort: 8080 spec:
name: vscode # Docker containers that are part of the Workspace
volumeMounts: containers:
- name: data - name: vscode
mountPath: /data image: onepanel/vscode:1.0.0
ports: command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."]
- name: vscode ports:
port: 8080 - containerPort: 8080
protocol: TCP name: vscode
targetPort: 8080 volumeMounts:
routes: - name: data
- match: mountPath: /data
- uri: ports:
prefix: / #vscode runs at the default route - name: vscode
route: port: 8080
- destination: protocol: TCP
port: targetPort: 8080
number: 8080 routes:
# DAG Workflow to be executed once a Workspace action completes (optional) - match:
#postExecutionWorkflow: - uri:
# entrypoint: main prefix: / #vscode runs at the default route
# templates: route:
# - name: main - destination:
# dag: port:
# tasks: number: 8080
# - name: slack-notify # DAG Workflow to be executed once a Workspace action completes (optional)
# template: slack-notify #postExecutionWorkflow:
# - name: slack-notify # entrypoint: main
# container: # templates:
# image: technosophos/slack-notify # - name: main
# args: # dag:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify # tasks:
# command: # - name: slack-notify
# - sh # template: slack-notify
# - -c # - name: slack-notify
# container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -1,60 +1,66 @@
# Docker containers that are part of the Workspace metadata:
containers: name: "Visual Studio Code"
- name: vscode kind: Workspace
image: onepanel/vscode:1.0.0 version: 20201028145443
command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."] action: update
ports: spec:
- containerPort: 8080 # Docker containers that are part of the Workspace
name: vscode containers:
volumeMounts: - name: vscode
- name: data image: onepanel/vscode:1.0.0
mountPath: /data command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."]
lifecycle: ports:
postStart: - containerPort: 8080
exec: name: vscode
command: volumeMounts:
- /bin/sh - name: data
- -c mountPath: /data
- > lifecycle:
condayml="/data/.environment.yml"; postStart:
vscodetxt="/data/.vscode-extensions.txt"; exec:
if [ -f "$condayml" ]; then conda env update -f $condayml; fi; command:
if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi; - /bin/sh
preStop: - -c
exec: - >
command: condayml="/data/.environment.yml";
- /bin/sh vscodetxt="/data/.vscode-extensions.txt";
- -c if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
- > if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi;
conda env export > /data/.environment.yml -n base; preStop:
code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt; exec:
ports: command:
- name: vscode - /bin/sh
port: 8080 - -c
protocol: TCP - >
targetPort: 8080 conda env export > /data/.environment.yml -n base;
routes: code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt;
- match: ports:
- uri: - name: vscode
prefix: / #vscode runs at the default route port: 8080
route: protocol: TCP
- destination: targetPort: 8080
port: routes:
number: 8080 - match:
# DAG Workflow to be executed once a Workspace action completes (optional) - uri:
#postExecutionWorkflow: prefix: / #vscode runs at the default route
# entrypoint: main route:
# templates: - destination:
# - name: main port:
# dag: number: 8080
# tasks: # DAG Workflow to be executed once a Workspace action completes (optional)
# - name: slack-notify #postExecutionWorkflow:
# template: slack-notify # entrypoint: main
# - name: slack-notify # templates:
# container: # - name: main
# image: technosophos/slack-notify # dag:
# args: # tasks:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify # - name: slack-notify
# command: # template: slack-notify
# - sh # - name: slack-notify
# - -c # container:
# image: technosophos/slack-notify
# args:
# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
# command:
# - sh
# - -c

View File

@@ -1,68 +1,74 @@
containers: metadata:
- name: vscode name: "Visual Studio Code"
image: onepanel/vscode:1.0.0 kind: Workspace
command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."] version: 20210129152427
env: action: update
- name: ONEPANEL_MAIN_CONTAINER spec:
value: 'true' containers:
ports: - name: vscode
- containerPort: 8080 image: onepanel/vscode:1.0.0
name: vscode command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."]
volumeMounts: env:
- name: data - name: ONEPANEL_MAIN_CONTAINER
mountPath: /data value: 'true'
lifecycle: ports:
postStart: - containerPort: 8080
exec: name: vscode
command: volumeMounts:
- /bin/sh - name: data
- -c mountPath: /data
- > lifecycle:
condayml="/data/.environment.yml"; postStart:
vscodetxt="/data/.vscode-extensions.txt"; exec:
if [ -f "$condayml" ]; then conda env update -f $condayml; fi; command:
if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi; - /bin/sh
preStop: - -c
exec: - >
command: condayml="/data/.environment.yml";
- /bin/sh vscodetxt="/data/.vscode-extensions.txt";
- -c if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
- > if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi;
conda env export > /data/.environment.yml -n base; preStop:
code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt; exec:
- name: sys-filesyncer command:
image: onepanel/filesyncer:v0.18.0 - /bin/sh
imagePullPolicy: Always - -c
args: - >
- server conda env export > /data/.environment.yml -n base;
- -server-prefix=/sys/filesyncer code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt;
volumeMounts: - name: sys-filesyncer
- name: data image: onepanel/filesyncer:v0.18.0
mountPath: /data imagePullPolicy: Always
- name: sys-namespace-config args:
mountPath: /etc/onepanel - server
readOnly: true - -server-prefix=/sys/filesyncer
ports: volumeMounts:
- name: vscode - name: data
port: 8080 mountPath: /data
protocol: TCP - name: sys-namespace-config
targetPort: 8080 mountPath: /etc/onepanel
- name: fs readOnly: true
port: 8888 ports:
protocol: TCP - name: vscode
targetPort: 8888 port: 8080
routes: protocol: TCP
- match: targetPort: 8080
- uri: - name: fs
prefix: /sys/filesyncer port: 8888
route: protocol: TCP
- destination: targetPort: 8888
port: routes:
number: 8888 - match:
- match: - uri:
- uri: prefix: /sys/filesyncer
prefix: / route:
route: - destination:
- destination: port:
port: number: 8888
number: 8080 - match:
- uri:
prefix: /
route:
- destination:
port:
number: 8080

View File

@@ -1,68 +1,74 @@
containers: metadata:
- name: vscode name: "Visual Studio Code"
image: onepanel/vscode:1.0.0 kind: Workspace
command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."] version: 20210224180017
env: action: update
- name: ONEPANEL_MAIN_CONTAINER spec:
value: 'true' containers:
ports: - name: vscode
- containerPort: 8080 image: onepanel/vscode:1.0.0
name: vscode command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."]
volumeMounts: env:
- name: data - name: ONEPANEL_MAIN_CONTAINER
mountPath: /data value: 'true'
lifecycle: ports:
postStart: - containerPort: 8080
exec: name: vscode
command: volumeMounts:
- /bin/sh - name: data
- -c mountPath: /data
- > lifecycle:
condayml="/data/.environment.yml"; postStart:
vscodetxt="/data/.vscode-extensions.txt"; exec:
if [ -f "$condayml" ]; then conda env update -f $condayml; fi; command:
if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi; - /bin/sh
preStop: - -c
exec: - >
command: condayml="/data/.environment.yml";
- /bin/sh vscodetxt="/data/.vscode-extensions.txt";
- -c if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
- > if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi;
conda env export > /data/.environment.yml -n base; preStop:
code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt; exec:
- name: sys-filesyncer command:
image: onepanel/filesyncer:v0.19.0 - /bin/sh
imagePullPolicy: Always - -c
args: - >
- server conda env export > /data/.environment.yml -n base;
- -server-prefix=/sys/filesyncer code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt;
volumeMounts: - name: sys-filesyncer
- name: data image: onepanel/filesyncer:v0.19.0
mountPath: /data imagePullPolicy: Always
- name: sys-namespace-config args:
mountPath: /etc/onepanel - server
readOnly: true - -server-prefix=/sys/filesyncer
ports: volumeMounts:
- name: vscode - name: data
port: 8080 mountPath: /data
protocol: TCP - name: sys-namespace-config
targetPort: 8080 mountPath: /etc/onepanel
- name: fs readOnly: true
port: 8888 ports:
protocol: TCP - name: vscode
targetPort: 8888 port: 8080
routes: protocol: TCP
- match: targetPort: 8080
- uri: - name: fs
prefix: /sys/filesyncer port: 8888
route: protocol: TCP
- destination: targetPort: 8888
port: routes:
number: 8888 - match:
- match: - uri:
- uri: prefix: /sys/filesyncer
prefix: / route:
route: - destination:
- destination: port:
port: number: 8888
number: 8080 - match:
- uri:
prefix: /
route:
- destination:
port:
number: 8080

View File

@@ -1,68 +1,74 @@
containers: metadata:
- name: vscode name: "Visual Studio Code"
image: onepanel/vscode:v0.20.0_code-server.3.9.1 kind: Workspace
command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."] version: 20210323175655
env: action: update
- name: ONEPANEL_MAIN_CONTAINER spec:
value: 'true' containers:
ports: - name: vscode
- containerPort: 8080 image: onepanel/vscode:v0.20.0_code-server.3.9.1
name: vscode command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."]
volumeMounts: env:
- name: data - name: ONEPANEL_MAIN_CONTAINER
mountPath: /data value: 'true'
lifecycle: ports:
postStart: - containerPort: 8080
exec: name: vscode
command: volumeMounts:
- /bin/sh - name: data
- -c mountPath: /data
- > lifecycle:
condayml="/data/.environment.yml"; postStart:
vscodetxt="/data/.vscode-extensions.txt"; exec:
if [ -f "$condayml" ]; then conda env update -f $condayml; fi; command:
if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi; - /bin/sh
preStop: - -c
exec: - >
command: condayml="/data/.environment.yml";
- /bin/sh vscodetxt="/data/.vscode-extensions.txt";
- -c if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
- > if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi;
conda env export > /data/.environment.yml -n base; preStop:
code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt; exec:
- name: sys-filesyncer command:
image: onepanel/filesyncer:v0.20.0 - /bin/sh
imagePullPolicy: Always - -c
args: - >
- server conda env export > /data/.environment.yml -n base;
- -server-prefix=/sys/filesyncer code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt;
volumeMounts: - name: sys-filesyncer
- name: data image: onepanel/filesyncer:v0.20.0
mountPath: /data imagePullPolicy: Always
- name: sys-namespace-config args:
mountPath: /etc/onepanel - server
readOnly: true - -server-prefix=/sys/filesyncer
ports: volumeMounts:
- name: vscode - name: data
port: 8080 mountPath: /data
protocol: TCP - name: sys-namespace-config
targetPort: 8080 mountPath: /etc/onepanel
- name: fs readOnly: true
port: 8888 ports:
protocol: TCP - name: vscode
targetPort: 8888 port: 8080
routes: protocol: TCP
- match: targetPort: 8080
- uri: - name: fs
prefix: /sys/filesyncer port: 8888
route: protocol: TCP
- destination: targetPort: 8888
port: routes:
number: 8888 - match:
- match: - uri:
- uri: prefix: /sys/filesyncer
prefix: / route:
route: - destination:
- destination: port:
port: number: 8888
number: 8080 - match:
- uri:
prefix: /
route:
- destination:
port:
number: 8080

View File

@@ -1,68 +1,74 @@
containers: metadata:
- name: vscode name: "Visual Studio Code"
image: onepanel/vscode:v0.20.0_code-server.3.9.1 kind: Workspace
command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."] version: 20210719190719
env: action: update
- name: ONEPANEL_MAIN_CONTAINER spec:
value: 'true' containers:
ports: - name: vscode
- containerPort: 8080 image: onepanel/vscode:v0.20.0_code-server.3.9.1
name: vscode command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."]
volumeMounts: env:
- name: data - name: ONEPANEL_MAIN_CONTAINER
mountPath: /data value: 'true'
lifecycle: ports:
postStart: - containerPort: 8080
exec: name: vscode
command: volumeMounts:
- /bin/sh - name: data
- -c mountPath: /data
- > lifecycle:
condayml="/data/.environment.yml"; postStart:
vscodetxt="/data/.vscode-extensions.txt"; exec:
if [ -f "$condayml" ]; then conda env update -f $condayml; fi; command:
if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi; - /bin/sh
preStop: - -c
exec: - >
command: condayml="/data/.environment.yml";
- /bin/sh vscodetxt="/data/.vscode-extensions.txt";
- -c if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
- > if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi;
conda env export > /data/.environment.yml -n base; preStop:
code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt; exec:
- name: sys-filesyncer command:
image: onepanel/filesyncer:v1.0.0 - /bin/sh
imagePullPolicy: Always - -c
args: - >
- server conda env export > /data/.environment.yml -n base;
- -server-prefix=/sys/filesyncer code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt;
volumeMounts: - name: sys-filesyncer
- name: data image: onepanel/filesyncer:v1.0.0
mountPath: /data imagePullPolicy: Always
- name: sys-namespace-config args:
mountPath: /etc/onepanel - server
readOnly: true - -server-prefix=/sys/filesyncer
ports: volumeMounts:
- name: vscode - name: data
port: 8080 mountPath: /data
protocol: TCP - name: sys-namespace-config
targetPort: 8080 mountPath: /etc/onepanel
- name: fs readOnly: true
port: 8888 ports:
protocol: TCP - name: vscode
targetPort: 8888 port: 8080
routes: protocol: TCP
- match: targetPort: 8080
- uri: - name: fs
prefix: /sys/filesyncer port: 8888
route: protocol: TCP
- destination: targetPort: 8888
port: routes:
number: 8888 - match:
- match: - uri:
- uri: prefix: /sys/filesyncer
prefix: / route:
route: - destination:
- destination: port:
port: number: 8888
number: 8080 - match:
- uri:
prefix: /
route:
- destination:
port:
number: 8080

Binary file not shown.

Before

Width:  |  Height:  |  Size: 129 KiB

After

Width:  |  Height:  |  Size: 302 KiB

0
manifest/.gitignore vendored Normal file
View File

View File

@@ -0,0 +1,63 @@
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"name": "minio-gateway",
"namespace": "$(applicationDefaultNamespace)"
},
"spec": {
"replicas": 1,
"selector": {
"matchLabels": {
"app": "minio-gateway"
}
},
"template": {
"metadata": {
"labels": {
"app": "minio-gateway"
},
"annotations": {
"sidecar.istio.io/inject": "false"
}
},
"spec": {
"containers": [
{
"name": "minio-gateway",
"image": "minio/minio:RELEASE.2021-06-17T00-10-46Z.hotfix.49f6035b1",
"args": [
"gateway",
"azure"
],
"env": [
{
"name": "MINIO_ACCESS_KEY",
"valueFrom": {
"secretKeyRef": {
"name": "onepanel",
"key": "artifactRepositoryS3AccessKey"
}
}
},
{
"name": "MINIO_SECRET_KEY",
"valueFrom": {
"secretKeyRef": {
"name": "onepanel",
"key": "artifactRepositoryS3SecretKey"
}
}
}
],
"ports": [
{
"containerPort": 9000
}
]
}
]
}
}
}
}

19
manifest/abs/service.json Normal file
View File

@@ -0,0 +1,19 @@
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "minio-gateway",
"namespace": "$(applicationDefaultNamespace)"
},
"spec": {
"selector": {
"app": "minio-gateway"
},
"ports": [
{
"port": 9000,
"targetPort": 9000
}
]
}
}

View File

@@ -0,0 +1,19 @@
{
"apiVersion": "rbac.authorization.k8s.io/v1",
"kind": "ClusterRoleBinding",
"metadata": {
"name": "onepanel-kfserving-$(applicationDefaultNamespace)"
},
"subjects": [
{
"kind": "ServiceAccount",
"name": "$(applicationDefaultNamespace)",
"namespace": "$(applicationDefaultNamespace)"
}
],
"roleRef": {
"apiGroup": "rbac.authorization.k8s.io",
"kind": "ClusterRole",
"name": "onepanel-models"
}
}

View File

@@ -0,0 +1,22 @@
{
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
"kind": "ClusterRoleBinding",
"metadata": {
"labels": {
"app": "onepanel"
},
"name": "onepanel-namespaces"
},
"roleRef": {
"apiGroup": "rbac.authorization.k8s.io",
"kind": "ClusterRole",
"name": "onepanel-namespaces"
},
"subjects": [
{
"kind": "ServiceAccount",
"name": "$(applicationDefaultNamespace)",
"namespace": "$(applicationDefaultNamespace)"
}
]
}

View File

@@ -0,0 +1,11 @@
{
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": "onepanel",
"namespace": "$(applicationDefaultNamespace)"
},
"data": {
"artifactRepository": "archiveLogs: true\n$(artifactRepositoryProvider)\n"
}
}

View File

@@ -0,0 +1,83 @@
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"name": "minio-gateway",
"namespace": "$(applicationDefaultNamespace)"
},
"spec": {
"replicas": 1,
"selector": {
"matchLabels": {
"app": "minio-gateway"
}
},
"template": {
"metadata": {
"labels": {
"app": "minio-gateway"
},
"annotations": {
"sidecar.istio.io/inject": "false"
}
},
"spec": {
"containers": [
{
"name": "minio-gateway",
"image": "minio/minio:RELEASE.2021-06-17T00-10-46Z.hotfix.49f6035b1",
"volumeMounts": [
{
"name": "gcs-credentials",
"mountPath": "/etc/gcs",
"readOnly": true
}
],
"args": [
"gateway",
"gcs"
],
"env": [
{
"name": "MINIO_ACCESS_KEY",
"valueFrom": {
"secretKeyRef": {
"name": "onepanel",
"key": "artifactRepositoryS3AccessKey"
}
}
},
{
"name": "MINIO_SECRET_KEY",
"valueFrom": {
"secretKeyRef": {
"name": "onepanel",
"key": "artifactRepositoryS3SecretKey"
}
}
},
{
"name": "GOOGLE_APPLICATION_CREDENTIALS",
"value": "/etc/gcs/credentials.json"
}
]
}
],
"volumes": [
{
"name": "gcs-credentials",
"projected": {
"sources": [
{
"secret": {
"name": "artifact-repository-gcs-credentials"
}
}
]
}
}
]
}
}
}
}

19
manifest/gcs/service.json Normal file
View File

@@ -0,0 +1,19 @@
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "minio-gateway",
"namespace": "$(applicationDefaultNamespace)"
},
"spec": {
"selector": {
"app": "minio-gateway"
},
"ports": [
{
"port": 9000,
"targetPort": 9000
}
]
}
}

View File

@@ -0,0 +1,19 @@
{
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"name": "kfserving-storage",
"namespace": "$(applicationDefaultNamespace)",
"annotations": {
"serving.kubeflow.org/s3-endpoint": "$(artifactRepositoryS3Endpoint)",
"serving.kubeflow.org/s3-usehttps": "0",
"serving.kubeflow.org/s3-region": "$(artifactRepositoryS3Region)",
"serving.kubeflow.org/s3-useanoncredential": "false"
}
},
"type": "Opaque",
"data": {
"AWS_ACCESS_KEY_ID": "$(artifactRepositoryS3AccessKey)",
"AWS_SECRET_ACCESS_KEY": "$(artifactRepositoryS3SecretKey)"
}
}

View File

@@ -0,0 +1,41 @@
{
"apiVersion": "networking.k8s.io/v1",
"kind": "NetworkPolicy",
"metadata": {
"labels": {
"app": "onepanel"
},
"name": "onepanel",
"namespace": "$(applicationDefaultNamespace)"
},
"spec": {
"egress": [
{
"to": [
{
"ipBlock": {
"cidr": "0.0.0.0/0",
"except": [
"169.254.169.254/32"
]
}
}
]
}
],
"ingress": [
{
"from": [
{
"namespaceSelector": {
"matchLabels": {
"app.kubernetes.io/part-of": "onepanel"
}
}
}
]
}
],
"podSelector": {}
}
}

View File

@@ -0,0 +1,167 @@
{
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
"kind": "Role",
"metadata": {
"labels": {
"app": "onepanel"
},
"name": "onepanel",
"namespace": "$(applicationDefaultNamespace)"
},
"rules": [
{
"apiGroups": [
""
],
"resources": [
"configmaps"
],
"verbs": [
"get"
]
},
{
"apiGroups": [
""
],
"resources": [
"pods",
"pods/log"
],
"verbs": [
"get",
"watch",
"patch"
]
},
{
"apiGroups": [
""
],
"resources": [
"persistentvolumeclaims",
"services",
"secrets"
],
"verbs": [
"get",
"watch",
"list",
"create",
"update",
"patch",
"delete"
]
},
{
"apiGroups": [
"apps"
],
"resources": [
"statefulsets",
"deployments"
],
"verbs": [
"get",
"watch",
"list",
"create",
"update",
"patch",
"delete"
]
},
{
"apiGroups": [
"networking.istio.io"
],
"resources": [
"virtualservices"
],
"verbs": [
"get",
"watch",
"list",
"create",
"update",
"patch",
"delete"
]
},
{
"apiGroups": [
"argoproj.io"
],
"resources": [
"workflows",
"workflowtemplates",
"cronworkflows"
],
"verbs": [
"get",
"watch",
"list",
"create",
"update",
"patch",
"delete"
]
},
{
"apiGroups": [
"onepanel.io"
],
"resources": [
"workspaces"
],
"verbs": [
"get",
"watch",
"list",
"create",
"update",
"patch",
"delete"
]
},
{
"apiGroups": [
"onepanel.io"
],
"resources": [
"services"
],
"verbs": [
"get",
"watch",
"list",
"create",
"update",
"patch",
"delete"
]
},
{
"apiGroups": [
"serving.kubeflow.org"
],
"resources": [
"inferenceservices"
],
"verbs": [
"get",
"watch",
"list",
"create",
"update",
"patch",
"delete"
]
},
{
"apiGroups": [""],
"resources": ["onepanel-service"],
"verbs": ["get", "watch", "list"]
}
]
}

View File

@@ -0,0 +1,28 @@
{
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
"kind": "RoleBinding",
"metadata": {
"labels": {
"app": "onepanel"
},
"name": "onepanel",
"namespace": "$(applicationDefaultNamespace)"
},
"roleRef": {
"apiGroup": "rbac.authorization.k8s.io",
"kind": "Role",
"name": "onepanel"
},
"subjects": [
{
"kind": "ServiceAccount",
"name": "default",
"namespace": "$(applicationDefaultNamespace)"
},
{
"kind": "ServiceAccount",
"name": "$(applicationDefaultNamespace)",
"namespace": "$(applicationDefaultNamespace)"
}
]
}

View File

@@ -0,0 +1,9 @@
{
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"name": "onepanel-default-env",
"namespace": "$(applicationDefaultNamespace)"
},
"type": "Opaque"
}

View File

@@ -0,0 +1,21 @@
{
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"name": "onepanel",
"namespace": "$(applicationDefaultNamespace)",
"labels": {
"app.kubernetes.io/component": "onepanel",
"app.kubernetes.io/instance": "onepanel-v0.5.0",
"app.kubernetes.io/managed-by": "onepanel-cli",
"app.kubernetes.io/name": "onepanel",
"app.kubernetes.io/part-of": "onepanel",
"app.kubernetes.io/version": "v0.5.0"
}
},
"data": {
"artifactRepositoryS3AccessKey": "$(artifactRepositoryS3AccessKey)",
"artifactRepositoryS3SecretKey": "$(artifactRepositoryS3SecretKey)"
},
"type": "Opaque"
}

View File

@@ -0,0 +1,8 @@
{
"kind": "ServiceAccount",
"apiVersion": "v1",
"metadata": {
"name": "$(applicationDefaultNamespace)",
"namespace": "$(applicationDefaultNamespace)"
}
}

View File

@@ -0,0 +1,37 @@
{
"apiVersion": "networking.istio.io/v1alpha3",
"kind": "VirtualService",
"metadata": {
"name": "minio",
"namespace": "$(applicationDefaultNamespace)"
},
"spec": {
"hosts": [
"sys-storage-$(applicationDefaultNamespace).$(applicationDomain)"
],
"gateways": [
"istio-system/ingressgateway"
],
"http": [
{
"match": [
{
"uri": {
"prefix": "/"
}
}
],
"route": [
{
"destination": {
"port": {
"number": 9000
},
"host": "minio-gateway.$(applicationDefaultNamespace).svc.cluster.local"
}
}
]
}
]
}
}

View File

@@ -5,14 +5,17 @@ import (
sq "github.com/Masterminds/squirrel" sq "github.com/Masterminds/squirrel"
argoprojv1alpha1 "github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1" argoprojv1alpha1 "github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
"github.com/onepanelio/core/pkg/util"
"github.com/onepanelio/core/pkg/util/env" "github.com/onepanelio/core/pkg/util/env"
"github.com/onepanelio/core/pkg/util/gcs" "github.com/onepanelio/core/pkg/util/gcs"
"github.com/onepanelio/core/pkg/util/router" "github.com/onepanelio/core/pkg/util/router"
"github.com/onepanelio/core/pkg/util/s3" "github.com/onepanelio/core/pkg/util/s3"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest" "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/yaml"
"strconv" "strconv"
"time" "time"
) )
@@ -27,6 +30,7 @@ type Client struct {
argoprojV1alpha1 argoprojv1alpha1.ArgoprojV1alpha1Interface argoprojV1alpha1 argoprojv1alpha1.ArgoprojV1alpha1Interface
*DB *DB
systemConfig SystemConfig systemConfig SystemConfig
cache map[string]interface{}
} }
func (c *Client) ArgoprojV1alpha1() argoprojv1alpha1.ArgoprojV1alpha1Interface { func (c *Client) ArgoprojV1alpha1() argoprojv1alpha1.ArgoprojV1alpha1Interface {
@@ -102,6 +106,7 @@ func NewClient(config *Config, db *DB, systemConfig SystemConfig) (client *Clien
argoprojV1alpha1: argoClient, argoprojV1alpha1: argoClient,
DB: db, DB: db,
systemConfig: systemConfig, systemConfig: systemConfig,
cache: make(map[string]interface{}),
}, nil }, nil
} }
@@ -175,7 +180,12 @@ func (c *Client) GetWebRouter() (router.Web, error) {
// GetArtifactRepositoryType returns the configured artifact repository type for the given namespace. // GetArtifactRepositoryType returns the configured artifact repository type for the given namespace.
// possible return values are: "s3", "gcs" // possible return values are: "s3", "gcs"
func (c *Client) GetArtifactRepositoryType(namespace string) (string, error) { func (c *Client) GetArtifactRepositoryType(namespace string) (string, error) {
artifactRepositoryType := "s3" artifactRepositoryType, ok := c.cache["artifactRepositoryType"]
if ok {
return artifactRepositoryType.(string), nil
}
artifactRepositoryType = "s3"
nsConfig, err := c.GetNamespaceConfig(namespace) nsConfig, err := c.GetNamespaceConfig(namespace)
if err != nil { if err != nil {
return "", err return "", err
@@ -184,7 +194,38 @@ func (c *Client) GetArtifactRepositoryType(namespace string) (string, error) {
artifactRepositoryType = "gcs" artifactRepositoryType = "gcs"
} }
return artifactRepositoryType, nil c.cache["artifactRepositoryType"] = artifactRepositoryType
return artifactRepositoryType.(string), nil
}
// GetArtifactRepositorySource returns the original source for the artifact repository
// This can be s3, abs, gcs, etc. Since everything goes through an S3 compatible API,
// it is sometimes useful to know the source.
func (c *Client) GetArtifactRepositorySource(namespace string) (string, error) {
configMap, err := c.getConfigMap(namespace, "onepanel")
if err != nil {
log.WithFields(log.Fields{
"Namespace": namespace,
"Error": err.Error(),
}).Error("getArtifactRepositorySource failed getting config map.")
return "", err
}
config := &NamespaceConfig{
ArtifactRepository: ArtifactRepositoryProvider{},
}
err = yaml.Unmarshal([]byte(configMap.Data["artifactRepository"]), &config.ArtifactRepository)
if err != nil || (config.ArtifactRepository.S3 == nil && config.ArtifactRepository.GCS == nil) {
return "", util.NewUserError(codes.NotFound, "Artifact repository config not found.")
}
if config.ArtifactRepository.S3 != nil {
return config.ArtifactRepository.S3.Source, nil
}
return config.ArtifactRepository.GCS.Source, nil
} }
// getKubernetesTimeout returns the timeout for kubernetes requests. // getKubernetesTimeout returns the timeout for kubernetes requests.

View File

@@ -26,6 +26,7 @@ func (c *Client) getConfigMap(namespace, name string) (configMap *ConfigMap, err
// GetSystemConfig will pull it from the resources // GetSystemConfig will pull it from the resources
func (c *Client) ClearSystemConfigCache() { func (c *Client) ClearSystemConfigCache() {
c.systemConfig = nil c.systemConfig = nil
c.cache = make(map[string]interface{})
} }
// GetSystemConfig loads various system configurations and bundles them into a map. // GetSystemConfig loads various system configurations and bundles them into a map.
@@ -90,17 +91,14 @@ func (c *Client) GetNamespaceConfig(namespace string) (config *NamespaceConfig,
return return
} }
switch { if config.ArtifactRepository.S3 == nil {
case config.ArtifactRepository.S3 != nil:
{
accessKey, _ := base64.StdEncoding.DecodeString(secret.Data[config.ArtifactRepository.S3.AccessKeySecret.Key])
config.ArtifactRepository.S3.AccessKey = string(accessKey)
secretKey, _ := base64.StdEncoding.DecodeString(secret.Data[config.ArtifactRepository.S3.SecretKeySecret.Key])
config.ArtifactRepository.S3.Secretkey = string(secretKey)
}
default:
return nil, util.NewUserError(codes.NotFound, "Artifact repository config not found.") return nil, util.NewUserError(codes.NotFound, "Artifact repository config not found.")
} }
accessKey, _ := base64.StdEncoding.DecodeString(secret.Data[config.ArtifactRepository.S3.AccessKeySecret.Key])
config.ArtifactRepository.S3.AccessKey = string(accessKey)
secretKey, _ := base64.StdEncoding.DecodeString(secret.Data[config.ArtifactRepository.S3.SecretKeySecret.Key])
config.ArtifactRepository.S3.Secretkey = string(secretKey)
return return
} }

View File

@@ -174,6 +174,11 @@ func (s SystemConfig) DatabaseDriverName() *string {
return s.GetValue("databaseDriverName") return s.GetValue("databaseDriverName")
} }
// Provider gets the ONEPANEL_PROVIDER value, or nil.
func (s SystemConfig) Provider() *string {
return s.GetValue("ONEPANEL_PROVIDER")
}
// DatabaseConnection returns system config information to connect to a database // DatabaseConnection returns system config information to connect to a database
func (s SystemConfig) DatabaseConnection() (driverName, dataSourceName string) { func (s SystemConfig) DatabaseConnection() (driverName, dataSourceName string) {
dataSourceName = fmt.Sprintf("host=%v user=%v password=%v dbname=%v sslmode=disable", dataSourceName = fmt.Sprintf("host=%v user=%v password=%v dbname=%v sslmode=disable",
@@ -243,6 +248,7 @@ func (s SystemConfig) HMACKey() []byte {
// by the CLI. CLI will marshal this struct into the correct // by the CLI. CLI will marshal this struct into the correct
// YAML structure for k8s configmap / secret. // YAML structure for k8s configmap / secret.
type ArtifactRepositoryS3Provider struct { type ArtifactRepositoryS3Provider struct {
Source string
KeyFormat string `yaml:"keyFormat"` KeyFormat string `yaml:"keyFormat"`
Bucket string Bucket string
Endpoint string Endpoint string
@@ -260,6 +266,7 @@ type ArtifactRepositoryS3Provider struct {
// by the CLI. CLI will marshal this struct into the correct // by the CLI. CLI will marshal this struct into the correct
// YAML structure for k8s configmap / secret. // YAML structure for k8s configmap / secret.
type ArtifactRepositoryGCSProvider struct { type ArtifactRepositoryGCSProvider struct {
Source string
KeyFormat string `yaml:"keyFormat"` KeyFormat string `yaml:"keyFormat"`
Bucket string Bucket string
Endpoint string Endpoint string

135
pkg/data.go Normal file
View File

@@ -0,0 +1,135 @@
package v1
import (
"github.com/onepanelio/core/pkg/util/data"
"github.com/onepanelio/core/pkg/util/extensions"
)
// createWorkspaceTemplateFromGenericManifest creates a workspace template in the given
// namespace from the manifest file. The template name and optional description come from
// the manifest metadata; any {{.ArtifactRepositoryType}} placeholder in the spec is
// replaced with the namespace's configured artifact repository type ("s3" or "gcs").
func (c *Client) createWorkspaceTemplateFromGenericManifest(namespace string, manifestFile *data.ManifestFile) (err error) {
	// Serialize the manifest spec to a string so placeholders can be substituted.
	manifest, err := manifestFile.SpecString()
	if err != nil {
		return err
	}
	templateName := manifestFile.Metadata.Name
	description := manifestFile.Metadata.Description

	artifactRepositoryType, err := c.GetArtifactRepositoryType(namespace)
	if err != nil {
		return err
	}
	replaceMap := map[string]string{
		"{{.ArtifactRepositoryType}}": artifactRepositoryType,
	}
	manifest = extensions.ReplaceMapValues(manifest, replaceMap)

	workspaceTemplate, err := CreateWorkspaceTemplate(templateName)
	if err != nil {
		return err
	}
	workspaceTemplate.Manifest = manifest
	// Description is optional in the manifest metadata.
	if description != nil {
		workspaceTemplate.Description = *description
	}

	_, err = c.CreateWorkspaceTemplate(namespace, workspaceTemplate)

	return
}
// updateWorkspaceTemplateManifest updates the workspace template named in the manifest
// file's metadata, replacing its stored manifest with the file's spec after substituting
// the {{.ArtifactRepositoryType}} placeholder for the namespace's repository type.
func (c *Client) updateWorkspaceTemplateManifest(namespace string, manifestFile *data.ManifestFile) (err error) {
	spec, err := manifestFile.SpecString()
	if err != nil {
		return err
	}
	name := manifestFile.Metadata.Name

	repoType, err := c.GetArtifactRepositoryType(namespace)
	if err != nil {
		return err
	}
	spec = extensions.ReplaceMapValues(spec, map[string]string{
		"{{.ArtifactRepositoryType}}": repoType,
	})

	template, err := CreateWorkspaceTemplate(name)
	if err != nil {
		return err
	}
	template.Manifest = spec

	_, err = c.UpdateWorkspaceTemplateManifest(namespace, template.UID, template.Manifest)
	return
}
// createWorkflowTemplateFromGenericManifest creates a workflow template in the given
// namespace from the manifest file. The template name and labels come from the manifest
// metadata; any {{.ArtifactRepositoryType}} placeholder in the spec is replaced with
// the namespace's configured artifact repository type ("s3" or "gcs").
func (c *Client) createWorkflowTemplateFromGenericManifest(namespace string, manifestFile *data.ManifestFile) (err error) {
	// Serialize the manifest spec to a string so placeholders can be substituted.
	manifest, err := manifestFile.SpecString()
	if err != nil {
		return err
	}
	templateName := manifestFile.Metadata.Name
	labels := manifestFile.Metadata.Labels

	artifactRepositoryType, err := c.GetArtifactRepositoryType(namespace)
	if err != nil {
		return err
	}
	replaceMap := map[string]string{
		"{{.ArtifactRepositoryType}}": artifactRepositoryType,
	}
	manifest = extensions.ReplaceMapValues(manifest, replaceMap)

	workflowTemplate, err := CreateWorkflowTemplate(templateName)
	if err != nil {
		return
	}
	workflowTemplate.Manifest = manifest
	workflowTemplate.Labels = labels

	_, err = c.CreateWorkflowTemplate(namespace, workflowTemplate)

	return
}
// updateWorkflowTemplateManifest creates a new version of the workflow template named in
// the manifest file's metadata, using the file's spec with the
// {{.ArtifactRepositoryType}} placeholder substituted for the namespace's repository type.
func (c *Client) updateWorkflowTemplateManifest(namespace string, manifestFile *data.ManifestFile) (err error) {
	spec, err := manifestFile.SpecString()
	if err != nil {
		return err
	}
	name := manifestFile.Metadata.Name
	labels := manifestFile.Metadata.Labels

	repoType, err := c.GetArtifactRepositoryType(namespace)
	if err != nil {
		return err
	}
	spec = extensions.ReplaceMapValues(spec, map[string]string{
		"{{.ArtifactRepositoryType}}": repoType,
	})

	template, err := CreateWorkflowTemplate(name)
	if err != nil {
		return
	}
	template.Manifest = spec
	template.Labels = labels

	_, err = c.CreateWorkflowTemplateVersion(namespace, template)
	return
}

43
pkg/istio.go Normal file
View File

@@ -0,0 +1,43 @@
package v1
import (
"fmt"
"github.com/onepanelio/core/pkg/util"
"google.golang.org/grpc/codes"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"strings"
)
// istioVirtualServiceResource is the resource segment used in REST request paths
// for istio VirtualService objects.
const istioVirtualServiceResource = "VirtualServices"

// istioModelRestClient builds a REST client scoped to the istio
// networking.istio.io/v1alpha3 API group.
func istioModelRestClient() (*rest.RESTClient, error) {
	cfg := *NewConfig()
	cfg.APIPath = "/apis"
	cfg.GroupVersion = &schema.GroupVersion{Group: "networking.istio.io", Version: "v1alpha3"}
	cfg.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
	return rest.RESTClientFor(&cfg)
}
// CreateVirtualService creates an istio VirtualService in the given namespace.
// data is the raw (JSON) body of the VirtualService to create. When the API server
// reports the resource already exists, the error is mapped to a user-facing
// AlreadyExists error; all other errors are returned unchanged.
func (c *Client) CreateVirtualService(namespace string, data interface{}) error {
	restClient, err := istioModelRestClient()
	if err != nil {
		return err
	}

	err = restClient.Post().
		Namespace(namespace).
		Resource(istioVirtualServiceResource).
		Body(data).
		Do().
		Error()
	// Detect duplicates by message text; the raw error type is not inspected here.
	// Note: fix for vet/staticcheck S1039 — the original called fmt.Sprintf with
	// no format arguments; the message now includes the namespace for context.
	if err != nil && strings.Contains(err.Error(), "already exists") {
		return util.NewUserError(codes.AlreadyExists, fmt.Sprintf("VirtualService already exists in namespace '%v'", namespace))
	}

	return err
}

View File

@@ -1,14 +1,39 @@
package v1 package v1
import ( import (
"encoding/base64"
"fmt" "fmt"
"github.com/onepanelio/core/pkg/util"
"github.com/onepanelio/core/pkg/util/data"
"google.golang.org/grpc/codes"
"io/ioutil"
vapps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
vnet "k8s.io/api/networking/v1"
v1rbac "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/json"
"os"
"path/filepath"
"strings"
) )
var onepanelEnabledLabelKey = "onepanel.io/enabled" var onepanelEnabledLabelKey = "onepanel.io/enabled"
func replaceVariables(filepath string, replacements map[string]string) (string, error) {
data, err := ioutil.ReadFile(filepath)
if err != nil {
return "", err
}
dataStr := string(data)
for key, value := range replacements {
dataStr = strings.ReplaceAll(dataStr, key, value)
}
return dataStr, nil
}
func (c *Client) ListOnepanelEnabledNamespaces() (namespaces []*Namespace, err error) { func (c *Client) ListOnepanelEnabledNamespaces() (namespaces []*Namespace, err error) {
namespaceList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{ namespaceList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", onepanelEnabledLabelKey, "true"), LabelSelector: fmt.Sprintf("%s=%s", onepanelEnabledLabelKey, "true"),
@@ -42,6 +67,7 @@ func (c *Client) GetNamespace(name string) (namespace *Namespace, err error) {
return return
} }
// ListNamespaces lists all of the onepanel enabled namespaces
func (c *Client) ListNamespaces() (namespaces []*Namespace, err error) { func (c *Client) ListNamespaces() (namespaces []*Namespace, err error) {
namespaceList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{ namespaceList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", onepanelEnabledLabelKey, "true"), LabelSelector: fmt.Sprintf("%s=%s", onepanelEnabledLabelKey, "true"),
@@ -60,25 +86,433 @@ func (c *Client) ListNamespaces() (namespaces []*Namespace, err error) {
return return
} }
func (c *Client) CreateNamespace(name string) (namespace *Namespace, err error) { // CreateNamespace creates a namespace named {{ name }} assuming the {{ sourceNamespace }} created it
createNamespace := &v1.Namespace{ func (c *Client) CreateNamespace(sourceNamespace, name string) (namespace *Namespace, err error) {
ObjectMeta: metav1.ObjectMeta{ newNamespace := name
Name: name, domain := *c.systemConfig.Domain()
Labels: map[string]string{ artifactRepositorySource, err := c.GetArtifactRepositorySource(sourceNamespace)
"istio-injection": "enabled", if err != nil {
onepanelEnabledLabelKey: "true", return nil, err
},
},
} }
k8Namespace, err := c.CoreV1().Namespaces().Create(createNamespace) config, err := c.GetNamespaceConfig(sourceNamespace)
if err != nil { if err != nil {
return return nil, err
}
if config.ArtifactRepository.S3 == nil {
return nil, util.NewUserError(codes.Internal, "S3 compatible artifact repository not set")
}
accessKey := config.ArtifactRepository.S3.AccessKey
secretKey := config.ArtifactRepository.S3.Secretkey
if err := c.createK8sNamespace(name); err != nil {
return nil, err
}
if err := c.createNetworkPolicy(newNamespace); err != nil {
return nil, err
}
if err := c.createIstioVirtualService(newNamespace, domain); err != nil {
return nil, err
}
if err := c.createRole(newNamespace); err != nil {
return nil, err
}
if err := c.createDefaultSecret(newNamespace); err != nil {
return nil, err
}
if err := c.createSecretOnepanelDefaultNamespace(newNamespace, accessKey, secretKey); err != nil {
return nil, err
}
if err := c.createProviderDependentMinioDeployment(newNamespace, artifactRepositorySource); err != nil {
return nil, err
}
if err := c.createProviderDependentMinioService(newNamespace, artifactRepositorySource); err != nil {
return nil, err
}
if err := c.createNamespaceConfigMap(sourceNamespace, newNamespace); err != nil {
return nil, err
}
if err := c.createNamespaceClusterRoleBinding(newNamespace); err != nil {
return nil, err
}
if err := c.createNamespaceRoleBinding(newNamespace); err != nil {
return nil, err
}
if err := c.createNamespaceServiceAccount(newNamespace); err != nil {
return nil, err
}
if err := c.createNamespaceModelClusterRoleBinding(newNamespace); err != nil {
return nil, err
}
if err := c.createNamespaceTemplates(newNamespace); err != nil {
return nil, err
} }
namespace = &Namespace{ namespace = &Namespace{
Name: k8Namespace.Name, Name: name,
} }
return return
} }
// createK8sNamespace creates the underlying kubernetes Namespace resource for name,
// labeled for istio sidecar injection and marked as onepanel-enabled so it is picked
// up by the onepanel namespace listings. Returns a user-facing AlreadyExists error
// when a namespace with that name already exists.
func (c *Client) createK8sNamespace(name string) error {
	createNamespace := &v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Labels: map[string]string{
				// Enables automatic istio sidecar injection for workloads here.
				"istio-injection": "enabled",
				// Marks the namespace as managed/visible to onepanel.
				onepanelEnabledLabelKey: "true",
				"app.kubernetes.io/component":  "onepanel",
				"app.kubernetes.io/instance":   "onepanel-v0.5.0",
				"app.kubernetes.io/managed-by": "onepanel-cli",
				"app.kubernetes.io/name":       "onepanel",
				"app.kubernetes.io/part-of":    "onepanel",
				"app.kubernetes.io/version":    "v0.5.0",
			},
		},
	}

	_, err := c.CoreV1().Namespaces().Create(createNamespace)
	// Duplicates are detected by message text and mapped to a friendlier error.
	if err != nil && strings.Contains(err.Error(), "already exists") {
		return util.NewUserError(codes.AlreadyExists, "Namespace '"+name+"' already exists")
	}

	return err
}
// createNetworkPolicy creates the onepanel NetworkPolicy in the given namespace by
// rendering its manifest template with the namespace placeholder substituted.
func (c *Client) createNetworkPolicy(namespace string) error {
	manifestPath := filepath.Join("manifest", "networkpolicy-onepanel-defaultnamespace.json")
	rendered, err := replaceVariables(manifestPath, map[string]string{
		"$(applicationDefaultNamespace)": namespace,
	})
	if err != nil {
		return err
	}

	policy := &vnet.NetworkPolicy{}
	if err := json.Unmarshal([]byte(rendered), policy); err != nil {
		return err
	}

	_, err = c.NetworkingV1().NetworkPolicies(namespace).Create(policy)
	return err
}
// createIstioVirtualService creates the minio VirtualService for the namespace,
// rendering its manifest with the namespace and application domain substituted.
func (c *Client) createIstioVirtualService(namespace, domain string) error {
	rendered, err := replaceVariables(
		filepath.Join("manifest", "service-minio-onepanel.json"),
		map[string]string{
			"$(applicationDefaultNamespace)": namespace,
			"$(applicationDomain)":           domain,
		})
	if err != nil {
		return err
	}

	return c.CreateVirtualService(namespace, []byte(rendered))
}
// createRole creates the "onepanel" Role in the given namespace from its manifest
// template, granting access to the resources onepanel manages there (pods, secrets,
// services, argo workflows, onepanel workspaces, inference services, etc.).
func (c *Client) createRole(namespace string) error {
	replacements := map[string]string{
		"$(applicationDefaultNamespace)": namespace,
	}
	dataStr, err := replaceVariables(filepath.Join("manifest", "role-onepanel-defaultnamespace.json"), replacements)
	if err != nil {
		return err
	}
	data := []byte(dataStr)

	role := &v1rbac.Role{}
	if err := json.Unmarshal(data, role); err != nil {
		return err
	}

	_, err = c.RbacV1().Roles(namespace).Create(role)
	return err
}
// createDefaultSecret creates the "onepanel-default-env" Secret in the given
// namespace from its manifest template.
func (c *Client) createDefaultSecret(namespace string) error {
	rendered, err := replaceVariables(
		filepath.Join("manifest", "secret-onepanel-default-env-defaultnamespace.json"),
		map[string]string{
			"$(applicationDefaultNamespace)": namespace,
		})
	if err != nil {
		return err
	}

	secret := &v1.Secret{}
	if err := json.Unmarshal([]byte(rendered), secret); err != nil {
		return err
	}

	_, err = c.CoreV1().Secrets(namespace).Create(secret)
	return err
}
// createSecretOnepanelDefaultNamespace creates the "onepanel" Secret in the given
// namespace, carrying the artifact repository S3 access and secret keys.
// The keys are base64-encoded here because the manifest injects them into the
// Secret's "data" fields, which kubernetes expects to be base64-encoded.
func (c *Client) createSecretOnepanelDefaultNamespace(namespace, accessKey, secretKey string) error {
	replacements := map[string]string{
		"$(applicationDefaultNamespace)":   namespace,
		"$(artifactRepositoryS3AccessKey)": base64.StdEncoding.EncodeToString([]byte(accessKey)),
		"$(artifactRepositoryS3SecretKey)": base64.StdEncoding.EncodeToString([]byte(secretKey)),
	}
	dataStr, err := replaceVariables(filepath.Join("manifest", "secret-onepanel-defaultnamespace.json"), replacements)
	if err != nil {
		return err
	}
	data := []byte(dataStr)

	secret := &v1.Secret{}
	if err := json.Unmarshal(data, secret); err != nil {
		return err
	}

	_, err = c.CoreV1().Secrets(namespace).Create(secret)
	return err
}
// createProviderDependentMinioDeployment creates the minio-gateway Deployment for
// artifact repository providers that are fronted by a local S3-compatible gateway
// (e.g. "gcs", "abs", whose manifests live under manifest/<provider>/deployment.json).
// It is a no-op for native "s3", which needs no gateway.
func (c *Client) createProviderDependentMinioDeployment(namespace, artifactRepositoryProvider string) error {
	// AWS S3 doesn't require a gateway deployment. Checking first avoids building
	// the replacements map for nothing and matches createProviderDependentMinioService.
	if artifactRepositoryProvider == "s3" {
		return nil
	}

	replacements := map[string]string{
		"$(applicationDefaultNamespace)": namespace,
	}
	dataStr, err := replaceVariables(filepath.Join("manifest", artifactRepositoryProvider, "deployment.json"), replacements)
	if err != nil {
		return err
	}

	deployment := &vapps.Deployment{}
	if err := json.Unmarshal([]byte(dataStr), deployment); err != nil {
		return err
	}

	_, err = c.AppsV1().Deployments(namespace).Create(deployment)
	return err
}
// createProviderDependentMinioService creates the minio-gateway Service for
// gateway-based artifact repository providers, loading the manifest from
// manifest/<provider>/service.json. It is a no-op for native "s3".
func (c *Client) createProviderDependentMinioService(namespace, artifactRepositoryProvider string) error {
	// AWS S3 doesn't require a gateway service.
	if artifactRepositoryProvider == "s3" {
		return nil
	}

	rendered, err := replaceVariables(
		filepath.Join("manifest", artifactRepositoryProvider, "service.json"),
		map[string]string{
			"$(applicationDefaultNamespace)": namespace,
		})
	if err != nil {
		return err
	}

	service := &v1.Service{}
	if err := json.Unmarshal([]byte(rendered), service); err != nil {
		return err
	}

	_, err = c.CoreV1().Services(namespace).Create(service)
	return err
}
// createNamespaceClusterRoleBinding creates the ClusterRoleBinding that grants the
// namespace's service account the "onepanel-namespaces" ClusterRole. Because
// ClusterRoleBindings are cluster-scoped, the name from the manifest is suffixed
// with the namespace to keep it unique across namespaces.
func (c *Client) createNamespaceClusterRoleBinding(namespace string) error {
	replacements := map[string]string{
		"$(applicationDefaultNamespace)": namespace,
	}
	dataStr, err := replaceVariables(filepath.Join("manifest", "clusterrolebinding-onepanel-namespaces-defaultnamespace.json"), replacements)
	if err != nil {
		return err
	}
	data := []byte(dataStr)

	resource := &v1rbac.ClusterRoleBinding{}
	if err := json.Unmarshal(data, resource); err != nil {
		return err
	}
	// Make the cluster-scoped binding name unique per namespace.
	resource.Name += "-" + namespace

	_, err = c.RbacV1().ClusterRoleBindings().Create(resource)
	return err
}
// createNamespaceRoleBinding creates the RoleBinding that binds the "onepanel" Role
// to the namespace's service accounts, rendered from its manifest template.
func (c *Client) createNamespaceRoleBinding(namespace string) error {
	rendered, err := replaceVariables(
		filepath.Join("manifest", "rolebinding-onepanel-defaultnamespace.json"),
		map[string]string{
			"$(applicationDefaultNamespace)": namespace,
		})
	if err != nil {
		return err
	}

	binding := &v1rbac.RoleBinding{}
	if err := json.Unmarshal([]byte(rendered), binding); err != nil {
		return err
	}

	_, err = c.RbacV1().RoleBindings(namespace).Create(binding)
	return err
}
// createNamespaceServiceAccount creates the namespace's ServiceAccount (named after
// the namespace itself) from its manifest template.
func (c *Client) createNamespaceServiceAccount(namespace string) error {
	rendered, err := replaceVariables(
		filepath.Join("manifest", "service-account.json"),
		map[string]string{
			"$(applicationDefaultNamespace)": namespace,
		})
	if err != nil {
		return err
	}

	account := &v1.ServiceAccount{}
	if err := json.Unmarshal([]byte(rendered), account); err != nil {
		return err
	}

	_, err = c.CoreV1().ServiceAccounts(namespace).Create(account)
	return err
}
// createNamespaceConfigMap copies the "onepanel" ConfigMap from sourceNamespace into
// namespace, rewriting the minio-gateway endpoint inside the artifactRepository entry
// so it points at the gateway service in the new namespace.
func (c *Client) createNamespaceConfigMap(sourceNamespace, namespace string) error {
	sourceConfigMap, err := c.CoreV1().ConfigMaps(sourceNamespace).Get("onepanel", metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Point the artifact repository endpoint at the new namespace's minio-gateway.
	data := sourceConfigMap.Data["artifactRepository"]
	sourceKey := "minio-gateway." + sourceNamespace + ".svc.cluster.local:9000"
	replaceKey := "minio-gateway." + namespace + ".svc.cluster.local:9000"
	sourceConfigMap.Data["artifactRepository"] = strings.ReplaceAll(data, sourceKey, replaceKey)

	// Namespace is set once in the ObjectMeta literal; the original also re-assigned
	// configMap.Namespace afterwards, which was redundant and has been removed.
	configMap := &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "onepanel",
			Namespace: namespace,
		},
		Data: sourceConfigMap.Data,
	}

	_, err = c.CoreV1().ConfigMaps(namespace).Create(configMap)
	return err
}
// createNamespaceModelClusterRoleBinding creates the models ClusterRoleBinding
// for the given namespace. The manifest template is read from disk and
// $(applicationDefaultNamespace) is substituted with the namespace.
func (c *Client) createNamespaceModelClusterRoleBinding(namespace string) error {
	rendered, err := replaceVariables(
		filepath.Join("manifest", "clusterrolebinding-models.json"),
		map[string]string{"$(applicationDefaultNamespace)": namespace},
	)
	if err != nil {
		return err
	}

	binding := &v1rbac.ClusterRoleBinding{}
	if err := json.Unmarshal([]byte(rendered), binding); err != nil {
		return err
	}

	_, err = c.RbacV1().ClusterRoleBindings().Create(binding)
	return err
}
// createNamespaceTemplates loads every manifest file under db/yaml and creates
// or updates the corresponding workflow/workspace template in the namespace,
// as directed by each manifest's metadata (Kind and Action).
func (c *Client) createNamespaceTemplates(namespace string) error {
	wd, err := os.Getwd()
	if err != nil {
		return err
	}

	workflowDir := filepath.Join(wd, "db", "yaml")
	filepaths := make([]string, 0)
	err = filepath.Walk(workflowDir,
		func(path string, info os.FileInfo, err error) error {
			// Propagate walk errors: when err is non-nil, info is nil and
			// calling info.IsDir() would panic.
			if err != nil {
				return err
			}
			if !info.IsDir() {
				filepaths = append(filepaths, path)
			}
			return nil
		},
	)
	if err != nil {
		return err
	}

	for _, filename := range filepaths {
		manifest, err := data.ManifestFileFromFile(filename)
		if err != nil {
			return err
		}

		switch manifest.Metadata.Kind {
		case "Workflow":
			if manifest.Metadata.Action == "create" {
				err = c.createWorkflowTemplateFromGenericManifest(namespace, manifest)
			} else {
				err = c.updateWorkflowTemplateManifest(namespace, manifest)
			}
		case "Workspace":
			if manifest.Metadata.Action == "create" {
				err = c.createWorkspaceTemplateFromGenericManifest(namespace, manifest)
			} else {
				err = c.updateWorkspaceTemplateManifest(namespace, manifest)
			}
		default:
			return fmt.Errorf("unknown manifest type for file %v", filename)
		}
		if err != nil {
			return err
		}
	}

	return nil
}

View File

@@ -0,0 +1,49 @@
package data
import (
"gopkg.in/yaml.v3"
"io/ioutil"
)
// ManifestFile represents a file that contains information about a workflow or workspace template
type ManifestFile struct {
	// Metadata describes the template: its name, kind, version, and the action to take.
	Metadata ManifestFileMetadata `yaml:"metadata"`
	// Spec holds the raw template spec; it is left untyped so any YAML shape round-trips.
	Spec interface{} `yaml:"spec"`
}
// ManifestFileMetadata represents information about the template we are working with
type ManifestFileMetadata struct {
	// Name is the template's display name.
	Name string
	Kind string // {Workflow, Workspace}
	// Version of the template this manifest describes.
	Version uint64
	Action string // {create,update}
	// Description is optional; nil when absent from the YAML.
	Description *string
	// Labels to attach to the template; may be nil.
	Labels map[string]string
	// Deprecated is optional; nil when absent from the YAML.
	Deprecated *bool
	// Source is optional; nil when absent from the YAML.
	Source *string
}
// SpecString returns the spec of a manifest file marshaled to a YAML string.
func (m *ManifestFile) SpecString() (string, error) {
	data, err := yaml.Marshal(m.Spec)
	if err != nil {
		return "", err
	}
	// err is known to be nil here; return an explicit nil rather than
	// re-returning the checked variable.
	return string(data), nil
}
// ManifestFileFromFile loads a manifest from a yaml file at the given path.
func ManifestFileFromFile(path string) (*ManifestFile, error) {
	contents, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}

	result := &ManifestFile{}
	if err := yaml.Unmarshal(contents, result); err != nil {
		return nil, err
	}

	return result, nil
}

View File

@@ -213,3 +213,17 @@ func DeleteNode(node *yaml.Node, key *YamlIndex) error {
return nil
}
// ReplaceMapValues will replace strings that are keys in the input map with their values
// the result is returned
func ReplaceMapValues(value string, replaceMap map[string]string) string {
	// Flatten the map into old/new pairs as strings.NewReplacer expects.
	// Each entry contributes two elements, so pre-size accordingly.
	// The loop variables are named so they do not shadow the value parameter.
	replacePairs := make([]string, 0, 2*len(replaceMap))
	for oldValue, newValue := range replaceMap {
		replacePairs = append(replacePairs, oldValue, newValue)
	}
	return strings.NewReplacer(replacePairs...).Replace(value)
}

View File

@@ -929,7 +929,14 @@ func (c *Client) CreateWorkflowExecution(namespace string, workflow *WorkflowExe
return nil, fmt.Errorf("workflow Template contained more than 1 workflow execution")
}
wf := &workflows[0]
if wf.Spec.VolumeClaimGC == nil {
	wf.Spec.VolumeClaimGC = &wfv1.VolumeClaimGC{
		Strategy: wfv1.VolumeClaimGCOnCompletion,
	}
}
createdWorkflow, err := c.createWorkflow(namespace, workflowTemplate.ID, workflowTemplate.WorkflowTemplateVersionID, wf, opts, workflow.Labels)
if err != nil {
	log.WithFields(log.Fields{
		"Namespace": namespace,

View File

@@ -55,6 +55,22 @@ func (wt *WorkflowTemplate) GenerateUID(name string) error {
return nil
}
// CreateWorkflowTemplate creates a new workflow template with the given name.
// All fields that can be generated in memory without external requests are filled out, such as the UID.
func CreateWorkflowTemplate(name string) (*WorkflowTemplate, error) {
	uid, err := uid2.GenerateUID(name, 30)
	if err != nil {
		return nil, err
	}

	return &WorkflowTemplate{
		Name: name,
		UID:  uid,
	}, nil
}
// GetManifestBytes returns the manifest as []byte
func (wt *WorkflowTemplate) GetManifestBytes() []byte {
	return []byte(wt.Manifest)

View File

@@ -350,6 +350,7 @@ func (c *Client) addRuntimeFieldsToWorkspaceTemplate(t wfv1.Template, workspace
env.PrependEnvVarToContainer(container, "ONEPANEL_FQDN", config["ONEPANEL_FQDN"])
env.PrependEnvVarToContainer(container, "ONEPANEL_DOMAIN", config["ONEPANEL_DOMAIN"])
env.PrependEnvVarToContainer(container, "ONEPANEL_PROVIDER", config["ONEPANEL_PROVIDER"])
env.PrependEnvVarToContainer(container, "ONEPANEL_SERVING_URL", "kfserving-models-web-app.kfserving-system.svc.cluster.local")
env.PrependEnvVarToContainer(container, "ONEPANEL_RESOURCE_NAMESPACE", "{{workflow.namespace}}")
env.PrependEnvVarToContainer(container, "ONEPANEL_RESOURCE_UID", "{{workflow.parameters.sys-uid}}")
}

View File

@@ -51,6 +51,22 @@ func (wt *WorkspaceTemplate) GenerateUID(name string) error {
return nil
}
// CreateWorkspaceTemplate creates a new workspace template with the given name.
// All fields that can be generated in memory without external requests are filled out, such as the UID.
func CreateWorkspaceTemplate(name string) (*WorkspaceTemplate, error) {
	uid, err := uid2.GenerateUID(name, 30)
	if err != nil {
		return nil, err
	}

	return &WorkspaceTemplate{
		Name: name,
		UID:  uid,
	}, nil
}
// InjectRuntimeParameters will inject all runtime variables into the WorkflowTemplate's manifest.
func (wt *WorkspaceTemplate) InjectRuntimeParameters(config SystemConfig) error {
	if wt.WorkflowTemplate == nil {

Some files were not shown because too many files have changed in this diff Show More