diff --git a/db/go/util.go b/db/go/util.go
index 767a1f6..c778c19 100644
--- a/db/go/util.go
+++ b/db/go/util.go
@@ -3,7 +3,9 @@ package migration
import (
"fmt"
v1 "github.com/onepanelio/core/pkg"
+ "github.com/onepanelio/core/pkg/util/data"
uid2 "github.com/onepanelio/core/pkg/util/uid"
+ "path/filepath"
)
// createWorkspaceTemplate will create the workspace template given by {{templateName}} with the contents
@@ -21,7 +23,13 @@ func createWorkspaceTemplate(filename, templateName, description string) error {
return err
}
- newManifest, err := readDataFile(filename)
+ filename = filepath.Join("db", "yaml", filename)
+ manifestFile, err := data.ManifestFileFromFile(filename)
+ if err != nil {
+ return err
+ }
+
+ newManifest, err := manifestFile.SpecString()
if err != nil {
return err
}
@@ -97,12 +105,19 @@ func updateWorkspaceTemplateManifest(filename, templateName string) error {
}
defer client.DB.Close()
+ filename = filepath.Join("db", "yaml", filename)
+
namespaces, err := client.ListOnepanelEnabledNamespaces()
if err != nil {
return err
}
- newManifest, err := readDataFile(filename)
+ manifest, err := data.ManifestFileFromFile(filename)
+ if err != nil {
+ return err
+ }
+
+ newManifest, err := manifest.SpecString()
if err != nil {
return err
}
@@ -145,7 +160,14 @@ func createWorkflowTemplate(filename, templateName string, labels map[string]str
return err
}
- manifest, err := readDataFile(filename)
+ filename = filepath.Join("db", "yaml", filename)
+
+ manifestFile, err := data.ManifestFileFromFile(filename)
+ if err != nil {
+ return err
+ }
+
+ manifest, err := manifestFile.SpecString()
if err != nil {
return err
}
@@ -190,7 +212,14 @@ func updateWorkflowTemplateManifest(filename, templateName string, labels map[st
return err
}
- newManifest, err := readDataFile(filename)
+ filename = filepath.Join("db", "yaml", filename)
+
+ manifestFile, err := data.ManifestFileFromFile(filename)
+ if err != nil {
+ return err
+ }
+
+ newManifest, err := manifestFile.SpecString()
if err != nil {
return err
}
diff --git a/db/yaml/workflows/hyperparameter-tuning/20201225172926.yaml b/db/yaml/workflows/hyperparameter-tuning/20201225172926.yaml
index f9321f5..905b49d 100644
--- a/db/yaml/workflows/hyperparameter-tuning/20201225172926.yaml
+++ b/db/yaml/workflows/hyperparameter-tuning/20201225172926.yaml
@@ -1,183 +1,194 @@
-# source: https://github.com/onepanelio/templates/blob/master/workflows/nni-hyperparameter-tuning/mnist/
-entrypoint: main
-arguments:
- parameters:
- - name: source
- value: https://github.com/onepanelio/templates
- - name: revision
- value: master
- - name: config
- displayName: Configuration
- required: true
- hint: NNI configuration
- type: textarea.textarea
- value: |-
- authorName: Onepanel, Inc.
- experimentName: MNIST TF v2.x
- trialConcurrency: 1
- maxExecDuration: 1h
- maxTrialNum: 10
- trainingServicePlatform: local
- searchSpacePath: search_space.json
- useAnnotation: false
- tuner:
- # gpuIndices: '0' # uncomment and update to the GPU indices to assign this tuner
- builtinTunerName: TPE # choices: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
- classArgs:
- optimize_mode: maximize # choices: maximize, minimize
- trial:
- command: python main.py --output /mnt/output
- codeDir: .
- # gpuNum: 1 # uncomment and update to number of GPUs
- - name: search-space
- displayName: Search space configuration
- required: true
- type: textarea.textarea
- value: |-
- {
- "dropout_rate": { "_type": "uniform", "_value": [0.5, 0.9] },
- "conv_size": { "_type": "choice", "_value": [2, 3, 5, 7] },
- "hidden_size": { "_type": "choice", "_value": [124, 512, 1024] },
- "batch_size": { "_type": "choice", "_value": [16, 32] },
- "learning_rate": { "_type": "choice", "_value": [0.0001, 0.001, 0.01, 0.1] },
- "epochs": { "_type": "choice", "_value": [10] }
- }
- - displayName: Node pool
- hint: Name of node pool or group to run this workflow task
- type: select.nodepool
- name: sys-node-pool
- value: {{.DefaultNodePoolOption}}
- required: true
+metadata:
+ name: "Hyperparameter Tuning Example"
+ kind: Workflow
+ version: 20201225172926
+ action: create
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/nni-hyperparameter-tuning/mnist/"
+ deprecated: true
+ labels:
+ framework: tensorflow
+ tuner: TPE
+ "created-by": system
+spec:
+ entrypoint: main
+ arguments:
+ parameters:
+ - name: source
+ value: https://github.com/onepanelio/templates
+ - name: revision
+ value: master
+ - name: config
+ displayName: Configuration
+ required: true
+ hint: NNI configuration
+ type: textarea.textarea
+ value: |-
+ authorName: Onepanel, Inc.
+ experimentName: MNIST TF v2.x
+ trialConcurrency: 1
+ maxExecDuration: 1h
+ maxTrialNum: 10
+ trainingServicePlatform: local
+ searchSpacePath: search_space.json
+ useAnnotation: false
+ tuner:
+ # gpuIndices: '0' # uncomment and update to the GPU indices to assign this tuner
+ builtinTunerName: TPE # choices: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
+ classArgs:
+ optimize_mode: maximize # choices: maximize, minimize
+ trial:
+ command: python main.py --output /mnt/output
+ codeDir: .
+ # gpuNum: 1 # uncomment and update to number of GPUs
+ - name: search-space
+ displayName: Search space configuration
+ required: true
+ type: textarea.textarea
+ value: |-
+ {
+ "dropout_rate": { "_type": "uniform", "_value": [0.5, 0.9] },
+ "conv_size": { "_type": "choice", "_value": [2, 3, 5, 7] },
+ "hidden_size": { "_type": "choice", "_value": [124, 512, 1024] },
+ "batch_size": { "_type": "choice", "_value": [16, 32] },
+ "learning_rate": { "_type": "choice", "_value": [0.0001, 0.001, 0.01, 0.1] },
+ "epochs": { "_type": "choice", "_value": [10] }
+ }
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.nodepool
+ name: sys-node-pool
+ value: "{{.DefaultNodePoolOption}}"
+ required: true
-volumeClaimTemplates:
- - metadata:
- name: hyperparamtuning-data
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 20Gi
- - metadata:
- name: hyperparamtuning-output
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 20Gi
+ volumeClaimTemplates:
+ - metadata:
+ name: hyperparamtuning-data
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 20Gi
+ - metadata:
+ name: hyperparamtuning-output
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 20Gi
-templates:
- - name: main
- dag:
- tasks:
- - name: hyperparameter-tuning
- template: hyperparameter-tuning
- - name: workflow-metrics-writer
- template: workflow-metrics-writer
- dependencies: [hyperparameter-tuning]
- arguments:
- # Use sys-metrics artifact output from hyperparameter-tuning Task
- artifacts:
- - name: best-metrics
- from: "{{tasks.hyperparameter-tuning.outputs.artifacts.sys-metrics}}"
- - name: hyperparameter-tuning
- inputs:
- artifacts:
- - name: src
- git:
- repo: '{{workflow.parameters.source}}'
- revision: '{{workflow.parameters.revision}}'
- path: /mnt/data/src
- - name: config
- path: /mnt/data/src/workflows/hyperparameter-tuning/mnist/config.yaml
- raw:
- data: '{{workflow.parameters.config}}'
- - name: search-space
- path: /mnt/data/src/workflows/hyperparameter-tuning/mnist/search_space.json
- raw:
- data: '{{workflow.parameters.search-space}}'
- outputs:
- artifacts:
- - name: output
- path: /mnt/output
- optional: true
- container:
- image: onepanel/dl:0.17.0
- args:
- - --config
- - /mnt/data/src/workflows/hyperparameter-tuning/mnist/config.yaml
- workingDir: /mnt
- volumeMounts:
- - name: hyperparamtuning-data
- mountPath: /mnt/data
- - name: hyperparamtuning-output
- mountPath: /mnt/output
- nodeSelector:
- beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
- sidecars:
- - name: nni-web-ui
- image: 'onepanel/nni-web-ui:0.17.0'
- env:
- - name: ONEPANEL_INTERACTIVE_SIDECAR
- value: 'true'
- ports:
- - containerPort: 9000
- name: nni
- - name: tensorboard
- image: 'tensorflow/tensorflow:2.3.0'
- command:
- - sh
- - '-c'
- env:
- - name: ONEPANEL_INTERACTIVE_SIDECAR
- value: 'true'
+ templates:
+ - name: main
+ dag:
+ tasks:
+ - name: hyperparameter-tuning
+ template: hyperparameter-tuning
+ - name: workflow-metrics-writer
+ template: workflow-metrics-writer
+ dependencies: [hyperparameter-tuning]
+ arguments:
+ # Use sys-metrics artifact output from hyperparameter-tuning Task
+ artifacts:
+ - name: best-metrics
+ from: "{{tasks.hyperparameter-tuning.outputs.artifacts.sys-metrics}}"
+ - name: hyperparameter-tuning
+ inputs:
+ artifacts:
+ - name: src
+ git:
+ repo: '{{workflow.parameters.source}}'
+ revision: '{{workflow.parameters.revision}}'
+ path: /mnt/data/src
+ - name: config
+ path: /mnt/data/src/workflows/hyperparameter-tuning/mnist/config.yaml
+ raw:
+ data: '{{workflow.parameters.config}}'
+ - name: search-space
+ path: /mnt/data/src/workflows/hyperparameter-tuning/mnist/search_space.json
+ raw:
+ data: '{{workflow.parameters.search-space}}'
+ outputs:
+ artifacts:
+ - name: output
+ path: /mnt/output
+ optional: true
+ container:
+ image: onepanel/dl:0.17.0
args:
- # Read logs from /mnt/output/tensorboard - /mnt/output is auto-mounted from volumeMounts
- - tensorboard --logdir /mnt/output/tensorboard
- ports:
- - containerPort: 6006
- name: tensorboard
- - name: workflow-metrics-writer
- inputs:
- artifacts:
- - name: best-metrics
- path: /tmp/sys-metrics.json
- script:
- image: onepanel/python-sdk:v0.16.0
- command: [python, '-u']
- source: |
- import os
- import json
+ - --config
+ - /mnt/data/src/workflows/hyperparameter-tuning/mnist/config.yaml
+ workingDir: /mnt
+ volumeMounts:
+ - name: hyperparamtuning-data
+ mountPath: /mnt/data
+ - name: hyperparamtuning-output
+ mountPath: /mnt/output
+ nodeSelector:
+ beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
+ sidecars:
+ - name: nni-web-ui
+ image: 'onepanel/nni-web-ui:0.17.0'
+ env:
+ - name: ONEPANEL_INTERACTIVE_SIDECAR
+ value: 'true'
+ ports:
+ - containerPort: 9000
+ name: nni
+ - name: tensorboard
+ image: 'tensorflow/tensorflow:2.3.0'
+ command:
+ - sh
+ - '-c'
+ env:
+ - name: ONEPANEL_INTERACTIVE_SIDECAR
+ value: 'true'
+ args:
+ # Read logs from /mnt/output/tensorboard - /mnt/output is auto-mounted from volumeMounts
+ - tensorboard --logdir /mnt/output/tensorboard
+ ports:
+ - containerPort: 6006
+ name: tensorboard
+ - name: workflow-metrics-writer
+ inputs:
+ artifacts:
+ - name: best-metrics
+ path: /tmp/sys-metrics.json
+ script:
+ image: onepanel/python-sdk:v0.16.0
+ command: [python, '-u']
+ source: |
+ import os
+ import json
- import onepanel.core.api
- from onepanel.core.api.models.metric import Metric
- from onepanel.core.api.rest import ApiException
- from onepanel.core.api.models import Parameter
+ import onepanel.core.api
+ from onepanel.core.api.models.metric import Metric
+ from onepanel.core.api.rest import ApiException
+ from onepanel.core.api.models import Parameter
- # Load Task A metrics
- with open('/tmp/sys-metrics.json') as f:
- metrics = json.load(f)
+ # Load Task A metrics
+ with open('/tmp/sys-metrics.json') as f:
+ metrics = json.load(f)
- with open('/var/run/secrets/kubernetes.io/serviceaccount/token') as f:
- token = f.read()
+ with open('/var/run/secrets/kubernetes.io/serviceaccount/token') as f:
+ token = f.read()
- # Configure API authorization
- configuration = onepanel.core.api.Configuration(
- host = os.getenv('ONEPANEL_API_URL'),
- api_key = {
- 'authorization': token
- }
- )
- configuration.api_key_prefix['authorization'] = 'Bearer'
+ # Configure API authorization
+ configuration = onepanel.core.api.Configuration(
+ host = os.getenv('ONEPANEL_API_URL'),
+ api_key = {
+ 'authorization': token
+ }
+ )
+ configuration.api_key_prefix['authorization'] = 'Bearer'
- # Call SDK method to save metrics
- with onepanel.core.api.ApiClient(configuration) as api_client:
- api_instance = onepanel.core.api.WorkflowServiceApi(api_client)
- namespace = '{{workflow.namespace}}'
- uid = '{{workflow.name}}'
- body = onepanel.core.api.AddWorkflowExecutionsMetricsRequest()
- body.metrics = metrics
- try:
- api_response = api_instance.add_workflow_execution_metrics(namespace, uid, body)
- print('Metrics added.')
- except ApiException as e:
- print("Exception when calling WorkflowServiceApi->add_workflow_execution_metrics: %s\n" % e)
\ No newline at end of file
+ # Call SDK method to save metrics
+ with onepanel.core.api.ApiClient(configuration) as api_client:
+ api_instance = onepanel.core.api.WorkflowServiceApi(api_client)
+ namespace = '{{workflow.namespace}}'
+ uid = '{{workflow.name}}'
+ body = onepanel.core.api.AddWorkflowExecutionsMetricsRequest()
+ body.metrics = metrics
+ try:
+ api_response = api_instance.add_workflow_execution_metrics(namespace, uid, body)
+ print('Metrics added.')
+ except ApiException as e:
+ print("Exception when calling WorkflowServiceApi->add_workflow_execution_metrics: %s\n" % e)
\ No newline at end of file
diff --git a/db/yaml/workflows/hyperparameter-tuning/20210118175809.yaml b/db/yaml/workflows/hyperparameter-tuning/20210118175809.yaml
index c3dbaa7..f6e3f62 100644
--- a/db/yaml/workflows/hyperparameter-tuning/20210118175809.yaml
+++ b/db/yaml/workflows/hyperparameter-tuning/20210118175809.yaml
@@ -1,194 +1,205 @@
-# source: https://github.com/onepanelio/templates/blob/master/workflows/nni-hyperparameter-tuning/mnist/
-# Workflow Template example for hyperparameter tuning
-# Documentation: https://docs.onepanel.ai/docs/reference/workflows/hyperparameter-tuning
-#
-# Only change the fields marked with [CHANGE]
-entrypoint: main
-arguments:
- parameters:
+metadata:
+ name: "Hyperparameter Tuning Example"
+ kind: Workflow
+ version: 20210118175809
+ action: update
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/nni-hyperparameter-tuning/mnist/"
+ deprecated: true
+ labels:
+ framework: tensorflow
+ tuner: TPE
+ "created-by": system
+spec:
+ # Workflow Template example for hyperparameter tuning
+ # Documentation: https://docs.onepanel.ai/docs/reference/workflows/hyperparameter-tuning
+ #
+ # Only change the fields marked with [CHANGE]
+ entrypoint: main
+ arguments:
+ parameters:
- # [CHANGE] Path to your training/model architecture code repository
- # Change this value and revision value to your code repository and branch respectively
- - name: source
- value: https://github.com/onepanelio/templates
+ # [CHANGE] Path to your training/model architecture code repository
+ # Change this value and revision value to your code repository and branch respectively
+ - name: source
+ value: https://github.com/onepanelio/templates
- # [CHANGE] Revision is the branch or tag that you want to use
- # You can change this to any tag or branch name in your repository
- - name: revision
- value: v0.18.0
+ # [CHANGE] Revision is the branch or tag that you want to use
+ # You can change this to any tag or branch name in your repository
+ - name: revision
+ value: v0.18.0
- # [CHANGE] Default configuration for the NNI tuner
- # See https://docs.onepanel.ai/docs/reference/workflows/hyperparameter-tuning#understanding-the-configurations
- - name: config
- displayName: Configuration
- required: true
- hint: NNI configuration
- type: textarea.textarea
- value: |-
- authorName: Onepanel, Inc.
- experimentName: MNIST TF v2.x
- trialConcurrency: 1
- maxExecDuration: 1h
- maxTrialNum: 10
- trainingServicePlatform: local
- searchSpacePath: search_space.json
- useAnnotation: false
- tuner:
- # gpuIndices: '0' # uncomment and update to the GPU indices to assign this tuner
- builtinTunerName: TPE # choices: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
- classArgs:
- optimize_mode: maximize # choices: maximize, minimize
- trial:
- command: python main.py --output /mnt/output
- codeDir: .
- # gpuNum: 1 # uncomment and update to number of GPUs
+ # [CHANGE] Default configuration for the NNI tuner
+ # See https://docs.onepanel.ai/docs/reference/workflows/hyperparameter-tuning#understanding-the-configurations
+ - name: config
+ displayName: Configuration
+ required: true
+ hint: NNI configuration
+ type: textarea.textarea
+ value: |-
+ authorName: Onepanel, Inc.
+ experimentName: MNIST TF v2.x
+ trialConcurrency: 1
+ maxExecDuration: 1h
+ maxTrialNum: 10
+ trainingServicePlatform: local
+ searchSpacePath: search_space.json
+ useAnnotation: false
+ tuner:
+ # gpuIndices: '0' # uncomment and update to the GPU indices to assign this tuner
+ builtinTunerName: TPE # choices: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
+ classArgs:
+ optimize_mode: maximize # choices: maximize, minimize
+ trial:
+ command: python main.py --output /mnt/output
+ codeDir: .
+ # gpuNum: 1 # uncomment and update to number of GPUs
- # [CHANGE] Search space configuration
- # Change according to your hyperparameters and ranges
- - name: search-space
- displayName: Search space configuration
- required: true
- type: textarea.textarea
- value: |-
- {
- "dropout_rate": { "_type": "uniform", "_value": [0.5, 0.9] },
- "conv_size": { "_type": "choice", "_value": [2, 3, 5, 7] },
- "hidden_size": { "_type": "choice", "_value": [124, 512, 1024] },
- "batch_size": { "_type": "choice", "_value": [16, 32] },
- "learning_rate": { "_type": "choice", "_value": [0.0001, 0.001, 0.01, 0.1] },
- "epochs": { "_type": "choice", "_value": [10] }
- }
+ # [CHANGE] Search space configuration
+ # Change according to your hyperparameters and ranges
+ - name: search-space
+ displayName: Search space configuration
+ required: true
+ type: textarea.textarea
+ value: |-
+ {
+ "dropout_rate": { "_type": "uniform", "_value": [0.5, 0.9] },
+ "conv_size": { "_type": "choice", "_value": [2, 3, 5, 7] },
+ "hidden_size": { "_type": "choice", "_value": [124, 512, 1024] },
+ "batch_size": { "_type": "choice", "_value": [16, 32] },
+ "learning_rate": { "_type": "choice", "_value": [0.0001, 0.001, 0.01, 0.1] },
+ "epochs": { "_type": "choice", "_value": [10] }
+ }
- # Node pool dropdown (Node group in EKS)
- # You can add more of these if you have additional tasks that can run on different node pools
- - displayName: Node pool
- hint: Name of node pool or group to run this workflow task
- type: select.nodepool
- name: sys-node-pool
- value: {{.DefaultNodePoolOption}}
- required: true
+ # Node pool dropdown (Node group in EKS)
+ # You can add more of these if you have additional tasks that can run on different node pools
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.nodepool
+ name: sys-node-pool
+ value: "{{.DefaultNodePoolOption}}"
+ required: true
-templates:
- - name: main
- dag:
- tasks:
- - name: hyperparameter-tuning
- template: hyperparameter-tuning
- - name: metrics-writer
- template: metrics-writer
- dependencies: [hyperparameter-tuning]
- arguments:
- # Use sys-metrics artifact output from hyperparameter-tuning Task
- # This writes the best metrics to the Workflow
- artifacts:
- - name: sys-metrics
- from: "{{tasks.hyperparameter-tuning.outputs.artifacts.sys-metrics}}"
- - name: hyperparameter-tuning
- inputs:
- artifacts:
- - name: src
- # Clone the above repository into '/mnt/data/src'
- # See https://docs.onepanel.ai/docs/reference/workflows/artifacts#git for private repositories
- git:
- repo: '{{workflow.parameters.source}}'
- revision: '{{workflow.parameters.revision}}'
- path: /mnt/data/src
- # [CHANGE] Path where config.yaml will be generated or already exists
- # Update the path below so that config.yaml is written to the same directory as your main.py file
- # Note that your source code is cloned to /mnt/data/src
- - name: config
- path: /mnt/data/src/workflows/hyperparameter-tuning/mnist/config.yaml
- raw:
- data: '{{workflow.parameters.config}}'
- # [CHANGE] Path where search_space.json will be generated or already exists
- # Update the path below so that search_space.json is written to the same directory as your main.py file
- # Note that your source code is cloned to /mnt/data/src
- - name: search-space
- path: /mnt/data/src/workflows/hyperparameter-tuning/mnist/search_space.json
- raw:
- data: '{{workflow.parameters.search-space}}'
- outputs:
- artifacts:
- - name: output
- path: /mnt/output
- optional: true
- container:
- image: onepanel/dl:0.17.0
- command:
- - sh
- - -c
- args:
- # [CHANGE] Update the config path below to point to config.yaml path as described above
- # Note that you can `pip install` additional tools here if necessary
- - |
- python -u /opt/onepanel/nni/start.py \
- --config /mnt/data/src/workflows/hyperparameter-tuning/mnist/config.yaml
- workingDir: /mnt
- volumeMounts:
- - name: hyperparamtuning-data
- mountPath: /mnt/data
- - name: hyperparamtuning-output
- mountPath: /mnt/output
- nodeSelector:
- {{.NodePoolLabel}}: '{{workflow.parameters.sys-node-pool}}'
- sidecars:
- - name: nni-web-ui
- image: onepanel/nni-web-ui:0.17.0
- env:
- - name: ONEPANEL_INTERACTIVE_SIDECAR
- value: 'true'
- ports:
- - containerPort: 9000
- name: nni
- - name: tensorboard
+ templates:
+ - name: main
+ dag:
+ tasks:
+ - name: hyperparameter-tuning
+ template: hyperparameter-tuning
+ - name: metrics-writer
+ template: metrics-writer
+ dependencies: [hyperparameter-tuning]
+ arguments:
+ # Use sys-metrics artifact output from hyperparameter-tuning Task
+ # This writes the best metrics to the Workflow
+ artifacts:
+ - name: sys-metrics
+ from: "{{tasks.hyperparameter-tuning.outputs.artifacts.sys-metrics}}"
+ - name: hyperparameter-tuning
+ inputs:
+ artifacts:
+ - name: src
+ # Clone the above repository into '/mnt/data/src'
+ # See https://docs.onepanel.ai/docs/reference/workflows/artifacts#git for private repositories
+ git:
+ repo: '{{workflow.parameters.source}}'
+ revision: '{{workflow.parameters.revision}}'
+ path: /mnt/data/src
+ # [CHANGE] Path where config.yaml will be generated or already exists
+ # Update the path below so that config.yaml is written to the same directory as your main.py file
+ # Note that your source code is cloned to /mnt/data/src
+ - name: config
+ path: /mnt/data/src/workflows/hyperparameter-tuning/mnist/config.yaml
+ raw:
+ data: '{{workflow.parameters.config}}'
+ # [CHANGE] Path where search_space.json will be generated or already exists
+ # Update the path below so that search_space.json is written to the same directory as your main.py file
+ # Note that your source code is cloned to /mnt/data/src
+ - name: search-space
+ path: /mnt/data/src/workflows/hyperparameter-tuning/mnist/search_space.json
+ raw:
+ data: '{{workflow.parameters.search-space}}'
+ outputs:
+ artifacts:
+ - name: output
+ path: /mnt/output
+ optional: true
+ container:
image: onepanel/dl:0.17.0
command:
- sh
- - '-c'
- env:
- - name: ONEPANEL_INTERACTIVE_SIDECAR
- value: 'true'
+ - -c
args:
- # Read logs from /mnt/output/tensorboard - /mnt/output is auto-mounted from volumeMounts
- - tensorboard --logdir /mnt/output/tensorboard
- ports:
- - containerPort: 6006
- name: tensorboard
- # Use the metrics-writer tasks to write best metrics to Workflow
- - name: metrics-writer
- inputs:
- artifacts:
- - name: sys-metrics
- path: /tmp/sys-metrics.json
- - git:
- repo: https://github.com/onepanelio/templates.git
- revision: v0.18.0
- name: src
- path: /mnt/src
- container:
- image: onepanel/python-sdk:v0.16.0
- command:
- - python
- - -u
- args:
- - /mnt/src/tasks/metrics-writer/main.py
- - --from_file=/tmp/sys-metrics.json
+ # [CHANGE] Update the config path below to point to config.yaml path as described above
+ # Note that you can `pip install` additional tools here if necessary
+ - |
+ python -u /opt/onepanel/nni/start.py \
+ --config /mnt/data/src/workflows/hyperparameter-tuning/mnist/config.yaml
+ workingDir: /mnt
+ volumeMounts:
+ - name: hyperparamtuning-data
+ mountPath: /mnt/data
+ - name: hyperparamtuning-output
+ mountPath: /mnt/output
+ nodeSelector:
+ "{{.NodePoolLabel}}": '{{workflow.parameters.sys-node-pool}}'
+ sidecars:
+ - name: nni-web-ui
+ image: onepanel/nni-web-ui:0.17.0
+ env:
+ - name: ONEPANEL_INTERACTIVE_SIDECAR
+ value: 'true'
+ ports:
+ - containerPort: 9000
+ name: nni
+ - name: tensorboard
+ image: onepanel/dl:0.17.0
+ command:
+ - sh
+ - '-c'
+ env:
+ - name: ONEPANEL_INTERACTIVE_SIDECAR
+ value: 'true'
+ args:
+ # Read logs from /mnt/output/tensorboard - /mnt/output is auto-mounted from volumeMounts
+ - tensorboard --logdir /mnt/output/tensorboard
+ ports:
+ - containerPort: 6006
+ name: tensorboard
+ # Use the metrics-writer tasks to write best metrics to Workflow
+ - name: metrics-writer
+ inputs:
+ artifacts:
+ - name: sys-metrics
+ path: /tmp/sys-metrics.json
+ - git:
+ repo: https://github.com/onepanelio/templates.git
+ revision: v0.18.0
+ name: src
+ path: /mnt/src
+ container:
+ image: onepanel/python-sdk:v0.16.0
+ command:
+ - python
+ - -u
+ args:
+ - /mnt/src/tasks/metrics-writer/main.py
+ - --from_file=/tmp/sys-metrics.json
-# [CHANGE] Volumes that will mount to /mnt/data (annotated data) and /mnt/output (models, checkpoints, logs)
-# Update this depending on your annotation data, model, checkpoint, logs, etc. sizes
-# Example values: 250Mi, 500Gi, 1Ti
-volumeClaimTemplates:
- - metadata:
- name: hyperparamtuning-data
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 20Gi
- - metadata:
- name: hyperparamtuning-output
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 20Gi
\ No newline at end of file
+ # [CHANGE] Volumes that will mount to /mnt/data (annotated data) and /mnt/output (models, checkpoints, logs)
+ # Update this depending on your annotation data, model, checkpoint, logs, etc. sizes
+ # Example values: 250Mi, 500Gi, 1Ti
+ volumeClaimTemplates:
+ - metadata:
+ name: hyperparamtuning-data
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 20Gi
+ - metadata:
+ name: hyperparamtuning-output
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 20Gi
\ No newline at end of file
diff --git a/db/yaml/workflows/maskrcnn-training/20200812104328.yaml b/db/yaml/workflows/maskrcnn-training/20200812104328.yaml
new file mode 100644
index 0000000..bb05f6e
--- /dev/null
+++ b/db/yaml/workflows/maskrcnn-training/20200812104328.yaml
@@ -0,0 +1,197 @@
+metadata:
+ name: "MaskRCNN Training"
+ kind: Workflow
+ version: 20200812104328
+ action: create
+ labels:
+ "used-by": "cvat"
+ "created-by": "system"
+spec:
+ arguments:
+ parameters:
+ - name: source
+ value: https://github.com/onepanelio/Mask_RCNN.git
+ displayName: Model source code
+ type: hidden
+ visibility: private
+
+ - name: sys-annotation-path
+ value: annotation-dump/sample_dataset
+    hint: Path to annotated data in default object storage (i.e. S3). In CVAT, this parameter will be pre-populated.
+ displayName: Dataset path
+ visibility: private
+
+ - name: sys-output-path
+ value: workflow-data/output/sample_output
+    hint: Path to store output artifacts in default object storage (i.e. S3). In CVAT, this parameter will be pre-populated.
+ displayName: Workflow output path
+ visibility: private
+
+ - name: sys-finetune-checkpoint
+ value: ''
+    hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint to show here. Leave empty if this is the first time you're training this model.
+ displayName: Checkpoint path
+ visibility: public
+
+ - name: sys-num-classes
+ displayName: Number of classes
+    hint: Number of classes (i.e. in CVAT tasks) + 1 for background
+ value: '81'
+ visibility: private
+
+ - name: extras
+ displayName: Hyperparameters
+ visibility: public
+ type: textarea.textarea
+ value: |-
+ stage-1-epochs=1 # Epochs for network heads
+ stage-2-epochs=2 # Epochs for finetune layers
+ stage-3-epochs=3 # Epochs for all layers
+ hint: "Please refer to our documentation for more information on parameters."
+
+ - name: dump-format
+ type: select.select
+ value: cvat_coco
+ displayName: CVAT dump format
+ visibility: public
+ options:
+ - name: 'MS COCO'
+ value: 'cvat_coco'
+ - name: 'TF Detection API'
+ value: 'cvat_tfrecord'
+
+ - name: tf-image
+ visibility: public
+ value: tensorflow/tensorflow:1.13.1-py3
+ type: select.select
+ displayName: Select TensorFlow image
+ hint: Select the GPU image if you are running on a GPU node pool
+ options:
+ - name: 'TensorFlow 1.13.1 CPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-py3'
+ - name: 'TensorFlow 1.13.1 GPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
+
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.select
+ visibility: public
+ name: sys-node-pool
+ value: Standard_D4s_v3
+ required: true
+ options:
+ - name: 'CPU: 2, RAM: 8GB'
+ value: Standard_D2s_v3
+ - name: 'CPU: 4, RAM: 16GB'
+ value: Standard_D4s_v3
+ - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
+ value: Standard_NC6
+
+ entrypoint: main
+ templates:
+ - dag:
+ tasks:
+ - name: train-model
+ template: tensorflow
+ # Uncomment the lines below if you want to send Slack notifications
+ # - arguments:
+ # artifacts:
+ # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
+ # name: metrics
+ # parameters:
+ # - name: status
+ # value: '{{tasks.train-model.status}}'
+ # dependencies:
+ # - train-model
+ # name: notify-in-slack
+ # template: slack-notify-success
+ name: main
+ - container:
+ args:
+ - |
+ apt-get update \
+ && apt-get install -y git wget libglib2.0-0 libsm6 libxext6 libxrender-dev \
+ && pip install -r requirements.txt \
+ && pip install boto3 pyyaml google-cloud-storage \
+ && git clone https://github.com/waleedka/coco \
+ && cd coco/PythonAPI \
+ && python setup.py build_ext install \
+ && rm -rf build \
+ && cd ../../ \
+ && wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5 \
+ && python setup.py install && ls \
+ && python samples/coco/cvat.py train --dataset=/mnt/data/datasets \
+ --model=workflow_maskrcnn \
+ --extras="{{workflow.parameters.extras}}" \
+ --ref_model_path="{{workflow.parameters.sys-finetune-checkpoint}}" \
+ --num_classes="{{workflow.parameters.sys-num-classes}}" \
+ && cd /mnt/src/ \
+ && python prepare_dataset.py /mnt/data/datasets/annotations/instances_default.json
+ command:
+ - sh
+ - -c
+ image: '{{workflow.parameters.tf-image}}'
+ volumeMounts:
+ - mountPath: /mnt/data
+ name: data
+ - mountPath: /mnt/output
+ name: output
+ workingDir: /mnt/src
+ nodeSelector:
+ beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
+ inputs:
+ artifacts:
+ - name: data
+ path: /mnt/data/datasets/
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.sys-annotation-path}}'
+ - git:
+ repo: '{{workflow.parameters.source}}'
+ revision: "no-boto"
+ name: src
+ path: /mnt/src
+ name: tensorflow
+ outputs:
+ artifacts:
+ - name: model
+ optional: true
+ path: /mnt/output
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.sys-output-path}}'
+ # Uncomment the lines below if you want to send Slack notifications
+ #- container:
+ # args:
+ # - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
+ # SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
+ # SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify
+ # command:
+ # - sh
+ # - -c
+ # image: technosophos/slack-notify
+ # inputs:
+ # artifacts:
+ # - name: metrics
+ # optional: true
+ # path: /tmp/metrics.json
+ # parameters:
+ # - name: status
+ # name: slack-notify-success
+ volumeClaimTemplates:
+ - metadata:
+ creationTimestamp: null
+ name: data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
+ - metadata:
+ creationTimestamp: null
+ name: output
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
\ No newline at end of file
diff --git a/db/yaml/workflows/maskrcnn-training/20200824095513.yaml b/db/yaml/workflows/maskrcnn-training/20200824095513.yaml
new file mode 100644
index 0000000..6cef4b6
--- /dev/null
+++ b/db/yaml/workflows/maskrcnn-training/20200824095513.yaml
@@ -0,0 +1,191 @@
+metadata:
+ name: "MaskRCNN Training"
+ kind: Workflow
+ version: 20200824095513
+ action: update
+ labels:
+ "used-by": "cvat"
+ "created-by": "system"
+spec:
+ arguments:
+ parameters:
+ - name: source
+ value: https://github.com/onepanelio/Mask_RCNN.git
+ displayName: Model source code
+ type: hidden
+ visibility: private
+
+ - name: cvat-annotation-path
+ value: annotation-dump/sample_dataset
+ hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
+ displayName: Dataset path
+ visibility: private
+
+ - name: cvat-output-path
+ value: workflow-data/output/sample_output
+ hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
+ displayName: Workflow output path
+ visibility: private
+
+ - name: cvat-finetune-checkpoint
+ value: ''
+      hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint to show here. Leave empty if this is the first time you're training this model.
+ displayName: Checkpoint path
+ visibility: public
+
+ - name: cvat-num-classes
+ displayName: Number of classes
+      hint: Number of classes (i.e in CVAT tasks) + 1 for background
+ value: '81'
+ visibility: private
+
+ - name: hyperparameters
+ displayName: Hyperparameters
+ visibility: public
+ type: textarea.textarea
+ value: |-
+ stage-1-epochs=1 # Epochs for network heads
+ stage-2-epochs=2 # Epochs for finetune layers
+ stage-3-epochs=3 # Epochs for all layers
+ hint: "Please refer to our documentation for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
+
+ - name: dump-format
+ value: cvat_coco
+ displayName: CVAT dump format
+ visibility: public
+
+ - name: tf-image
+ visibility: public
+ value: tensorflow/tensorflow:1.13.1-py3
+ type: select.select
+ displayName: Select TensorFlow image
+ hint: Select the GPU image if you are running on a GPU node pool
+ options:
+ - name: 'TensorFlow 1.13.1 CPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-py3'
+ - name: 'TensorFlow 1.13.1 GPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
+
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.select
+ visibility: public
+ name: sys-node-pool
+ value: Standard_D4s_v3
+ required: true
+ options:
+ - name: 'CPU: 2, RAM: 8GB'
+ value: Standard_D2s_v3
+ - name: 'CPU: 4, RAM: 16GB'
+ value: Standard_D4s_v3
+ - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
+ value: Standard_NC6
+
+ entrypoint: main
+ templates:
+ - dag:
+ tasks:
+ - name: train-model
+ template: tensorflow
+ # Uncomment the lines below if you want to send Slack notifications
+ # - arguments:
+ # artifacts:
+ # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
+ # name: metrics
+ # parameters:
+ # - name: status
+ # value: '{{tasks.train-model.status}}'
+ # dependencies:
+ # - train-model
+ # name: notify-in-slack
+ # template: slack-notify-success
+ name: main
+ - container:
+ args:
+ - |
+ apt-get update \
+ && apt-get install -y git wget libglib2.0-0 libsm6 libxext6 libxrender-dev \
+ && pip install -r requirements.txt \
+ && pip install boto3 pyyaml google-cloud-storage \
+ && git clone https://github.com/waleedka/coco \
+ && cd coco/PythonAPI \
+ && python setup.py build_ext install \
+ && rm -rf build \
+ && cd ../../ \
+ && wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5 \
+ && python setup.py install && ls \
+ && python samples/coco/cvat.py train --dataset=/mnt/data/datasets \
+ --model=workflow_maskrcnn \
+ --extras="{{workflow.parameters.hyperparameters}}" \
+ --ref_model_path="{{workflow.parameters.cvat-finetune-checkpoint}}" \
+ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
+ && cd /mnt/src/ \
+ && python prepare_dataset.py /mnt/data/datasets/annotations/instances_default.json
+ command:
+ - sh
+ - -c
+ image: '{{workflow.parameters.tf-image}}'
+ volumeMounts:
+ - mountPath: /mnt/data
+ name: data
+ - mountPath: /mnt/output
+ name: output
+ workingDir: /mnt/src
+ nodeSelector:
+ beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
+ inputs:
+ artifacts:
+ - name: data
+ path: /mnt/data/datasets/
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
+ - git:
+ repo: '{{workflow.parameters.source}}'
+ revision: "no-boto"
+ name: src
+ path: /mnt/src
+ name: tensorflow
+ outputs:
+ artifacts:
+ - name: model
+ optional: true
+ path: /mnt/output
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
+ # Uncomment the lines below if you want to send Slack notifications
+ #- container:
+ # args:
+ # - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
+ # SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
+ # SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify
+ # command:
+ # - sh
+ # - -c
+ # image: technosophos/slack-notify
+ # inputs:
+ # artifacts:
+ # - name: metrics
+ # optional: true
+ # path: /tmp/metrics.json
+ # parameters:
+ # - name: status
+ # name: slack-notify-success
+ volumeClaimTemplates:
+ - metadata:
+ creationTimestamp: null
+ name: data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
+ - metadata:
+ creationTimestamp: null
+ name: output
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
\ No newline at end of file
diff --git a/db/yaml/workflows/maskrcnn-training/20201115145814.yaml b/db/yaml/workflows/maskrcnn-training/20201115145814.yaml
index ccf2c37..5fcbcb7 100644
--- a/db/yaml/workflows/maskrcnn-training/20201115145814.yaml
+++ b/db/yaml/workflows/maskrcnn-training/20201115145814.yaml
@@ -1,190 +1,199 @@
-entrypoint: main
-arguments:
- parameters:
- - name: source
- value: https://github.com/onepanelio/Mask_RCNN.git
- displayName: Model source code
- type: hidden
- visibility: private
+metadata:
+ name: "MaskRCNN Training"
+ kind: Workflow
+ version: 20201115145814
+ action: update
+ labels:
+ "used-by": "cvat"
+ "created-by": "system"
+spec:
+ entrypoint: main
+ arguments:
+ parameters:
+ - name: source
+ value: https://github.com/onepanelio/Mask_RCNN.git
+ displayName: Model source code
+ type: hidden
+ visibility: private
- - name: cvat-annotation-path
- value: annotation-dump/sample_dataset
- hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
- displayName: Dataset path
- visibility: private
+ - name: cvat-annotation-path
+ value: annotation-dump/sample_dataset
+ hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
+ displayName: Dataset path
+ visibility: private
- - name: cvat-output-path
- value: workflow-data/output/sample_output
- hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
- displayName: Workflow output path
- visibility: private
+ - name: cvat-output-path
+ value: workflow-data/output/sample_output
+ hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
+ displayName: Workflow output path
+ visibility: private
- - name: cvat-finetune-checkpoint
- value: ''
- hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
- displayName: Checkpoint path
- visibility: public
+ - name: cvat-finetune-checkpoint
+ value: ''
+      hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint to show here. Leave empty if this is the first time you're training this model.
+ displayName: Checkpoint path
+ visibility: public
- - name: cvat-num-classes
- displayName: Number of classes
- hint: Number of classes (i.e in CVAT taks) + 1 for background
- value: '81'
- visibility: private
+ - name: cvat-num-classes
+ displayName: Number of classes
+      hint: Number of classes (i.e in CVAT tasks) + 1 for background
+ value: '81'
+ visibility: private
- - name: hyperparameters
- displayName: Hyperparameters
- visibility: public
- type: textarea.textarea
- value: |-
- stage-1-epochs=1 # Epochs for network heads
- stage-2-epochs=2 # Epochs for finetune layers
- stage-3-epochs=3 # Epochs for all layers
- hint: "Please refer to our documentation for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
+ - name: hyperparameters
+ displayName: Hyperparameters
+ visibility: public
+ type: textarea.textarea
+ value: |-
+ stage-1-epochs=1 # Epochs for network heads
+ stage-2-epochs=2 # Epochs for finetune layers
+ stage-3-epochs=3 # Epochs for all layers
+ hint: "Please refer to our documentation for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
- - name: dump-format
- value: cvat_coco
- displayName: CVAT dump format
- visibility: public
+ - name: dump-format
+ value: cvat_coco
+ displayName: CVAT dump format
+ visibility: public
- - name: tf-image
- visibility: public
- value: tensorflow/tensorflow:1.13.1-py3
- type: select.select
- displayName: Select TensorFlow image
- hint: Select the GPU image if you are running on a GPU node pool
- options:
- - name: 'TensorFlow 1.13.1 CPU Image'
- value: 'tensorflow/tensorflow:1.13.1-py3'
- - name: 'TensorFlow 1.13.1 GPU Image'
- value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
+ - name: tf-image
+ visibility: public
+ value: tensorflow/tensorflow:1.13.1-py3
+ type: select.select
+ displayName: Select TensorFlow image
+ hint: Select the GPU image if you are running on a GPU node pool
+ options:
+ - name: 'TensorFlow 1.13.1 CPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-py3'
+ - name: 'TensorFlow 1.13.1 GPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- - displayName: Node pool
- hint: Name of node pool or group to run this workflow task
- type: select.select
- visibility: public
- name: sys-node-pool
- value: Standard_D4s_v3
- required: true
- options:
- - name: 'CPU: 2, RAM: 8GB'
- value: Standard_D2s_v3
- - name: 'CPU: 4, RAM: 16GB'
- value: Standard_D4s_v3
- - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
- value: Standard_NC6
-templates:
- - name: main
- dag:
- tasks:
- - name: train-model
- template: tensorflow
- # Uncomment the lines below if you want to send Slack notifications
- # - arguments:
- # artifacts:
- # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
- # name: metrics
- # parameters:
- # - name: status
- # value: '{{tasks.train-model.status}}'
- # dependencies:
- # - train-model
- # name: notify-in-slack
- # template: slack-notify-success
- - name: tensorflow
- container:
- args:
- - |
- apt-get update \
- && apt-get install -y git wget libglib2.0-0 libsm6 libxext6 libxrender-dev \
- && pip install -r requirements.txt \
- && pip install boto3 pyyaml google-cloud-storage \
- && git clone https://github.com/waleedka/coco \
- && cd coco/PythonAPI \
- && python setup.py build_ext install \
- && rm -rf build \
- && cd ../../ \
- && wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5 \
- && python setup.py install && ls \
- && python samples/coco/cvat.py train --dataset=/mnt/data/datasets \
- --model=workflow_maskrcnn \
- --extras="{{workflow.parameters.hyperparameters}}" \
- --ref_model_path="{{workflow.parameters.cvat-finetune-checkpoint}}" \
- --num_classes="{{workflow.parameters.cvat-num-classes}}" \
- && cd /mnt/src/ \
- && python prepare_dataset.py /mnt/data/datasets/annotations/instances_default.json
- command:
- - sh
- - -c
- image: '{{workflow.parameters.tf-image}}'
- volumeMounts:
- - mountPath: /mnt/data
- name: data
- - mountPath: /mnt/output
- name: output
- workingDir: /mnt/src
- nodeSelector:
- beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
- sidecars:
- - name: tensorboard
- image: tensorflow/tensorflow:2.3.0
- command: [sh, -c]
- tty: true
- args: ["tensorboard --logdir /mnt/output/"]
- ports:
- - containerPort: 6006
- name: tensorboard
- inputs:
- artifacts:
- - name: data
- path: /mnt/data/datasets/
- {{.ArtifactRepositoryType}}:
- key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
- - git:
- repo: '{{workflow.parameters.source}}'
- revision: "no-boto"
- name: src
- path: /mnt/src
- outputs:
- artifacts:
- - name: model
- optional: true
- path: /mnt/output
- {{.ArtifactRepositoryType}}:
- key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
-# Uncomment the lines below if you want to send Slack notifications
-#- container:
-# args:
-# - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
-# SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
-# SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify
-# command:
-# - sh
-# - -c
-# image: technosophos/slack-notify
-# inputs:
-# artifacts:
-# - name: metrics
-# optional: true
-# path: /tmp/metrics.json
-# parameters:
-# - name: status
-# name: slack-notify-success
-volumeClaimTemplates:
- - metadata:
- creationTimestamp: null
- name: data
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
- - metadata:
- creationTimestamp: null
- name: output
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
\ No newline at end of file
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.select
+ visibility: public
+ name: sys-node-pool
+ value: Standard_D4s_v3
+ required: true
+ options:
+ - name: 'CPU: 2, RAM: 8GB'
+ value: Standard_D2s_v3
+ - name: 'CPU: 4, RAM: 16GB'
+ value: Standard_D4s_v3
+ - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
+ value: Standard_NC6
+ templates:
+ - name: main
+ dag:
+ tasks:
+ - name: train-model
+ template: tensorflow
+ # Uncomment the lines below if you want to send Slack notifications
+ # - arguments:
+ # artifacts:
+ # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
+ # name: metrics
+ # parameters:
+ # - name: status
+ # value: '{{tasks.train-model.status}}'
+ # dependencies:
+ # - train-model
+ # name: notify-in-slack
+ # template: slack-notify-success
+ - name: tensorflow
+ container:
+ args:
+ - |
+ apt-get update \
+ && apt-get install -y git wget libglib2.0-0 libsm6 libxext6 libxrender-dev \
+ && pip install -r requirements.txt \
+ && pip install boto3 pyyaml google-cloud-storage \
+ && git clone https://github.com/waleedka/coco \
+ && cd coco/PythonAPI \
+ && python setup.py build_ext install \
+ && rm -rf build \
+ && cd ../../ \
+ && wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5 \
+ && python setup.py install && ls \
+ && python samples/coco/cvat.py train --dataset=/mnt/data/datasets \
+ --model=workflow_maskrcnn \
+ --extras="{{workflow.parameters.hyperparameters}}" \
+ --ref_model_path="{{workflow.parameters.cvat-finetune-checkpoint}}" \
+ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
+ && cd /mnt/src/ \
+ && python prepare_dataset.py /mnt/data/datasets/annotations/instances_default.json
+ command:
+ - sh
+ - -c
+ image: '{{workflow.parameters.tf-image}}'
+ volumeMounts:
+ - mountPath: /mnt/data
+ name: data
+ - mountPath: /mnt/output
+ name: output
+ workingDir: /mnt/src
+ nodeSelector:
+ beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
+ sidecars:
+ - name: tensorboard
+ image: tensorflow/tensorflow:2.3.0
+ command: [sh, -c]
+ tty: true
+ args: ["tensorboard --logdir /mnt/output/"]
+ ports:
+ - containerPort: 6006
+ name: tensorboard
+ inputs:
+ artifacts:
+ - name: data
+ path: /mnt/data/datasets/
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
+ - git:
+ repo: '{{workflow.parameters.source}}'
+ revision: "no-boto"
+ name: src
+ path: /mnt/src
+ outputs:
+ artifacts:
+ - name: model
+ optional: true
+ path: /mnt/output
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
+ # Uncomment the lines below if you want to send Slack notifications
+ #- container:
+ # args:
+ # - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
+ # SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
+ # SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify
+ # command:
+ # - sh
+ # - -c
+ # image: technosophos/slack-notify
+ # inputs:
+ # artifacts:
+ # - name: metrics
+ # optional: true
+ # path: /tmp/metrics.json
+ # parameters:
+ # - name: status
+ # name: slack-notify-success
+ volumeClaimTemplates:
+ - metadata:
+ creationTimestamp: null
+ name: data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
+ - metadata:
+ creationTimestamp: null
+ name: output
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
\ No newline at end of file
diff --git a/db/yaml/workflows/maskrcnn-training/20201208155115.yaml b/db/yaml/workflows/maskrcnn-training/20201208155115.yaml
index 053b637..ae2b840 100644
--- a/db/yaml/workflows/maskrcnn-training/20201208155115.yaml
+++ b/db/yaml/workflows/maskrcnn-training/20201208155115.yaml
@@ -1,192 +1,201 @@
-entrypoint: main
-arguments:
- parameters:
- - name: source
- value: https://github.com/onepanelio/Mask_RCNN.git
- displayName: Model source code
- type: hidden
- visibility: private
+metadata:
+ name: "MaskRCNN Training"
+ kind: Workflow
+ version: 20201208155115
+ action: update
+ labels:
+ "used-by": "cvat"
+ "created-by": "system"
+spec:
+ entrypoint: main
+ arguments:
+ parameters:
+ - name: source
+ value: https://github.com/onepanelio/Mask_RCNN.git
+ displayName: Model source code
+ type: hidden
+ visibility: private
- - name: cvat-annotation-path
- value: annotation-dump/sample_dataset
- hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
- displayName: Dataset path
- visibility: private
+ - name: cvat-annotation-path
+ value: annotation-dump/sample_dataset
+ hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
+ displayName: Dataset path
+ visibility: private
- - name: cvat-output-path
- value: workflow-data/output/sample_output
- hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
- displayName: Workflow output path
- visibility: private
+ - name: cvat-output-path
+ value: workflow-data/output/sample_output
+ hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
+ displayName: Workflow output path
+ visibility: private
- - name: cvat-finetune-checkpoint
- value: ''
- hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
- displayName: Checkpoint path
- visibility: public
+ - name: cvat-finetune-checkpoint
+ value: ''
+      hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint to show here. Leave empty if this is the first time you're training this model.
+ displayName: Checkpoint path
+ visibility: public
- - name: cvat-num-classes
- displayName: Number of classes
- hint: Number of classes (i.e in CVAT taks) + 1 for background
- value: '81'
- visibility: private
+ - name: cvat-num-classes
+ displayName: Number of classes
+      hint: Number of classes (i.e in CVAT tasks) + 1 for background
+ value: '81'
+ visibility: private
- - name: hyperparameters
- displayName: Hyperparameters
- visibility: public
- type: textarea.textarea
- value: |-
- stage-1-epochs=1 # Epochs for network heads
- stage-2-epochs=2 # Epochs for finetune layers
- stage-3-epochs=3 # Epochs for all layers
- hint: "Please refer to our documentation for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
+ - name: hyperparameters
+ displayName: Hyperparameters
+ visibility: public
+ type: textarea.textarea
+ value: |-
+ stage-1-epochs=1 # Epochs for network heads
+ stage-2-epochs=2 # Epochs for finetune layers
+ stage-3-epochs=3 # Epochs for all layers
+ hint: "Please refer to our documentation for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
- - name: dump-format
- value: cvat_coco
- displayName: CVAT dump format
- visibility: public
+ - name: dump-format
+ value: cvat_coco
+ displayName: CVAT dump format
+ visibility: public
- - name: tf-image
- visibility: public
- value: tensorflow/tensorflow:1.13.1-py3
- type: select.select
- displayName: Select TensorFlow image
- hint: Select the GPU image if you are running on a GPU node pool
- options:
- - name: 'TensorFlow 1.13.1 CPU Image'
- value: 'tensorflow/tensorflow:1.13.1-py3'
- - name: 'TensorFlow 1.13.1 GPU Image'
- value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
+ - name: tf-image
+ visibility: public
+ value: tensorflow/tensorflow:1.13.1-py3
+ type: select.select
+ displayName: Select TensorFlow image
+ hint: Select the GPU image if you are running on a GPU node pool
+ options:
+ - name: 'TensorFlow 1.13.1 CPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-py3'
+ - name: 'TensorFlow 1.13.1 GPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- - displayName: Node pool
- hint: Name of node pool or group to run this workflow task
- type: select.select
- visibility: public
- name: sys-node-pool
- value: Standard_D4s_v3
- required: true
- options:
- - name: 'CPU: 2, RAM: 8GB'
- value: Standard_D2s_v3
- - name: 'CPU: 4, RAM: 16GB'
- value: Standard_D4s_v3
- - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
- value: Standard_NC6
-templates:
- - name: main
- dag:
- tasks:
- - name: train-model
- template: tensorflow
- # Uncomment the lines below if you want to send Slack notifications
- # - arguments:
- # artifacts:
- # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
- # name: metrics
- # parameters:
- # - name: status
- # value: '{{tasks.train-model.status}}'
- # dependencies:
- # - train-model
- # name: notify-in-slack
- # template: slack-notify-success
- - name: tensorflow
- container:
- args:
- - |
- apt-get update \
- && apt-get install -y git wget libglib2.0-0 libsm6 libxext6 libxrender-dev \
- && pip install -r requirements.txt \
- && pip install boto3 pyyaml google-cloud-storage \
- && git clone https://github.com/waleedka/coco \
- && cd coco/PythonAPI \
- && python setup.py build_ext install \
- && rm -rf build \
- && cd ../../ \
- && wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5 \
- && python setup.py install && ls \
- && python samples/coco/cvat.py train --dataset=/mnt/data/datasets \
- --model=workflow_maskrcnn \
- --extras="{{workflow.parameters.hyperparameters}}" \
- --ref_model_path="{{workflow.parameters.cvat-finetune-checkpoint}}" \
- --num_classes="{{workflow.parameters.cvat-num-classes}}" \
- && cd /mnt/src/ \
- && python prepare_dataset.py /mnt/data/datasets/annotations/instances_default.json
- command:
- - sh
- - -c
- image: '{{workflow.parameters.tf-image}}'
- volumeMounts:
- - mountPath: /mnt/data
- name: data
- - mountPath: /mnt/output
- name: output
- workingDir: /mnt/src
- nodeSelector:
- beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
- sidecars:
- - name: tensorboard
- image: tensorflow/tensorflow:2.3.0
- command: [sh, -c]
- env:
- - name: ONEPANEL_INTERACTIVE_SIDECAR
- value: 'true'
- args: ["tensorboard --logdir /mnt/output/"]
- ports:
- - containerPort: 6006
- name: tensorboard
- inputs:
- artifacts:
- - name: data
- path: /mnt/data/datasets/
- {{.ArtifactRepositoryType}}:
- key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
- - git:
- repo: '{{workflow.parameters.source}}'
- revision: "no-boto"
- name: src
- path: /mnt/src
- outputs:
- artifacts:
- - name: model
- optional: true
- path: /mnt/output
- {{.ArtifactRepositoryType}}:
- key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
-# Uncomment the lines below if you want to send Slack notifications
-#- container:
-# args:
-# - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
-# SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
-# SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify
-# command:
-# - sh
-# - -c
-# image: technosophos/slack-notify
-# inputs:
-# artifacts:
-# - name: metrics
-# optional: true
-# path: /tmp/metrics.json
-# parameters:
-# - name: status
-# name: slack-notify-success
-volumeClaimTemplates:
- - metadata:
- creationTimestamp: null
- name: data
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
- - metadata:
- creationTimestamp: null
- name: output
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
\ No newline at end of file
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.select
+ visibility: public
+ name: sys-node-pool
+ value: Standard_D4s_v3
+ required: true
+ options:
+ - name: 'CPU: 2, RAM: 8GB'
+ value: Standard_D2s_v3
+ - name: 'CPU: 4, RAM: 16GB'
+ value: Standard_D4s_v3
+ - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
+ value: Standard_NC6
+ templates:
+ - name: main
+ dag:
+ tasks:
+ - name: train-model
+ template: tensorflow
+ # Uncomment the lines below if you want to send Slack notifications
+ # - arguments:
+ # artifacts:
+ # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
+ # name: metrics
+ # parameters:
+ # - name: status
+ # value: '{{tasks.train-model.status}}'
+ # dependencies:
+ # - train-model
+ # name: notify-in-slack
+ # template: slack-notify-success
+ - name: tensorflow
+ container:
+ args:
+ - |
+ apt-get update \
+ && apt-get install -y git wget libglib2.0-0 libsm6 libxext6 libxrender-dev \
+ && pip install -r requirements.txt \
+ && pip install boto3 pyyaml google-cloud-storage \
+ && git clone https://github.com/waleedka/coco \
+ && cd coco/PythonAPI \
+ && python setup.py build_ext install \
+ && rm -rf build \
+ && cd ../../ \
+ && wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5 \
+ && python setup.py install && ls \
+ && python samples/coco/cvat.py train --dataset=/mnt/data/datasets \
+ --model=workflow_maskrcnn \
+ --extras="{{workflow.parameters.hyperparameters}}" \
+ --ref_model_path="{{workflow.parameters.cvat-finetune-checkpoint}}" \
+ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
+ && cd /mnt/src/ \
+ && python prepare_dataset.py /mnt/data/datasets/annotations/instances_default.json
+ command:
+ - sh
+ - -c
+ image: '{{workflow.parameters.tf-image}}'
+ volumeMounts:
+ - mountPath: /mnt/data
+ name: data
+ - mountPath: /mnt/output
+ name: output
+ workingDir: /mnt/src
+ nodeSelector:
+ beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
+ sidecars:
+ - name: tensorboard
+ image: tensorflow/tensorflow:2.3.0
+ command: [sh, -c]
+ env:
+ - name: ONEPANEL_INTERACTIVE_SIDECAR
+ value: 'true'
+ args: ["tensorboard --logdir /mnt/output/"]
+ ports:
+ - containerPort: 6006
+ name: tensorboard
+ inputs:
+ artifacts:
+ - name: data
+ path: /mnt/data/datasets/
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
+ - git:
+ repo: '{{workflow.parameters.source}}'
+ revision: "no-boto"
+ name: src
+ path: /mnt/src
+ outputs:
+ artifacts:
+ - name: model
+ optional: true
+ path: /mnt/output
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
+ # Uncomment the lines below if you want to send Slack notifications
+ #- container:
+ # args:
+ # - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
+ # SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
+ # SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify
+ # command:
+ # - sh
+ # - -c
+ # image: technosophos/slack-notify
+ # inputs:
+ # artifacts:
+ # - name: metrics
+ # optional: true
+ # path: /tmp/metrics.json
+ # parameters:
+ # - name: status
+ # name: slack-notify-success
+ volumeClaimTemplates:
+ - metadata:
+ creationTimestamp: null
+ name: data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
+ - metadata:
+ creationTimestamp: null
+ name: output
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
\ No newline at end of file
diff --git a/db/yaml/workflows/maskrcnn-training/20201221195937.yaml b/db/yaml/workflows/maskrcnn-training/20201221195937.yaml
index a7d6538..c173322 100644
--- a/db/yaml/workflows/maskrcnn-training/20201221195937.yaml
+++ b/db/yaml/workflows/maskrcnn-training/20201221195937.yaml
@@ -1,149 +1,158 @@
-# source: https://github.com/onepanelio/templates/blob/master/workflows/maskrcnn-training/
-arguments:
- parameters:
- - name: cvat-annotation-path
- value: annotation-dump/sample_dataset
- hint: Path to annotated data in default object storage. In CVAT, this parameter will be pre-populated.
- displayName: Dataset path
- visibility: internal
+metadata:
+ name: "MaskRCNN Training"
+ kind: Workflow
+ version: 20201221195937
+ action: update
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/maskrcnn-training/"
+ labels:
+ "used-by": "cvat"
+ "created-by": "system"
+spec:
+ arguments:
+ parameters:
+ - name: cvat-annotation-path
+ value: annotation-dump/sample_dataset
+ hint: Path to annotated data in default object storage. In CVAT, this parameter will be pre-populated.
+ displayName: Dataset path
+ visibility: internal
- - name: cvat-output-path
- value: workflow-data/output/sample_output
- hint: Path to store output artifacts in default object storage. In CVAT, this parameter will be pre-populated.
- displayName: Workflow output path
- visibility: internal
+ - name: cvat-output-path
+ value: workflow-data/output/sample_output
+ hint: Path to store output artifacts in default object storage. In CVAT, this parameter will be pre-populated.
+ displayName: Workflow output path
+ visibility: internal
- - name: cvat-finetune-checkpoint
- value: ''
- hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
- displayName: Checkpoint path
- visibility: public
+ - name: cvat-finetune-checkpoint
+ value: ''
+      hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint to show here. Leave empty if this is the first time you're training this model.
+ displayName: Checkpoint path
+ visibility: public
- - name: cvat-num-classes
- displayName: Number of classes
- hint: Number of classes + 1 for background. In CVAT, this parameter will be pre-populated.
- value: '11'
- visibility: internal
+ - name: cvat-num-classes
+ displayName: Number of classes
+ hint: Number of classes + 1 for background. In CVAT, this parameter will be pre-populated.
+ value: '11'
+ visibility: internal
- - name: hyperparameters
- displayName: Hyperparameters
- visibility: public
- type: textarea.textarea
- value: |-
- stage-1-epochs=1 # Epochs for network heads
- stage-2-epochs=2 # Epochs for finetune layers
- stage-3-epochs=3 # Epochs for all layers
- hint: "See documentation for more information on parameters."
+ - name: hyperparameters
+ displayName: Hyperparameters
+ visibility: public
+ type: textarea.textarea
+ value: |-
+ stage-1-epochs=1 # Epochs for network heads
+ stage-2-epochs=2 # Epochs for finetune layers
+ stage-3-epochs=3 # Epochs for all layers
+ hint: "See documentation for more information on parameters."
- - name: dump-format
- value: cvat_coco
- displayName: CVAT dump format
- visibility: public
+ - name: dump-format
+ value: cvat_coco
+ displayName: CVAT dump format
+ visibility: public
- - name: tf-image
- visibility: public
- value: tensorflow/tensorflow:1.13.1-py3
- type: select.select
- displayName: Select TensorFlow image
- hint: Select the GPU image if you are running on a GPU node pool
- options:
- - name: 'TensorFlow 1.13.1 CPU Image'
- value: 'tensorflow/tensorflow:1.13.1-py3'
- - name: 'TensorFlow 1.13.1 GPU Image'
- value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
+ - name: tf-image
+ visibility: public
+ value: tensorflow/tensorflow:1.13.1-py3
+ type: select.select
+ displayName: Select TensorFlow image
+ hint: Select the GPU image if you are running on a GPU node pool
+ options:
+ - name: 'TensorFlow 1.13.1 CPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-py3'
+ - name: 'TensorFlow 1.13.1 GPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- - displayName: Node pool
- hint: Name of node pool or group to run this workflow task
- type: select.nodepool
- visibility: public
- name: sys-node-pool
- value: {{.DefaultNodePoolOption}}
- required: true
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.nodepool
+ visibility: public
+ name: sys-node-pool
+ value: "{{.DefaultNodePoolOption}}"
+ required: true
-entrypoint: main
-templates:
- - dag:
- tasks:
- - name: train-model
- template: tensorflow
- name: main
- - container:
- args:
- - |
- apt-get update \
- && apt-get install -y git wget libglib2.0-0 libsm6 libxext6 libxrender-dev \
- && pip install -r requirements.txt \
- && pip install boto3 pyyaml google-cloud-storage \
- && git clone https://github.com/waleedka/coco \
- && cd coco/PythonAPI \
- && python setup.py build_ext install \
- && rm -rf build \
- && cd ../../ \
- && wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5 \
- && python setup.py install && ls \
- && python samples/coco/cvat.py train --dataset=/mnt/data/datasets \
- --model=workflow_maskrcnn \
- --extras="{{workflow.parameters.hyperparameters}}" \
- --ref_model_path="{{workflow.parameters.cvat-finetune-checkpoint}}" \
- --num_classes="{{workflow.parameters.cvat-num-classes}}" \
- && cd /mnt/src/ \
- && python prepare_dataset.py /mnt/data/datasets/annotations/instances_default.json
- command:
- - sh
- - -c
- image: '{{workflow.parameters.tf-image}}'
- volumeMounts:
- - mountPath: /mnt/data
- name: data
- - mountPath: /mnt/output
- name: output
- workingDir: /mnt/src
- sidecars:
- - name: tensorboard
- image: tensorflow/tensorflow:2.3.0
- command: [ sh, -c ]
- env:
- - name: ONEPANEL_INTERACTIVE_SIDECAR
- value: 'true'
- args: [ "tensorboard --logdir /mnt/output/" ]
- ports:
- - containerPort: 6006
- name: tensorboard
- nodeSelector:
- beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
- inputs:
- artifacts:
- - name: data
- path: /mnt/data/datasets/
- s3:
- key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
- - git:
- repo: 'https://github.com/onepanelio/Mask_RCNN.git'
- revision: 'no-boto'
- name: src
- path: /mnt/src
- name: tensorflow
- outputs:
- artifacts:
- - name: model
- optional: true
- path: /mnt/output
- s3:
- key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
-volumeClaimTemplates:
- - metadata:
- name: data
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
- - metadata:
- name: output
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
+ entrypoint: main
+ templates:
+ - dag:
+ tasks:
+ - name: train-model
+ template: tensorflow
+ name: main
+ - container:
+ args:
+ - |
+ apt-get update \
+ && apt-get install -y git wget libglib2.0-0 libsm6 libxext6 libxrender-dev \
+ && pip install -r requirements.txt \
+ && pip install boto3 pyyaml google-cloud-storage \
+ && git clone https://github.com/waleedka/coco \
+ && cd coco/PythonAPI \
+ && python setup.py build_ext install \
+ && rm -rf build \
+ && cd ../../ \
+ && wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5 \
+ && python setup.py install && ls \
+ && python samples/coco/cvat.py train --dataset=/mnt/data/datasets \
+ --model=workflow_maskrcnn \
+ --extras="{{workflow.parameters.hyperparameters}}" \
+ --ref_model_path="{{workflow.parameters.cvat-finetune-checkpoint}}" \
+ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
+ && cd /mnt/src/ \
+ && python prepare_dataset.py /mnt/data/datasets/annotations/instances_default.json
+ command:
+ - sh
+ - -c
+ image: '{{workflow.parameters.tf-image}}'
+ volumeMounts:
+ - mountPath: /mnt/data
+ name: data
+ - mountPath: /mnt/output
+ name: output
+ workingDir: /mnt/src
+ sidecars:
+ - name: tensorboard
+ image: tensorflow/tensorflow:2.3.0
+ command: [ sh, -c ]
+ env:
+ - name: ONEPANEL_INTERACTIVE_SIDECAR
+ value: 'true'
+ args: [ "tensorboard --logdir /mnt/output/" ]
+ ports:
+ - containerPort: 6006
+ name: tensorboard
+ nodeSelector:
+ beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
+ inputs:
+ artifacts:
+ - name: data
+ path: /mnt/data/datasets/
+ s3:
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
+ - git:
+ repo: 'https://github.com/onepanelio/Mask_RCNN.git'
+ revision: 'no-boto'
+ name: src
+ path: /mnt/src
+ name: tensorflow
+ outputs:
+ artifacts:
+ - name: model
+ optional: true
+ path: /mnt/output
+ s3:
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
+ - metadata:
+ name: output
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
diff --git a/db/yaml/workflows/maskrcnn-training/20210118175809.yaml b/db/yaml/workflows/maskrcnn-training/20210118175809.yaml
index 0825587..d3c4a7f 100644
--- a/db/yaml/workflows/maskrcnn-training/20210118175809.yaml
+++ b/db/yaml/workflows/maskrcnn-training/20210118175809.yaml
@@ -1,208 +1,217 @@
-# source: https://github.com/onepanelio/templates/blob/master/workflows/maskrcnn-training/
-arguments:
- parameters:
- - name: cvat-annotation-path
- value: 'artifacts/{{workflow.namespace}}/annotations/'
- hint: Path to annotated data (COCO format) in default object storage. In CVAT, this parameter will be pre-populated.
- displayName: Dataset path
- visibility: internal
+metadata:
+ name: "MaskRCNN Training"
+ kind: Workflow
+ version: 20210118175809
+ action: update
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/maskrcnn-training/"
+ labels:
+ "used-by": "cvat"
+ "created-by": "system"
+spec:
+ arguments:
+ parameters:
+ - name: cvat-annotation-path
+ value: 'artifacts/{{workflow.namespace}}/annotations/'
+ hint: Path to annotated data (COCO format) in default object storage. In CVAT, this parameter will be pre-populated.
+ displayName: Dataset path
+ visibility: internal
- - name: val-split
- value: 10
- displayName: Validation split size
- type: input.number
- visibility: public
- hint: Enter validation set size in percentage of full dataset. (0 - 100)
+ - name: val-split
+ value: 10
+ displayName: Validation split size
+ type: input.number
+ visibility: public
+ hint: Enter validation set size in percentage of full dataset. (0 - 100)
- - name: num-augmentation-cycles
- value: 1
- displayName: Number of augmentation cycles
- type: input.number
- visibility: public
- hint: Number of augmentation cycles, zero means no data augmentation
+ - name: num-augmentation-cycles
+ value: 1
+ displayName: Number of augmentation cycles
+ type: input.number
+ visibility: public
+ hint: Number of augmentation cycles, zero means no data augmentation
- - name: preprocessing-parameters
- value: |-
- RandomBrightnessContrast:
- p: 0.2
- GaussianBlur:
- p: 0.3
- GaussNoise:
- p: 0.4
- HorizontalFlip:
- p: 0.5
- VerticalFlip:
- p: 0.3
- displayName: Preprocessing parameters
- visibility: public
- type: textarea.textarea
- hint: 'See documentation for more information on parameters.'
+ - name: preprocessing-parameters
+ value: |-
+ RandomBrightnessContrast:
+ p: 0.2
+ GaussianBlur:
+ p: 0.3
+ GaussNoise:
+ p: 0.4
+ HorizontalFlip:
+ p: 0.5
+ VerticalFlip:
+ p: 0.3
+ displayName: Preprocessing parameters
+ visibility: public
+ type: textarea.textarea
+ hint: 'See documentation for more information on parameters.'
- - name: cvat-num-classes
- displayName: Number of classes
- hint: Number of classes. In CVAT, this parameter will be pre-populated.
- value: '10'
- visibility: internal
+ - name: cvat-num-classes
+ displayName: Number of classes
+ hint: Number of classes. In CVAT, this parameter will be pre-populated.
+ value: '10'
+ visibility: internal
- - name: hyperparameters
- displayName: Hyperparameters
- visibility: public
- type: textarea.textarea
- value: |-
- stage_1_epochs: 1 # Epochs for network heads
- stage_2_epochs: 1 # Epochs for finetune layers
- stage_3_epochs: 1 # Epochs for all layers
- num_steps: 1000 # Num steps per epoch
- hint: 'See documentation for more information on parameters.'
+ - name: hyperparameters
+ displayName: Hyperparameters
+ visibility: public
+ type: textarea.textarea
+ value: |-
+ stage_1_epochs: 1 # Epochs for network heads
+ stage_2_epochs: 1 # Epochs for finetune layers
+ stage_3_epochs: 1 # Epochs for all layers
+ num_steps: 1000 # Num steps per epoch
+ hint: 'See documentation for more information on parameters.'
- - name: dump-format
- value: cvat_coco
- displayName: CVAT dump format
- visibility: private
+ - name: dump-format
+ value: cvat_coco
+ displayName: CVAT dump format
+ visibility: private
- - name: cvat-finetune-checkpoint
- value: ''
- hint: Path to the last fine-tune checkpoint for this model in default object storage. Leave empty if this is the first time you're training this model.
- displayName: Checkpoint path
- visibility: public
+ - name: cvat-finetune-checkpoint
+ value: ''
+ hint: Path to the last fine-tune checkpoint for this model in default object storage. Leave empty if this is the first time you're training this model.
+ displayName: Checkpoint path
+ visibility: public
- - displayName: Node pool
- hint: Name of node pool or group to run this workflow task
- type: select.nodepool
- visibility: public
- name: sys-node-pool
- value: {{.DefaultNodePoolOption}}
- required: true
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.nodepool
+ visibility: public
+ name: sys-node-pool
+ value: "{{.DefaultNodePoolOption}}"
+ required: true
-entrypoint: main
-templates:
- - dag:
- tasks:
- - name: preprocessing
- template: preprocessing
- - name: train-model
- template: tensorflow
- dependencies: [preprocessing]
- arguments:
- artifacts:
- - name: data
- from: "{{tasks.preprocessing.outputs.artifacts.processed-data}}"
- name: main
- - container:
- args:
- - |
- pip install pycocotools scikit-image==0.16.2 && \
- cd /mnt/src/train/workflows/maskrcnn-training && \
- python -u main.py train --dataset=/mnt/data/datasets/train_set/ \
- --model=workflow_maskrcnn \
- --extras="{{workflow.parameters.hyperparameters}}" \
- --ref_model_path="{{workflow.parameters.cvat-finetune-checkpoint}}" \
- --num_classes="{{workflow.parameters.cvat-num-classes}}" \
- --val_dataset=/mnt/data/datasets/eval_set/ \
- --use_validation=True
- command:
- - sh
- - -c
- image: onepanel/dl:v0.20.0
- volumeMounts:
- - mountPath: /mnt/data
- name: processed-data
- - mountPath: /mnt/output
- name: output
- workingDir: /mnt/src
- sidecars:
- - name: tensorboard
+ entrypoint: main
+ templates:
+ - dag:
+ tasks:
+ - name: preprocessing
+ template: preprocessing
+ - name: train-model
+ template: tensorflow
+ dependencies: [preprocessing]
+ arguments:
+ artifacts:
+ - name: data
+ from: "{{tasks.preprocessing.outputs.artifacts.processed-data}}"
+ name: main
+ - container:
+ args:
+ - |
+ pip install pycocotools scikit-image==0.16.2 && \
+ cd /mnt/src/train/workflows/maskrcnn-training && \
+ python -u main.py train --dataset=/mnt/data/datasets/train_set/ \
+ --model=workflow_maskrcnn \
+ --extras="{{workflow.parameters.hyperparameters}}" \
+ --ref_model_path="{{workflow.parameters.cvat-finetune-checkpoint}}" \
+ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
+ --val_dataset=/mnt/data/datasets/eval_set/ \
+ --use_validation=True
+ command:
+ - sh
+ - -c
image: onepanel/dl:v0.20.0
- command: [ sh, -c ]
- env:
- - name: ONEPANEL_INTERACTIVE_SIDECAR
- value: 'true'
- args: [ "tensorboard --logdir /mnt/output/tensorboard" ]
- ports:
- - containerPort: 6006
- name: tensorboard
- nodeSelector:
- {{.NodePoolLabel}}: '{{workflow.parameters.sys-node-pool}}'
- inputs:
- artifacts:
- - name: data
- path: /mnt/data/datasets/
- - name: models
- path: /mnt/data/models/
- optional: true
- s3:
- key: '{{workflow.parameters.cvat-finetune-checkpoint}}'
- - git:
- repo: https://github.com/onepanelio/templates.git
- revision: v0.18.0
- name: src
- path: /mnt/src/train
- name: tensorflow
- outputs:
- artifacts:
- - name: model
- optional: true
- path: /mnt/output
- - container:
- args:
- - |
- pip install pycocotools && \
- cd /mnt/src/preprocessing/workflows/albumentations-preprocessing && \
- python -u main.py \
- --data_aug_params="{{workflow.parameters.preprocessing-parameters}}" \
- --val_split={{workflow.parameters.val-split}} \
- --aug_steps={{workflow.parameters.num-augmentation-cycles}}
- command:
- - sh
- - -c
- image: onepanel/dl:v0.20.0
- volumeMounts:
- - mountPath: /mnt/data
- name: data
- - mountPath: /mnt/output
- name: processed-data
- workingDir: /mnt/src
- nodeSelector:
- {{.NodePoolLabel}}: '{{workflow.parameters.sys-node-pool}}'
- inputs:
- artifacts:
- - name: data
- path: /mnt/data/datasets/
- s3:
- key: '{{workflow.parameters.cvat-annotation-path}}'
- - git:
- repo: https://github.com/onepanelio/templates.git
- revision: v0.18.0
- name: src
- path: /mnt/src/preprocessing
- name: preprocessing
- outputs:
- artifacts:
- - name: processed-data
- optional: true
- path: /mnt/output
-volumeClaimTemplates:
- - metadata:
- name: data
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
- - metadata:
- name: processed-data
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
- - metadata:
- name: output
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
+ volumeMounts:
+ - mountPath: /mnt/data
+ name: processed-data
+ - mountPath: /mnt/output
+ name: output
+ workingDir: /mnt/src
+ sidecars:
+ - name: tensorboard
+ image: onepanel/dl:v0.20.0
+ command: [ sh, -c ]
+ env:
+ - name: ONEPANEL_INTERACTIVE_SIDECAR
+ value: 'true'
+ args: [ "tensorboard --logdir /mnt/output/tensorboard" ]
+ ports:
+ - containerPort: 6006
+ name: tensorboard
+ nodeSelector:
+ "{{.NodePoolLabel}}": '{{workflow.parameters.sys-node-pool}}'
+ inputs:
+ artifacts:
+ - name: data
+ path: /mnt/data/datasets/
+ - name: models
+ path: /mnt/data/models/
+ optional: true
+ s3:
+ key: '{{workflow.parameters.cvat-finetune-checkpoint}}'
+ - git:
+ repo: https://github.com/onepanelio/templates.git
+ revision: v0.18.0
+ name: src
+ path: /mnt/src/train
+ name: tensorflow
+ outputs:
+ artifacts:
+ - name: model
+ optional: true
+ path: /mnt/output
+ - container:
+ args:
+ - |
+ pip install pycocotools && \
+ cd /mnt/src/preprocessing/workflows/albumentations-preprocessing && \
+ python -u main.py \
+ --data_aug_params="{{workflow.parameters.preprocessing-parameters}}" \
+ --val_split={{workflow.parameters.val-split}} \
+ --aug_steps={{workflow.parameters.num-augmentation-cycles}}
+ command:
+ - sh
+ - -c
+ image: onepanel/dl:v0.20.0
+ volumeMounts:
+ - mountPath: /mnt/data
+ name: data
+ - mountPath: /mnt/output
+ name: processed-data
+ workingDir: /mnt/src
+ nodeSelector:
+ "{{.NodePoolLabel}}": '{{workflow.parameters.sys-node-pool}}'
+ inputs:
+ artifacts:
+ - name: data
+ path: /mnt/data/datasets/
+ s3:
+ key: '{{workflow.parameters.cvat-annotation-path}}'
+ - git:
+ repo: https://github.com/onepanelio/templates.git
+ revision: v0.18.0
+ name: src
+ path: /mnt/src/preprocessing
+ name: preprocessing
+ outputs:
+ artifacts:
+ - name: processed-data
+ optional: true
+ path: /mnt/output
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
+ - metadata:
+ name: processed-data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
+ - metadata:
+ name: output
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
diff --git a/db/yaml/workflows/pytorch-mnist-training/20200605090509.yaml b/db/yaml/workflows/pytorch-mnist-training/20200605090509.yaml
index 8172a5e..05a3751 100644
--- a/db/yaml/workflows/pytorch-mnist-training/20200605090509.yaml
+++ b/db/yaml/workflows/pytorch-mnist-training/20200605090509.yaml
@@ -1,75 +1,84 @@
-entrypoint: main
-arguments:
- parameters:
- - name: source
- value: https://github.com/onepanelio/pytorch-examples.git
- - name: command
- value: "python mnist/main.py --epochs=1"
-volumeClaimTemplates:
- - metadata:
- name: data
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 2Gi
- - metadata:
- name: output
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 2Gi
-templates:
- - name: main
- dag:
- tasks:
- - name: train-model
- template: pytorch
- # Uncomment section below to send metrics to Slack
- # - name: notify-in-slack
- # dependencies: [train-model]
- # template: slack-notify-success
- # arguments:
- # parameters:
- # - name: status
- # value: "{{tasks.train-model.status}}"
- # artifacts:
- # - name: metrics
- # from: "{{tasks.train-model.outputs.artifacts.sys-metrics}}"
- - name: pytorch
- inputs:
- artifacts:
- - name: src
- path: /mnt/src
- git:
- repo: "{{workflow.parameters.source}}"
- outputs:
- artifacts:
- - name: model
- path: /mnt/output
- optional: true
- archive:
- none: {}
- container:
- image: pytorch/pytorch:latest
- command: [sh,-c]
- args: ["{{workflow.parameters.command}}"]
- workingDir: /mnt/src
- volumeMounts:
- - name: data
- mountPath: /mnt/data
- - name: output
- mountPath: /mnt/output
- - name: slack-notify-success
- container:
- image: technosophos/slack-notify
- command: [sh,-c]
- args: ['SLACK_USERNAME=Worker SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify']
- inputs:
- parameters:
- - name: status
- artifacts:
- - name: metrics
- path: /tmp/metrics.json
- optional: true
\ No newline at end of file
+metadata:
+ name: "PyTorch Training"
+ kind: Workflow
+ version: 20200605090509
+ action: create
+ labels:
+ "created-by": "system"
+ framework: pytorch
+spec:
+ entrypoint: main
+ arguments:
+ parameters:
+ - name: source
+ value: https://github.com/onepanelio/pytorch-examples.git
+ - name: command
+ value: "python mnist/main.py --epochs=1"
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 2Gi
+ - metadata:
+ name: output
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 2Gi
+ templates:
+ - name: main
+ dag:
+ tasks:
+ - name: train-model
+ template: pytorch
+ # Uncomment section below to send metrics to Slack
+ # - name: notify-in-slack
+ # dependencies: [train-model]
+ # template: slack-notify-success
+ # arguments:
+ # parameters:
+ # - name: status
+ # value: "{{tasks.train-model.status}}"
+ # artifacts:
+ # - name: metrics
+ # from: "{{tasks.train-model.outputs.artifacts.sys-metrics}}"
+ - name: pytorch
+ inputs:
+ artifacts:
+ - name: src
+ path: /mnt/src
+ git:
+ repo: "{{workflow.parameters.source}}"
+ outputs:
+ artifacts:
+ - name: model
+ path: /mnt/output
+ optional: true
+ archive:
+ none: {}
+ container:
+ image: pytorch/pytorch:latest
+ command: [sh,-c]
+ args: ["{{workflow.parameters.command}}"]
+ workingDir: /mnt/src
+ volumeMounts:
+ - name: data
+ mountPath: /mnt/data
+ - name: output
+ mountPath: /mnt/output
+ - name: slack-notify-success
+ container:
+ image: technosophos/slack-notify
+ command: [sh,-c]
+        args: ['SLACK_USERNAME=Worker SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE=$(cat /tmp/metrics.json) ./slack-notify']
+ inputs:
+ parameters:
+ - name: status
+ artifacts:
+ - name: metrics
+ path: /tmp/metrics.json
+ optional: true
\ No newline at end of file
diff --git a/db/yaml/workflows/pytorch-mnist-training/20201221194344.yaml b/db/yaml/workflows/pytorch-mnist-training/20201221194344.yaml
index aa43901..41d8b60 100644
--- a/db/yaml/workflows/pytorch-mnist-training/20201221194344.yaml
+++ b/db/yaml/workflows/pytorch-mnist-training/20201221194344.yaml
@@ -1,207 +1,216 @@
-# source: https://github.com/onepanelio/templates/blob/master/workflows/pytorch-mnist-training/
-arguments:
- parameters:
- - name: epochs
- value: '10'
- - displayName: Node pool
- hint: Name of node pool or group to run this workflow task
- type: select.nodepool
- name: sys-node-pool
- value: {{.DefaultNodePoolOption}}
- visibility: public
- required: true
-entrypoint: main
-templates:
- - name: main
- dag:
- tasks:
- - name: train-model
- template: train-model
- - name: train-model
- # Indicates that we want to push files in /mnt/output to object storage
- outputs:
- artifacts:
- - name: output
- path: /mnt/output
- optional: true
- script:
- image: onepanel/dl:0.17.0
- command:
- - python
- - '-u'
- source: |
- import json
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- import torch.optim as optim
- from torchvision import datasets, transforms
- from torch.optim.lr_scheduler import StepLR
- from torch.utils.tensorboard import SummaryWriter
-
-
- class Net(nn.Module):
- def __init__(self):
- super(Net, self).__init__()
- self.conv1 = nn.Conv2d(1, 32, 3, 1)
- self.conv2 = nn.Conv2d(32, 64, 3, 1)
- self.dropout1 = nn.Dropout(0.25)
- self.dropout2 = nn.Dropout(0.5)
- self.fc1 = nn.Linear(9216, 128)
- self.fc2 = nn.Linear(128, 10)
-
- def forward(self, x):
- x = self.conv1(x)
- x = F.relu(x)
- x = self.conv2(x)
- x = F.relu(x)
- x = F.max_pool2d(x, 2)
- x = self.dropout1(x)
- x = torch.flatten(x, 1)
- x = self.fc1(x)
- x = F.relu(x)
- x = self.dropout2(x)
- x = self.fc2(x)
- output = F.log_softmax(x, dim=1)
- return output
-
-
- def train(model, device, train_loader, optimizer, epoch, batch_size, writer):
- model.train()
- for batch_idx, (data, target) in enumerate(train_loader):
- data, target = data.to(device), target.to(device)
- optimizer.zero_grad()
- output = model(data)
- loss = F.nll_loss(output, target)
- loss.backward()
- optimizer.step()
- if batch_idx % 10 == 0:
- print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
- epoch, batch_idx * len(data), len(train_loader.dataset),
- 100. * batch_idx / len(train_loader), loss.item()))
-
- writer.add_scalar('training loss', loss.item(), epoch)
-
-
- def test(model, device, test_loader, epoch, writer):
- model.eval()
- test_loss = 0
- correct = 0
- with torch.no_grad():
- for data, target in test_loader:
- data, target = data.to(device), target.to(device)
- output = model(data)
- test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
- pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
- correct += pred.eq(target.view_as(pred)).sum().item()
-
- loss = test_loss / len(test_loader.dataset)
- accuracy = correct / len(test_loader.dataset)
-
- print('\nTest set: Average loss: {}, Accuracy: {}\n'.format(
- loss, accuracy))
-
- # Store metrics for this task
- metrics = [
- {'name': 'accuracy', 'value': accuracy},
- {'name': 'loss', 'value': loss}
- ]
- with open('/tmp/sys-metrics.json', 'w') as f:
- json.dump(metrics, f)
-
-
- def main(params):
- writer = SummaryWriter(log_dir='/mnt/output/tensorboard')
-
- use_cuda = torch.cuda.is_available()
-
- torch.manual_seed(params['seed'])
-
- device = torch.device('cuda' if use_cuda else 'cpu')
-
- train_kwargs = {'batch_size': params['batch_size']}
- test_kwargs = {'batch_size': params['test_batch_size']}
- if use_cuda:
- cuda_kwargs = {'num_workers': 1,
- 'pin_memory': True,
- 'shuffle': True}
- train_kwargs.update(cuda_kwargs)
- test_kwargs.update(cuda_kwargs)
-
- transform=transforms.Compose([
- transforms.ToTensor(),
- transforms.Normalize((0.1307,), (0.3081,))
- ])
- dataset1 = datasets.MNIST('/mnt/data', train=True, download=True,
- transform=transform)
- dataset2 = datasets.MNIST('/mnt/data', train=False,
- transform=transform)
- train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
- test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
-
- model = Net().to(device)
- optimizer = optim.Adadelta(model.parameters(), lr=params['lr'])
-
- scheduler = StepLR(optimizer, step_size=1, gamma=params['gamma'])
- for epoch in range(1, params['epochs'] + 1):
- train(model, device, train_loader, optimizer, epoch, params['batch_size'], writer)
- test(model, device, test_loader, epoch, writer)
- scheduler.step()
-
- # Save model
- torch.save(model.state_dict(), '/mnt/output/model.pt')
-
- writer.close()
-
-
- if __name__ == '__main__':
- params = {
- 'seed': 1,
- 'batch_size': 64,
- 'test_batch_size': 1000,
- 'epochs': {{workflow.parameters.epochs}},
- 'lr': 0.001,
- 'gamma': 0.7,
- }
- main(params)
- volumeMounts:
- # TensorBoard sidecar will automatically mount these volumes
- # The `data` volume is mounted for saving datasets
- # The `output` volume is mounted to save model output and share TensorBoard logs
- - name: data
- mountPath: /mnt/data
- - name: output
- mountPath: /mnt/output
- nodeSelector:
- beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
- sidecars:
- - name: tensorboard
- image: tensorflow/tensorflow:2.3.0
+metadata:
+ name: "PyTorch Training"
+ kind: Workflow
+ version: 20201221194344
+ action: update
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/pytorch-mnist-training/"
+ labels:
+ "created-by": "system"
+ framework: pytorch
+spec:
+ arguments:
+ parameters:
+ - name: epochs
+ value: '10'
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.nodepool
+ name: sys-node-pool
+ value: "{{.DefaultNodePoolOption}}"
+ visibility: public
+ required: true
+ entrypoint: main
+ templates:
+ - name: main
+ dag:
+ tasks:
+ - name: train-model
+ template: train-model
+ - name: train-model
+ # Indicates that we want to push files in /mnt/output to object storage
+ outputs:
+ artifacts:
+ - name: output
+ path: /mnt/output
+ optional: true
+ script:
+ image: onepanel/dl:0.17.0
command:
- - sh
- - '-c'
- env:
- - name: ONEPANEL_INTERACTIVE_SIDECAR
- value: 'true'
- args:
- # Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
- - tensorboard --logdir /mnt/output/tensorboard
- ports:
- - containerPort: 6006
- name: tensorboard
-volumeClaimTemplates:
- # Provision volumes for storing data and output
- - metadata:
- name: data
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 2Gi
- - metadata:
- name: output
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 2Gi
+ - python
+ - '-u'
+ source: |
+ import json
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import torch.optim as optim
+ from torchvision import datasets, transforms
+ from torch.optim.lr_scheduler import StepLR
+ from torch.utils.tensorboard import SummaryWriter
+
+
+ class Net(nn.Module):
+ def __init__(self):
+ super(Net, self).__init__()
+ self.conv1 = nn.Conv2d(1, 32, 3, 1)
+ self.conv2 = nn.Conv2d(32, 64, 3, 1)
+ self.dropout1 = nn.Dropout(0.25)
+ self.dropout2 = nn.Dropout(0.5)
+ self.fc1 = nn.Linear(9216, 128)
+ self.fc2 = nn.Linear(128, 10)
+
+ def forward(self, x):
+ x = self.conv1(x)
+ x = F.relu(x)
+ x = self.conv2(x)
+ x = F.relu(x)
+ x = F.max_pool2d(x, 2)
+ x = self.dropout1(x)
+ x = torch.flatten(x, 1)
+ x = self.fc1(x)
+ x = F.relu(x)
+ x = self.dropout2(x)
+ x = self.fc2(x)
+ output = F.log_softmax(x, dim=1)
+ return output
+
+
+ def train(model, device, train_loader, optimizer, epoch, batch_size, writer):
+ model.train()
+ for batch_idx, (data, target) in enumerate(train_loader):
+ data, target = data.to(device), target.to(device)
+ optimizer.zero_grad()
+ output = model(data)
+ loss = F.nll_loss(output, target)
+ loss.backward()
+ optimizer.step()
+ if batch_idx % 10 == 0:
+ print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
+ epoch, batch_idx * len(data), len(train_loader.dataset),
+ 100. * batch_idx / len(train_loader), loss.item()))
+
+ writer.add_scalar('training loss', loss.item(), epoch)
+
+
+ def test(model, device, test_loader, epoch, writer):
+ model.eval()
+ test_loss = 0
+ correct = 0
+ with torch.no_grad():
+ for data, target in test_loader:
+ data, target = data.to(device), target.to(device)
+ output = model(data)
+ test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
+ pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
+ correct += pred.eq(target.view_as(pred)).sum().item()
+
+ loss = test_loss / len(test_loader.dataset)
+ accuracy = correct / len(test_loader.dataset)
+
+ print('\nTest set: Average loss: {}, Accuracy: {}\n'.format(
+ loss, accuracy))
+
+ # Store metrics for this task
+ metrics = [
+ {'name': 'accuracy', 'value': accuracy},
+ {'name': 'loss', 'value': loss}
+ ]
+ with open('/tmp/sys-metrics.json', 'w') as f:
+ json.dump(metrics, f)
+
+
+ def main(params):
+ writer = SummaryWriter(log_dir='/mnt/output/tensorboard')
+
+ use_cuda = torch.cuda.is_available()
+
+ torch.manual_seed(params['seed'])
+
+ device = torch.device('cuda' if use_cuda else 'cpu')
+
+ train_kwargs = {'batch_size': params['batch_size']}
+ test_kwargs = {'batch_size': params['test_batch_size']}
+ if use_cuda:
+ cuda_kwargs = {'num_workers': 1,
+ 'pin_memory': True,
+ 'shuffle': True}
+ train_kwargs.update(cuda_kwargs)
+ test_kwargs.update(cuda_kwargs)
+
+ transform=transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize((0.1307,), (0.3081,))
+ ])
+ dataset1 = datasets.MNIST('/mnt/data', train=True, download=True,
+ transform=transform)
+ dataset2 = datasets.MNIST('/mnt/data', train=False,
+ transform=transform)
+ train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
+ test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
+
+ model = Net().to(device)
+ optimizer = optim.Adadelta(model.parameters(), lr=params['lr'])
+
+ scheduler = StepLR(optimizer, step_size=1, gamma=params['gamma'])
+ for epoch in range(1, params['epochs'] + 1):
+ train(model, device, train_loader, optimizer, epoch, params['batch_size'], writer)
+ test(model, device, test_loader, epoch, writer)
+ scheduler.step()
+
+ # Save model
+ torch.save(model.state_dict(), '/mnt/output/model.pt')
+
+ writer.close()
+
+
+ if __name__ == '__main__':
+ params = {
+ 'seed': 1,
+ 'batch_size': 64,
+ 'test_batch_size': 1000,
+ 'epochs': {{workflow.parameters.epochs}},
+ 'lr': 0.001,
+ 'gamma': 0.7,
+ }
+ main(params)
+ volumeMounts:
+ # TensorBoard sidecar will automatically mount these volumes
+ # The `data` volume is mounted for saving datasets
+ # The `output` volume is mounted to save model output and share TensorBoard logs
+ - name: data
+ mountPath: /mnt/data
+ - name: output
+ mountPath: /mnt/output
+ nodeSelector:
+ beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
+ sidecars:
+ - name: tensorboard
+ image: tensorflow/tensorflow:2.3.0
+ command:
+ - sh
+ - '-c'
+ env:
+ - name: ONEPANEL_INTERACTIVE_SIDECAR
+ value: 'true'
+ args:
+ # Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
+ - tensorboard --logdir /mnt/output/tensorboard
+ ports:
+ - containerPort: 6006
+ name: tensorboard
+ volumeClaimTemplates:
+ # Provision volumes for storing data and output
+ - metadata:
+ name: data
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 2Gi
+ - metadata:
+ name: output
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 2Gi
diff --git a/db/yaml/workflows/pytorch-mnist-training/20210118175809.yaml b/db/yaml/workflows/pytorch-mnist-training/20210118175809.yaml
index b5f2b99..73e8a2e 100644
--- a/db/yaml/workflows/pytorch-mnist-training/20210118175809.yaml
+++ b/db/yaml/workflows/pytorch-mnist-training/20210118175809.yaml
@@ -1,207 +1,216 @@
-# source: https://github.com/onepanelio/templates/blob/master/workflows/pytorch-mnist-training/
-arguments:
- parameters:
- - name: epochs
- value: '10'
- - displayName: Node pool
- hint: Name of node pool or group to run this workflow task
- type: select.nodepool
- name: sys-node-pool
- value: {{.DefaultNodePoolOption}}
- visibility: public
- required: true
-entrypoint: main
-templates:
- - name: main
- dag:
- tasks:
- - name: train-model
- template: train-model
- - name: train-model
- # Indicates that we want to push files in /mnt/output to object storage
- outputs:
- artifacts:
- - name: output
- path: /mnt/output
- optional: true
- script:
- image: onepanel/dl:0.17.0
- command:
- - python
- - '-u'
- source: |
- import json
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- import torch.optim as optim
- from torchvision import datasets, transforms
- from torch.optim.lr_scheduler import StepLR
- from torch.utils.tensorboard import SummaryWriter
-
-
- class Net(nn.Module):
- def __init__(self):
- super(Net, self).__init__()
- self.conv1 = nn.Conv2d(1, 32, 3, 1)
- self.conv2 = nn.Conv2d(32, 64, 3, 1)
- self.dropout1 = nn.Dropout(0.25)
- self.dropout2 = nn.Dropout(0.5)
- self.fc1 = nn.Linear(9216, 128)
- self.fc2 = nn.Linear(128, 10)
-
- def forward(self, x):
- x = self.conv1(x)
- x = F.relu(x)
- x = self.conv2(x)
- x = F.relu(x)
- x = F.max_pool2d(x, 2)
- x = self.dropout1(x)
- x = torch.flatten(x, 1)
- x = self.fc1(x)
- x = F.relu(x)
- x = self.dropout2(x)
- x = self.fc2(x)
- output = F.log_softmax(x, dim=1)
- return output
-
-
- def train(model, device, train_loader, optimizer, epoch, batch_size, writer):
- model.train()
- for batch_idx, (data, target) in enumerate(train_loader):
- data, target = data.to(device), target.to(device)
- optimizer.zero_grad()
- output = model(data)
- loss = F.nll_loss(output, target)
- loss.backward()
- optimizer.step()
- if batch_idx % 10 == 0:
- print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
- epoch, batch_idx * len(data), len(train_loader.dataset),
- 100. * batch_idx / len(train_loader), loss.item()))
-
- writer.add_scalar('training loss', loss.item(), epoch)
-
-
- def test(model, device, test_loader, epoch, writer):
- model.eval()
- test_loss = 0
- correct = 0
- with torch.no_grad():
- for data, target in test_loader:
- data, target = data.to(device), target.to(device)
- output = model(data)
- test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
- pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
- correct += pred.eq(target.view_as(pred)).sum().item()
-
- loss = test_loss / len(test_loader.dataset)
- accuracy = correct / len(test_loader.dataset)
-
- print('\nTest set: Average loss: {}, Accuracy: {}\n'.format(
- loss, accuracy))
-
- # Store metrics for this task
- metrics = [
- {'name': 'accuracy', 'value': accuracy},
- {'name': 'loss', 'value': loss}
- ]
- with open('/tmp/sys-metrics.json', 'w') as f:
- json.dump(metrics, f)
-
-
- def main(params):
- writer = SummaryWriter(log_dir='/mnt/output/tensorboard')
-
- use_cuda = torch.cuda.is_available()
-
- torch.manual_seed(params['seed'])
-
- device = torch.device('cuda' if use_cuda else 'cpu')
-
- train_kwargs = {'batch_size': params['batch_size']}
- test_kwargs = {'batch_size': params['test_batch_size']}
- if use_cuda:
- cuda_kwargs = {'num_workers': 1,
- 'pin_memory': True,
- 'shuffle': True}
- train_kwargs.update(cuda_kwargs)
- test_kwargs.update(cuda_kwargs)
-
- transform=transforms.Compose([
- transforms.ToTensor(),
- transforms.Normalize((0.1307,), (0.3081,))
- ])
- dataset1 = datasets.MNIST('/mnt/data', train=True, download=True,
- transform=transform)
- dataset2 = datasets.MNIST('/mnt/data', train=False,
- transform=transform)
- train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
- test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
-
- model = Net().to(device)
- optimizer = optim.Adadelta(model.parameters(), lr=params['lr'])
-
- scheduler = StepLR(optimizer, step_size=1, gamma=params['gamma'])
- for epoch in range(1, params['epochs'] + 1):
- train(model, device, train_loader, optimizer, epoch, params['batch_size'], writer)
- test(model, device, test_loader, epoch, writer)
- scheduler.step()
-
- # Save model
- torch.save(model.state_dict(), '/mnt/output/model.pt')
-
- writer.close()
-
-
- if __name__ == '__main__':
- params = {
- 'seed': 1,
- 'batch_size': 64,
- 'test_batch_size': 1000,
- 'epochs': {{workflow.parameters.epochs}},
- 'lr': 0.001,
- 'gamma': 0.7,
- }
- main(params)
- volumeMounts:
- # TensorBoard sidecar will automatically mount these volumes
- # The `data` volume is mounted for saving datasets
- # The `output` volume is mounted to save model output and share TensorBoard logs
- - name: data
- mountPath: /mnt/data
- - name: output
- mountPath: /mnt/output
- nodeSelector:
- {{.NodePoolLabel}}: '{{workflow.parameters.sys-node-pool}}'
- sidecars:
- - name: tensorboard
+metadata:
+ name: "PyTorch Training"
+ kind: Workflow
+ version: 20210118175809
+ action: update
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/pytorch-mnist-training/"
+ labels:
+ "created-by": "system"
+ framework: pytorch
+spec:
+ arguments:
+ parameters:
+ - name: epochs
+ value: '10'
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.nodepool
+ name: sys-node-pool
+ value: "{{.DefaultNodePoolOption}}"
+ visibility: public
+ required: true
+ entrypoint: main
+ templates:
+ - name: main
+ dag:
+ tasks:
+ - name: train-model
+ template: train-model
+ - name: train-model
+ # Indicates that we want to push files in /mnt/output to object storage
+ outputs:
+ artifacts:
+ - name: output
+ path: /mnt/output
+ optional: true
+ script:
image: onepanel/dl:0.17.0
command:
- - sh
- - '-c'
- env:
- - name: ONEPANEL_INTERACTIVE_SIDECAR
- value: 'true'
- args:
- # Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
- - tensorboard --logdir /mnt/output/tensorboard
- ports:
- - containerPort: 6006
- name: tensorboard
-volumeClaimTemplates:
- # Provision volumes for storing data and output
- - metadata:
- name: data
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 2Gi
- - metadata:
- name: output
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 2Gi
+ - python
+ - '-u'
+ source: |
+ import json
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import torch.optim as optim
+ from torchvision import datasets, transforms
+ from torch.optim.lr_scheduler import StepLR
+ from torch.utils.tensorboard import SummaryWriter
+
+
+ class Net(nn.Module):
+ def __init__(self):
+ super(Net, self).__init__()
+ self.conv1 = nn.Conv2d(1, 32, 3, 1)
+ self.conv2 = nn.Conv2d(32, 64, 3, 1)
+ self.dropout1 = nn.Dropout(0.25)
+ self.dropout2 = nn.Dropout(0.5)
+ self.fc1 = nn.Linear(9216, 128)
+ self.fc2 = nn.Linear(128, 10)
+
+ def forward(self, x):
+ x = self.conv1(x)
+ x = F.relu(x)
+ x = self.conv2(x)
+ x = F.relu(x)
+ x = F.max_pool2d(x, 2)
+ x = self.dropout1(x)
+ x = torch.flatten(x, 1)
+ x = self.fc1(x)
+ x = F.relu(x)
+ x = self.dropout2(x)
+ x = self.fc2(x)
+ output = F.log_softmax(x, dim=1)
+ return output
+
+
+ def train(model, device, train_loader, optimizer, epoch, batch_size, writer):
+ model.train()
+ for batch_idx, (data, target) in enumerate(train_loader):
+ data, target = data.to(device), target.to(device)
+ optimizer.zero_grad()
+ output = model(data)
+ loss = F.nll_loss(output, target)
+ loss.backward()
+ optimizer.step()
+ if batch_idx % 10 == 0:
+ print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
+ epoch, batch_idx * len(data), len(train_loader.dataset),
+ 100. * batch_idx / len(train_loader), loss.item()))
+
+ writer.add_scalar('training loss', loss.item(), epoch)
+
+
+ def test(model, device, test_loader, epoch, writer):
+ model.eval()
+ test_loss = 0
+ correct = 0
+ with torch.no_grad():
+ for data, target in test_loader:
+ data, target = data.to(device), target.to(device)
+ output = model(data)
+ test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
+ pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
+ correct += pred.eq(target.view_as(pred)).sum().item()
+
+ loss = test_loss / len(test_loader.dataset)
+ accuracy = correct / len(test_loader.dataset)
+
+ print('\nTest set: Average loss: {}, Accuracy: {}\n'.format(
+ loss, accuracy))
+
+ # Store metrics for this task
+ metrics = [
+ {'name': 'accuracy', 'value': accuracy},
+ {'name': 'loss', 'value': loss}
+ ]
+ with open('/tmp/sys-metrics.json', 'w') as f:
+ json.dump(metrics, f)
+
+
+ def main(params):
+ writer = SummaryWriter(log_dir='/mnt/output/tensorboard')
+
+ use_cuda = torch.cuda.is_available()
+
+ torch.manual_seed(params['seed'])
+
+ device = torch.device('cuda' if use_cuda else 'cpu')
+
+ train_kwargs = {'batch_size': params['batch_size']}
+ test_kwargs = {'batch_size': params['test_batch_size']}
+ if use_cuda:
+ cuda_kwargs = {'num_workers': 1,
+ 'pin_memory': True,
+ 'shuffle': True}
+ train_kwargs.update(cuda_kwargs)
+ test_kwargs.update(cuda_kwargs)
+
+ transform=transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize((0.1307,), (0.3081,))
+ ])
+ dataset1 = datasets.MNIST('/mnt/data', train=True, download=True,
+ transform=transform)
+ dataset2 = datasets.MNIST('/mnt/data', train=False,
+ transform=transform)
+ train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
+ test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
+
+ model = Net().to(device)
+ optimizer = optim.Adadelta(model.parameters(), lr=params['lr'])
+
+ scheduler = StepLR(optimizer, step_size=1, gamma=params['gamma'])
+ for epoch in range(1, params['epochs'] + 1):
+ train(model, device, train_loader, optimizer, epoch, params['batch_size'], writer)
+ test(model, device, test_loader, epoch, writer)
+ scheduler.step()
+
+ # Save model
+ torch.save(model.state_dict(), '/mnt/output/model.pt')
+
+ writer.close()
+
+
+ if __name__ == '__main__':
+ params = {
+ 'seed': 1,
+ 'batch_size': 64,
+ 'test_batch_size': 1000,
+ 'epochs': {{workflow.parameters.epochs}},
+ 'lr': 0.001,
+ 'gamma': 0.7,
+ }
+ main(params)
+ volumeMounts:
+ # TensorBoard sidecar will automatically mount these volumes
+ # The `data` volume is mounted for saving datasets
+ # The `output` volume is mounted to save model output and share TensorBoard logs
+ - name: data
+ mountPath: /mnt/data
+ - name: output
+ mountPath: /mnt/output
+ nodeSelector:
+ "{{.NodePoolLabel}}": '{{workflow.parameters.sys-node-pool}}'
+ sidecars:
+ - name: tensorboard
+ image: onepanel/dl:0.17.0
+ command:
+ - sh
+ - '-c'
+ env:
+ - name: ONEPANEL_INTERACTIVE_SIDECAR
+ value: 'true'
+ args:
+ # Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
+ - tensorboard --logdir /mnt/output/tensorboard
+ ports:
+ - containerPort: 6006
+ name: tensorboard
+ volumeClaimTemplates:
+ # Provision volumes for storing data and output
+ - metadata:
+ name: data
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 2Gi
+ - metadata:
+ name: output
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 2Gi
diff --git a/db/yaml/workflows/pytorch-mnist-training/20210323175655.yaml b/db/yaml/workflows/pytorch-mnist-training/20210323175655.yaml
index cf9859d..d0ce9fa 100644
--- a/db/yaml/workflows/pytorch-mnist-training/20210323175655.yaml
+++ b/db/yaml/workflows/pytorch-mnist-training/20210323175655.yaml
@@ -1,207 +1,216 @@
-# source: https://github.com/onepanelio/templates/blob/master/workflows/pytorch-mnist-training/
-arguments:
- parameters:
- - name: epochs
- value: '10'
- - displayName: Node pool
- hint: Name of node pool or group to run this workflow task
- type: select.nodepool
- name: sys-node-pool
- value: {{.DefaultNodePoolOption}}
- visibility: public
- required: true
-entrypoint: main
-templates:
- - name: main
- dag:
- tasks:
- - name: train-model
- template: train-model
- - name: train-model
- # Indicates that we want to push files in /mnt/output to object storage
- outputs:
- artifacts:
- - name: output
- path: /mnt/output
- optional: true
- script:
- image: onepanel/dl:v0.20.0
- command:
- - python
- - '-u'
- source: |
- import json
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- import torch.optim as optim
- from torchvision import datasets, transforms
- from torch.optim.lr_scheduler import StepLR
- from torch.utils.tensorboard import SummaryWriter
-
-
- class Net(nn.Module):
- def __init__(self):
- super(Net, self).__init__()
- self.conv1 = nn.Conv2d(1, 32, 3, 1)
- self.conv2 = nn.Conv2d(32, 64, 3, 1)
- self.dropout1 = nn.Dropout(0.25)
- self.dropout2 = nn.Dropout(0.5)
- self.fc1 = nn.Linear(9216, 128)
- self.fc2 = nn.Linear(128, 10)
-
- def forward(self, x):
- x = self.conv1(x)
- x = F.relu(x)
- x = self.conv2(x)
- x = F.relu(x)
- x = F.max_pool2d(x, 2)
- x = self.dropout1(x)
- x = torch.flatten(x, 1)
- x = self.fc1(x)
- x = F.relu(x)
- x = self.dropout2(x)
- x = self.fc2(x)
- output = F.log_softmax(x, dim=1)
- return output
-
-
- def train(model, device, train_loader, optimizer, epoch, batch_size, writer):
- model.train()
- for batch_idx, (data, target) in enumerate(train_loader):
- data, target = data.to(device), target.to(device)
- optimizer.zero_grad()
- output = model(data)
- loss = F.nll_loss(output, target)
- loss.backward()
- optimizer.step()
- if batch_idx % 10 == 0:
- print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
- epoch, batch_idx * len(data), len(train_loader.dataset),
- 100. * batch_idx / len(train_loader), loss.item()))
-
- writer.add_scalar('training loss', loss.item(), epoch)
-
-
- def test(model, device, test_loader, epoch, writer):
- model.eval()
- test_loss = 0
- correct = 0
- with torch.no_grad():
- for data, target in test_loader:
- data, target = data.to(device), target.to(device)
- output = model(data)
- test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
- pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
- correct += pred.eq(target.view_as(pred)).sum().item()
-
- loss = test_loss / len(test_loader.dataset)
- accuracy = correct / len(test_loader.dataset)
-
- print('\nTest set: Average loss: {}, Accuracy: {}\n'.format(
- loss, accuracy))
-
- # Store metrics for this task
- metrics = [
- {'name': 'accuracy', 'value': accuracy},
- {'name': 'loss', 'value': loss}
- ]
- with open('/mnt/tmp/sys-metrics.json', 'w') as f:
- json.dump(metrics, f)
-
-
- def main(params):
- writer = SummaryWriter(log_dir='/mnt/output/tensorboard')
-
- use_cuda = torch.cuda.is_available()
-
- torch.manual_seed(params['seed'])
-
- device = torch.device('cuda' if use_cuda else 'cpu')
-
- train_kwargs = {'batch_size': params['batch_size']}
- test_kwargs = {'batch_size': params['test_batch_size']}
- if use_cuda:
- cuda_kwargs = {'num_workers': 1,
- 'pin_memory': True,
- 'shuffle': True}
- train_kwargs.update(cuda_kwargs)
- test_kwargs.update(cuda_kwargs)
-
- transform=transforms.Compose([
- transforms.ToTensor(),
- transforms.Normalize((0.1307,), (0.3081,))
- ])
- dataset1 = datasets.MNIST('/mnt/data', train=True, download=True,
- transform=transform)
- dataset2 = datasets.MNIST('/mnt/data', train=False,
- transform=transform)
- train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
- test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
-
- model = Net().to(device)
- optimizer = optim.Adadelta(model.parameters(), lr=params['lr'])
-
- scheduler = StepLR(optimizer, step_size=1, gamma=params['gamma'])
- for epoch in range(1, params['epochs'] + 1):
- train(model, device, train_loader, optimizer, epoch, params['batch_size'], writer)
- test(model, device, test_loader, epoch, writer)
- scheduler.step()
-
- # Save model
- torch.save(model.state_dict(), '/mnt/output/model.pt')
-
- writer.close()
-
-
- if __name__ == '__main__':
- params = {
- 'seed': 1,
- 'batch_size': 64,
- 'test_batch_size': 1000,
- 'epochs': {{workflow.parameters.epochs}},
- 'lr': 0.001,
- 'gamma': 0.7,
- }
- main(params)
- volumeMounts:
- # TensorBoard sidecar will automatically mount these volumes
- # The `data` volume is mounted for saving datasets
- # The `output` volume is mounted to save model output and share TensorBoard logs
- - name: data
- mountPath: /mnt/data
- - name: output
- mountPath: /mnt/output
- nodeSelector:
- {{.NodePoolLabel}}: '{{workflow.parameters.sys-node-pool}}'
- sidecars:
- - name: tensorboard
+metadata:
+ name: "PyTorch Training"
+ kind: Workflow
+ version: 20210323175655
+ action: update
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/pytorch-mnist-training/"
+ labels:
+ "created-by": "system"
+ framework: pytorch
+spec:
+ arguments:
+ parameters:
+ - name: epochs
+ value: '10'
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.nodepool
+ name: sys-node-pool
+ value: "{{.DefaultNodePoolOption}}"
+ visibility: public
+ required: true
+ entrypoint: main
+ templates:
+ - name: main
+ dag:
+ tasks:
+ - name: train-model
+ template: train-model
+ - name: train-model
+ # Indicates that we want to push files in /mnt/output to object storage
+ outputs:
+ artifacts:
+ - name: output
+ path: /mnt/output
+ optional: true
+ script:
image: onepanel/dl:v0.20.0
command:
- - sh
- - '-c'
- env:
- - name: ONEPANEL_INTERACTIVE_SIDECAR
- value: 'true'
- args:
- # Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
- - tensorboard --logdir /mnt/output/tensorboard
- ports:
- - containerPort: 6006
- name: tensorboard
-volumeClaimTemplates:
- # Provision volumes for storing data and output
- - metadata:
- name: data
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 2Gi
- - metadata:
- name: output
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 2Gi
+ - python
+ - '-u'
+ source: |
+ import json
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import torch.optim as optim
+ from torchvision import datasets, transforms
+ from torch.optim.lr_scheduler import StepLR
+ from torch.utils.tensorboard import SummaryWriter
+
+
+ class Net(nn.Module):
+ def __init__(self):
+ super(Net, self).__init__()
+ self.conv1 = nn.Conv2d(1, 32, 3, 1)
+ self.conv2 = nn.Conv2d(32, 64, 3, 1)
+ self.dropout1 = nn.Dropout(0.25)
+ self.dropout2 = nn.Dropout(0.5)
+ self.fc1 = nn.Linear(9216, 128)
+ self.fc2 = nn.Linear(128, 10)
+
+ def forward(self, x):
+ x = self.conv1(x)
+ x = F.relu(x)
+ x = self.conv2(x)
+ x = F.relu(x)
+ x = F.max_pool2d(x, 2)
+ x = self.dropout1(x)
+ x = torch.flatten(x, 1)
+ x = self.fc1(x)
+ x = F.relu(x)
+ x = self.dropout2(x)
+ x = self.fc2(x)
+ output = F.log_softmax(x, dim=1)
+ return output
+
+
+ def train(model, device, train_loader, optimizer, epoch, batch_size, writer):
+ model.train()
+ for batch_idx, (data, target) in enumerate(train_loader):
+ data, target = data.to(device), target.to(device)
+ optimizer.zero_grad()
+ output = model(data)
+ loss = F.nll_loss(output, target)
+ loss.backward()
+ optimizer.step()
+ if batch_idx % 10 == 0:
+ print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
+ epoch, batch_idx * len(data), len(train_loader.dataset),
+ 100. * batch_idx / len(train_loader), loss.item()))
+
+ writer.add_scalar('training loss', loss.item(), epoch)
+
+
+ def test(model, device, test_loader, epoch, writer):
+ model.eval()
+ test_loss = 0
+ correct = 0
+ with torch.no_grad():
+ for data, target in test_loader:
+ data, target = data.to(device), target.to(device)
+ output = model(data)
+ test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
+ pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
+ correct += pred.eq(target.view_as(pred)).sum().item()
+
+ loss = test_loss / len(test_loader.dataset)
+ accuracy = correct / len(test_loader.dataset)
+
+ print('\nTest set: Average loss: {}, Accuracy: {}\n'.format(
+ loss, accuracy))
+
+ # Store metrics for this task
+ metrics = [
+ {'name': 'accuracy', 'value': accuracy},
+ {'name': 'loss', 'value': loss}
+ ]
+ with open('/mnt/tmp/sys-metrics.json', 'w') as f:
+ json.dump(metrics, f)
+
+
+ def main(params):
+ writer = SummaryWriter(log_dir='/mnt/output/tensorboard')
+
+ use_cuda = torch.cuda.is_available()
+
+ torch.manual_seed(params['seed'])
+
+ device = torch.device('cuda' if use_cuda else 'cpu')
+
+ train_kwargs = {'batch_size': params['batch_size']}
+ test_kwargs = {'batch_size': params['test_batch_size']}
+ if use_cuda:
+ cuda_kwargs = {'num_workers': 1,
+ 'pin_memory': True,
+ 'shuffle': True}
+ train_kwargs.update(cuda_kwargs)
+ test_kwargs.update(cuda_kwargs)
+
+ transform=transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize((0.1307,), (0.3081,))
+ ])
+ dataset1 = datasets.MNIST('/mnt/data', train=True, download=True,
+ transform=transform)
+ dataset2 = datasets.MNIST('/mnt/data', train=False,
+ transform=transform)
+ train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
+ test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
+
+ model = Net().to(device)
+ optimizer = optim.Adadelta(model.parameters(), lr=params['lr'])
+
+ scheduler = StepLR(optimizer, step_size=1, gamma=params['gamma'])
+ for epoch in range(1, params['epochs'] + 1):
+ train(model, device, train_loader, optimizer, epoch, params['batch_size'], writer)
+ test(model, device, test_loader, epoch, writer)
+ scheduler.step()
+
+ # Save model
+ torch.save(model.state_dict(), '/mnt/output/model.pt')
+
+ writer.close()
+
+
+ if __name__ == '__main__':
+ params = {
+ 'seed': 1,
+ 'batch_size': 64,
+ 'test_batch_size': 1000,
+ 'epochs': {{workflow.parameters.epochs}},
+ 'lr': 0.001,
+ 'gamma': 0.7,
+ }
+ main(params)
+ volumeMounts:
+ # TensorBoard sidecar will automatically mount these volumes
+ # The `data` volume is mounted for saving datasets
+ # The `output` volume is mounted to save model output and share TensorBoard logs
+ - name: data
+ mountPath: /mnt/data
+ - name: output
+ mountPath: /mnt/output
+ nodeSelector:
+ "{{.NodePoolLabel}}": '{{workflow.parameters.sys-node-pool}}'
+ sidecars:
+ - name: tensorboard
+ image: onepanel/dl:v0.20.0
+ command:
+ - sh
+ - '-c'
+ env:
+ - name: ONEPANEL_INTERACTIVE_SIDECAR
+ value: 'true'
+ args:
+ # Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
+ - tensorboard --logdir /mnt/output/tensorboard
+ ports:
+ - containerPort: 6006
+ name: tensorboard
+ volumeClaimTemplates:
+ # Provision volumes for storing data and output
+ - metadata:
+ name: data
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 2Gi
+ - metadata:
+ name: output
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 2Gi
diff --git a/db/yaml/workflows/tensorflow-mnist-training/20200605090535.yaml b/db/yaml/workflows/tensorflow-mnist-training/20200605090535.yaml
index 8ac9c56..5244ac5 100644
--- a/db/yaml/workflows/tensorflow-mnist-training/20200605090535.yaml
+++ b/db/yaml/workflows/tensorflow-mnist-training/20200605090535.yaml
@@ -1,76 +1,85 @@
-# source: https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/template.yaml
-entrypoint: main
-arguments:
- parameters:
- - name: source
- value: https://github.com/onepanelio/tensorflow-examples.git
- - name: command
- value: "python mnist/main.py --epochs=5"
-volumeClaimTemplates:
- - metadata:
- name: data
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 2Gi
- - metadata:
- name: output
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 2Gi
-templates:
- - name: main
- dag:
- tasks:
- - name: train-model
- template: pytorch
- # Uncomment section below to send metrics to Slack
- # - name: notify-in-slack
- # dependencies: [train-model]
- # template: slack-notify-success
- # arguments:
- # parameters:
- # - name: status
- # value: "{{tasks.train-model.status}}"
- # artifacts:
- # - name: metrics
- # from: "{{tasks.train-model.outputs.artifacts.sys-metrics}}"
- - name: pytorch
- inputs:
- artifacts:
- - name: src
- path: /mnt/src
- git:
- repo: "{{workflow.parameters.source}}"
- outputs:
- artifacts:
- - name: model
- path: /mnt/output
- optional: true
- archive:
- none: {}
- container:
- image: tensorflow/tensorflow:latest
- command: [sh,-c]
- args: ["{{workflow.parameters.command}}"]
- workingDir: /mnt/src
- volumeMounts:
- - name: data
- mountPath: /mnt/data
- - name: output
- mountPath: /mnt/output
- - name: slack-notify-success
- container:
- image: technosophos/slack-notify
- command: [sh,-c]
- args: ['SLACK_USERNAME=Worker SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify']
- inputs:
- parameters:
- - name: status
- artifacts:
- - name: metrics
- path: /tmp/metrics.json
- optional: true
\ No newline at end of file
+metadata:
+ name: "TensorFlow Training"
+ kind: Workflow
+ version: 20200605090535
+ action: create
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/template.yaml"
+ labels:
+ "created-by": "system"
+ framework: tensorflow
+spec:
+ entrypoint: main
+ arguments:
+ parameters:
+ - name: source
+ value: https://github.com/onepanelio/tensorflow-examples.git
+ - name: command
+ value: "python mnist/main.py --epochs=5"
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 2Gi
+ - metadata:
+ name: output
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 2Gi
+ templates:
+ - name: main
+ dag:
+ tasks:
+ - name: train-model
+ template: pytorch
+ # Uncomment section below to send metrics to Slack
+ # - name: notify-in-slack
+ # dependencies: [train-model]
+ # template: slack-notify-success
+ # arguments:
+ # parameters:
+ # - name: status
+ # value: "{{tasks.train-model.status}}"
+ # artifacts:
+ # - name: metrics
+ # from: "{{tasks.train-model.outputs.artifacts.sys-metrics}}"
+ - name: pytorch
+ inputs:
+ artifacts:
+ - name: src
+ path: /mnt/src
+ git:
+ repo: "{{workflow.parameters.source}}"
+ outputs:
+ artifacts:
+ - name: model
+ path: /mnt/output
+ optional: true
+ archive:
+ none: {}
+ container:
+ image: tensorflow/tensorflow:latest
+ command: [sh,-c]
+ args: ["{{workflow.parameters.command}}"]
+ workingDir: /mnt/src
+ volumeMounts:
+ - name: data
+ mountPath: /mnt/data
+ - name: output
+ mountPath: /mnt/output
+ - name: slack-notify-success
+ container:
+ image: technosophos/slack-notify
+ command: [sh,-c]
+ args: ['SLACK_USERNAME=Worker SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify']
+ inputs:
+ parameters:
+ - name: status
+ artifacts:
+ - name: metrics
+ path: /tmp/metrics.json
+ optional: true
\ No newline at end of file
diff --git a/db/yaml/workflows/tensorflow-mnist-training/20201209124226.yaml b/db/yaml/workflows/tensorflow-mnist-training/20201209124226.yaml
index eb14b9a..9cbf872 100644
--- a/db/yaml/workflows/tensorflow-mnist-training/20201209124226.yaml
+++ b/db/yaml/workflows/tensorflow-mnist-training/20201209124226.yaml
@@ -1,71 +1,80 @@
-# source: https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/template.yaml
-arguments:
- parameters:
- - name: epochs
- value: '10'
-entrypoint: main
-templates:
- - name: main
- dag:
- tasks:
- - name: train-model
- template: tf-dense
- - name: tf-dense
- script:
- image: tensorflow/tensorflow:2.3.0
- command:
- - python
- - '-u'
- source: |
- import tensorflow as tf
- import datetime
- mnist = tf.keras.datasets.mnist
- (x_train, y_train),(x_test, y_test) = mnist.load_data()
- x_train, x_test = x_train / 255.0, x_test / 255.0
- def create_model():
- return tf.keras.models.Sequential([
- tf.keras.layers.Flatten(input_shape=(28, 28)),
- tf.keras.layers.Dense(512, activation='relu'),
- tf.keras.layers.Dropout(0.2),
- tf.keras.layers.Dense(10, activation='softmax')
- ])
- model = create_model()
- model.compile(optimizer='adam',
- loss='sparse_categorical_crossentropy',
- metrics=['accuracy'])
- # Write logs to /mnt/output
- log_dir = "/mnt/output/logs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
- tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
- history = model.fit(x=x_train,
- y=y_train,
- epochs={{workflow.parameters.epochs}},
- validation_data=(x_test, y_test),
- callbacks=[tensorboard_callback])
- volumeMounts:
- # TensorBoard sidecar will automatically mount this volume
- - name: output
- mountPath: /mnt/output
- sidecars:
- - name: tensorboard
- image: 'tensorflow/tensorflow:2.3.0'
+metadata:
+ name: "TensorFlow Training"
+ kind: Workflow
+ version: 20201209124226
+ action: update
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/template.yaml"
+ labels:
+ "created-by": "system"
+ framework: tensorflow
+spec:
+ arguments:
+ parameters:
+ - name: epochs
+ value: '10'
+ entrypoint: main
+ templates:
+ - name: main
+ dag:
+ tasks:
+ - name: train-model
+ template: tf-dense
+ - name: tf-dense
+ script:
+ image: tensorflow/tensorflow:2.3.0
command:
- - sh
- - '-c'
- env:
- - name: ONEPANEL_INTERACTIVE_SIDECAR
- value: 'true'
- args:
- # Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
- - tensorboard --logdir /mnt/output/
- ports:
- - containerPort: 6006
- name: tensorboard
-volumeClaimTemplates:
- # Provision a volume that can be shared between main container and TensorBoard side car
- - metadata:
- name: output
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 2Gi
\ No newline at end of file
+ - python
+ - '-u'
+ source: |
+ import tensorflow as tf
+ import datetime
+ mnist = tf.keras.datasets.mnist
+ (x_train, y_train),(x_test, y_test) = mnist.load_data()
+ x_train, x_test = x_train / 255.0, x_test / 255.0
+ def create_model():
+ return tf.keras.models.Sequential([
+ tf.keras.layers.Flatten(input_shape=(28, 28)),
+ tf.keras.layers.Dense(512, activation='relu'),
+ tf.keras.layers.Dropout(0.2),
+ tf.keras.layers.Dense(10, activation='softmax')
+ ])
+ model = create_model()
+ model.compile(optimizer='adam',
+ loss='sparse_categorical_crossentropy',
+ metrics=['accuracy'])
+ # Write logs to /mnt/output
+ log_dir = "/mnt/output/logs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
+ tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
+ history = model.fit(x=x_train,
+ y=y_train,
+ epochs={{workflow.parameters.epochs}},
+ validation_data=(x_test, y_test),
+ callbacks=[tensorboard_callback])
+ volumeMounts:
+ # TensorBoard sidecar will automatically mount this volume
+ - name: output
+ mountPath: /mnt/output
+ sidecars:
+ - name: tensorboard
+ image: 'tensorflow/tensorflow:2.3.0'
+ command:
+ - sh
+ - '-c'
+ env:
+ - name: ONEPANEL_INTERACTIVE_SIDECAR
+ value: 'true'
+ args:
+ # Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
+ - tensorboard --logdir /mnt/output/
+ ports:
+ - containerPort: 6006
+ name: tensorboard
+ volumeClaimTemplates:
+  # Provision a volume that can be shared between main container and TensorBoard sidecar
+ - metadata:
+ name: output
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 2Gi
\ No newline at end of file
diff --git a/db/yaml/workflows/tensorflow-mnist-training/20201223062947.yaml b/db/yaml/workflows/tensorflow-mnist-training/20201223062947.yaml
index d400d6f..7c49d24 100644
--- a/db/yaml/workflows/tensorflow-mnist-training/20201223062947.yaml
+++ b/db/yaml/workflows/tensorflow-mnist-training/20201223062947.yaml
@@ -1,118 +1,127 @@
-# source: https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/
-arguments:
- parameters:
- - name: epochs
- value: '10'
- - displayName: Node pool
- hint: Name of node pool or group to run this workflow task
- type: select.nodepool
- name: sys-node-pool
- value: {{.DefaultNodePoolOption}}
- visibility: public
- required: true
-entrypoint: main
-templates:
- - name: main
- dag:
- tasks:
- - name: train-model
- template: train-model
- - name: train-model
- # Indicates that we want to push files in /mnt/output to object storage
- outputs:
- artifacts:
- - name: output
- path: /mnt/output
- optional: true
- script:
- image: onepanel/dl:0.17.0
- command:
- - python
- - '-u'
- source: |
- import json
- import tensorflow as tf
-
- mnist = tf.keras.datasets.mnist
-
- (x_train, y_train),(x_test, y_test) = mnist.load_data()
- x_train, x_test = x_train / 255.0, x_test / 255.0
- x_train = x_train[..., tf.newaxis]
- x_test = x_test[..., tf.newaxis]
-
- model = tf.keras.Sequential([
- tf.keras.layers.Conv2D(filters=32, kernel_size=5, activation='relu'),
- tf.keras.layers.MaxPool2D(pool_size=2),
- tf.keras.layers.Conv2D(filters=64, kernel_size=5, activation='relu'),
- tf.keras.layers.MaxPool2D(pool_size=2),
- tf.keras.layers.Flatten(),
- tf.keras.layers.Dense(units=124, activation='relu'),
- tf.keras.layers.Dropout(rate=0.75),
- tf.keras.layers.Dense(units=10, activation='softmax')
- ])
- model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001),
- loss='sparse_categorical_crossentropy',
- metrics=['accuracy'])
-
- # Write TensorBoard logs to /mnt/output
- log_dir = '/mnt/output/tensorboard/'
- tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
-
- model.fit(x=x_train,
- y=y_train,
- epochs={{workflow.parameters.epochs}},
- validation_data=(x_test, y_test),
- callbacks=[tensorboard_callback])
-
- # Store metrics for this task
- loss, accuracy = model.evaluate(x_test, y_test)
- metrics = [
- {'name': 'accuracy', 'value': accuracy},
- {'name': 'loss', 'value': loss}
- ]
- with open('/tmp/sys-metrics.json', 'w') as f:
- json.dump(metrics, f)
-
- # Save model
- model.save('/mnt/output/model.h5')
- volumeMounts:
- # TensorBoard sidecar will automatically mount these volumes
- # The `data` volume is mounted to support Keras datasets
- # The `output` volume is mounted to save model output and share TensorBoard logs
- - name: data
- mountPath: /home/root/.keras/datasets
- - name: output
- mountPath: /mnt/output
- nodeSelector:
- beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
- sidecars:
- - name: tensorboard
- image: tensorflow/tensorflow:2.3.0
+metadata:
+ name: "TensorFlow Training"
+ kind: Workflow
+ version: 20201223062947
+ action: update
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/"
+ labels:
+ "created-by": "system"
+ framework: tensorflow
+spec:
+ arguments:
+ parameters:
+ - name: epochs
+ value: '10'
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.nodepool
+ name: sys-node-pool
+ value: "{{.DefaultNodePoolOption}}"
+ visibility: public
+ required: true
+ entrypoint: main
+ templates:
+ - name: main
+ dag:
+ tasks:
+ - name: train-model
+ template: train-model
+ - name: train-model
+ # Indicates that we want to push files in /mnt/output to object storage
+ outputs:
+ artifacts:
+ - name: output
+ path: /mnt/output
+ optional: true
+ script:
+ image: onepanel/dl:0.17.0
command:
- - sh
- - '-c'
- env:
- - name: ONEPANEL_INTERACTIVE_SIDECAR
- value: 'true'
- args:
- # Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
- - tensorboard --logdir /mnt/output/tensorboard
- ports:
- - containerPort: 6006
- name: tensorboard
-volumeClaimTemplates:
- # Provision volumes for storing data and output
- - metadata:
- name: data
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 2Gi
- - metadata:
- name: output
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 2Gi
+ - python
+ - '-u'
+ source: |
+ import json
+ import tensorflow as tf
+
+ mnist = tf.keras.datasets.mnist
+
+ (x_train, y_train),(x_test, y_test) = mnist.load_data()
+ x_train, x_test = x_train / 255.0, x_test / 255.0
+ x_train = x_train[..., tf.newaxis]
+ x_test = x_test[..., tf.newaxis]
+
+ model = tf.keras.Sequential([
+ tf.keras.layers.Conv2D(filters=32, kernel_size=5, activation='relu'),
+ tf.keras.layers.MaxPool2D(pool_size=2),
+ tf.keras.layers.Conv2D(filters=64, kernel_size=5, activation='relu'),
+ tf.keras.layers.MaxPool2D(pool_size=2),
+ tf.keras.layers.Flatten(),
+ tf.keras.layers.Dense(units=124, activation='relu'),
+ tf.keras.layers.Dropout(rate=0.75),
+ tf.keras.layers.Dense(units=10, activation='softmax')
+ ])
+ model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001),
+ loss='sparse_categorical_crossentropy',
+ metrics=['accuracy'])
+
+ # Write TensorBoard logs to /mnt/output
+ log_dir = '/mnt/output/tensorboard/'
+ tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
+
+ model.fit(x=x_train,
+ y=y_train,
+ epochs={{workflow.parameters.epochs}},
+ validation_data=(x_test, y_test),
+ callbacks=[tensorboard_callback])
+
+ # Store metrics for this task
+ loss, accuracy = model.evaluate(x_test, y_test)
+ metrics = [
+ {'name': 'accuracy', 'value': accuracy},
+ {'name': 'loss', 'value': loss}
+ ]
+ with open('/tmp/sys-metrics.json', 'w') as f:
+ json.dump(metrics, f)
+
+ # Save model
+ model.save('/mnt/output/model.h5')
+ volumeMounts:
+ # TensorBoard sidecar will automatically mount these volumes
+ # The `data` volume is mounted to support Keras datasets
+ # The `output` volume is mounted to save model output and share TensorBoard logs
+ - name: data
+ mountPath: /home/root/.keras/datasets
+ - name: output
+ mountPath: /mnt/output
+ nodeSelector:
+ beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
+ sidecars:
+ - name: tensorboard
+ image: tensorflow/tensorflow:2.3.0
+ command:
+ - sh
+ - '-c'
+ env:
+ - name: ONEPANEL_INTERACTIVE_SIDECAR
+ value: 'true'
+ args:
+ # Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
+ - tensorboard --logdir /mnt/output/tensorboard
+ ports:
+ - containerPort: 6006
+ name: tensorboard
+ volumeClaimTemplates:
+ # Provision volumes for storing data and output
+ - metadata:
+ name: data
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 2Gi
+ - metadata:
+ name: output
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 2Gi
diff --git a/db/yaml/workflows/tensorflow-mnist-training/20210118175809.yaml b/db/yaml/workflows/tensorflow-mnist-training/20210118175809.yaml
index 51a3025..1d8820c 100644
--- a/db/yaml/workflows/tensorflow-mnist-training/20210118175809.yaml
+++ b/db/yaml/workflows/tensorflow-mnist-training/20210118175809.yaml
@@ -1,118 +1,127 @@
-# source: https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/
-arguments:
- parameters:
- - name: epochs
- value: '10'
- - displayName: Node pool
- hint: Name of node pool or group to run this workflow task
- type: select.nodepool
- name: sys-node-pool
- value: {{.DefaultNodePoolOption}}
- visibility: public
- required: true
-entrypoint: main
-templates:
- - name: main
- dag:
- tasks:
- - name: train-model
- template: train-model
- - name: train-model
- # Indicates that we want to push files in /mnt/output to object storage
- outputs:
- artifacts:
- - name: output
- path: /mnt/output
- optional: true
- script:
- image: onepanel/dl:0.17.0
- command:
- - python
- - '-u'
- source: |
- import json
- import tensorflow as tf
-
- mnist = tf.keras.datasets.mnist
-
- (x_train, y_train),(x_test, y_test) = mnist.load_data()
- x_train, x_test = x_train / 255.0, x_test / 255.0
- x_train = x_train[..., tf.newaxis]
- x_test = x_test[..., tf.newaxis]
-
- model = tf.keras.Sequential([
- tf.keras.layers.Conv2D(filters=32, kernel_size=5, activation='relu'),
- tf.keras.layers.MaxPool2D(pool_size=2),
- tf.keras.layers.Conv2D(filters=64, kernel_size=5, activation='relu'),
- tf.keras.layers.MaxPool2D(pool_size=2),
- tf.keras.layers.Flatten(),
- tf.keras.layers.Dense(units=124, activation='relu'),
- tf.keras.layers.Dropout(rate=0.75),
- tf.keras.layers.Dense(units=10, activation='softmax')
- ])
- model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001),
- loss='sparse_categorical_crossentropy',
- metrics=['accuracy'])
-
- # Write TensorBoard logs to /mnt/output
- log_dir = '/mnt/output/tensorboard/'
- tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
-
- model.fit(x=x_train,
- y=y_train,
- epochs={{workflow.parameters.epochs}},
- validation_data=(x_test, y_test),
- callbacks=[tensorboard_callback])
-
- # Store metrics for this task
- loss, accuracy = model.evaluate(x_test, y_test)
- metrics = [
- {'name': 'accuracy', 'value': accuracy},
- {'name': 'loss', 'value': loss}
- ]
- with open('/tmp/sys-metrics.json', 'w') as f:
- json.dump(metrics, f)
-
- # Save model
- model.save('/mnt/output/model.h5')
- volumeMounts:
- # TensorBoard sidecar will automatically mount these volumes
- # The `data` volume is mounted to support Keras datasets
- # The `output` volume is mounted to save model output and share TensorBoard logs
- - name: data
- mountPath: /home/root/.keras/datasets
- - name: output
- mountPath: /mnt/output
- nodeSelector:
- {{.NodePoolLabel}}: '{{workflow.parameters.sys-node-pool}}'
- sidecars:
- - name: tensorboard
+metadata:
+ name: "TensorFlow Training"
+ kind: Workflow
+ version: 20210118175809
+ action: update
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/"
+ labels:
+ "created-by": "system"
+ framework: tensorflow
+spec:
+ arguments:
+ parameters:
+ - name: epochs
+ value: '10'
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.nodepool
+ name: sys-node-pool
+ value: "{{.DefaultNodePoolOption}}"
+ visibility: public
+ required: true
+ entrypoint: main
+ templates:
+ - name: main
+ dag:
+ tasks:
+ - name: train-model
+ template: train-model
+ - name: train-model
+ # Indicates that we want to push files in /mnt/output to object storage
+ outputs:
+ artifacts:
+ - name: output
+ path: /mnt/output
+ optional: true
+ script:
image: onepanel/dl:0.17.0
command:
- - sh
- - '-c'
- env:
- - name: ONEPANEL_INTERACTIVE_SIDECAR
- value: 'true'
- args:
- # Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
- - tensorboard --logdir /mnt/output/tensorboard
- ports:
- - containerPort: 6006
- name: tensorboard
-volumeClaimTemplates:
- # Provision volumes for storing data and output
- - metadata:
- name: data
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 2Gi
- - metadata:
- name: output
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 2Gi
+ - python
+ - '-u'
+ source: |
+ import json
+ import tensorflow as tf
+
+ mnist = tf.keras.datasets.mnist
+
+ (x_train, y_train),(x_test, y_test) = mnist.load_data()
+ x_train, x_test = x_train / 255.0, x_test / 255.0
+ x_train = x_train[..., tf.newaxis]
+ x_test = x_test[..., tf.newaxis]
+
+ model = tf.keras.Sequential([
+ tf.keras.layers.Conv2D(filters=32, kernel_size=5, activation='relu'),
+ tf.keras.layers.MaxPool2D(pool_size=2),
+ tf.keras.layers.Conv2D(filters=64, kernel_size=5, activation='relu'),
+ tf.keras.layers.MaxPool2D(pool_size=2),
+ tf.keras.layers.Flatten(),
+ tf.keras.layers.Dense(units=124, activation='relu'),
+ tf.keras.layers.Dropout(rate=0.75),
+ tf.keras.layers.Dense(units=10, activation='softmax')
+ ])
+ model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001),
+ loss='sparse_categorical_crossentropy',
+ metrics=['accuracy'])
+
+ # Write TensorBoard logs to /mnt/output
+ log_dir = '/mnt/output/tensorboard/'
+ tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
+
+ model.fit(x=x_train,
+ y=y_train,
+ epochs={{workflow.parameters.epochs}},
+ validation_data=(x_test, y_test),
+ callbacks=[tensorboard_callback])
+
+ # Store metrics for this task
+ loss, accuracy = model.evaluate(x_test, y_test)
+ metrics = [
+ {'name': 'accuracy', 'value': accuracy},
+ {'name': 'loss', 'value': loss}
+ ]
+ with open('/tmp/sys-metrics.json', 'w') as f:
+ json.dump(metrics, f)
+
+ # Save model
+ model.save('/mnt/output/model.h5')
+ volumeMounts:
+ # TensorBoard sidecar will automatically mount these volumes
+ # The `data` volume is mounted to support Keras datasets
+ # The `output` volume is mounted to save model output and share TensorBoard logs
+ - name: data
+ mountPath: /home/root/.keras/datasets
+ - name: output
+ mountPath: /mnt/output
+ nodeSelector:
+ "{{.NodePoolLabel}}": '{{workflow.parameters.sys-node-pool}}'
+ sidecars:
+ - name: tensorboard
+ image: onepanel/dl:0.17.0
+ command:
+ - sh
+ - '-c'
+ env:
+ - name: ONEPANEL_INTERACTIVE_SIDECAR
+ value: 'true'
+ args:
+ # Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
+ - tensorboard --logdir /mnt/output/tensorboard
+ ports:
+ - containerPort: 6006
+ name: tensorboard
+ volumeClaimTemplates:
+ # Provision volumes for storing data and output
+ - metadata:
+ name: data
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 2Gi
+ - metadata:
+ name: output
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 2Gi
diff --git a/db/yaml/workflows/tensorflow-mnist-training/20210323175655.yaml b/db/yaml/workflows/tensorflow-mnist-training/20210323175655.yaml
index d9556a7..1fea386 100644
--- a/db/yaml/workflows/tensorflow-mnist-training/20210323175655.yaml
+++ b/db/yaml/workflows/tensorflow-mnist-training/20210323175655.yaml
@@ -1,118 +1,127 @@
-# source: https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/
-arguments:
- parameters:
- - name: epochs
- value: '10'
- - displayName: Node pool
- hint: Name of node pool or group to run this workflow task
- type: select.nodepool
- name: sys-node-pool
- value: {{.DefaultNodePoolOption}}
- visibility: public
- required: true
-entrypoint: main
-templates:
- - name: main
- dag:
- tasks:
- - name: train-model
- template: train-model
- - name: train-model
- # Indicates that we want to push files in /mnt/output to object storage
- outputs:
- artifacts:
- - name: output
- path: /mnt/output
- optional: true
- script:
- image: onepanel/dl:v0.20.0
- command:
- - python
- - '-u'
- source: |
- import json
- import tensorflow as tf
-
- mnist = tf.keras.datasets.mnist
-
- (x_train, y_train),(x_test, y_test) = mnist.load_data()
- x_train, x_test = x_train / 255.0, x_test / 255.0
- x_train = x_train[..., tf.newaxis]
- x_test = x_test[..., tf.newaxis]
-
- model = tf.keras.Sequential([
- tf.keras.layers.Conv2D(filters=32, kernel_size=5, activation='relu'),
- tf.keras.layers.MaxPool2D(pool_size=2),
- tf.keras.layers.Conv2D(filters=64, kernel_size=5, activation='relu'),
- tf.keras.layers.MaxPool2D(pool_size=2),
- tf.keras.layers.Flatten(),
- tf.keras.layers.Dense(units=124, activation='relu'),
- tf.keras.layers.Dropout(rate=0.75),
- tf.keras.layers.Dense(units=10, activation='softmax')
- ])
- model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001),
- loss='sparse_categorical_crossentropy',
- metrics=['accuracy'])
-
- # Write TensorBoard logs to /mnt/output
- log_dir = '/mnt/output/tensorboard/'
- tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
-
- model.fit(x=x_train,
- y=y_train,
- epochs={{workflow.parameters.epochs}},
- validation_data=(x_test, y_test),
- callbacks=[tensorboard_callback])
-
- # Store metrics for this task
- loss, accuracy = model.evaluate(x_test, y_test)
- metrics = [
- {'name': 'accuracy', 'value': accuracy},
- {'name': 'loss', 'value': loss}
- ]
- with open('/mnt/tmp/sys-metrics.json', 'w') as f:
- json.dump(metrics, f)
-
- # Save model
- model.save('/mnt/output/model.h5')
- volumeMounts:
- # TensorBoard sidecar will automatically mount these volumes
- # The `data` volume is mounted to support Keras datasets
- # The `output` volume is mounted to save model output and share TensorBoard logs
- - name: data
- mountPath: /home/root/.keras/datasets
- - name: output
- mountPath: /mnt/output
- nodeSelector:
- {{.NodePoolLabel}}: '{{workflow.parameters.sys-node-pool}}'
- sidecars:
- - name: tensorboard
+metadata:
+ name: "TensorFlow Training"
+ kind: Workflow
+ version: 20210323175655
+ action: update
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/tensorflow-mnist-training/"
+ labels:
+ "created-by": "system"
+ framework: tensorflow
+spec:
+ arguments:
+ parameters:
+ - name: epochs
+ value: '10'
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.nodepool
+ name: sys-node-pool
+ value: "{{.DefaultNodePoolOption}}"
+ visibility: public
+ required: true
+ entrypoint: main
+ templates:
+ - name: main
+ dag:
+ tasks:
+ - name: train-model
+ template: train-model
+ - name: train-model
+ # Indicates that we want to push files in /mnt/output to object storage
+ outputs:
+ artifacts:
+ - name: output
+ path: /mnt/output
+ optional: true
+ script:
image: onepanel/dl:v0.20.0
command:
- - sh
- - '-c'
- env:
- - name: ONEPANEL_INTERACTIVE_SIDECAR
- value: 'true'
- args:
- # Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
- - tensorboard --logdir /mnt/output/tensorboard
- ports:
- - containerPort: 6006
- name: tensorboard
-volumeClaimTemplates:
- # Provision volumes for storing data and output
- - metadata:
- name: data
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 2Gi
- - metadata:
- name: output
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 2Gi
+ - python
+ - '-u'
+ source: |
+ import json
+ import tensorflow as tf
+
+ mnist = tf.keras.datasets.mnist
+
+ (x_train, y_train),(x_test, y_test) = mnist.load_data()
+ x_train, x_test = x_train / 255.0, x_test / 255.0
+ x_train = x_train[..., tf.newaxis]
+ x_test = x_test[..., tf.newaxis]
+
+ model = tf.keras.Sequential([
+ tf.keras.layers.Conv2D(filters=32, kernel_size=5, activation='relu'),
+ tf.keras.layers.MaxPool2D(pool_size=2),
+ tf.keras.layers.Conv2D(filters=64, kernel_size=5, activation='relu'),
+ tf.keras.layers.MaxPool2D(pool_size=2),
+ tf.keras.layers.Flatten(),
+ tf.keras.layers.Dense(units=124, activation='relu'),
+ tf.keras.layers.Dropout(rate=0.75),
+ tf.keras.layers.Dense(units=10, activation='softmax')
+ ])
+ model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001),
+ loss='sparse_categorical_crossentropy',
+ metrics=['accuracy'])
+
+ # Write TensorBoard logs to /mnt/output
+ log_dir = '/mnt/output/tensorboard/'
+ tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
+
+ model.fit(x=x_train,
+ y=y_train,
+ epochs={{workflow.parameters.epochs}},
+ validation_data=(x_test, y_test),
+ callbacks=[tensorboard_callback])
+
+ # Store metrics for this task
+ loss, accuracy = model.evaluate(x_test, y_test)
+ metrics = [
+ {'name': 'accuracy', 'value': accuracy},
+ {'name': 'loss', 'value': loss}
+ ]
+ with open('/mnt/tmp/sys-metrics.json', 'w') as f:
+ json.dump(metrics, f)
+
+ # Save model
+ model.save('/mnt/output/model.h5')
+ volumeMounts:
+ # TensorBoard sidecar will automatically mount these volumes
+ # The `data` volume is mounted to support Keras datasets
+ # The `output` volume is mounted to save model output and share TensorBoard logs
+ - name: data
+ mountPath: /home/root/.keras/datasets
+ - name: output
+ mountPath: /mnt/output
+ nodeSelector:
+ "{{.NodePoolLabel}}": '{{workflow.parameters.sys-node-pool}}'
+ sidecars:
+ - name: tensorboard
+ image: onepanel/dl:v0.20.0
+ command:
+ - sh
+ - '-c'
+ env:
+ - name: ONEPANEL_INTERACTIVE_SIDECAR
+ value: 'true'
+ args:
+ # Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
+ - tensorboard --logdir /mnt/output/tensorboard
+ ports:
+ - containerPort: 6006
+ name: tensorboard
+ volumeClaimTemplates:
+ # Provision volumes for storing data and output
+ - metadata:
+ name: data
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 2Gi
+ - metadata:
+ name: output
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 2Gi
diff --git a/db/yaml/workflows/tf-object-detection-training/20200812104328.yaml b/db/yaml/workflows/tf-object-detection-training/20200812104328.yaml
new file mode 100644
index 0000000..dbafc09
--- /dev/null
+++ b/db/yaml/workflows/tf-object-detection-training/20200812104328.yaml
@@ -0,0 +1,221 @@
+metadata:
+ name: "TF Object Detection Training"
+ kind: Workflow
+ version: 20200812104328
+ action: create
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/tf-object-detection-training/"
+ labels:
+ "created-by": "system"
+ "used-by": "cvat"
+spec:
+ arguments:
+ parameters:
+ - name: source
+ value: https://github.com/tensorflow/models.git
+ displayName: Model source code
+ type: hidden
+ visibility: private
+
+ - name: trainingsource
+ value: https://github.com/onepanelio/cvat-training.git
+ type: hidden
+ visibility: private
+
+ - name: revision
+ value: v1.13.0
+ type: hidden
+ visibility: private
+
+ - name: sys-annotation-path
+ value: annotation-dump/sample_dataset
+ displayName: Dataset path
+ hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
+
+ - name: sys-output-path
+ value: workflow-data/output/sample_output
+ hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
+ displayName: Workflow output path
+ visibility: private
+
+ - name: ref-model
+ value: frcnn-res50-coco
+ displayName: Model
+ hint: TF Detection API's model to use for training.
+ type: select.select
+ visibility: public
+ options:
+ - name: 'Faster RCNN-ResNet 101-COCO'
+ value: frcnn-res101-coco
+ - name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
+ value: frcnn-res101-low
+ - name: 'Faster RCNN-ResNet 50-COCO'
+ value: frcnn-res50-coco
+ - name: 'Faster RCNN-NAS-COCO'
+ value: frcnn-nas-coco
+ - name: 'SSD MobileNet V1-COCO'
+ value: ssd-mobilenet-v1-coco2
+ - name: 'SSD MobileNet V2-COCO'
+ value: ssd-mobilenet-v2-coco
+ - name: 'SSDLite MobileNet-COCO'
+ value: ssdlite-mobilenet-coco
+
+ - name: extras
+ value: |-
+ epochs=1000
+ displayName: Hyperparameters
+ visibility: public
+ type: textarea.textarea
+ hint: "Please refer to our documentation for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
+
+ - name: sys-finetune-checkpoint
+ value: ''
+ hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
+ displayName: Checkpoint path
+ visibility: public
+
+ - name: sys-num-classes
+ value: '81'
+ hint: Number of classes
+ displayName: Number of classes
+ visibility: private
+
+ - name: tf-image
+ value: tensorflow/tensorflow:1.13.1-py3
+ type: select.select
+ displayName: Select TensorFlow image
+ visibility: public
+ hint: Select the GPU image if you are running on a GPU node pool
+ options:
+ - name: 'TensorFlow 1.13.1 CPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-py3'
+ - name: 'TensorFlow 1.13.1 GPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
+
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.select
+ name: sys-node-pool
+ value: Standard_D4s_v3
+ visibility: public
+ required: true
+ options:
+ - name: 'CPU: 2, RAM: 8GB'
+ value: Standard_D2s_v3
+ - name: 'CPU: 4, RAM: 16GB'
+ value: Standard_D4s_v3
+ - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
+ value: Standard_NC6
+ - name: dump-format
+ value: cvat_tfrecord
+ visibility: public
+ entrypoint: main
+ templates:
+ - dag:
+ tasks:
+ - name: train-model
+ template: tensorflow
+ # Uncomment the lines below if you want to send Slack notifications
+ # - arguments:
+ # artifacts:
+ # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
+ # name: metrics
+ # parameters:
+ # - name: status
+ # value: '{{tasks.train-model.status}}'
+ # dependencies:
+ # - train-model
+ # name: notify-in-slack
+ # template: slack-notify-success
+ name: main
+ - container:
+ args:
+ - |
+ apt-get update && \
+ apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
+ pip install pillow lxml Cython contextlib2 jupyter matplotlib numpy scipy boto3 pycocotools pyyaml google-cloud-storage && \
+ cd /mnt/src/tf/research && \
+            export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \
+ cd /mnt/src/train && \
+ python convert_workflow.py \
+ --extras="{{workflow.parameters.extras}}" \
+ --model="{{workflow.parameters.ref-model}}" \
+ --num_classes="{{workflow.parameters.sys-num-classes}}" \
+ --sys_finetune_checkpoint={{workflow.parameters.sys-finetune-checkpoint}}
+ command:
+ - sh
+ - -c
+ image: '{{workflow.parameters.tf-image}}'
+ volumeMounts:
+ - mountPath: /mnt/data
+ name: data
+ - mountPath: /mnt/output
+ name: output
+ workingDir: /mnt/src
+ nodeSelector:
+ beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
+ inputs:
+ artifacts:
+ - name: data
+ path: /mnt/data/datasets/
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.sys-annotation-path}}'
+ - name: models
+ path: /mnt/data/models/
+ optional: true
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.sys-finetune-checkpoint}}'
+ - git:
+ repo: '{{workflow.parameters.source}}'
+ revision: '{{workflow.parameters.revision}}'
+ name: src
+ path: /mnt/src/tf
+ - git:
+ repo: '{{workflow.parameters.trainingsource}}'
+ revision: 'optional-artifacts'
+ name: tsrc
+ path: /mnt/src/train
+ name: tensorflow
+ outputs:
+ artifacts:
+ - name: model
+ optional: true
+ path: /mnt/output
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.sys-output-path}}'
+ # Uncomment the lines below if you want to send Slack notifications
+ #- container:
+ # args:
+ # - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
+ # SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
+      #    SLACK_MESSAGE=$(cat /tmp/metrics.json) ./slack-notify
+ # command:
+ # - sh
+ # - -c
+ # image: technosophos/slack-notify
+ # inputs:
+ # artifacts:
+ # - name: metrics
+ # optional: true
+ # path: /tmp/metrics.json
+ # parameters:
+ # - name: status
+ # name: slack-notify-success
+ volumeClaimTemplates:
+ - metadata:
+ creationTimestamp: null
+ name: data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
+ - metadata:
+ creationTimestamp: null
+ name: output
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
\ No newline at end of file
diff --git a/db/yaml/workflows/tf-object-detection-training/20200824101019.yaml b/db/yaml/workflows/tf-object-detection-training/20200824101019.yaml
new file mode 100644
index 0000000..58f25d4
--- /dev/null
+++ b/db/yaml/workflows/tf-object-detection-training/20200824101019.yaml
@@ -0,0 +1,222 @@
+metadata:
+ name: "TF Object Detection Training"
+ kind: Workflow
+ version: 20200824101019
+ action: update
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/tf-object-detection-training/"
+ labels:
+ "created-by": "system"
+ "used-by": "cvat"
+spec:
+ arguments:
+ parameters:
+ - name: source
+ value: https://github.com/tensorflow/models.git
+ displayName: Model source code
+ type: hidden
+ visibility: private
+
+ - name: trainingsource
+ value: https://github.com/onepanelio/cvat-training.git
+ type: hidden
+ visibility: private
+
+ - name: revision
+ value: v1.13.0
+ type: hidden
+ visibility: private
+
+ - name: cvat-annotation-path
+ value: annotation-dump/sample_dataset
+ displayName: Dataset path
+ hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
+ visibility: private
+
+ - name: cvat-output-path
+ value: workflow-data/output/sample_output
+ hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
+ displayName: Workflow output path
+ visibility: private
+
+ - name: cvat-model
+ value: frcnn-res50-coco
+ displayName: Model
+ hint: TF Detection API's model to use for training.
+ type: select.select
+ visibility: public
+ options:
+ - name: 'Faster RCNN-ResNet 101-COCO'
+ value: frcnn-res101-coco
+ - name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
+ value: frcnn-res101-low
+ - name: 'Faster RCNN-ResNet 50-COCO'
+ value: frcnn-res50-coco
+ - name: 'Faster RCNN-NAS-COCO'
+ value: frcnn-nas-coco
+ - name: 'SSD MobileNet V1-COCO'
+ value: ssd-mobilenet-v1-coco2
+ - name: 'SSD MobileNet V2-COCO'
+ value: ssd-mobilenet-v2-coco
+ - name: 'SSDLite MobileNet-COCO'
+ value: ssdlite-mobilenet-coco
+
+ - name: hyperparameters
+ value: |-
+ num-steps=10000
+ displayName: Hyperparameters
+ visibility: public
+ type: textarea.textarea
+ hint: "Please refer to our documentation for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
+
+ - name: cvat-finetune-checkpoint
+ value: ''
+ hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
+ displayName: Checkpoint path
+ visibility: public
+
+ - name: cvat-num-classes
+ value: '81'
+ hint: Number of classes
+ displayName: Number of classes
+ visibility: private
+
+ - name: tf-image
+ value: tensorflow/tensorflow:1.13.1-py3
+ type: select.select
+ displayName: Select TensorFlow image
+ visibility: public
+ hint: Select the GPU image if you are running on a GPU node pool
+ options:
+ - name: 'TensorFlow 1.13.1 CPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-py3'
+ - name: 'TensorFlow 1.13.1 GPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
+
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.select
+ name: sys-node-pool
+ value: Standard_D4s_v3
+ visibility: public
+ required: true
+ options:
+ - name: 'CPU: 2, RAM: 8GB'
+ value: Standard_D2s_v3
+ - name: 'CPU: 4, RAM: 16GB'
+ value: Standard_D4s_v3
+ - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
+ value: Standard_NC6
+ - name: dump-format
+ value: cvat_tfrecord
+ visibility: public
+ entrypoint: main
+ templates:
+ - dag:
+ tasks:
+ - name: train-model
+ template: tensorflow
+ # Uncomment the lines below if you want to send Slack notifications
+ # - arguments:
+ # artifacts:
+ # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
+ # name: metrics
+ # parameters:
+ # - name: status
+ # value: '{{tasks.train-model.status}}'
+ # dependencies:
+ # - train-model
+ # name: notify-in-slack
+ # template: slack-notify-success
+ name: main
+ - container:
+ args:
+ - |
+ apt-get update && \
+ apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
+ pip install pillow lxml Cython contextlib2 jupyter matplotlib numpy scipy boto3 pycocotools pyyaml google-cloud-storage && \
+ cd /mnt/src/tf/research && \
+            export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \
+ cd /mnt/src/train && \
+ python convert_workflow.py \
+ --extras="{{workflow.parameters.hyperparameters}}" \
+ --model="{{workflow.parameters.cvat-model}}" \
+ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
+ --sys_finetune_checkpoint={{workflow.parameters.cvat-finetune-checkpoint}}
+ command:
+ - sh
+ - -c
+ image: '{{workflow.parameters.tf-image}}'
+ volumeMounts:
+ - mountPath: /mnt/data
+ name: data
+ - mountPath: /mnt/output
+ name: output
+ workingDir: /mnt/src
+ nodeSelector:
+ beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
+ inputs:
+ artifacts:
+ - name: data
+ path: /mnt/data/datasets/
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
+ - name: models
+ path: /mnt/data/models/
+ optional: true
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-finetune-checkpoint}}'
+ - git:
+ repo: '{{workflow.parameters.source}}'
+ revision: '{{workflow.parameters.revision}}'
+ name: src
+ path: /mnt/src/tf
+ - git:
+ repo: '{{workflow.parameters.trainingsource}}'
+ revision: 'optional-artifacts'
+ name: tsrc
+ path: /mnt/src/train
+ name: tensorflow
+ outputs:
+ artifacts:
+ - name: model
+ optional: true
+ path: /mnt/output
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
+ # Uncomment the lines below if you want to send Slack notifications
+ #- container:
+ # args:
+ # - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
+ # SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
+      #    SLACK_MESSAGE=$(cat /tmp/metrics.json) ./slack-notify
+ # command:
+ # - sh
+ # - -c
+ # image: technosophos/slack-notify
+ # inputs:
+ # artifacts:
+ # - name: metrics
+ # optional: true
+ # path: /tmp/metrics.json
+ # parameters:
+ # - name: status
+ # name: slack-notify-success
+ volumeClaimTemplates:
+ - metadata:
+ creationTimestamp: null
+ name: data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
+ - metadata:
+ creationTimestamp: null
+ name: output
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
\ No newline at end of file
diff --git a/db/yaml/workflows/tf-object-detection-training/20201115134934.yaml b/db/yaml/workflows/tf-object-detection-training/20201115134934.yaml
index d47f940..b9a8622 100644
--- a/db/yaml/workflows/tf-object-detection-training/20201115134934.yaml
+++ b/db/yaml/workflows/tf-object-detection-training/20201115134934.yaml
@@ -1,221 +1,231 @@
-entrypoint: main
-arguments:
- parameters:
- - name: source
- value: https://github.com/tensorflow/models.git
- displayName: Model source code
- type: hidden
- visibility: private
+metadata:
+ name: "TF Object Detection Training"
+ kind: Workflow
+ version: 20201115134934
+ action: update
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/tf-object-detection-training/"
+ labels:
+ "created-by": "system"
+ "used-by": "cvat"
+spec:
+ entrypoint: main
+ arguments:
+ parameters:
+ - name: source
+ value: https://github.com/tensorflow/models.git
+ displayName: Model source code
+ type: hidden
+ visibility: private
- - name: trainingsource
- value: https://github.com/onepanelio/cvat-training.git
- type: hidden
- visibility: private
+ - name: trainingsource
+ value: https://github.com/onepanelio/cvat-training.git
+ type: hidden
+ visibility: private
- - name: revision
- value: v1.13.0
- type: hidden
- visibility: private
+ - name: revision
+ value: v1.13.0
+ type: hidden
+ visibility: private
- - name: cvat-annotation-path
- value: annotation-dump/sample_dataset
- displayName: Dataset path
- hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
- visibility: private
+ - name: cvat-annotation-path
+ value: annotation-dump/sample_dataset
+ displayName: Dataset path
+ hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
+ visibility: private
- - name: cvat-output-path
- value: workflow-data/output/sample_output
- hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
- displayName: Workflow output path
- visibility: private
+ - name: cvat-output-path
+ value: workflow-data/output/sample_output
+ hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
+ displayName: Workflow output path
+ visibility: private
- - name: cvat-model
- value: frcnn-res50-coco
- displayName: Model
- hint: TF Detection API's model to use for training.
- type: select.select
- visibility: public
- options:
- - name: 'Faster RCNN-ResNet 101-COCO'
- value: frcnn-res101-coco
- - name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
- value: frcnn-res101-low
- - name: 'Faster RCNN-ResNet 50-COCO'
- value: frcnn-res50-coco
- - name: 'Faster RCNN-NAS-COCO'
- value: frcnn-nas-coco
- - name: 'SSD MobileNet V1-COCO'
- value: ssd-mobilenet-v1-coco2
- - name: 'SSD MobileNet V2-COCO'
- value: ssd-mobilenet-v2-coco
- - name: 'SSDLite MobileNet-COCO'
- value: ssdlite-mobilenet-coco
+ - name: cvat-model
+ value: frcnn-res50-coco
+ displayName: Model
+ hint: TF Detection API's model to use for training.
+ type: select.select
+ visibility: public
+ options:
+ - name: 'Faster RCNN-ResNet 101-COCO'
+ value: frcnn-res101-coco
+ - name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
+ value: frcnn-res101-low
+ - name: 'Faster RCNN-ResNet 50-COCO'
+ value: frcnn-res50-coco
+ - name: 'Faster RCNN-NAS-COCO'
+ value: frcnn-nas-coco
+ - name: 'SSD MobileNet V1-COCO'
+ value: ssd-mobilenet-v1-coco2
+ - name: 'SSD MobileNet V2-COCO'
+ value: ssd-mobilenet-v2-coco
+ - name: 'SSDLite MobileNet-COCO'
+ value: ssdlite-mobilenet-coco
- - name: hyperparameters
- value: |-
- num-steps=10000
- displayName: Hyperparameters
- visibility: public
- type: textarea.textarea
- hint: "Please refer to our documentation for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
+ - name: hyperparameters
+ value: |-
+ num-steps=10000
+ displayName: Hyperparameters
+ visibility: public
+ type: textarea.textarea
+ hint: "Please refer to our documentation for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
- - name: cvat-finetune-checkpoint
- value: ''
- hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
- displayName: Checkpoint path
- visibility: public
+ - name: cvat-finetune-checkpoint
+ value: ''
+ hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
+ displayName: Checkpoint path
+ visibility: public
- - name: cvat-num-classes
- value: '81'
- hint: Number of classes
- displayName: Number of classes
- visibility: private
+ - name: cvat-num-classes
+ value: '81'
+ hint: Number of classes
+ displayName: Number of classes
+ visibility: private
- - name: tf-image
- value: tensorflow/tensorflow:1.13.1-py3
- type: select.select
- displayName: Select TensorFlow image
- visibility: public
- hint: Select the GPU image if you are running on a GPU node pool
- options:
- - name: 'TensorFlow 1.13.1 CPU Image'
- value: 'tensorflow/tensorflow:1.13.1-py3'
- - name: 'TensorFlow 1.13.1 GPU Image'
- value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
+ - name: tf-image
+ value: tensorflow/tensorflow:1.13.1-py3
+ type: select.select
+ displayName: Select TensorFlow image
+ visibility: public
+ hint: Select the GPU image if you are running on a GPU node pool
+ options:
+ - name: 'TensorFlow 1.13.1 CPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-py3'
+ - name: 'TensorFlow 1.13.1 GPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- - displayName: Node pool
- hint: Name of node pool or group to run this workflow task
- type: select.select
- name: sys-node-pool
- value: Standard_D4s_v3
- visibility: public
- required: true
- options:
- - name: 'CPU: 2, RAM: 8GB'
- value: Standard_D2s_v3
- - name: 'CPU: 4, RAM: 16GB'
- value: Standard_D4s_v3
- - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
- value: Standard_NC6
- - name: dump-format
- value: cvat_tfrecord
- visibility: public
-templates:
- - name: main
- dag:
- tasks:
- - name: train-model
- template: tensorflow
- # Uncomment the lines below if you want to send Slack notifications
- # - arguments:
- # artifacts:
- # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
- # name: metrics
- # parameters:
- # - name: status
- # value: '{{tasks.train-model.status}}'
- # dependencies:
- # - train-model
- # name: notify-in-slack
- # template: slack-notify-success
- - name: tensorflow
- container:
- args:
- - |
- apt-get update && \
- apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
- pip install pillow lxml Cython contextlib2 jupyter matplotlib numpy scipy boto3 pycocotools pyyaml google-cloud-storage && \
- cd /mnt/src/tf/research && \
- export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \
- cd /mnt/src/train && \
- python convert_workflow.py \
- --extras="{{workflow.parameters.hyperparameters}}" \
- --model="{{workflow.parameters.cvat-model}}" \
- --num_classes="{{workflow.parameters.cvat-num-classes}}" \
- --sys_finetune_checkpoint={{workflow.parameters.cvat-finetune-checkpoint}}
- command:
- - sh
- - -c
- image: '{{workflow.parameters.tf-image}}'
- volumeMounts:
- - mountPath: /mnt/data
- name: data
- - mountPath: /mnt/output
- name: output
- workingDir: /mnt/src
- nodeSelector:
- beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
- sidecars:
- - name: tensorboard
- image: tensorflow/tensorflow:2.3.0
- command: [sh, -c]
- tty: true
- args: ["tensorboard --logdir /mnt/output/"]
- ports:
- - containerPort: 6006
- name: tensorboard
- inputs:
- artifacts:
- - name: data
- path: /mnt/data/datasets/
- {{.ArtifactRepositoryType}}:
- key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
- - name: models
- path: /mnt/data/models/
- optional: true
- {{.ArtifactRepositoryType}}:
- key: '{{workflow.namespace}}/{{workflow.parameters.cvat-finetune-checkpoint}}'
- - git:
- repo: '{{workflow.parameters.source}}'
- revision: '{{workflow.parameters.revision}}'
- name: src
- path: /mnt/src/tf
- - git:
- repo: '{{workflow.parameters.trainingsource}}'
- revision: 'optional-artifacts'
- name: tsrc
- path: /mnt/src/train
- outputs:
- artifacts:
- - name: model
- optional: true
- path: /mnt/output
- {{.ArtifactRepositoryType}}:
- key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
-# Uncomment the lines below if you want to send Slack notifications
-#- container:
-# args:
-# - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
-# SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
-# SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify
-# command:
-# - sh
-# - -c
-# image: technosophos/slack-notify
-# inputs:
-# artifacts:
-# - name: metrics
-# optional: true
-# path: /tmp/metrics.json
-# parameters:
-# - name: status
-# name: slack-notify-success
-volumeClaimTemplates:
- - metadata:
- creationTimestamp: null
- name: data
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
- - metadata:
- creationTimestamp: null
- name: output
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
\ No newline at end of file
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.select
+ name: sys-node-pool
+ value: Standard_D4s_v3
+ visibility: public
+ required: true
+ options:
+ - name: 'CPU: 2, RAM: 8GB'
+ value: Standard_D2s_v3
+ - name: 'CPU: 4, RAM: 16GB'
+ value: Standard_D4s_v3
+ - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
+ value: Standard_NC6
+ - name: dump-format
+ value: cvat_tfrecord
+ visibility: public
+ templates:
+ - name: main
+ dag:
+ tasks:
+ - name: train-model
+ template: tensorflow
+ # Uncomment the lines below if you want to send Slack notifications
+ # - arguments:
+ # artifacts:
+ # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
+ # name: metrics
+ # parameters:
+ # - name: status
+ # value: '{{tasks.train-model.status}}'
+ # dependencies:
+ # - train-model
+ # name: notify-in-slack
+ # template: slack-notify-success
+ - name: tensorflow
+ container:
+ args:
+ - |
+ apt-get update && \
+ apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
+ pip install pillow lxml Cython contextlib2 jupyter matplotlib numpy scipy boto3 pycocotools pyyaml google-cloud-storage && \
+ cd /mnt/src/tf/research && \
+ export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \
+ cd /mnt/src/train && \
+ python convert_workflow.py \
+ --extras="{{workflow.parameters.hyperparameters}}" \
+ --model="{{workflow.parameters.cvat-model}}" \
+ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
+ --sys_finetune_checkpoint={{workflow.parameters.cvat-finetune-checkpoint}}
+ command:
+ - sh
+ - -c
+ image: '{{workflow.parameters.tf-image}}'
+ volumeMounts:
+ - mountPath: /mnt/data
+ name: data
+ - mountPath: /mnt/output
+ name: output
+ workingDir: /mnt/src
+ nodeSelector:
+ beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
+ sidecars:
+ - name: tensorboard
+ image: tensorflow/tensorflow:2.3.0
+ command: [sh, -c]
+ tty: true
+ args: ["tensorboard --logdir /mnt/output/"]
+ ports:
+ - containerPort: 6006
+ name: tensorboard
+ inputs:
+ artifacts:
+ - name: data
+ path: /mnt/data/datasets/
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
+ - name: models
+ path: /mnt/data/models/
+ optional: true
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-finetune-checkpoint}}'
+ - git:
+ repo: '{{workflow.parameters.source}}'
+ revision: '{{workflow.parameters.revision}}'
+ name: src
+ path: /mnt/src/tf
+ - git:
+ repo: '{{workflow.parameters.trainingsource}}'
+ revision: 'optional-artifacts'
+ name: tsrc
+ path: /mnt/src/train
+ outputs:
+ artifacts:
+ - name: model
+ optional: true
+ path: /mnt/output
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
+ # Uncomment the lines below if you want to send Slack notifications
+ #- container:
+ # args:
+ # - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
+ # SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
+ # SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify
+ # command:
+ # - sh
+ # - -c
+ # image: technosophos/slack-notify
+ # inputs:
+ # artifacts:
+ # - name: metrics
+ # optional: true
+ # path: /tmp/metrics.json
+ # parameters:
+ # - name: status
+ # name: slack-notify-success
+ volumeClaimTemplates:
+ - metadata:
+ creationTimestamp: null
+ name: data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
+ - metadata:
+ creationTimestamp: null
+ name: output
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
\ No newline at end of file
diff --git a/db/yaml/workflows/tf-object-detection-training/20201130130433.yaml b/db/yaml/workflows/tf-object-detection-training/20201130130433.yaml
index 33775d0..eba869c 100644
--- a/db/yaml/workflows/tf-object-detection-training/20201130130433.yaml
+++ b/db/yaml/workflows/tf-object-detection-training/20201130130433.yaml
@@ -1,221 +1,231 @@
-entrypoint: main
-arguments:
- parameters:
- - name: source
- value: https://github.com/tensorflow/models.git
- displayName: Model source code
- type: hidden
- visibility: private
+metadata:
+ name: "TF Object Detection Training"
+ kind: Workflow
+ version: 20201130130433
+ action: update
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/tf-object-detection-training/"
+ labels:
+ "created-by": "system"
+ "used-by": "cvat"
+spec:
+ entrypoint: main
+ arguments:
+ parameters:
+ - name: source
+ value: https://github.com/tensorflow/models.git
+ displayName: Model source code
+ type: hidden
+ visibility: private
- - name: trainingsource
- value: https://github.com/onepanelio/cvat-training.git
- type: hidden
- visibility: private
+ - name: trainingsource
+ value: https://github.com/onepanelio/cvat-training.git
+ type: hidden
+ visibility: private
- - name: revision
- value: v1.13.0
- type: hidden
- visibility: private
+ - name: revision
+ value: v1.13.0
+ type: hidden
+ visibility: private
- - name: cvat-annotation-path
- value: annotation-dump/sample_dataset
- displayName: Dataset path
- hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
- visibility: private
+ - name: cvat-annotation-path
+ value: annotation-dump/sample_dataset
+ displayName: Dataset path
+ hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
+ visibility: private
- - name: cvat-output-path
- value: workflow-data/output/sample_output
- hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
- displayName: Workflow output path
- visibility: private
+ - name: cvat-output-path
+ value: workflow-data/output/sample_output
+ hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
+ displayName: Workflow output path
+ visibility: private
- - name: cvat-model
- value: frcnn-res50-coco
- displayName: Model
- hint: TF Detection API's model to use for training.
- type: select.select
- visibility: public
- options:
- - name: 'Faster RCNN-ResNet 101-COCO'
- value: frcnn-res101-coco
- - name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
- value: frcnn-res101-low
- - name: 'Faster RCNN-ResNet 50-COCO'
- value: frcnn-res50-coco
- - name: 'Faster RCNN-NAS-COCO'
- value: frcnn-nas-coco
- - name: 'SSD MobileNet V1-COCO'
- value: ssd-mobilenet-v1-coco2
- - name: 'SSD MobileNet V2-COCO'
- value: ssd-mobilenet-v2-coco
- - name: 'SSDLite MobileNet-COCO'
- value: ssdlite-mobilenet-coco
+ - name: cvat-model
+ value: frcnn-res50-coco
+ displayName: Model
+ hint: TF Detection API's model to use for training.
+ type: select.select
+ visibility: public
+ options:
+ - name: 'Faster RCNN-ResNet 101-COCO'
+ value: frcnn-res101-coco
+ - name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
+ value: frcnn-res101-low
+ - name: 'Faster RCNN-ResNet 50-COCO'
+ value: frcnn-res50-coco
+ - name: 'Faster RCNN-NAS-COCO'
+ value: frcnn-nas-coco
+ - name: 'SSD MobileNet V1-COCO'
+ value: ssd-mobilenet-v1-coco2
+ - name: 'SSD MobileNet V2-COCO'
+ value: ssd-mobilenet-v2-coco
+ - name: 'SSDLite MobileNet-COCO'
+ value: ssdlite-mobilenet-coco
- - name: hyperparameters
- value: |-
- num-steps=10000
- displayName: Hyperparameters
- visibility: public
- type: textarea.textarea
- hint: "Please refer to our documentation for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
+ - name: hyperparameters
+ value: |-
+ num-steps=10000
+ displayName: Hyperparameters
+ visibility: public
+ type: textarea.textarea
+ hint: "Please refer to our documentation for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
- - name: cvat-finetune-checkpoint
- value: ''
- hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
- displayName: Checkpoint path
- visibility: public
+ - name: cvat-finetune-checkpoint
+ value: ''
+ hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
+ displayName: Checkpoint path
+ visibility: public
- - name: cvat-num-classes
- value: '81'
- hint: Number of classes
- displayName: Number of classes
- visibility: private
+ - name: cvat-num-classes
+ value: '81'
+ hint: Number of classes
+ displayName: Number of classes
+ visibility: private
- - name: tf-image
- value: tensorflow/tensorflow:1.13.1-py3
- type: select.select
- displayName: Select TensorFlow image
- visibility: public
- hint: Select the GPU image if you are running on a GPU node pool
- options:
- - name: 'TensorFlow 1.13.1 CPU Image'
- value: 'tensorflow/tensorflow:1.13.1-py3'
- - name: 'TensorFlow 1.13.1 GPU Image'
- value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
+ - name: tf-image
+ value: tensorflow/tensorflow:1.13.1-py3
+ type: select.select
+ displayName: Select TensorFlow image
+ visibility: public
+ hint: Select the GPU image if you are running on a GPU node pool
+ options:
+ - name: 'TensorFlow 1.13.1 CPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-py3'
+ - name: 'TensorFlow 1.13.1 GPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- - displayName: Node pool
- hint: Name of node pool or group to run this workflow task
- type: select.select
- name: sys-node-pool
- value: Standard_D4s_v3
- visibility: public
- required: true
- options:
- - name: 'CPU: 2, RAM: 8GB'
- value: Standard_D2s_v3
- - name: 'CPU: 4, RAM: 16GB'
- value: Standard_D4s_v3
- - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
- value: Standard_NC6
- - name: dump-format
- value: cvat_tfrecord
- visibility: public
-templates:
- - name: main
- dag:
- tasks:
- - name: train-model
- template: tensorflow
- # Uncomment the lines below if you want to send Slack notifications
- # - arguments:
- # artifacts:
- # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
- # name: metrics
- # parameters:
- # - name: status
- # value: '{{tasks.train-model.status}}'
- # dependencies:
- # - train-model
- # name: notify-in-slack
- # template: slack-notify-success
- - name: tensorflow
- container:
- args:
- - |
- apt-get update && \
- apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
- pip install pillow lxml Cython contextlib2 jupyter matplotlib numpy scipy boto3 pycocotools pyyaml google-cloud-storage && \
- cd /mnt/src/tf/research && \
- export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \
- cd /mnt/src/train && \
- python convert_workflow.py \
- --extras="{{workflow.parameters.hyperparameters}}" \
- --model="{{workflow.parameters.cvat-model}}" \
- --num_classes="{{workflow.parameters.cvat-num-classes}}" \
- --sys_finetune_checkpoint={{workflow.parameters.cvat-finetune-checkpoint}}
- command:
- - sh
- - -c
- image: '{{workflow.parameters.tf-image}}'
- volumeMounts:
- - mountPath: /mnt/data
- name: data
- - mountPath: /mnt/output
- name: output
- workingDir: /mnt/src
- nodeSelector:
- beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
- sidecars:
- - name: tensorboard
- image: tensorflow/tensorflow:2.3.0
- command: [sh, -c]
- tty: true
- args: ["tensorboard --logdir /mnt/output/"]
- ports:
- - containerPort: 6006
- name: tensorboard
- inputs:
- artifacts:
- - name: data
- path: /mnt/data/datasets/
- {{.ArtifactRepositoryType}}:
- key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
- - name: models
- path: /mnt/data/models/
- optional: true
- {{.ArtifactRepositoryType}}:
- key: '{{workflow.parameters.cvat-finetune-checkpoint}}'
- - git:
- repo: '{{workflow.parameters.source}}'
- revision: '{{workflow.parameters.revision}}'
- name: src
- path: /mnt/src/tf
- - git:
- repo: '{{workflow.parameters.trainingsource}}'
- revision: 'optional-artifacts'
- name: tsrc
- path: /mnt/src/train
- outputs:
- artifacts:
- - name: model
- optional: true
- path: /mnt/output
- {{.ArtifactRepositoryType}}:
- key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
-# Uncomment the lines below if you want to send Slack notifications
-#- container:
-# args:
-# - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
-# SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
-# SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify
-# command:
-# - sh
-# - -c
-# image: technosophos/slack-notify
-# inputs:
-# artifacts:
-# - name: metrics
-# optional: true
-# path: /tmp/metrics.json
-# parameters:
-# - name: status
-# name: slack-notify-success
-volumeClaimTemplates:
- - metadata:
- creationTimestamp: null
- name: data
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
- - metadata:
- creationTimestamp: null
- name: output
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
\ No newline at end of file
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.select
+ name: sys-node-pool
+ value: Standard_D4s_v3
+ visibility: public
+ required: true
+ options:
+ - name: 'CPU: 2, RAM: 8GB'
+ value: Standard_D2s_v3
+ - name: 'CPU: 4, RAM: 16GB'
+ value: Standard_D4s_v3
+ - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
+ value: Standard_NC6
+ - name: dump-format
+ value: cvat_tfrecord
+ visibility: public
+ templates:
+ - name: main
+ dag:
+ tasks:
+ - name: train-model
+ template: tensorflow
+ # Uncomment the lines below if you want to send Slack notifications
+ # - arguments:
+ # artifacts:
+ # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
+ # name: metrics
+ # parameters:
+ # - name: status
+ # value: '{{tasks.train-model.status}}'
+ # dependencies:
+ # - train-model
+ # name: notify-in-slack
+ # template: slack-notify-success
+ - name: tensorflow
+ container:
+ args:
+ - |
+ apt-get update && \
+ apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
+ pip install pillow lxml Cython contextlib2 jupyter matplotlib numpy scipy boto3 pycocotools pyyaml google-cloud-storage && \
+ cd /mnt/src/tf/research && \
+ export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \
+ cd /mnt/src/train && \
+ python convert_workflow.py \
+ --extras="{{workflow.parameters.hyperparameters}}" \
+ --model="{{workflow.parameters.cvat-model}}" \
+ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
+ --sys_finetune_checkpoint={{workflow.parameters.cvat-finetune-checkpoint}}
+ command:
+ - sh
+ - -c
+ image: '{{workflow.parameters.tf-image}}'
+ volumeMounts:
+ - mountPath: /mnt/data
+ name: data
+ - mountPath: /mnt/output
+ name: output
+ workingDir: /mnt/src
+ nodeSelector:
+ beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
+ sidecars:
+ - name: tensorboard
+ image: tensorflow/tensorflow:2.3.0
+ command: [sh, -c]
+ tty: true
+ args: ["tensorboard --logdir /mnt/output/"]
+ ports:
+ - containerPort: 6006
+ name: tensorboard
+ inputs:
+ artifacts:
+ - name: data
+ path: /mnt/data/datasets/
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
+ - name: models
+ path: /mnt/data/models/
+ optional: true
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.parameters.cvat-finetune-checkpoint}}'
+ - git:
+ repo: '{{workflow.parameters.source}}'
+ revision: '{{workflow.parameters.revision}}'
+ name: src
+ path: /mnt/src/tf
+ - git:
+ repo: '{{workflow.parameters.trainingsource}}'
+ revision: 'optional-artifacts'
+ name: tsrc
+ path: /mnt/src/train
+ outputs:
+ artifacts:
+ - name: model
+ optional: true
+ path: /mnt/output
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
+ # Uncomment the lines below if you want to send Slack notifications
+ #- container:
+ # args:
+ # - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
+ # SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
+ # SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify
+ # command:
+ # - sh
+ # - -c
+ # image: technosophos/slack-notify
+ # inputs:
+ # artifacts:
+ # - name: metrics
+ # optional: true
+ # path: /tmp/metrics.json
+ # parameters:
+ # - name: status
+ # name: slack-notify-success
+ volumeClaimTemplates:
+ - metadata:
+ creationTimestamp: null
+ name: data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
+ - metadata:
+ creationTimestamp: null
+ name: output
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
\ No newline at end of file
diff --git a/db/yaml/workflows/tf-object-detection-training/20201208155115.yaml b/db/yaml/workflows/tf-object-detection-training/20201208155115.yaml
index f252d6f..f9ee651 100644
--- a/db/yaml/workflows/tf-object-detection-training/20201208155115.yaml
+++ b/db/yaml/workflows/tf-object-detection-training/20201208155115.yaml
@@ -1,223 +1,233 @@
-entrypoint: main
-arguments:
- parameters:
- - name: source
- value: https://github.com/tensorflow/models.git
- displayName: Model source code
- type: hidden
- visibility: private
+metadata:
+ name: "TF Object Detection Training"
+ kind: Workflow
+ version: 20201208155115
+ action: update
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/tf-object-detection-training/"
+ labels:
+ "created-by": "system"
+ "used-by": "cvat"
+spec:
+ entrypoint: main
+ arguments:
+ parameters:
+ - name: source
+ value: https://github.com/tensorflow/models.git
+ displayName: Model source code
+ type: hidden
+ visibility: private
- - name: trainingsource
- value: https://github.com/onepanelio/cvat-training.git
- type: hidden
- visibility: private
+ - name: trainingsource
+ value: https://github.com/onepanelio/cvat-training.git
+ type: hidden
+ visibility: private
- - name: revision
- value: v1.13.0
- type: hidden
- visibility: private
+ - name: revision
+ value: v1.13.0
+ type: hidden
+ visibility: private
- - name: cvat-annotation-path
- value: annotation-dump/sample_dataset
- displayName: Dataset path
- hint: Path to annotated data in default object storage (i.e S3). In CVAT, this parameter will be pre-populated.
- visibility: private
+ - name: cvat-annotation-path
+ value: annotation-dump/sample_dataset
+ displayName: Dataset path
+        hint: Path to annotated data in default object storage (i.e. S3). In CVAT, this parameter will be pre-populated.
+ visibility: private
- - name: cvat-output-path
- value: workflow-data/output/sample_output
- hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
- displayName: Workflow output path
- visibility: private
+ - name: cvat-output-path
+ value: workflow-data/output/sample_output
+        hint: Path to store output artifacts in default object storage (i.e. S3). In CVAT, this parameter will be pre-populated.
+ displayName: Workflow output path
+ visibility: private
- - name: cvat-model
- value: frcnn-res50-coco
- displayName: Model
- hint: TF Detection API's model to use for training.
- type: select.select
- visibility: public
- options:
- - name: 'Faster RCNN-ResNet 101-COCO'
- value: frcnn-res101-coco
- - name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
- value: frcnn-res101-low
- - name: 'Faster RCNN-ResNet 50-COCO'
- value: frcnn-res50-coco
- - name: 'Faster RCNN-NAS-COCO'
- value: frcnn-nas-coco
- - name: 'SSD MobileNet V1-COCO'
- value: ssd-mobilenet-v1-coco2
- - name: 'SSD MobileNet V2-COCO'
- value: ssd-mobilenet-v2-coco
- - name: 'SSDLite MobileNet-COCO'
- value: ssdlite-mobilenet-coco
+ - name: cvat-model
+ value: frcnn-res50-coco
+ displayName: Model
+ hint: TF Detection API's model to use for training.
+ type: select.select
+ visibility: public
+ options:
+ - name: 'Faster RCNN-ResNet 101-COCO'
+ value: frcnn-res101-coco
+ - name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
+ value: frcnn-res101-low
+ - name: 'Faster RCNN-ResNet 50-COCO'
+ value: frcnn-res50-coco
+ - name: 'Faster RCNN-NAS-COCO'
+ value: frcnn-nas-coco
+ - name: 'SSD MobileNet V1-COCO'
+ value: ssd-mobilenet-v1-coco2
+ - name: 'SSD MobileNet V2-COCO'
+ value: ssd-mobilenet-v2-coco
+ - name: 'SSDLite MobileNet-COCO'
+ value: ssdlite-mobilenet-coco
- - name: hyperparameters
- value: |-
- num-steps=10000
- displayName: Hyperparameters
- visibility: public
- type: textarea.textarea
- hint: "Please refer to our documentation for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
+ - name: hyperparameters
+ value: |-
+ num-steps=10000
+ displayName: Hyperparameters
+ visibility: public
+ type: textarea.textarea
+ hint: "Please refer to our documentation for more information on parameters. Number of classes will be automatically populated if you had 'sys-num-classes' parameter in a workflow."
- - name: cvat-finetune-checkpoint
- value: ''
- hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
- displayName: Checkpoint path
- visibility: public
+ - name: cvat-finetune-checkpoint
+ value: ''
+        hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint to show here. Leave empty if this is the first time you're training this model.
+ displayName: Checkpoint path
+ visibility: public
- - name: cvat-num-classes
- value: '81'
- hint: Number of classes
- displayName: Number of classes
- visibility: private
+ - name: cvat-num-classes
+ value: '81'
+ hint: Number of classes
+ displayName: Number of classes
+ visibility: private
- - name: tf-image
- value: tensorflow/tensorflow:1.13.1-py3
- type: select.select
- displayName: Select TensorFlow image
- visibility: public
- hint: Select the GPU image if you are running on a GPU node pool
- options:
- - name: 'TensorFlow 1.13.1 CPU Image'
- value: 'tensorflow/tensorflow:1.13.1-py3'
- - name: 'TensorFlow 1.13.1 GPU Image'
- value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
+ - name: tf-image
+ value: tensorflow/tensorflow:1.13.1-py3
+ type: select.select
+ displayName: Select TensorFlow image
+ visibility: public
+ hint: Select the GPU image if you are running on a GPU node pool
+ options:
+ - name: 'TensorFlow 1.13.1 CPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-py3'
+ - name: 'TensorFlow 1.13.1 GPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- - displayName: Node pool
- hint: Name of node pool or group to run this workflow task
- type: select.select
- name: sys-node-pool
- value: Standard_D4s_v3
- visibility: public
- required: true
- options:
- - name: 'CPU: 2, RAM: 8GB'
- value: Standard_D2s_v3
- - name: 'CPU: 4, RAM: 16GB'
- value: Standard_D4s_v3
- - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
- value: Standard_NC6
- - name: dump-format
- value: cvat_tfrecord
- visibility: public
-templates:
- - name: main
- dag:
- tasks:
- - name: train-model
- template: tensorflow
- # Uncomment the lines below if you want to send Slack notifications
- # - arguments:
- # artifacts:
- # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
- # name: metrics
- # parameters:
- # - name: status
- # value: '{{tasks.train-model.status}}'
- # dependencies:
- # - train-model
- # name: notify-in-slack
- # template: slack-notify-success
- - name: tensorflow
- container:
- args:
- - |
- apt-get update && \
- apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
- pip install pillow lxml Cython contextlib2 jupyter matplotlib numpy scipy boto3 pycocotools pyyaml google-cloud-storage && \
- cd /mnt/src/tf/research && \
- export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \
- cd /mnt/src/train && \
- python convert_workflow.py \
- --extras="{{workflow.parameters.hyperparameters}}" \
- --model="{{workflow.parameters.cvat-model}}" \
- --num_classes="{{workflow.parameters.cvat-num-classes}}" \
- --sys_finetune_checkpoint={{workflow.parameters.cvat-finetune-checkpoint}}
- command:
- - sh
- - -c
- image: '{{workflow.parameters.tf-image}}'
- volumeMounts:
- - mountPath: /mnt/data
- name: data
- - mountPath: /mnt/output
- name: output
- workingDir: /mnt/src
- nodeSelector:
- beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
- sidecars:
- - name: tensorboard
- image: tensorflow/tensorflow:2.3.0
- command: [sh, -c]
- env:
- - name: ONEPANEL_INTERACTIVE_SIDECAR
- value: 'true'
- args: ["tensorboard --logdir /mnt/output/"]
- ports:
- - containerPort: 6006
- name: tensorboard
- inputs:
- artifacts:
- - name: data
- path: /mnt/data/datasets/
- {{.ArtifactRepositoryType}}:
- key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
- - name: models
- path: /mnt/data/models/
- optional: true
- {{.ArtifactRepositoryType}}:
- key: '{{workflow.parameters.cvat-finetune-checkpoint}}'
- - git:
- repo: '{{workflow.parameters.source}}'
- revision: '{{workflow.parameters.revision}}'
- name: src
- path: /mnt/src/tf
- - git:
- repo: '{{workflow.parameters.trainingsource}}'
- revision: 'optional-artifacts'
- name: tsrc
- path: /mnt/src/train
- outputs:
- artifacts:
- - name: model
- optional: true
- path: /mnt/output
- {{.ArtifactRepositoryType}}:
- key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
-# Uncomment the lines below if you want to send Slack notifications
-#- container:
-# args:
-# - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
-# SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
-# SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify
-# command:
-# - sh
-# - -c
-# image: technosophos/slack-notify
-# inputs:
-# artifacts:
-# - name: metrics
-# optional: true
-# path: /tmp/metrics.json
-# parameters:
-# - name: status
-# name: slack-notify-success
-volumeClaimTemplates:
- - metadata:
- creationTimestamp: null
- name: data
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
- - metadata:
- creationTimestamp: null
- name: output
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
\ No newline at end of file
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.select
+ name: sys-node-pool
+ value: Standard_D4s_v3
+ visibility: public
+ required: true
+ options:
+ - name: 'CPU: 2, RAM: 8GB'
+ value: Standard_D2s_v3
+ - name: 'CPU: 4, RAM: 16GB'
+ value: Standard_D4s_v3
+ - name: 'GPU: 1xK80, CPU: 6, RAM: 56GB'
+ value: Standard_NC6
+ - name: dump-format
+ value: cvat_tfrecord
+ visibility: public
+ templates:
+ - name: main
+ dag:
+ tasks:
+ - name: train-model
+ template: tensorflow
+ # Uncomment the lines below if you want to send Slack notifications
+ # - arguments:
+ # artifacts:
+ # - from: '{{tasks.train-model.outputs.artifacts.sys-metrics}}'
+ # name: metrics
+ # parameters:
+ # - name: status
+ # value: '{{tasks.train-model.status}}'
+ # dependencies:
+ # - train-model
+ # name: notify-in-slack
+ # template: slack-notify-success
+ - name: tensorflow
+ container:
+ args:
+ - |
+ apt-get update && \
+ apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
+ pip install pillow lxml Cython contextlib2 jupyter matplotlib numpy scipy boto3 pycocotools pyyaml google-cloud-storage && \
+ cd /mnt/src/tf/research && \
+ export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \
+ cd /mnt/src/train && \
+ python convert_workflow.py \
+ --extras="{{workflow.parameters.hyperparameters}}" \
+ --model="{{workflow.parameters.cvat-model}}" \
+ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
+ --sys_finetune_checkpoint={{workflow.parameters.cvat-finetune-checkpoint}}
+ command:
+ - sh
+ - -c
+ image: '{{workflow.parameters.tf-image}}'
+ volumeMounts:
+ - mountPath: /mnt/data
+ name: data
+ - mountPath: /mnt/output
+ name: output
+ workingDir: /mnt/src
+ nodeSelector:
+ beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
+ sidecars:
+ - name: tensorboard
+ image: tensorflow/tensorflow:2.3.0
+ command: [sh, -c]
+ env:
+ - name: ONEPANEL_INTERACTIVE_SIDECAR
+ value: 'true'
+ args: ["tensorboard --logdir /mnt/output/"]
+ ports:
+ - containerPort: 6006
+ name: tensorboard
+ inputs:
+ artifacts:
+ - name: data
+ path: /mnt/data/datasets/
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
+ - name: models
+ path: /mnt/data/models/
+ optional: true
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.parameters.cvat-finetune-checkpoint}}'
+ - git:
+ repo: '{{workflow.parameters.source}}'
+ revision: '{{workflow.parameters.revision}}'
+ name: src
+ path: /mnt/src/tf
+ - git:
+ repo: '{{workflow.parameters.trainingsource}}'
+ revision: 'optional-artifacts'
+ name: tsrc
+ path: /mnt/src/train
+ outputs:
+ artifacts:
+ - name: model
+ optional: true
+ path: /mnt/output
+ "{{.ArtifactRepositoryType}}":
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
+ # Uncomment the lines below if you want to send Slack notifications
+ #- container:
+ # args:
+ # - SLACK_USERNAME=Onepanel SLACK_TITLE="{{workflow.name}} {{inputs.parameters.status}}"
+ # SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd
+ # SLACK_MESSAGE=$(cat /tmp/metrics.json)} ./slack-notify
+ # command:
+ # - sh
+ # - -c
+ # image: technosophos/slack-notify
+ # inputs:
+ # artifacts:
+ # - name: metrics
+ # optional: true
+ # path: /tmp/metrics.json
+ # parameters:
+ # - name: status
+ # name: slack-notify-success
+ volumeClaimTemplates:
+ - metadata:
+ creationTimestamp: null
+ name: data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
+ - metadata:
+ creationTimestamp: null
+ name: output
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
\ No newline at end of file
diff --git a/db/yaml/workflows/tf-object-detection-training/20201223202929.yaml b/db/yaml/workflows/tf-object-detection-training/20201223202929.yaml
index 32e0896..36b71f8 100644
--- a/db/yaml/workflows/tf-object-detection-training/20201223202929.yaml
+++ b/db/yaml/workflows/tf-object-detection-training/20201223202929.yaml
@@ -1,165 +1,174 @@
-# source: https://github.com/onepanelio/templates/blob/master/workflows/tf-object-detection-training/
-arguments:
- parameters:
- - name: cvat-annotation-path
- value: annotation-dump/sample_dataset
- displayName: Dataset path
- hint: Path to annotated data (TFRecord format) in default object storage. In CVAT, this parameter will be pre-populated.
- visibility: internal
+metadata:
+ name: "TF Object Detection Training"
+ kind: Workflow
+ version: 20201223202929
+ action: update
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/tf-object-detection-training/"
+ labels:
+ "created-by": "system"
+ "used-by": "cvat"
+spec:
+ arguments:
+ parameters:
+ - name: cvat-annotation-path
+ value: annotation-dump/sample_dataset
+ displayName: Dataset path
+ hint: Path to annotated data (TFRecord format) in default object storage. In CVAT, this parameter will be pre-populated.
+ visibility: internal
- - name: cvat-output-path
- value: workflow-data/output/sample_output
- hint: Path to store output artifacts in default object storage (i.e s3). In CVAT, this parameter will be pre-populated.
- displayName: Workflow output path
- visibility: internal
+ - name: cvat-output-path
+ value: workflow-data/output/sample_output
+      hint: Path to store output artifacts in default object storage (i.e. S3). In CVAT, this parameter will be pre-populated.
+ displayName: Workflow output path
+ visibility: internal
- - name: cvat-model
- value: frcnn-res50-coco
- displayName: Model
- hint: TF Detection API's model to use for training.
- type: select.select
- visibility: public
- options:
- - name: 'Faster RCNN-ResNet 101-COCO'
- value: frcnn-res101-coco
- - name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
- value: frcnn-res101-low
- - name: 'Faster RCNN-ResNet 50-COCO'
- value: frcnn-res50-coco
- - name: 'Faster RCNN-NAS-COCO'
- value: frcnn-nas-coco
- - name: 'SSD MobileNet V1-COCO'
- value: ssd-mobilenet-v1-coco2
- - name: 'SSD MobileNet V2-COCO'
- value: ssd-mobilenet-v2-coco
- - name: 'SSDLite MobileNet-COCO'
- value: ssdlite-mobilenet-coco
+ - name: cvat-model
+ value: frcnn-res50-coco
+ displayName: Model
+ hint: TF Detection API's model to use for training.
+ type: select.select
+ visibility: public
+ options:
+ - name: 'Faster RCNN-ResNet 101-COCO'
+ value: frcnn-res101-coco
+ - name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
+ value: frcnn-res101-low
+ - name: 'Faster RCNN-ResNet 50-COCO'
+ value: frcnn-res50-coco
+ - name: 'Faster RCNN-NAS-COCO'
+ value: frcnn-nas-coco
+ - name: 'SSD MobileNet V1-COCO'
+ value: ssd-mobilenet-v1-coco2
+ - name: 'SSD MobileNet V2-COCO'
+ value: ssd-mobilenet-v2-coco
+ - name: 'SSDLite MobileNet-COCO'
+ value: ssdlite-mobilenet-coco
- - name: hyperparameters
- value: |-
- num-steps=10000
- displayName: Hyperparameters
- visibility: public
- type: textarea.textarea
- hint: 'See documentation for more information on parameters.'
+ - name: hyperparameters
+ value: |-
+ num-steps=10000
+ displayName: Hyperparameters
+ visibility: public
+ type: textarea.textarea
+ hint: 'See documentation for more information on parameters.'
- - name: cvat-finetune-checkpoint
- value: ''
- hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint show here. Leave empty if this is the first time you're training this model.
- displayName: Checkpoint path
- visibility: public
+ - name: cvat-finetune-checkpoint
+ value: ''
+      hint: Select the last fine-tune checkpoint for this model. It may take up to 5 minutes for a recent checkpoint to show here. Leave empty if this is the first time you're training this model.
+ displayName: Checkpoint path
+ visibility: public
- - name: cvat-num-classes
- value: '10'
- hint: Number of classes. In CVAT, this parameter will be pre-populated.
- displayName: Number of classes
- visibility: internal
+ - name: cvat-num-classes
+ value: '10'
+ hint: Number of classes. In CVAT, this parameter will be pre-populated.
+ displayName: Number of classes
+ visibility: internal
- - name: tf-image
- value: tensorflow/tensorflow:1.13.1-py3
- type: select.select
- displayName: Select TensorFlow image
- visibility: public
- hint: Select the GPU image if you are running on a GPU node pool
- options:
- - name: 'TensorFlow 1.13.1 CPU Image'
- value: 'tensorflow/tensorflow:1.13.1-py3'
- - name: 'TensorFlow 1.13.1 GPU Image'
- value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
+ - name: tf-image
+ value: tensorflow/tensorflow:1.13.1-py3
+ type: select.select
+ displayName: Select TensorFlow image
+ visibility: public
+ hint: Select the GPU image if you are running on a GPU node pool
+ options:
+ - name: 'TensorFlow 1.13.1 CPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-py3'
+ - name: 'TensorFlow 1.13.1 GPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- - name: dump-format
- value: cvat_tfrecord
- visibility: public
+ - name: dump-format
+ value: cvat_tfrecord
+ visibility: public
- - displayName: Node pool
- hint: Name of node pool or group to run this workflow task
- type: select.nodepool
- name: sys-node-pool
- value: {{.DefaultNodePoolOption}}
- visibility: public
- required: true
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.nodepool
+ name: sys-node-pool
+ value: "{{.DefaultNodePoolOption}}"
+ visibility: public
+ required: true
-entrypoint: main
-templates:
- - dag:
- tasks:
- - name: train-model
- template: tensorflow
- name: main
- - container:
- args:
- - |
- apt-get update && \
- apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
- pip install pillow lxml Cython contextlib2 matplotlib numpy scipy pycocotools pyyaml test-generator && \
- cd /mnt/src/tf/research && \
- export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \
- mkdir -p /mnt/src/protoc && \
- wget -P /mnt/src/protoc https://github.com/protocolbuffers/protobuf/releases/download/v3.10.1/protoc-3.10.1-linux-x86_64.zip && \
- cd /mnt/src/protoc/ && \
- unzip protoc-3.10.1-linux-x86_64.zip && \
- cd /mnt/src/tf/research/ && \
- /mnt/src/protoc/bin/protoc object_detection/protos/*.proto --python_out=. && \
- cd /mnt/src/train/workflows/tf-object-detection-training && \
- python main.py \
- --extras="{{workflow.parameters.hyperparameters}}" \
- --model="{{workflow.parameters.cvat-model}}" \
- --num_classes="{{workflow.parameters.cvat-num-classes}}" \
- --sys_finetune_checkpoint={{workflow.parameters.cvat-finetune-checkpoint}}
- command:
- - sh
- - -c
- image: '{{workflow.parameters.tf-image}}'
- volumeMounts:
- - mountPath: /mnt/data
- name: data
- - mountPath: /mnt/output
- name: output
- workingDir: /mnt/src
- nodeSelector:
- beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
- inputs:
- artifacts:
- - name: data
- path: /mnt/data/datasets/
- s3:
- key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
- - name: models
- path: /mnt/data/models/
- optional: true
- s3:
- key: '{{workflow.parameters.cvat-finetune-checkpoint}}'
- - git:
- repo: https://github.com/tensorflow/models.git
- revision: v1.13.0
- name: src
- path: /mnt/src/tf
- - git:
- repo: https://github.com/onepanelio/templates.git
- name: tsrc
- path: /mnt/src/train
- name: tensorflow
- outputs:
- artifacts:
- - name: model
- optional: true
- path: /mnt/output
- s3:
- key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
-volumeClaimTemplates:
- - metadata:
- name: data
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
- - metadata:
- name: output
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
+ entrypoint: main
+ templates:
+ - dag:
+ tasks:
+ - name: train-model
+ template: tensorflow
+ name: main
+ - container:
+ args:
+ - |
+ apt-get update && \
+ apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
+ pip install pillow lxml Cython contextlib2 matplotlib numpy scipy pycocotools pyyaml test-generator && \
+ cd /mnt/src/tf/research && \
+ export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \
+ mkdir -p /mnt/src/protoc && \
+ wget -P /mnt/src/protoc https://github.com/protocolbuffers/protobuf/releases/download/v3.10.1/protoc-3.10.1-linux-x86_64.zip && \
+ cd /mnt/src/protoc/ && \
+ unzip protoc-3.10.1-linux-x86_64.zip && \
+ cd /mnt/src/tf/research/ && \
+ /mnt/src/protoc/bin/protoc object_detection/protos/*.proto --python_out=. && \
+ cd /mnt/src/train/workflows/tf-object-detection-training && \
+ python main.py \
+ --extras="{{workflow.parameters.hyperparameters}}" \
+ --model="{{workflow.parameters.cvat-model}}" \
+ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
+ --sys_finetune_checkpoint={{workflow.parameters.cvat-finetune-checkpoint}}
+ command:
+ - sh
+ - -c
+ image: '{{workflow.parameters.tf-image}}'
+ volumeMounts:
+ - mountPath: /mnt/data
+ name: data
+ - mountPath: /mnt/output
+ name: output
+ workingDir: /mnt/src
+ nodeSelector:
+ beta.kubernetes.io/instance-type: '{{workflow.parameters.sys-node-pool}}'
+ inputs:
+ artifacts:
+ - name: data
+ path: /mnt/data/datasets/
+ s3:
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-annotation-path}}'
+ - name: models
+ path: /mnt/data/models/
+ optional: true
+ s3:
+ key: '{{workflow.parameters.cvat-finetune-checkpoint}}'
+ - git:
+ repo: https://github.com/tensorflow/models.git
+ revision: v1.13.0
+ name: src
+ path: /mnt/src/tf
+ - git:
+ repo: https://github.com/onepanelio/templates.git
+ name: tsrc
+ path: /mnt/src/train
+ name: tensorflow
+ outputs:
+ artifacts:
+ - name: model
+ optional: true
+ path: /mnt/output
+ s3:
+ key: '{{workflow.namespace}}/{{workflow.parameters.cvat-output-path}}/{{workflow.name}}'
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
+ - metadata:
+ name: output
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
diff --git a/db/yaml/workflows/tf-object-detection-training/20210118175809.yaml b/db/yaml/workflows/tf-object-detection-training/20210118175809.yaml
index a09548f..b1a716f 100644
--- a/db/yaml/workflows/tf-object-detection-training/20210118175809.yaml
+++ b/db/yaml/workflows/tf-object-detection-training/20210118175809.yaml
@@ -1,260 +1,269 @@
-# source: https://github.com/onepanelio/templates/blob/master/workflows/tf-object-detection-training/
-arguments:
- parameters:
- - name: cvat-annotation-path
- value: 'artifacts/{{workflow.namespace}}/annotations/'
- hint: Path to annotated data (COCO format) in default object storage. In CVAT, this parameter will be pre-populated.
- displayName: Dataset path
- visibility: internal
+metadata:
+ name: "TF Object Detection Training"
+ kind: Workflow
+ version: 20210118175809
+ action: update
+ source: "https://github.com/onepanelio/templates/blob/master/workflows/tf-object-detection-training/"
+ labels:
+ "created-by": "system"
+ "used-by": "cvat"
+spec:
+ arguments:
+ parameters:
+ - name: cvat-annotation-path
+ value: 'artifacts/{{workflow.namespace}}/annotations/'
+ hint: Path to annotated data (COCO format) in default object storage. In CVAT, this parameter will be pre-populated.
+ displayName: Dataset path
+ visibility: internal
- - name: val-split
- value: 10
- displayName: Validation split size
- type: input.number
- visibility: public
- hint: Enter validation set size in percentage of full dataset. (0 - 100)
+ - name: val-split
+ value: 10
+ displayName: Validation split size
+ type: input.number
+ visibility: public
+ hint: Enter validation set size in percentage of full dataset. (0 - 100)
- - name: num-augmentation-cycles
- value: 1
- displayName: Number of augmentation cycles
- type: input.number
- visibility: public
- hint: Number of augmentation cycles, zero means no data augmentation
+ - name: num-augmentation-cycles
+ value: 1
+ displayName: Number of augmentation cycles
+ type: input.number
+ visibility: public
+ hint: Number of augmentation cycles, zero means no data augmentation
- - name: preprocessing-parameters
- value: |-
- RandomBrightnessContrast:
- p: 0.2
- GaussianBlur:
- p: 0.3
- GaussNoise:
- p: 0.4
- HorizontalFlip:
- p: 0.5
- VerticalFlip:
- p: 0.3
- displayName: Preprocessing parameters
- visibility: public
- type: textarea.textarea
- hint: 'See documentation for more information on parameters.'
+ - name: preprocessing-parameters
+ value: |-
+ RandomBrightnessContrast:
+ p: 0.2
+ GaussianBlur:
+ p: 0.3
+ GaussNoise:
+ p: 0.4
+ HorizontalFlip:
+ p: 0.5
+ VerticalFlip:
+ p: 0.3
+ displayName: Preprocessing parameters
+ visibility: public
+ type: textarea.textarea
+ hint: 'See documentation for more information on parameters.'
- - name: cvat-model
- value: frcnn-res50-coco
- displayName: Model
- hint: TF Detection API's model to use for training.
- type: select.select
- visibility: public
- options:
- - name: 'Faster RCNN-ResNet 101-COCO'
- value: frcnn-res101-coco
- - name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
- value: frcnn-res101-low
- - name: 'Faster RCNN-ResNet 50-COCO'
- value: frcnn-res50-coco
- - name: 'Faster RCNN-NAS-COCO'
- value: frcnn-nas-coco
- - name: 'SSD MobileNet V1-COCO'
- value: ssd-mobilenet-v1-coco2
- - name: 'SSD MobileNet V2-COCO'
- value: ssd-mobilenet-v2-coco
- - name: 'SSDLite MobileNet-COCO'
- value: ssdlite-mobilenet-coco
+ - name: cvat-model
+ value: frcnn-res50-coco
+ displayName: Model
+ hint: TF Detection API's model to use for training.
+ type: select.select
+ visibility: public
+ options:
+ - name: 'Faster RCNN-ResNet 101-COCO'
+ value: frcnn-res101-coco
+ - name: 'Faster RCNN-ResNet 101-Low Proposal-COCO'
+ value: frcnn-res101-low
+ - name: 'Faster RCNN-ResNet 50-COCO'
+ value: frcnn-res50-coco
+ - name: 'Faster RCNN-NAS-COCO'
+ value: frcnn-nas-coco
+ - name: 'SSD MobileNet V1-COCO'
+ value: ssd-mobilenet-v1-coco2
+ - name: 'SSD MobileNet V2-COCO'
+ value: ssd-mobilenet-v2-coco
+ - name: 'SSDLite MobileNet-COCO'
+ value: ssdlite-mobilenet-coco
- - name: cvat-num-classes
- value: '10'
- hint: Number of classes. In CVAT, this parameter will be pre-populated.
- displayName: Number of classes
- visibility: internal
+ - name: cvat-num-classes
+ value: '10'
+ hint: Number of classes. In CVAT, this parameter will be pre-populated.
+ displayName: Number of classes
+ visibility: internal
- - name: hyperparameters
- value: |-
- num_steps: 10000
- displayName: Hyperparameters
- visibility: public
- type: textarea.textarea
- hint: 'See documentation for more information on parameters.'
+ - name: hyperparameters
+ value: |-
+ num_steps: 10000
+ displayName: Hyperparameters
+ visibility: public
+ type: textarea.textarea
+ hint: 'See documentation for more information on parameters.'
- - name: dump-format
- value: cvat_coco
- displayName: CVAT dump format
- visibility: private
+ - name: dump-format
+ value: cvat_coco
+ displayName: CVAT dump format
+ visibility: private
- - name: cvat-finetune-checkpoint
- value: ''
- hint: Path to the last fine-tune checkpoint for this model in default object storage. Leave empty if this is the first time you're training this model.
- displayName: Checkpoint path
- visibility: public
+ - name: cvat-finetune-checkpoint
+ value: ''
+ hint: Path to the last fine-tune checkpoint for this model in default object storage. Leave empty if this is the first time you're training this model.
+ displayName: Checkpoint path
+ visibility: public
- - name: tf-image
- value: tensorflow/tensorflow:1.13.1-py3
- type: select.select
- displayName: Select TensorFlow image
- visibility: public
- hint: Select the GPU image if you are running on a GPU node pool
- options:
- - name: 'TensorFlow 1.13.1 CPU Image'
- value: 'tensorflow/tensorflow:1.13.1-py3'
- - name: 'TensorFlow 1.13.1 GPU Image'
- value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
+ - name: tf-image
+ value: tensorflow/tensorflow:1.13.1-py3
+ type: select.select
+ displayName: Select TensorFlow image
+ visibility: public
+ hint: Select the GPU image if you are running on a GPU node pool
+ options:
+ - name: 'TensorFlow 1.13.1 CPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-py3'
+ - name: 'TensorFlow 1.13.1 GPU Image'
+ value: 'tensorflow/tensorflow:1.13.1-gpu-py3'
- - displayName: Node pool
- hint: Name of node pool or group to run this workflow task
- type: select.nodepool
- name: sys-node-pool
- value: {{.DefaultNodePoolOption}}
- visibility: public
- required: true
+ - displayName: Node pool
+ hint: Name of node pool or group to run this workflow task
+ type: select.nodepool
+ name: sys-node-pool
+ value: "{{.DefaultNodePoolOption}}"
+ visibility: public
+ required: true
-entrypoint: main
-templates:
- - dag:
- tasks:
- - name: preprocessing
- template: preprocessing
- - name: train-model
- template: tensorflow
- dependencies: [preprocessing]
- arguments:
- artifacts:
- - name: data
- from: "{{tasks.preprocessing.outputs.artifacts.processed-data}}"
- name: main
- - container:
- args:
- - |
- apt-get update && \
- apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
- pip install --upgrade pip && \
- pip install pillow lxml Cython contextlib2 matplotlib numpy scipy pycocotools pyyaml test-generator && \
- cd /mnt/src/tf/research && \
- export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \
- mkdir -p /mnt/src/protoc && \
- wget -P /mnt/src/protoc https://github.com/protocolbuffers/protobuf/releases/download/v3.10.1/protoc-3.10.1-linux-x86_64.zip && \
- cd /mnt/src/protoc/ && \
- unzip protoc-3.10.1-linux-x86_64.zip && \
- cd /mnt/src/tf/research/ && \
- /mnt/src/protoc/bin/protoc object_detection/protos/*.proto --python_out=. && \
- cd /mnt/src/train/workflows/tf-object-detection-training && \
- python main.py \
- --extras="{{workflow.parameters.hyperparameters}}" \
- --model="{{workflow.parameters.cvat-model}}" \
- --num_classes="{{workflow.parameters.cvat-num-classes}}" \
- --sys_finetune_checkpoint="{{workflow.parameters.cvat-finetune-checkpoint}}" \
- --from_preprocessing=True
- command:
- - sh
- - -c
- image: '{{workflow.parameters.tf-image}}'
- volumeMounts:
- - mountPath: /mnt/data
- name: processed-data
- - mountPath: /mnt/output
- name: output
- workingDir: /mnt/src
- nodeSelector:
- {{.NodePoolLabel}}: '{{workflow.parameters.sys-node-pool}}'
- inputs:
- artifacts:
- - name: data
- path: /mnt/data/datasets/
- - name: models
- path: /mnt/data/models/
- optional: true
- s3:
- key: '{{workflow.parameters.cvat-finetune-checkpoint}}'
- - git:
- repo: https://github.com/tensorflow/models.git
- revision: v1.13.0
- name: src
- path: /mnt/src/tf
- - git:
- repo: https://github.com/onepanelio/templates.git
- revision: v0.18.0
- name: tsrc
- path: /mnt/src/train
- name: tensorflow
- outputs:
- artifacts:
- - name: model
- optional: true
- path: /mnt/output
- sidecars:
- - name: tensorboard
- image: '{{workflow.parameters.tf-image}}'
+ entrypoint: main
+ templates:
+ - dag:
+ tasks:
+ - name: preprocessing
+ template: preprocessing
+ - name: train-model
+ template: tensorflow
+ dependencies: [preprocessing]
+ arguments:
+ artifacts:
+ - name: data
+ from: "{{tasks.preprocessing.outputs.artifacts.processed-data}}"
+ name: main
+ - container:
+ args:
+ - |
+ apt-get update && \
+ apt-get install -y python3-pip git wget unzip libglib2.0-0 libsm6 libxext6 libxrender-dev && \
+ pip install --upgrade pip && \
+ pip install pillow lxml Cython contextlib2 matplotlib numpy scipy pycocotools pyyaml test-generator && \
+ cd /mnt/src/tf/research && \
+ export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim && \
+ mkdir -p /mnt/src/protoc && \
+ wget -P /mnt/src/protoc https://github.com/protocolbuffers/protobuf/releases/download/v3.10.1/protoc-3.10.1-linux-x86_64.zip && \
+ cd /mnt/src/protoc/ && \
+ unzip protoc-3.10.1-linux-x86_64.zip && \
+ cd /mnt/src/tf/research/ && \
+ /mnt/src/protoc/bin/protoc object_detection/protos/*.proto --python_out=. && \
+ cd /mnt/src/train/workflows/tf-object-detection-training && \
+ python main.py \
+ --extras="{{workflow.parameters.hyperparameters}}" \
+ --model="{{workflow.parameters.cvat-model}}" \
+ --num_classes="{{workflow.parameters.cvat-num-classes}}" \
+ --sys_finetune_checkpoint="{{workflow.parameters.cvat-finetune-checkpoint}}" \
+ --from_preprocessing=True
command:
- sh
- - '-c'
- env:
- - name: ONEPANEL_INTERACTIVE_SIDECAR
- value: 'true'
+ - -c
+ image: '{{workflow.parameters.tf-image}}'
+ volumeMounts:
+ - mountPath: /mnt/data
+ name: processed-data
+ - mountPath: /mnt/output
+ name: output
+ workingDir: /mnt/src
+ nodeSelector:
+ "{{.NodePoolLabel}}": '{{workflow.parameters.sys-node-pool}}'
+ inputs:
+ artifacts:
+ - name: data
+ path: /mnt/data/datasets/
+ - name: models
+ path: /mnt/data/models/
+ optional: true
+ s3:
+ key: '{{workflow.parameters.cvat-finetune-checkpoint}}'
+ - git:
+ repo: https://github.com/tensorflow/models.git
+ revision: v1.13.0
+ name: src
+ path: /mnt/src/tf
+ - git:
+ repo: https://github.com/onepanelio/templates.git
+ revision: v0.18.0
+ name: tsrc
+ path: /mnt/src/train
+ name: tensorflow
+ outputs:
+ artifacts:
+ - name: model
+ optional: true
+ path: /mnt/output
+ sidecars:
+ - name: tensorboard
+ image: '{{workflow.parameters.tf-image}}'
+ command:
+ - sh
+ - '-c'
+ env:
+ - name: ONEPANEL_INTERACTIVE_SIDECAR
+ value: 'true'
+ args:
+ # Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
+ - tensorboard --logdir /mnt/output/checkpoints/
+ ports:
+ - containerPort: 6006
+ name: tensorboard
+ - container:
args:
- # Read logs from /mnt/output - this directory is auto-mounted from volumeMounts
- - tensorboard --logdir /mnt/output/checkpoints/
- ports:
- - containerPort: 6006
- name: tensorboard
- - container:
- args:
- - |
- pip install --upgrade pip &&\
- pip install opencv-python albumentations tqdm pyyaml pycocotools && \
- cd /mnt/src/preprocessing/workflows/albumentations-preprocessing && \
- python -u main.py \
- --data_aug_params="{{workflow.parameters.preprocessing-parameters}}" \
- --format="tfrecord" \
- --val_split={{workflow.parameters.val-split}} \
- --aug_steps={{workflow.parameters.num-augmentation-cycles}}
- command:
- - sh
- - -c
- image: '{{workflow.parameters.tf-image}}'
- volumeMounts:
- - mountPath: /mnt/data
- name: data
- - mountPath: /mnt/output
- name: processed-data
- workingDir: /mnt/src
- nodeSelector:
- {{.NodePoolLabel}}: '{{workflow.parameters.sys-node-pool}}'
- inputs:
- artifacts:
- - name: data
- path: /mnt/data/datasets/
- s3:
- key: '{{workflow.parameters.cvat-annotation-path}}'
- - git:
- repo: https://github.com/onepanelio/templates.git
- revision: v0.18.0
- name: src
- path: /mnt/src/preprocessing
- name: preprocessing
- outputs:
- artifacts:
- - name: processed-data
- optional: true
- path: /mnt/output
-volumeClaimTemplates:
- - metadata:
- name: data
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
- - metadata:
- name: processed-data
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
- - metadata:
- name: output
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 200Gi
+ - |
+ pip install --upgrade pip &&\
+ pip install opencv-python albumentations tqdm pyyaml pycocotools && \
+ cd /mnt/src/preprocessing/workflows/albumentations-preprocessing && \
+ python -u main.py \
+ --data_aug_params="{{workflow.parameters.preprocessing-parameters}}" \
+ --format="tfrecord" \
+ --val_split={{workflow.parameters.val-split}} \
+ --aug_steps={{workflow.parameters.num-augmentation-cycles}}
+ command:
+ - sh
+ - -c
+ image: '{{workflow.parameters.tf-image}}'
+ volumeMounts:
+ - mountPath: /mnt/data
+ name: data
+ - mountPath: /mnt/output
+ name: processed-data
+ workingDir: /mnt/src
+ nodeSelector:
+ "{{.NodePoolLabel}}": '{{workflow.parameters.sys-node-pool}}'
+ inputs:
+ artifacts:
+ - name: data
+ path: /mnt/data/datasets/
+ s3:
+ key: '{{workflow.parameters.cvat-annotation-path}}'
+ - git:
+ repo: https://github.com/onepanelio/templates.git
+ revision: v0.18.0
+ name: src
+ path: /mnt/src/preprocessing
+ name: preprocessing
+ outputs:
+ artifacts:
+ - name: processed-data
+ optional: true
+ path: /mnt/output
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
+ - metadata:
+ name: processed-data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
+ - metadata:
+ name: output
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
diff --git a/db/yaml/workspaces/cvat/20200528140124.yaml b/db/yaml/workspaces/cvat/20200528140124.yaml
new file mode 100644
index 0000000..1a8494b
--- /dev/null
+++ b/db/yaml/workspaces/cvat/20200528140124.yaml
@@ -0,0 +1,105 @@
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20200528140124
+ action: create
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ # Docker containers that are part of the Workspace
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:v0.7.0
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /home/django/data
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: data
+ mountPath: /home/django/data
+ - name: keys
+ mountPath: /home/django/keys
+ - name: logs
+ mountPath: /home/django/logs
+ - name: models
+ mountPath: /home/django/models
+ - name: cvat-ui
+ image: onepanel/cvat-ui:v0.7.0
+ ports:
+ - containerPort: 80
+ name: http
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ routes:
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ # DAG Workflow to be executed once a Workspace action completes
+ # postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/cvat/20200626113635.yaml b/db/yaml/workspaces/cvat/20200626113635.yaml
new file mode 100644
index 0000000..cd16ec6
--- /dev/null
+++ b/db/yaml/workspaces/cvat/20200626113635.yaml
@@ -0,0 +1,116 @@
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20200626113635
+ action: update
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ # Docker containers that are part of the Workspace
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:v0.7.6
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /home/django/data
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: data
+ mountPath: /home/django/data
+ - name: keys
+ mountPath: /home/django/keys
+ - name: logs
+ mountPath: /home/django/logs
+ - name: models
+ mountPath: /home/django/models
+ - name: share
+ mountPath: /home/django/share
+ - name: cvat-ui
+ image: onepanel/cvat-ui:v0.7.5
+ ports:
+ - containerPort: 80
+ name: http
+ - name: filesyncer
+ image: onepanel/filesyncer:v0.0.4
+ command: ['python3', 'main.py']
+ volumeMounts:
+ - name: share
+ mountPath: /mnt/share
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ routes:
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ timeout: 600s
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ timeout: 600s
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ # Uncomment the lines below if you want to send Slack notifications
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/cvat/20200704151301.yaml b/db/yaml/workspaces/cvat/20200704151301.yaml
new file mode 100644
index 0000000..46dff39
--- /dev/null
+++ b/db/yaml/workspaces/cvat/20200704151301.yaml
@@ -0,0 +1,118 @@
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20200704151301
+ action: update
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ # Docker containers that are part of the Workspace
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:v0.7.10-stable
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /home/django/data
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: data
+ mountPath: /home/django/data
+ - name: keys
+ mountPath: /home/django/keys
+ - name: logs
+ mountPath: /home/django/logs
+ - name: models
+ mountPath: /home/django/models
+ - name: share
+ mountPath: /home/django/share
+ - name: cvat-ui
+ image: onepanel/cvat-ui:v0.7.10-stable
+ ports:
+ - containerPort: 80
+ name: http
+ # Uncomment following lines to enable S3 FileSyncer
+ # Refer to https://docs.onepanel.ai/docs/getting-started/use-cases/computervision/annotation/cvat/cvat_quick_guide#setting-up-environment-variables
+ #- name: filesyncer
+ # image: onepanel/filesyncer:v0.0.4
+ # command: ['python3', 'main.py']
+ # volumeMounts:
+ # - name: share
+ # mountPath: /mnt/share
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ routes:
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ timeout: 600s
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ timeout: 600s
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ # Uncomment the lines below if you want to send Slack notifications
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/cvat/20200724220450.yaml b/db/yaml/workspaces/cvat/20200724220450.yaml
new file mode 100644
index 0000000..120b533
--- /dev/null
+++ b/db/yaml/workspaces/cvat/20200724220450.yaml
@@ -0,0 +1,135 @@
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20200724220450
+ action: update
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ # Workspace arguments
+ arguments:
+ parameters:
+ - name: storage-prefix
+ displayName: Directory in default object storage
+ value: data
+ hint: Location of data and models in default object storage, will continuously sync to '/mnt/share'
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:v0.7.10-stable
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /home/django/data
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: data
+ mountPath: /home/django/data
+ - name: keys
+ mountPath: /home/django/keys
+ - name: logs
+ mountPath: /home/django/logs
+ - name: models
+ mountPath: /home/django/models
+ - name: share
+ mountPath: /home/django/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ - name: cvat-ui
+ image: onepanel/cvat-ui:v0.7.10-stable
+ ports:
+ - containerPort: 80
+ name: http
+ # You can add multiple FileSyncer sidecar containers if needed
+ - name: filesyncer
+ image: "onepanel/filesyncer:{{.ArtifactRepositoryType}}"
+ args:
+ - download
+ env:
+ - name: FS_PATH
+ value: /mnt/share
+ - name: FS_PREFIX
+ value: '{{workspace.parameters.storage-prefix}}'
+ volumeMounts:
+ - name: share
+ mountPath: /mnt/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ routes:
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ timeout: 600s
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ timeout: 600s
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ # Uncomment the lines below if you want to send Slack notifications
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/cvat/20200812113316.yaml b/db/yaml/workspaces/cvat/20200812113316.yaml
new file mode 100644
index 0000000..27f580e
--- /dev/null
+++ b/db/yaml/workspaces/cvat/20200812113316.yaml
@@ -0,0 +1,144 @@
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20200812113316
+ action: update
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ # Workspace arguments
+ arguments:
+ parameters:
+ - name: sync-directory
+ displayName: Directory to sync raw input and training output
+ value: workflow-data
+ hint: Location to sync raw input, models and checkpoints from default object storage. Note that this will be relative to the current namespace.
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:0.12.0_cvat.1.0.0-beta.2-cuda
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /home/django/data
+ - name: ONEPANEL_SYNC_DIRECTORY
+ value: '{{workspace.parameters.sync-directory}}'
+ - name: NVIDIA_VISIBLE_DEVICES
+ value: all
+ - name: NVIDIA_DRIVER_CAPABILITIES
+ value: compute,utility
+ - name: NVIDIA_REQUIRE_CUDA
+ value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: data
+ mountPath: /home/django/data
+ - name: keys
+ mountPath: /home/django/keys
+ - name: logs
+ mountPath: /home/django/logs
+ - name: models
+ mountPath: /home/django/models
+ - name: share
+ mountPath: /home/django/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ - name: cvat-ui
+ image: onepanel/cvat-ui:0.12.0_cvat.1.0.0-beta.2
+ ports:
+ - containerPort: 80
+ name: http
+ # You can add multiple FileSyncer sidecar containers if needed
+ - name: filesyncer
+ image: "onepanel/filesyncer:{{.ArtifactRepositoryType}}"
+ imagePullPolicy: Always
+ args:
+ - download
+ env:
+ - name: FS_PATH
+ value: /mnt/share
+ - name: FS_PREFIX
+ value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
+ volumeMounts:
+ - name: share
+ mountPath: /mnt/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ routes:
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ timeout: 600s
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ timeout: 600s
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ # Uncomment the lines below if you want to send Slack notifications
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/cvat/20200824101905.yaml b/db/yaml/workspaces/cvat/20200824101905.yaml
new file mode 100644
index 0000000..565c24a
--- /dev/null
+++ b/db/yaml/workspaces/cvat/20200824101905.yaml
@@ -0,0 +1,144 @@
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20200824101905
+ action: update
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ # Workspace arguments
+ arguments:
+ parameters:
+ - name: sync-directory
+ displayName: Directory to sync raw input and training output
+ value: workflow-data
+ hint: Location to sync raw input, models and checkpoints from default object storage. Note that this will be relative to the current namespace.
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:0.12.0-rc.6_cvat.1.0.0
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /home/django/data
+ - name: ONEPANEL_SYNC_DIRECTORY
+ value: '{{workspace.parameters.sync-directory}}'
+ - name: NVIDIA_VISIBLE_DEVICES
+ value: all
+ - name: NVIDIA_DRIVER_CAPABILITIES
+ value: compute,utility
+ - name: NVIDIA_REQUIRE_CUDA
+ value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: data
+ mountPath: /home/django/data
+ - name: keys
+ mountPath: /home/django/keys
+ - name: logs
+ mountPath: /home/django/logs
+ - name: models
+ mountPath: /home/django/models
+ - name: share
+ mountPath: /home/django/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ - name: cvat-ui
+ image: onepanel/cvat-ui:0.12.0-rc.1_cvat.1.0.0
+ ports:
+ - containerPort: 80
+ name: http
+ # You can add multiple FileSyncer sidecar containers if needed
+ - name: filesyncer
+ image: "onepanel/filesyncer:{{.ArtifactRepositoryType}}"
+ imagePullPolicy: Always
+ args:
+ - download
+ env:
+ - name: FS_PATH
+ value: /mnt/share
+ - name: FS_PREFIX
+ value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
+ volumeMounts:
+ - name: share
+ mountPath: /mnt/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ routes:
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ timeout: 600s
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ timeout: 600s
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ # Uncomment the lines below if you want to send Slack notifications
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/cvat/20200825154403.yaml b/db/yaml/workspaces/cvat/20200825154403.yaml
new file mode 100644
index 0000000..a1a46df
--- /dev/null
+++ b/db/yaml/workspaces/cvat/20200825154403.yaml
@@ -0,0 +1,144 @@
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20200825154403
+ action: update
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ # Workspace arguments
+ arguments:
+ parameters:
+ - name: sync-directory
+ displayName: Directory to sync raw input and training output
+ value: workflow-data
+ hint: Location to sync raw input, models and checkpoints from default object storage. Note that this will be relative to the current namespace.
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:0.12.0_cvat.1.0.0
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /home/django/data
+ - name: ONEPANEL_SYNC_DIRECTORY
+ value: '{{workspace.parameters.sync-directory}}'
+ - name: NVIDIA_VISIBLE_DEVICES
+ value: all
+ - name: NVIDIA_DRIVER_CAPABILITIES
+ value: compute,utility
+ - name: NVIDIA_REQUIRE_CUDA
+ value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: data
+ mountPath: /home/django/data
+ - name: keys
+ mountPath: /home/django/keys
+ - name: logs
+ mountPath: /home/django/logs
+ - name: models
+ mountPath: /home/django/models
+ - name: share
+ mountPath: /home/django/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ - name: cvat-ui
+ image: onepanel/cvat-ui:0.12.0_cvat.1.0.0
+ ports:
+ - containerPort: 80
+ name: http
+ # You can add multiple FileSyncer sidecar containers if needed
+ - name: filesyncer
+ image: "onepanel/filesyncer:{{.ArtifactRepositoryType}}"
+ imagePullPolicy: Always
+ args:
+ - download
+ env:
+ - name: FS_PATH
+ value: /mnt/share
+ - name: FS_PREFIX
+ value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
+ volumeMounts:
+ - name: share
+ mountPath: /mnt/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ routes:
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ timeout: 600s
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ timeout: 600s
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ # Uncomment the lines below if you want to send Slack notifications
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/cvat/20200826185926.yaml b/db/yaml/workspaces/cvat/20200826185926.yaml
new file mode 100644
index 0000000..dda33b6
--- /dev/null
+++ b/db/yaml/workspaces/cvat/20200826185926.yaml
@@ -0,0 +1,156 @@
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20200826185926
+ action: update
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ # Workspace arguments
+ arguments:
+ parameters:
+ - name: sync-directory
+ displayName: Directory to sync raw input and training output
+ value: workflow-data
+ hint: Location to sync raw input, models and checkpoints from default object storage. Note that this will be relative to the current namespace.
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:0.12.0_cvat.1.0.0
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /home/django/data
+ - name: ONEPANEL_SYNC_DIRECTORY
+ value: '{{workspace.parameters.sync-directory}}'
+ - name: NVIDIA_VISIBLE_DEVICES
+ value: all
+ - name: NVIDIA_DRIVER_CAPABILITIES
+ value: compute,utility
+ - name: NVIDIA_REQUIRE_CUDA
+ value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: data
+ mountPath: /home/django/data
+ - name: keys
+ mountPath: /home/django/keys
+ - name: logs
+ mountPath: /home/django/logs
+ - name: models
+ mountPath: /home/django/models
+ - name: share
+ mountPath: /home/django/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ - name: cvat-ui
+ image: onepanel/cvat-ui:0.12.0_cvat.1.0.0
+ ports:
+ - containerPort: 80
+ name: http
+ # You can add multiple FileSyncer sidecar containers if needed
+ - name: filesyncer
+ image: "onepanel/filesyncer:{{.ArtifactRepositoryType}}"
+ imagePullPolicy: Always
+ args:
+ - download
+ - -server-prefix=/sys/filesyncer
+ env:
+ - name: FS_PATH
+ value: /mnt/share
+ - name: FS_PREFIX
+ value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
+ volumeMounts:
+ - name: share
+ mountPath: /mnt/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8888
+ protocol: TCP
+ targetPort: 8888
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8888
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ timeout: 600s
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ timeout: 600s
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ # Uncomment the lines below if you want to send Slack notifications
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/cvat/20201001070806.yaml b/db/yaml/workspaces/cvat/20201001070806.yaml
new file mode 100644
index 0000000..90b1aa8
--- /dev/null
+++ b/db/yaml/workspaces/cvat/20201001070806.yaml
@@ -0,0 +1,154 @@
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20201001070806
+ action: update
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ # Workspace arguments
+ arguments:
+ parameters:
+ - name: sync-directory
+ displayName: Directory to sync raw input and training output
+ value: workflow-data
+ hint: Location to sync raw input, models and checkpoints from default object storage. Note that this will be relative to the current namespace.
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:0.12.1_cvat.1.0.0
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /home/django/data
+ - name: ONEPANEL_SYNC_DIRECTORY
+ value: '{{workspace.parameters.sync-directory}}'
+ - name: NVIDIA_VISIBLE_DEVICES
+ value: all
+ - name: NVIDIA_DRIVER_CAPABILITIES
+ value: compute,utility
+ - name: NVIDIA_REQUIRE_CUDA
+ value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: data
+ mountPath: /home/django/data
+ - name: keys
+ mountPath: /home/django/keys
+ - name: logs
+ mountPath: /home/django/logs
+ - name: models
+ mountPath: /home/django/models
+ - name: share
+ mountPath: /home/django/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ - name: cvat-ui
+ image: onepanel/cvat-ui:0.12.1_cvat.1.0.0
+ ports:
+ - containerPort: 80
+ name: http
+ # You can add multiple FileSyncer sidecar containers if needed
+ - name: filesyncer
+ image: "onepanel/filesyncer:{{.ArtifactRepositoryType}}"
+ imagePullPolicy: Always
+ args:
+ - download
+ - -server-prefix=/sys/filesyncer
+ env:
+ - name: FS_PATH
+ value: /mnt/share
+ - name: FS_PREFIX
+ value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
+ volumeMounts:
+ - name: share
+ mountPath: /mnt/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8888
+ protocol: TCP
+ targetPort: 8888
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8888
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ # Uncomment the lines below if you want to send Slack notifications
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/cvat/20201016170415.yaml b/db/yaml/workspaces/cvat/20201016170415.yaml
index e864247..6839607 100644
--- a/db/yaml/workspaces/cvat/20201016170415.yaml
+++ b/db/yaml/workspaces/cvat/20201016170415.yaml
@@ -1,147 +1,154 @@
-# Workspace arguments
-arguments:
- parameters:
- - name: sync-directory
- displayName: Directory to sync raw input and training output
- value: workflow-data
- hint: Location to sync raw input, models and checkpoints from default object storage. Note that this will be relative to the current namespace.
-containers:
- - name: cvat-db
- image: postgres:10-alpine
- env:
- - name: POSTGRES_USER
- value: root
- - name: POSTGRES_DB
- value: cvat
- - name: POSTGRES_HOST_AUTH_METHOD
- value: trust
- - name: PGDATA
- value: /var/lib/psql/data
- ports:
- - containerPort: 5432
- name: tcp
- volumeMounts:
- - name: db
- mountPath: /var/lib/psql
- - name: cvat-redis
- image: redis:4.0-alpine
- ports:
- - containerPort: 6379
- name: tcp
- - name: cvat
- image: onepanel/cvat:0.14.0_cvat.1.0.0
- env:
- - name: DJANGO_MODWSGI_EXTRA_ARGS
- value: ""
- - name: ALLOWED_HOSTS
- value: '*'
- - name: CVAT_REDIS_HOST
- value: localhost
- - name: CVAT_POSTGRES_HOST
- value: localhost
- - name: CVAT_SHARE_URL
- value: /home/django/data
- - name: ONEPANEL_SYNC_DIRECTORY
- value: '{{workspace.parameters.sync-directory}}'
- - name: NVIDIA_VISIBLE_DEVICES
- value: all
- - name: NVIDIA_DRIVER_CAPABILITIES
- value: compute,utility
- - name: NVIDIA_REQUIRE_CUDA
- value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
- ports:
- - containerPort: 8080
- name: http
- volumeMounts:
- - name: data
- mountPath: /home/django/data
- - name: keys
- mountPath: /home/django/keys
- - name: logs
- mountPath: /home/django/logs
- - name: models
- mountPath: /home/django/models
- - name: share
- mountPath: /home/django/share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
- - name: cvat-ui
- image: onepanel/cvat-ui:0.14.0_cvat.1.0.0
- ports:
- - containerPort: 80
- name: http
- # You can add multiple FileSyncer sidecar containers if needed
- - name: filesyncer
- image: onepanel/filesyncer:{{.ArtifactRepositoryType}}
- imagePullPolicy: Always
- args:
- - download
- - -server-prefix=/sys/filesyncer
- env:
- - name: FS_PATH
- value: /mnt/share
- - name: FS_PREFIX
- value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
- volumeMounts:
- - name: share
- mountPath: /mnt/share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: cvat-ui
- port: 80
- protocol: TCP
- targetPort: 80
- - name: cvat
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: fs
- port: 8888
- protocol: TCP
- targetPort: 8888
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8888
- - match:
- - uri:
- regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- - queryParams:
- id:
- regex: \d+.*
- route:
- - destination:
- port:
- number: 8080
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 80
-# DAG Workflow to be executed once a Workspace action completes (optional)
-# Uncomment the lines below if you want to send Slack notifications
-#postExecutionWorkflow:
-# entrypoint: main
-# templates:
-# - name: main
-# dag:
-# tasks:
-# - name: slack-notify
-# template: slack-notify
-# - name: slack-notify
-# container:
-# image: technosophos/slack-notify
-# args:
-# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
-# command:
-# - sh
-# - -c
\ No newline at end of file
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20201016170415
+ action: update
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ # Workspace arguments
+ arguments:
+ parameters:
+ - name: sync-directory
+ displayName: Directory to sync raw input and training output
+ value: workflow-data
+ hint: Location to sync raw input, models and checkpoints from default object storage. Note that this will be relative to the current namespace.
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:0.14.0_cvat.1.0.0
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /home/django/data
+ - name: ONEPANEL_SYNC_DIRECTORY
+ value: '{{workspace.parameters.sync-directory}}'
+ - name: NVIDIA_VISIBLE_DEVICES
+ value: all
+ - name: NVIDIA_DRIVER_CAPABILITIES
+ value: compute,utility
+ - name: NVIDIA_REQUIRE_CUDA
+ value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: data
+ mountPath: /home/django/data
+ - name: keys
+ mountPath: /home/django/keys
+ - name: logs
+ mountPath: /home/django/logs
+ - name: models
+ mountPath: /home/django/models
+ - name: share
+ mountPath: /home/django/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ - name: cvat-ui
+ image: onepanel/cvat-ui:0.14.0_cvat.1.0.0
+ ports:
+ - containerPort: 80
+ name: http
+ # You can add multiple FileSyncer sidecar containers if needed
+ - name: filesyncer
+ image: "onepanel/filesyncer:{{.ArtifactRepositoryType}}"
+ imagePullPolicy: Always
+ args:
+ - download
+ - -server-prefix=/sys/filesyncer
+ env:
+ - name: FS_PATH
+ value: /mnt/share
+ - name: FS_PREFIX
+ value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
+ volumeMounts:
+ - name: share
+ mountPath: /mnt/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8888
+ protocol: TCP
+ targetPort: 8888
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8888
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ # Uncomment the lines below if you want to send Slack notifications
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/cvat/20201102104048.yaml b/db/yaml/workspaces/cvat/20201102104048.yaml
index 5575c69..d779479 100644
--- a/db/yaml/workspaces/cvat/20201102104048.yaml
+++ b/db/yaml/workspaces/cvat/20201102104048.yaml
@@ -1,159 +1,166 @@
-# Workspace arguments
-arguments:
- parameters:
- - name: sync-directory
- displayName: Directory to sync raw input and training output
- value: workflow-data
- hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'.
-containers:
- - name: cvat-db
- image: postgres:10-alpine
- env:
- - name: POSTGRES_USER
- value: root
- - name: POSTGRES_DB
- value: cvat
- - name: POSTGRES_HOST_AUTH_METHOD
- value: trust
- - name: PGDATA
- value: /var/lib/psql/data
- ports:
- - containerPort: 5432
- name: tcp
- volumeMounts:
- - name: db
- mountPath: /var/lib/psql
- - name: cvat-redis
- image: redis:4.0-alpine
- ports:
- - containerPort: 6379
- name: tcp
- - name: cvat
- image: onepanel/cvat:0.15.0_cvat.1.0.0
- env:
- - name: DJANGO_MODWSGI_EXTRA_ARGS
- value: ""
- - name: ALLOWED_HOSTS
- value: '*'
- - name: CVAT_REDIS_HOST
- value: localhost
- - name: CVAT_POSTGRES_HOST
- value: localhost
- - name: CVAT_SHARE_URL
- value: /cvat/data
- - name: CVAT_SHARE_DIR
- value: /share
- - name: CVAT_KEYS_DIR
- value: /cvat/keys
- - name: CVAT_DATA_DIR
- value: /cvat/data
- - name: CVAT_MODELS_DIR
- value: /cvat/models
- - name: CVAT_LOGS_DIR
- value: /cvat/logs
- - name: ONEPANEL_SYNC_DIRECTORY
- value: '{{workspace.parameters.sync-directory}}'
- - name: NVIDIA_VISIBLE_DEVICES
- value: all
- - name: NVIDIA_DRIVER_CAPABILITIES
- value: compute,utility
- - name: NVIDIA_REQUIRE_CUDA
- value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
- ports:
- - containerPort: 8080
- name: http
- volumeMounts:
- - name: cvat-data
- mountPath: /cvat
- - name: share
- mountPath: /share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
- - name: cvat-ui
- image: onepanel/cvat-ui:0.15.0_cvat.1.0.0
- ports:
- - containerPort: 80
- name: http
- # You can add multiple FileSyncer sidecar containers if needed
- - name: filesyncer
- image: onepanel/filesyncer:s3
- imagePullPolicy: Always
- args:
- - download
- - -server-prefix=/sys/filesyncer
- env:
- - name: FS_PATH
- value: /mnt/share
- - name: FS_PREFIX
- value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
- volumeMounts:
- - name: share
- mountPath: /mnt/share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: cvat-ui
- port: 80
- protocol: TCP
- targetPort: 80
- - name: cvat
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: fs
- port: 8888
- protocol: TCP
- targetPort: 8888
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8888
- - match:
- - uri:
- regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- - queryParams:
- id:
- regex: \d+.*
- route:
- - destination:
- port:
- number: 8080
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 80
-volumeClaimTemplates:
- - metadata:
- name: db
- spec:
- accessModes: ["ReadWriteOnce"]
- resources:
- requests:
- storage: 20Gi
-# DAG Workflow to be executed once a Workspace action completes (optional)
-# Uncomment the lines below if you want to send Slack notifications
-#postExecutionWorkflow:
-# entrypoint: main
-# templates:
-# - name: main
-# dag:
-# tasks:
-# - name: slack-notify
-# template: slack-notify
-# - name: slack-notify
-# container:
-# image: technosophos/slack-notify
-# args:
-# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
-# command:
-# - sh
-# - -c
\ No newline at end of file
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20201102104048
+ action: update
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ # Workspace arguments
+ arguments:
+ parameters:
+ - name: sync-directory
+ displayName: Directory to sync raw input and training output
+ value: workflow-data
+ hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'.
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:0.15.0_cvat.1.0.0
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /cvat/data
+ - name: CVAT_SHARE_DIR
+ value: /share
+ - name: CVAT_KEYS_DIR
+ value: /cvat/keys
+ - name: CVAT_DATA_DIR
+ value: /cvat/data
+ - name: CVAT_MODELS_DIR
+ value: /cvat/models
+ - name: CVAT_LOGS_DIR
+ value: /cvat/logs
+ - name: ONEPANEL_SYNC_DIRECTORY
+ value: '{{workspace.parameters.sync-directory}}'
+ - name: NVIDIA_VISIBLE_DEVICES
+ value: all
+ - name: NVIDIA_DRIVER_CAPABILITIES
+ value: compute,utility
+ - name: NVIDIA_REQUIRE_CUDA
+ value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: cvat-data
+ mountPath: /cvat
+ - name: share
+ mountPath: /share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ - name: cvat-ui
+ image: onepanel/cvat-ui:0.15.0_cvat.1.0.0
+ ports:
+ - containerPort: 80
+ name: http
+ # You can add multiple FileSyncer sidecar containers if needed
+ - name: filesyncer
+ image: onepanel/filesyncer:s3
+ imagePullPolicy: Always
+ args:
+ - download
+ - -server-prefix=/sys/filesyncer
+ env:
+ - name: FS_PATH
+ value: /mnt/share
+ - name: FS_PREFIX
+ value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
+ volumeMounts:
+ - name: share
+ mountPath: /mnt/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8888
+ protocol: TCP
+ targetPort: 8888
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8888
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ volumeClaimTemplates:
+ - metadata:
+ name: db
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 20Gi
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ # Uncomment the lines below if you want to send Slack notifications
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/cvat/20201113094916.yaml b/db/yaml/workspaces/cvat/20201113094916.yaml
index 77256ef..37c6f44 100644
--- a/db/yaml/workspaces/cvat/20201113094916.yaml
+++ b/db/yaml/workspaces/cvat/20201113094916.yaml
@@ -1,159 +1,166 @@
-# Workspace arguments
-arguments:
- parameters:
- - name: sync-directory
- displayName: Directory to sync raw input and training output
- value: workflow-data
- hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'.
-containers:
- - name: cvat-db
- image: postgres:10-alpine
- env:
- - name: POSTGRES_USER
- value: root
- - name: POSTGRES_DB
- value: cvat
- - name: POSTGRES_HOST_AUTH_METHOD
- value: trust
- - name: PGDATA
- value: /var/lib/psql/data
- ports:
- - containerPort: 5432
- name: tcp
- volumeMounts:
- - name: db
- mountPath: /var/lib/psql
- - name: cvat-redis
- image: redis:4.0-alpine
- ports:
- - containerPort: 6379
- name: tcp
- - name: cvat
- image: onepanel/cvat:0.16.0_cvat.1.0.0
- env:
- - name: DJANGO_MODWSGI_EXTRA_ARGS
- value: ""
- - name: ALLOWED_HOSTS
- value: '*'
- - name: CVAT_REDIS_HOST
- value: localhost
- - name: CVAT_POSTGRES_HOST
- value: localhost
- - name: CVAT_SHARE_URL
- value: /cvat/data
- - name: CVAT_SHARE_DIR
- value: /share
- - name: CVAT_KEYS_DIR
- value: /cvat/keys
- - name: CVAT_DATA_DIR
- value: /cvat/data
- - name: CVAT_MODELS_DIR
- value: /cvat/models
- - name: CVAT_LOGS_DIR
- value: /cvat/logs
- - name: ONEPANEL_SYNC_DIRECTORY
- value: '{{workspace.parameters.sync-directory}}'
- - name: NVIDIA_VISIBLE_DEVICES
- value: all
- - name: NVIDIA_DRIVER_CAPABILITIES
- value: compute,utility
- - name: NVIDIA_REQUIRE_CUDA
- value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
- ports:
- - containerPort: 8080
- name: http
- volumeMounts:
- - name: cvat-data
- mountPath: /cvat
- - name: share
- mountPath: /share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
- - name: cvat-ui
- image: onepanel/cvat-ui:0.16.0_cvat.1.0.0
- ports:
- - containerPort: 80
- name: http
- # You can add multiple FileSyncer sidecar containers if needed
- - name: filesyncer
- image: onepanel/filesyncer:s3
- imagePullPolicy: Always
- args:
- - download
- - -server-prefix=/sys/filesyncer
- env:
- - name: FS_PATH
- value: /mnt/share
- - name: FS_PREFIX
- value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
- volumeMounts:
- - name: share
- mountPath: /mnt/share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: cvat-ui
- port: 80
- protocol: TCP
- targetPort: 80
- - name: cvat
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: fs
- port: 8888
- protocol: TCP
- targetPort: 8888
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8888
- - match:
- - uri:
- regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- - queryParams:
- id:
- regex: \d+.*
- route:
- - destination:
- port:
- number: 8080
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 80
-volumeClaimTemplates:
- - metadata:
- name: db
- spec:
- accessModes: ["ReadWriteOnce"]
- resources:
- requests:
- storage: 20Gi
-# DAG Workflow to be executed once a Workspace action completes (optional)
-# Uncomment the lines below if you want to send Slack notifications
-#postExecutionWorkflow:
-# entrypoint: main
-# templates:
-# - name: main
-# dag:
-# tasks:
-# - name: slack-notify
-# template: slack-notify
-# - name: slack-notify
-# container:
-# image: technosophos/slack-notify
-# args:
-# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
-# command:
-# - sh
-# - -c
\ No newline at end of file
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20201113094916
+ action: update
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ # Workspace arguments
+ arguments:
+ parameters:
+ - name: sync-directory
+ displayName: Directory to sync raw input and training output
+ value: workflow-data
+ hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'.
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:0.16.0_cvat.1.0.0
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /cvat/data
+ - name: CVAT_SHARE_DIR
+ value: /share
+ - name: CVAT_KEYS_DIR
+ value: /cvat/keys
+ - name: CVAT_DATA_DIR
+ value: /cvat/data
+ - name: CVAT_MODELS_DIR
+ value: /cvat/models
+ - name: CVAT_LOGS_DIR
+ value: /cvat/logs
+ - name: ONEPANEL_SYNC_DIRECTORY
+ value: '{{workspace.parameters.sync-directory}}'
+ - name: NVIDIA_VISIBLE_DEVICES
+ value: all
+ - name: NVIDIA_DRIVER_CAPABILITIES
+ value: compute,utility
+ - name: NVIDIA_REQUIRE_CUDA
+ value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: cvat-data
+ mountPath: /cvat
+ - name: share
+ mountPath: /share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ - name: cvat-ui
+ image: onepanel/cvat-ui:0.16.0_cvat.1.0.0
+ ports:
+ - containerPort: 80
+ name: http
+ # You can add multiple FileSyncer sidecar containers if needed
+ - name: filesyncer
+ image: onepanel/filesyncer:s3
+ imagePullPolicy: Always
+ args:
+ - download
+ - -server-prefix=/sys/filesyncer
+ env:
+ - name: FS_PATH
+ value: /mnt/share
+ - name: FS_PREFIX
+ value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
+ volumeMounts:
+ - name: share
+ mountPath: /mnt/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8888
+ protocol: TCP
+ targetPort: 8888
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8888
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ volumeClaimTemplates:
+ - metadata:
+ name: db
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 20Gi
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ # Uncomment the lines below if you want to send Slack notifications
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/cvat/20201115133046.yaml b/db/yaml/workspaces/cvat/20201115133046.yaml
index 731f96c..4fd2243 100644
--- a/db/yaml/workspaces/cvat/20201115133046.yaml
+++ b/db/yaml/workspaces/cvat/20201115133046.yaml
@@ -1,161 +1,168 @@
-# Workspace arguments
-arguments:
- parameters:
- - name: sync-directory
- displayName: Directory to sync raw input and training output
- value: workflow-data
- hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'.
-containers:
- - name: cvat-db
- image: postgres:10-alpine
- env:
- - name: POSTGRES_USER
- value: root
- - name: POSTGRES_DB
- value: cvat
- - name: POSTGRES_HOST_AUTH_METHOD
- value: trust
- - name: PGDATA
- value: /var/lib/psql/data
- ports:
- - containerPort: 5432
- name: tcp
- volumeMounts:
- - name: db
- mountPath: /var/lib/psql
- - name: cvat-redis
- image: redis:4.0-alpine
- ports:
- - containerPort: 6379
- name: tcp
- - name: cvat
- image: onepanel/cvat:0.16.0_cvat.1.0.0
- env:
- - name: DJANGO_MODWSGI_EXTRA_ARGS
- value: ""
- - name: ALLOWED_HOSTS
- value: '*'
- - name: CVAT_REDIS_HOST
- value: localhost
- - name: CVAT_POSTGRES_HOST
- value: localhost
- - name: CVAT_SHARE_URL
- value: /cvat/data
- - name: CVAT_SHARE_DIR
- value: /share
- - name: CVAT_DATA_DIR
- value: /cvat/data
- - name: CVAT_MEDIA_DATA_DIR
- value: /cvat/data/data
- - name: CVAT_KEYS_DIR
- value: /cvat/data/keys
- - name: CVAT_MODELS_DIR
- value: /cvat/data/models
- - name: CVAT_LOGS_DIR
- value: /cvat/logs
- - name: ONEPANEL_SYNC_DIRECTORY
- value: '{{workspace.parameters.sync-directory}}'
- - name: NVIDIA_VISIBLE_DEVICES
- value: all
- - name: NVIDIA_DRIVER_CAPABILITIES
- value: compute,utility
- - name: NVIDIA_REQUIRE_CUDA
- value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
- ports:
- - containerPort: 8080
- name: http
- volumeMounts:
- - name: cvat-data
- mountPath: /cvat
- - name: share
- mountPath: /share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
- - name: cvat-ui
- image: onepanel/cvat-ui:0.16.0_cvat.1.0.0
- ports:
- - containerPort: 80
- name: http
- # You can add multiple FileSyncer sidecar containers if needed
- - name: filesyncer
- image: onepanel/filesyncer:s3
- imagePullPolicy: Always
- args:
- - download
- - -server-prefix=/sys/filesyncer
- env:
- - name: FS_PATH
- value: /mnt/share
- - name: FS_PREFIX
- value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
- volumeMounts:
- - name: share
- mountPath: /mnt/share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: cvat-ui
- port: 80
- protocol: TCP
- targetPort: 80
- - name: cvat
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: fs
- port: 8888
- protocol: TCP
- targetPort: 8888
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8888
- - match:
- - uri:
- regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- - queryParams:
- id:
- regex: \d+.*
- route:
- - destination:
- port:
- number: 8080
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 80
-volumeClaimTemplates:
- - metadata:
- name: db
- spec:
- accessModes: ["ReadWriteOnce"]
- resources:
- requests:
- storage: 20Gi
-# DAG Workflow to be executed once a Workspace action completes (optional)
-# Uncomment the lines below if you want to send Slack notifications
-#postExecutionWorkflow:
-# entrypoint: main
-# templates:
-# - name: main
-# dag:
-# tasks:
-# - name: slack-notify
-# template: slack-notify
-# - name: slack-notify
-# container:
-# image: technosophos/slack-notify
-# args:
-# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
-# command:
-# - sh
-# - -c
\ No newline at end of file
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20201115133046
+ action: update
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ # Workspace arguments
+ arguments:
+ parameters:
+ - name: sync-directory
+ displayName: Directory to sync raw input and training output
+ value: workflow-data
+ hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'.
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:0.16.0_cvat.1.0.0
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /cvat/data
+ - name: CVAT_SHARE_DIR
+ value: /share
+ - name: CVAT_DATA_DIR
+ value: /cvat/data
+ - name: CVAT_MEDIA_DATA_DIR
+ value: /cvat/data/data
+ - name: CVAT_KEYS_DIR
+ value: /cvat/data/keys
+ - name: CVAT_MODELS_DIR
+ value: /cvat/data/models
+ - name: CVAT_LOGS_DIR
+ value: /cvat/logs
+ - name: ONEPANEL_SYNC_DIRECTORY
+ value: '{{workspace.parameters.sync-directory}}'
+ - name: NVIDIA_VISIBLE_DEVICES
+ value: all
+ - name: NVIDIA_DRIVER_CAPABILITIES
+ value: compute,utility
+ - name: NVIDIA_REQUIRE_CUDA
+ value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: cvat-data
+ mountPath: /cvat
+ - name: share
+ mountPath: /share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ - name: cvat-ui
+ image: onepanel/cvat-ui:0.16.0_cvat.1.0.0
+ ports:
+ - containerPort: 80
+ name: http
+ # You can add multiple FileSyncer sidecar containers if needed
+ - name: filesyncer
+ image: onepanel/filesyncer:s3
+ imagePullPolicy: Always
+ args:
+ - download
+ - -server-prefix=/sys/filesyncer
+ env:
+ - name: FS_PATH
+ value: /mnt/share
+ - name: FS_PREFIX
+ value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
+ volumeMounts:
+ - name: share
+ mountPath: /mnt/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8888
+ protocol: TCP
+ targetPort: 8888
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8888
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ volumeClaimTemplates:
+ - metadata:
+ name: db
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 20Gi
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ # Uncomment the lines below if you want to send Slack notifications
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/cvat/20201211161117.yaml b/db/yaml/workspaces/cvat/20201211161117.yaml
index 32c44c4..5285134 100644
--- a/db/yaml/workspaces/cvat/20201211161117.yaml
+++ b/db/yaml/workspaces/cvat/20201211161117.yaml
@@ -1,163 +1,170 @@
-# Workspace arguments
-arguments:
- parameters:
- - name: sync-directory
- displayName: Directory to sync raw input and training output
- value: workflow-data
- hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'.
-containers:
- - name: cvat-db
- image: postgres:10-alpine
- env:
- - name: POSTGRES_USER
- value: root
- - name: POSTGRES_DB
- value: cvat
- - name: POSTGRES_HOST_AUTH_METHOD
- value: trust
- - name: PGDATA
- value: /var/lib/psql/data
- ports:
- - containerPort: 5432
- name: tcp
- volumeMounts:
- - name: db
- mountPath: /var/lib/psql
- - name: cvat-redis
- image: redis:4.0-alpine
- ports:
- - containerPort: 6379
- name: tcp
- - name: cvat
- image: onepanel/cvat:0.16.0_cvat.1.0.0
- env:
- - name: DJANGO_MODWSGI_EXTRA_ARGS
- value: ""
- - name: ALLOWED_HOSTS
- value: '*'
- - name: CVAT_REDIS_HOST
- value: localhost
- - name: CVAT_POSTGRES_HOST
- value: localhost
- - name: CVAT_SHARE_URL
- value: /cvat/data
- - name: CVAT_SHARE_DIR
- value: /share
- - name: CVAT_DATA_DIR
- value: /cvat/data
- - name: CVAT_MEDIA_DATA_DIR
- value: /cvat/data/data
- - name: CVAT_KEYS_DIR
- value: /cvat/data/keys
- - name: CVAT_MODELS_DIR
- value: /cvat/data/models
- - name: CVAT_LOGS_DIR
- value: /cvat/logs
- - name: ONEPANEL_SYNC_DIRECTORY
- value: '{{workspace.parameters.sync-directory}}'
- - name: NVIDIA_VISIBLE_DEVICES
- value: all
- - name: NVIDIA_DRIVER_CAPABILITIES
- value: compute,utility
- - name: NVIDIA_REQUIRE_CUDA
- value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
- - name: ONEPANEL_MAIN_CONTAINER
- value: 'true'
- ports:
- - containerPort: 8080
- name: http
- volumeMounts:
- - name: cvat-data
- mountPath: /cvat
- - name: share
- mountPath: /share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
- - name: cvat-ui
- image: onepanel/cvat-ui:0.16.0_cvat.1.0.0
- ports:
- - containerPort: 80
- name: http
- # You can add multiple FileSyncer sidecar containers if needed
- - name: filesyncer
- image: onepanel/filesyncer:s3
- imagePullPolicy: Always
- args:
- - download
- - -server-prefix=/sys/filesyncer
- env:
- - name: FS_PATH
- value: /mnt/share
- - name: FS_PREFIX
- value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
- volumeMounts:
- - name: share
- mountPath: /mnt/share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: cvat-ui
- port: 80
- protocol: TCP
- targetPort: 80
- - name: cvat
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: fs
- port: 8888
- protocol: TCP
- targetPort: 8888
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8888
- - match:
- - uri:
- regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- - queryParams:
- id:
- regex: \d+.*
- route:
- - destination:
- port:
- number: 8080
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 80
-volumeClaimTemplates:
- - metadata:
- name: db
- spec:
- accessModes: ["ReadWriteOnce"]
- resources:
- requests:
- storage: 20Gi
-# DAG Workflow to be executed once a Workspace action completes (optional)
-# Uncomment the lines below if you want to send Slack notifications
-#postExecutionWorkflow:
-# entrypoint: main
-# templates:
-# - name: main
-# dag:
-# tasks:
-# - name: slack-notify
-# template: slack-notify
-# - name: slack-notify
-# container:
-# image: technosophos/slack-notify
-# args:
-# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
-# command:
-# - sh
-# - -c
\ No newline at end of file
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20201211161117
+ action: update
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ # Workspace arguments
+ arguments:
+ parameters:
+ - name: sync-directory
+ displayName: Directory to sync raw input and training output
+ value: workflow-data
+ hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'.
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:0.16.0_cvat.1.0.0
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /cvat/data
+ - name: CVAT_SHARE_DIR
+ value: /share
+ - name: CVAT_DATA_DIR
+ value: /cvat/data
+ - name: CVAT_MEDIA_DATA_DIR
+ value: /cvat/data/data
+ - name: CVAT_KEYS_DIR
+ value: /cvat/data/keys
+ - name: CVAT_MODELS_DIR
+ value: /cvat/data/models
+ - name: CVAT_LOGS_DIR
+ value: /cvat/logs
+ - name: ONEPANEL_SYNC_DIRECTORY
+ value: '{{workspace.parameters.sync-directory}}'
+ - name: NVIDIA_VISIBLE_DEVICES
+ value: all
+ - name: NVIDIA_DRIVER_CAPABILITIES
+ value: compute,utility
+ - name: NVIDIA_REQUIRE_CUDA
+ value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
+ - name: ONEPANEL_MAIN_CONTAINER
+ value: 'true'
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: cvat-data
+ mountPath: /cvat
+ - name: share
+ mountPath: /share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ - name: cvat-ui
+ image: onepanel/cvat-ui:0.16.0_cvat.1.0.0
+ ports:
+ - containerPort: 80
+ name: http
+ # You can add multiple FileSyncer sidecar containers if needed
+ - name: filesyncer
+ image: onepanel/filesyncer:s3
+ imagePullPolicy: Always
+ args:
+ - download
+ - -server-prefix=/sys/filesyncer
+ env:
+ - name: FS_PATH
+ value: /mnt/share
+ - name: FS_PREFIX
+ value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
+ volumeMounts:
+ - name: share
+ mountPath: /mnt/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8888
+ protocol: TCP
+ targetPort: 8888
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8888
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ volumeClaimTemplates:
+ - metadata:
+ name: db
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 20Gi
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ # Uncomment the lines below if you want to send Slack notifications
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/cvat/20210107094725.yaml b/db/yaml/workspaces/cvat/20210107094725.yaml
index 89e679b..8829922 100644
--- a/db/yaml/workspaces/cvat/20210107094725.yaml
+++ b/db/yaml/workspaces/cvat/20210107094725.yaml
@@ -1,163 +1,170 @@
-# Workspace arguments
-arguments:
- parameters:
- - name: sync-directory
- displayName: Directory to sync raw input and training output
- value: workflow-data
- hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'.
-containers:
- - name: cvat-db
- image: postgres:10-alpine
- env:
- - name: POSTGRES_USER
- value: root
- - name: POSTGRES_DB
- value: cvat
- - name: POSTGRES_HOST_AUTH_METHOD
- value: trust
- - name: PGDATA
- value: /var/lib/psql/data
- ports:
- - containerPort: 5432
- name: tcp
- volumeMounts:
- - name: db
- mountPath: /var/lib/psql
- - name: cvat-redis
- image: redis:4.0-alpine
- ports:
- - containerPort: 6379
- name: tcp
- - name: cvat
- image: onepanel/cvat:0.17.0_cvat.1.0.0
- env:
- - name: DJANGO_MODWSGI_EXTRA_ARGS
- value: ""
- - name: ALLOWED_HOSTS
- value: '*'
- - name: CVAT_REDIS_HOST
- value: localhost
- - name: CVAT_POSTGRES_HOST
- value: localhost
- - name: CVAT_SHARE_URL
- value: /cvat/data
- - name: CVAT_SHARE_DIR
- value: /share
- - name: CVAT_DATA_DIR
- value: /cvat/data
- - name: CVAT_MEDIA_DATA_DIR
- value: /cvat/data/data
- - name: CVAT_KEYS_DIR
- value: /cvat/data/keys
- - name: CVAT_MODELS_DIR
- value: /cvat/data/models
- - name: CVAT_LOGS_DIR
- value: /cvat/logs
- - name: ONEPANEL_SYNC_DIRECTORY
- value: '{{workspace.parameters.sync-directory}}'
- - name: NVIDIA_VISIBLE_DEVICES
- value: all
- - name: NVIDIA_DRIVER_CAPABILITIES
- value: compute,utility
- - name: NVIDIA_REQUIRE_CUDA
- value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
- - name: ONEPANEL_MAIN_CONTAINER
- value: 'true'
- ports:
- - containerPort: 8080
- name: http
- volumeMounts:
- - name: cvat-data
- mountPath: /cvat
- - name: share
- mountPath: /share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
- - name: cvat-ui
- image: onepanel/cvat-ui:0.17.0_cvat.1.0.0
- ports:
- - containerPort: 80
- name: http
- # You can add multiple FileSyncer sidecar containers if needed
- - name: filesyncer
- image: onepanel/filesyncer:0.17.0
- imagePullPolicy: Always
- args:
- - download
- - -server-prefix=/sys/filesyncer
- env:
- - name: FS_PATH
- value: /mnt/share
- - name: FS_PREFIX
- value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
- volumeMounts:
- - name: share
- mountPath: /mnt/share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: cvat-ui
- port: 80
- protocol: TCP
- targetPort: 80
- - name: cvat
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: fs
- port: 8888
- protocol: TCP
- targetPort: 8888
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8888
- - match:
- - uri:
- regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- - queryParams:
- id:
- regex: \d+.*
- route:
- - destination:
- port:
- number: 8080
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 80
-volumeClaimTemplates:
- - metadata:
- name: db
- spec:
- accessModes: ["ReadWriteOnce"]
- resources:
- requests:
- storage: 20Gi
-# DAG Workflow to be executed once a Workspace action completes (optional)
-# Uncomment the lines below if you want to send Slack notifications
-#postExecutionWorkflow:
-# entrypoint: main
-# templates:
-# - name: main
-# dag:
-# tasks:
-# - name: slack-notify
-# template: slack-notify
-# - name: slack-notify
-# container:
-# image: technosophos/slack-notify
-# args:
-# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
-# command:
-# - sh
-# - -c
\ No newline at end of file
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20210107094725
+ action: update
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ # Workspace arguments
+ arguments:
+ parameters:
+ - name: sync-directory
+ displayName: Directory to sync raw input and training output
+ value: workflow-data
+ hint: Location (relative to current namespace) to sync raw input, models and checkpoints from default object storage to '/share'.
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:0.17.0_cvat.1.0.0
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /cvat/data
+ - name: CVAT_SHARE_DIR
+ value: /share
+ - name: CVAT_DATA_DIR
+ value: /cvat/data
+ - name: CVAT_MEDIA_DATA_DIR
+ value: /cvat/data/data
+ - name: CVAT_KEYS_DIR
+ value: /cvat/data/keys
+ - name: CVAT_MODELS_DIR
+ value: /cvat/data/models
+ - name: CVAT_LOGS_DIR
+ value: /cvat/logs
+ - name: ONEPANEL_SYNC_DIRECTORY
+ value: '{{workspace.parameters.sync-directory}}'
+ - name: NVIDIA_VISIBLE_DEVICES
+ value: all
+ - name: NVIDIA_DRIVER_CAPABILITIES
+ value: compute,utility
+ - name: NVIDIA_REQUIRE_CUDA
+ value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
+ - name: ONEPANEL_MAIN_CONTAINER
+ value: 'true'
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: cvat-data
+ mountPath: /cvat
+ - name: share
+ mountPath: /share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ - name: cvat-ui
+ image: onepanel/cvat-ui:0.17.0_cvat.1.0.0
+ ports:
+ - containerPort: 80
+ name: http
+ # You can add multiple FileSyncer sidecar containers if needed
+ - name: filesyncer
+ image: onepanel/filesyncer:0.17.0
+ imagePullPolicy: Always
+ args:
+ - download
+ - -server-prefix=/sys/filesyncer
+ env:
+ - name: FS_PATH
+ value: /mnt/share
+ - name: FS_PREFIX
+ value: '{{workflow.namespace}}/{{workspace.parameters.sync-directory}}'
+ volumeMounts:
+ - name: share
+ mountPath: /mnt/share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8888
+ protocol: TCP
+ targetPort: 8888
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8888
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ volumeClaimTemplates:
+ - metadata:
+ name: db
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 20Gi
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ # Uncomment the lines below if you want to send Slack notifications
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/cvat/20210129134326.yaml b/db/yaml/workspaces/cvat/20210129134326.yaml
index 916655c..b49415d 100644
--- a/db/yaml/workspaces/cvat/20210129134326.yaml
+++ b/db/yaml/workspaces/cvat/20210129134326.yaml
@@ -1,134 +1,141 @@
-containers:
- - name: cvat-db
- image: postgres:10-alpine
- env:
- - name: POSTGRES_USER
- value: root
- - name: POSTGRES_DB
- value: cvat
- - name: POSTGRES_HOST_AUTH_METHOD
- value: trust
- - name: PGDATA
- value: /var/lib/psql/data
- ports:
- - containerPort: 5432
- name: tcp
- volumeMounts:
- - name: db
- mountPath: /var/lib/psql
- - name: cvat-redis
- image: redis:4.0-alpine
- ports:
- - containerPort: 6379
- name: tcp
- - name: cvat
- image: onepanel/cvat:v0.18.0_cvat.1.0.0
- env:
- - name: DJANGO_MODWSGI_EXTRA_ARGS
- value: ""
- - name: ALLOWED_HOSTS
- value: '*'
- - name: CVAT_REDIS_HOST
- value: localhost
- - name: CVAT_POSTGRES_HOST
- value: localhost
- - name: CVAT_SHARE_URL
- value: /cvat/data
- - name: CVAT_SHARE_DIR
- value: /share
- - name: CVAT_DATA_DIR
- value: /cvat/data
- - name: CVAT_MEDIA_DATA_DIR
- value: /cvat/data/data
- - name: CVAT_KEYS_DIR
- value: /cvat/data/keys
- - name: CVAT_MODELS_DIR
- value: /cvat/data/models
- - name: CVAT_LOGS_DIR
- value: /cvat/logs
- - name: CVAT_ANNOTATIONS_OBJECT_STORAGE_PREFIX
- value: 'artifacts/$(ONEPANEL_RESOURCE_NAMESPACE)/annotations/'
- - name: CVAT_ONEPANEL_WORKFLOWS_LABEL
- value: 'key=used-by,value=cvat'
- - name: NVIDIA_VISIBLE_DEVICES
- value: all
- - name: NVIDIA_DRIVER_CAPABILITIES
- value: compute,utility
- - name: NVIDIA_REQUIRE_CUDA
- value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
- - name: ONEPANEL_MAIN_CONTAINER
- value: 'true'
- ports:
- - containerPort: 8080
- name: http
- volumeMounts:
- - name: cvat-data
- mountPath: /cvat
- - name: share
- mountPath: /share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
- - name: cvat-ui
- image: onepanel/cvat-ui:v0.18.0_cvat.1.0.0
- ports:
- - containerPort: 80
- name: http
- - name: sys-filesyncer
- image: onepanel/filesyncer:v0.18.0
- imagePullPolicy: Always
- args:
- - server
- - -server-prefix=/sys/filesyncer
- volumeMounts:
- - name: share
- mountPath: /share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: cvat-ui
- port: 80
- protocol: TCP
- targetPort: 80
- - name: cvat
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: fs
- port: 8888
- protocol: TCP
- targetPort: 8888
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8888
- - match:
- - uri:
- regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- - queryParams:
- id:
- regex: \d+.*
- route:
- - destination:
- port:
- number: 8080
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 80
-volumeClaimTemplates:
- - metadata:
- name: db
- spec:
- accessModes: ["ReadWriteOnce"]
- resources:
- requests:
- storage: 20Gi
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20210129134326
+ action: update
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:v0.18.0_cvat.1.0.0
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /cvat/data
+ - name: CVAT_SHARE_DIR
+ value: /share
+ - name: CVAT_DATA_DIR
+ value: /cvat/data
+ - name: CVAT_MEDIA_DATA_DIR
+ value: /cvat/data/data
+ - name: CVAT_KEYS_DIR
+ value: /cvat/data/keys
+ - name: CVAT_MODELS_DIR
+ value: /cvat/data/models
+ - name: CVAT_LOGS_DIR
+ value: /cvat/logs
+ - name: CVAT_ANNOTATIONS_OBJECT_STORAGE_PREFIX
+ value: 'artifacts/$(ONEPANEL_RESOURCE_NAMESPACE)/annotations/'
+ - name: CVAT_ONEPANEL_WORKFLOWS_LABEL
+ value: 'key=used-by,value=cvat'
+ - name: NVIDIA_VISIBLE_DEVICES
+ value: all
+ - name: NVIDIA_DRIVER_CAPABILITIES
+ value: compute,utility
+ - name: NVIDIA_REQUIRE_CUDA
+ value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
+ - name: ONEPANEL_MAIN_CONTAINER
+ value: 'true'
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: cvat-data
+ mountPath: /cvat
+ - name: share
+ mountPath: /share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ - name: cvat-ui
+ image: onepanel/cvat-ui:v0.18.0_cvat.1.0.0
+ ports:
+ - containerPort: 80
+ name: http
+ - name: sys-filesyncer
+ image: onepanel/filesyncer:v0.18.0
+ imagePullPolicy: Always
+ args:
+ - server
+ - -server-prefix=/sys/filesyncer
+ volumeMounts:
+ - name: share
+ mountPath: /share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8888
+ protocol: TCP
+ targetPort: 8888
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8888
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ volumeClaimTemplates:
+ - metadata:
+ name: db
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 20Gi
diff --git a/db/yaml/workspaces/cvat/20210224180017.yaml b/db/yaml/workspaces/cvat/20210224180017.yaml
index 6010ec0..2512087 100644
--- a/db/yaml/workspaces/cvat/20210224180017.yaml
+++ b/db/yaml/workspaces/cvat/20210224180017.yaml
@@ -1,134 +1,141 @@
-containers:
- - name: cvat-db
- image: postgres:10-alpine
- env:
- - name: POSTGRES_USER
- value: root
- - name: POSTGRES_DB
- value: cvat
- - name: POSTGRES_HOST_AUTH_METHOD
- value: trust
- - name: PGDATA
- value: /var/lib/psql/data
- ports:
- - containerPort: 5432
- name: tcp
- volumeMounts:
- - name: db
- mountPath: /var/lib/psql
- - name: cvat-redis
- image: redis:4.0-alpine
- ports:
- - containerPort: 6379
- name: tcp
- - name: cvat
- image: onepanel/cvat:v0.19.0_cvat.1.0.0
- env:
- - name: DJANGO_MODWSGI_EXTRA_ARGS
- value: ""
- - name: ALLOWED_HOSTS
- value: '*'
- - name: CVAT_REDIS_HOST
- value: localhost
- - name: CVAT_POSTGRES_HOST
- value: localhost
- - name: CVAT_SHARE_URL
- value: /cvat/data
- - name: CVAT_SHARE_DIR
- value: /share
- - name: CVAT_DATA_DIR
- value: /cvat/data
- - name: CVAT_MEDIA_DATA_DIR
- value: /cvat/data/data
- - name: CVAT_KEYS_DIR
- value: /cvat/data/keys
- - name: CVAT_MODELS_DIR
- value: /cvat/data/models
- - name: CVAT_LOGS_DIR
- value: /cvat/logs
- - name: CVAT_ANNOTATIONS_OBJECT_STORAGE_PREFIX
- value: 'artifacts/$(ONEPANEL_RESOURCE_NAMESPACE)/annotations/'
- - name: CVAT_ONEPANEL_WORKFLOWS_LABEL
- value: 'key=used-by,value=cvat'
- - name: NVIDIA_VISIBLE_DEVICES
- value: all
- - name: NVIDIA_DRIVER_CAPABILITIES
- value: compute,utility
- - name: NVIDIA_REQUIRE_CUDA
- value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
- - name: ONEPANEL_MAIN_CONTAINER
- value: 'true'
- ports:
- - containerPort: 8080
- name: http
- volumeMounts:
- - name: cvat-data
- mountPath: /cvat
- - name: share
- mountPath: /share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
- - name: cvat-ui
- image: onepanel/cvat-ui:v0.19.0_cvat.1.0.0
- ports:
- - containerPort: 80
- name: http
- - name: sys-filesyncer
- image: onepanel/filesyncer:v0.19.0
- imagePullPolicy: Always
- args:
- - server
- - -server-prefix=/sys/filesyncer
- volumeMounts:
- - name: share
- mountPath: /share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: cvat-ui
- port: 80
- protocol: TCP
- targetPort: 80
- - name: cvat
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: fs
- port: 8888
- protocol: TCP
- targetPort: 8888
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8888
- - match:
- - uri:
- regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- - queryParams:
- id:
- regex: \d+.*
- route:
- - destination:
- port:
- number: 8080
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 80
-volumeClaimTemplates:
- - metadata:
- name: db
- spec:
- accessModes: ["ReadWriteOnce"]
- resources:
- requests:
- storage: 20Gi
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20210224180017
+ action: update
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:v0.19.0_cvat.1.0.0
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /cvat/data
+ - name: CVAT_SHARE_DIR
+ value: /share
+ - name: CVAT_DATA_DIR
+ value: /cvat/data
+ - name: CVAT_MEDIA_DATA_DIR
+ value: /cvat/data/data
+ - name: CVAT_KEYS_DIR
+ value: /cvat/data/keys
+ - name: CVAT_MODELS_DIR
+ value: /cvat/data/models
+ - name: CVAT_LOGS_DIR
+ value: /cvat/logs
+ - name: CVAT_ANNOTATIONS_OBJECT_STORAGE_PREFIX
+ value: 'artifacts/$(ONEPANEL_RESOURCE_NAMESPACE)/annotations/'
+ - name: CVAT_ONEPANEL_WORKFLOWS_LABEL
+ value: 'key=used-by,value=cvat'
+ - name: NVIDIA_VISIBLE_DEVICES
+ value: all
+ - name: NVIDIA_DRIVER_CAPABILITIES
+ value: compute,utility
+ - name: NVIDIA_REQUIRE_CUDA
+ value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
+ - name: ONEPANEL_MAIN_CONTAINER
+ value: 'true'
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: cvat-data
+ mountPath: /cvat
+ - name: share
+ mountPath: /share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ - name: cvat-ui
+ image: onepanel/cvat-ui:v0.19.0_cvat.1.0.0
+ ports:
+ - containerPort: 80
+ name: http
+ - name: sys-filesyncer
+ image: onepanel/filesyncer:v0.19.0
+ imagePullPolicy: Always
+ args:
+ - server
+ - -server-prefix=/sys/filesyncer
+ volumeMounts:
+ - name: share
+ mountPath: /share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8888
+ protocol: TCP
+ targetPort: 8888
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8888
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ volumeClaimTemplates:
+ - metadata:
+ name: db
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 20Gi
diff --git a/db/yaml/workspaces/cvat/20210323175655.yaml b/db/yaml/workspaces/cvat/20210323175655.yaml
index 5a4cb3e..44e7e3d 100644
--- a/db/yaml/workspaces/cvat/20210323175655.yaml
+++ b/db/yaml/workspaces/cvat/20210323175655.yaml
@@ -1,134 +1,141 @@
-containers:
- - name: cvat-db
- image: postgres:10-alpine
- env:
- - name: POSTGRES_USER
- value: root
- - name: POSTGRES_DB
- value: cvat
- - name: POSTGRES_HOST_AUTH_METHOD
- value: trust
- - name: PGDATA
- value: /var/lib/psql/data
- ports:
- - containerPort: 5432
- name: tcp
- volumeMounts:
- - name: db
- mountPath: /var/lib/psql
- - name: cvat-redis
- image: redis:4.0-alpine
- ports:
- - containerPort: 6379
- name: tcp
- - name: cvat
- image: onepanel/cvat:v0.19.0_cvat.1.0.0
- env:
- - name: DJANGO_MODWSGI_EXTRA_ARGS
- value: ""
- - name: ALLOWED_HOSTS
- value: '*'
- - name: CVAT_REDIS_HOST
- value: localhost
- - name: CVAT_POSTGRES_HOST
- value: localhost
- - name: CVAT_SHARE_URL
- value: /cvat/data
- - name: CVAT_SHARE_DIR
- value: /share
- - name: CVAT_DATA_DIR
- value: /cvat/data
- - name: CVAT_MEDIA_DATA_DIR
- value: /cvat/data/data
- - name: CVAT_KEYS_DIR
- value: /cvat/data/keys
- - name: CVAT_MODELS_DIR
- value: /cvat/data/models
- - name: CVAT_LOGS_DIR
- value: /cvat/logs
- - name: CVAT_ANNOTATIONS_OBJECT_STORAGE_PREFIX
- value: 'artifacts/$(ONEPANEL_RESOURCE_NAMESPACE)/annotations/'
- - name: CVAT_ONEPANEL_WORKFLOWS_LABEL
- value: 'key=used-by,value=cvat'
- - name: NVIDIA_VISIBLE_DEVICES
- value: all
- - name: NVIDIA_DRIVER_CAPABILITIES
- value: compute,utility
- - name: NVIDIA_REQUIRE_CUDA
- value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
- - name: ONEPANEL_MAIN_CONTAINER
- value: 'true'
- ports:
- - containerPort: 8080
- name: http
- volumeMounts:
- - name: cvat-data
- mountPath: /cvat
- - name: share
- mountPath: /share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
- - name: cvat-ui
- image: onepanel/cvat-ui:v0.19.0_cvat.1.0.0
- ports:
- - containerPort: 80
- name: http
- - name: sys-filesyncer
- image: onepanel/filesyncer:v0.20.0
- imagePullPolicy: Always
- args:
- - server
- - -server-prefix=/sys/filesyncer
- volumeMounts:
- - name: share
- mountPath: /share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: cvat-ui
- port: 80
- protocol: TCP
- targetPort: 80
- - name: cvat
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: fs
- port: 8888
- protocol: TCP
- targetPort: 8888
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8888
- - match:
- - uri:
- regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- - queryParams:
- id:
- regex: \d+.*
- route:
- - destination:
- port:
- number: 8080
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 80
-volumeClaimTemplates:
- - metadata:
- name: db
- spec:
- accessModes: ["ReadWriteOnce"]
- resources:
- requests:
- storage: 20Gi
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20210323175655
+ action: update
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:v0.19.0_cvat.1.0.0
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /cvat/data
+ - name: CVAT_SHARE_DIR
+ value: /share
+ - name: CVAT_DATA_DIR
+ value: /cvat/data
+ - name: CVAT_MEDIA_DATA_DIR
+ value: /cvat/data/data
+ - name: CVAT_KEYS_DIR
+ value: /cvat/data/keys
+ - name: CVAT_MODELS_DIR
+ value: /cvat/data/models
+ - name: CVAT_LOGS_DIR
+ value: /cvat/logs
+ - name: CVAT_ANNOTATIONS_OBJECT_STORAGE_PREFIX
+ value: 'artifacts/$(ONEPANEL_RESOURCE_NAMESPACE)/annotations/'
+ - name: CVAT_ONEPANEL_WORKFLOWS_LABEL
+ value: 'key=used-by,value=cvat'
+ - name: NVIDIA_VISIBLE_DEVICES
+ value: all
+ - name: NVIDIA_DRIVER_CAPABILITIES
+ value: compute,utility
+ - name: NVIDIA_REQUIRE_CUDA
+ value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
+ - name: ONEPANEL_MAIN_CONTAINER
+ value: 'true'
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: cvat-data
+ mountPath: /cvat
+ - name: share
+ mountPath: /share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ - name: cvat-ui
+ image: onepanel/cvat-ui:v0.19.0_cvat.1.0.0
+ ports:
+ - containerPort: 80
+ name: http
+ - name: sys-filesyncer
+ image: onepanel/filesyncer:v0.20.0
+ imagePullPolicy: Always
+ args:
+ - server
+ - -server-prefix=/sys/filesyncer
+ volumeMounts:
+ - name: share
+ mountPath: /share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8888
+ protocol: TCP
+ targetPort: 8888
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8888
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ volumeClaimTemplates:
+ - metadata:
+ name: db
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 20Gi
diff --git a/db/yaml/workspaces/cvat/20210719190719.yaml b/db/yaml/workspaces/cvat/20210719190719.yaml
index 919d3f9..b083bec 100644
--- a/db/yaml/workspaces/cvat/20210719190719.yaml
+++ b/db/yaml/workspaces/cvat/20210719190719.yaml
@@ -1,134 +1,141 @@
-containers:
- - name: cvat-db
- image: postgres:10-alpine
- env:
- - name: POSTGRES_USER
- value: root
- - name: POSTGRES_DB
- value: cvat
- - name: POSTGRES_HOST_AUTH_METHOD
- value: trust
- - name: PGDATA
- value: /var/lib/psql/data
- ports:
- - containerPort: 5432
- name: tcp
- volumeMounts:
- - name: db
- mountPath: /var/lib/psql
- - name: cvat-redis
- image: redis:4.0-alpine
- ports:
- - containerPort: 6379
- name: tcp
- - name: cvat
- image: onepanel/cvat:v0.19.0_cvat.1.0.0
- env:
- - name: DJANGO_MODWSGI_EXTRA_ARGS
- value: ""
- - name: ALLOWED_HOSTS
- value: '*'
- - name: CVAT_REDIS_HOST
- value: localhost
- - name: CVAT_POSTGRES_HOST
- value: localhost
- - name: CVAT_SHARE_URL
- value: /cvat/data
- - name: CVAT_SHARE_DIR
- value: /share
- - name: CVAT_DATA_DIR
- value: /cvat/data
- - name: CVAT_MEDIA_DATA_DIR
- value: /cvat/data/data
- - name: CVAT_KEYS_DIR
- value: /cvat/data/keys
- - name: CVAT_MODELS_DIR
- value: /cvat/data/models
- - name: CVAT_LOGS_DIR
- value: /cvat/logs
- - name: CVAT_ANNOTATIONS_OBJECT_STORAGE_PREFIX
- value: 'artifacts/$(ONEPANEL_RESOURCE_NAMESPACE)/annotations/'
- - name: CVAT_ONEPANEL_WORKFLOWS_LABEL
- value: 'key=used-by,value=cvat'
- - name: NVIDIA_VISIBLE_DEVICES
- value: all
- - name: NVIDIA_DRIVER_CAPABILITIES
- value: compute,utility
- - name: NVIDIA_REQUIRE_CUDA
- value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
- - name: ONEPANEL_MAIN_CONTAINER
- value: 'true'
- ports:
- - containerPort: 8080
- name: http
- volumeMounts:
- - name: cvat-data
- mountPath: /cvat
- - name: share
- mountPath: /share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
- - name: cvat-ui
- image: onepanel/cvat-ui:v0.19.0_cvat.1.0.0
- ports:
- - containerPort: 80
- name: http
- - name: sys-filesyncer
- image: onepanel/filesyncer:v1.0.0
- imagePullPolicy: Always
- args:
- - server
- - -server-prefix=/sys/filesyncer
- volumeMounts:
- - name: share
- mountPath: /share
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: cvat-ui
- port: 80
- protocol: TCP
- targetPort: 80
- - name: cvat
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: fs
- port: 8888
- protocol: TCP
- targetPort: 8888
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8888
- - match:
- - uri:
- regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
- - queryParams:
- id:
- regex: \d+.*
- route:
- - destination:
- port:
- number: 8080
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 80
-volumeClaimTemplates:
- - metadata:
- name: db
- spec:
- accessModes: ["ReadWriteOnce"]
- resources:
- requests:
- storage: 20Gi
+metadata:
+ name: CVAT
+ kind: Workspace
+ version: 20210719190719
+ action: update
+ description: "Powerful and efficient Computer Vision Annotation Tool (CVAT)"
+spec:
+ containers:
+ - name: cvat-db
+ image: postgres:10-alpine
+ env:
+ - name: POSTGRES_USER
+ value: root
+ - name: POSTGRES_DB
+ value: cvat
+ - name: POSTGRES_HOST_AUTH_METHOD
+ value: trust
+ - name: PGDATA
+ value: /var/lib/psql/data
+ ports:
+ - containerPort: 5432
+ name: tcp
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/psql
+ - name: cvat-redis
+ image: redis:4.0-alpine
+ ports:
+ - containerPort: 6379
+ name: tcp
+ - name: cvat
+ image: onepanel/cvat:v0.19.0_cvat.1.0.0
+ env:
+ - name: DJANGO_MODWSGI_EXTRA_ARGS
+ value: ""
+ - name: ALLOWED_HOSTS
+ value: '*'
+ - name: CVAT_REDIS_HOST
+ value: localhost
+ - name: CVAT_POSTGRES_HOST
+ value: localhost
+ - name: CVAT_SHARE_URL
+ value: /cvat/data
+ - name: CVAT_SHARE_DIR
+ value: /share
+ - name: CVAT_DATA_DIR
+ value: /cvat/data
+ - name: CVAT_MEDIA_DATA_DIR
+ value: /cvat/data/data
+ - name: CVAT_KEYS_DIR
+ value: /cvat/data/keys
+ - name: CVAT_MODELS_DIR
+ value: /cvat/data/models
+ - name: CVAT_LOGS_DIR
+ value: /cvat/logs
+ - name: CVAT_ANNOTATIONS_OBJECT_STORAGE_PREFIX
+ value: 'artifacts/$(ONEPANEL_RESOURCE_NAMESPACE)/annotations/'
+ - name: CVAT_ONEPANEL_WORKFLOWS_LABEL
+ value: 'key=used-by,value=cvat'
+ - name: NVIDIA_VISIBLE_DEVICES
+ value: all
+ - name: NVIDIA_DRIVER_CAPABILITIES
+ value: compute,utility
+ - name: NVIDIA_REQUIRE_CUDA
+ value: "cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
+ - name: ONEPANEL_MAIN_CONTAINER
+ value: 'true'
+ ports:
+ - containerPort: 8080
+ name: http
+ volumeMounts:
+ - name: cvat-data
+ mountPath: /cvat
+ - name: share
+ mountPath: /share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ - name: cvat-ui
+ image: onepanel/cvat-ui:v0.19.0_cvat.1.0.0
+ ports:
+ - containerPort: 80
+ name: http
+ - name: sys-filesyncer
+ image: onepanel/filesyncer:v1.0.0
+ imagePullPolicy: Always
+ args:
+ - server
+ - -server-prefix=/sys/filesyncer
+ volumeMounts:
+ - name: share
+ mountPath: /share
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: cvat-ui
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: cvat
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8888
+ protocol: TCP
+ targetPort: 8888
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8888
+ - match:
+ - uri:
+ regex: /api/.*|/git/.*|/tensorflow/.*|/onepanelio/.*|/tracking/.*|/auto_annotation/.*|/analytics/.*|/static/.*|/admin/.*|/documentation/.*|/dextr/.*|/reid/.*
+ - queryParams:
+ id:
+ regex: \d+.*
+ route:
+ - destination:
+ port:
+ number: 8080
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ volumeClaimTemplates:
+ - metadata:
+ name: db
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 20Gi
diff --git a/db/yaml/workspaces/jupyterlab/20200525160514.yaml b/db/yaml/workspaces/jupyterlab/20200525160514.yaml
new file mode 100644
index 0000000..7451cc5
--- /dev/null
+++ b/db/yaml/workspaces/jupyterlab/20200525160514.yaml
@@ -0,0 +1,64 @@
+metadata:
+ name: JupyterLab
+ kind: Workspace
+ version: 20200525160514
+ action: create
+ description: "Interactive development environment for notebooks"
+spec:
+ # Docker containers that are part of the Workspace
+ containers:
+ - name: jupyterlab-tensorflow
+ image: jupyter/tensorflow-notebook
+ command: [start.sh, jupyter]
+ env:
+ - name: tornado
+ value: "{ 'headers': { 'Content-Security-Policy': \"frame-ancestors * 'self'\" } }"
+ args:
+ - lab
+ - --LabApp.token=''
+ - --LabApp.allow_remote_access=True
+ - --LabApp.allow_origin="*"
+ - --LabApp.disable_check_xsrf=True
+ - --LabApp.trust_xheaders=True
+ - --LabApp.tornado_settings=$(tornado)
+ - --notebook-dir='/data'
+ ports:
+ - containerPort: 8888
+ name: jupyterlab
+ # Volumes to be mounted in this container
+ # Onepanel will automatically create these volumes and mount them to the container
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ # Ports that need to be exposed
+ ports:
+ - name: jupyterlab
+ port: 80
+ protocol: TCP
+ targetPort: 8888
+ # Routes that will map to ports
+ routes:
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ # DAG Workflow to be executed once a Workspace action completes
+ # postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/jupyterlab/20200821162630.yaml b/db/yaml/workspaces/jupyterlab/20200821162630.yaml
new file mode 100644
index 0000000..b73ce44
--- /dev/null
+++ b/db/yaml/workspaces/jupyterlab/20200821162630.yaml
@@ -0,0 +1,65 @@
+metadata:
+ name: JupyterLab
+ kind: Workspace
+ version: 20200821162630
+ action: update
+ description: "Interactive development environment for notebooks"
+spec:
+ # Docker containers that are part of the Workspace
+ containers:
+ - name: jupyterlab-tensorflow
+ image: onepanel/jupyterlab:1.0.1
+ command: ["/bin/bash", "-c", "start.sh jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * \'self\'\"}}' --notebook-dir='/data' --allow-root"]
+ env:
+ - name: tornado
+ value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ \'self'\"}}'"
+ args:
+ ports:
+ - containerPort: 8888
+ name: jupyterlab
+ - containerPort: 6006
+ name: tensorboard
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ ports:
+ - name: jupyterlab
+ port: 80
+ protocol: TCP
+ targetPort: 8888
+ - name: tensorboard
+ port: 6006
+ protocol: TCP
+ targetPort: 6006
+ routes:
+ - match:
+ - uri:
+ prefix: /tensorboard
+ route:
+ - destination:
+ port:
+ number: 6006
+ - match:
+ - uri:
+ prefix: / #jupyter runs at the default route
+ route:
+ - destination:
+ port:
+ number: 80
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/jupyterlab/20200929153931.yaml b/db/yaml/workspaces/jupyterlab/20200929153931.yaml
index 4d92b38..5c1c97b 100644
--- a/db/yaml/workspaces/jupyterlab/20200929153931.yaml
+++ b/db/yaml/workspaces/jupyterlab/20200929153931.yaml
@@ -1,58 +1,65 @@
-# Docker containers that are part of the Workspace
-containers:
- - name: jupyterlab-tensorflow
- image: onepanel/jupyterlab:1.0.1
- command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * \'self\'\"}}' --notebook-dir='/data' --allow-root"]
- env:
- - name: tornado
- value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ \'self'\"}}'"
- args:
- ports:
- - containerPort: 8888
- name: jupyterlab
- - containerPort: 6006
- name: tensorboard
- volumeMounts:
- - name: data
- mountPath: /data
-ports:
- - name: jupyterlab
- port: 80
- protocol: TCP
- targetPort: 8888
- - name: tensorboard
- port: 6006
- protocol: TCP
- targetPort: 6006
-routes:
- - match:
- - uri:
- prefix: /tensorboard
- route:
- - destination:
- port:
- number: 6006
- - match:
- - uri:
- prefix: / #jupyter runs at the default route
- route:
- - destination:
- port:
- number: 80
-# DAG Workflow to be executed once a Workspace action completes (optional)
-#postExecutionWorkflow:
-# entrypoint: main
-# templates:
-# - name: main
-# dag:
-# tasks:
-# - name: slack-notify
-# template: slack-notify
-# - name: slack-notify
-# container:
-# image: technosophos/slack-notify
-# args:
-# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
-# command:
-# - sh
-# - -c
\ No newline at end of file
+metadata:
+ name: JupyterLab
+ kind: Workspace
+ version: 20200929153931
+ action: update
+ description: "Interactive development environment for notebooks"
+spec:
+ # Docker containers that are part of the Workspace
+ containers:
+ - name: jupyterlab-tensorflow
+ image: onepanel/jupyterlab:1.0.1
+ command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * \'self\'\"}}' --notebook-dir='/data' --allow-root"]
+ env:
+ - name: tornado
+ value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ \'self'\"}}'"
+ args:
+ ports:
+ - containerPort: 8888
+ name: jupyterlab
+ - containerPort: 6006
+ name: tensorboard
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ ports:
+ - name: jupyterlab
+ port: 80
+ protocol: TCP
+ targetPort: 8888
+ - name: tensorboard
+ port: 6006
+ protocol: TCP
+ targetPort: 6006
+ routes:
+ - match:
+ - uri:
+ prefix: /tensorboard
+ route:
+ - destination:
+ port:
+ number: 6006
+ - match:
+ - uri:
+ prefix: / #jupyter runs at the default route
+ route:
+ - destination:
+ port:
+ number: 80
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/jupyterlab/20201028145442.yaml b/db/yaml/workspaces/jupyterlab/20201028145442.yaml
index c7d7965..9dccd5e 100644
--- a/db/yaml/workspaces/jupyterlab/20201028145442.yaml
+++ b/db/yaml/workspaces/jupyterlab/20201028145442.yaml
@@ -1,77 +1,84 @@
-# Docker containers that are part of the Workspace
-containers:
- - name: jupyterlab
- image: onepanel/jupyterlab:1.0.1
- command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * \'self\'\"}}' --notebook-dir='/data' --allow-root"]
- env:
- - name: tornado
- value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ \'self'\"}}'"
- ports:
- - containerPort: 8888
- name: jupyterlab
- - containerPort: 6006
- name: tensorboard
- volumeMounts:
- - name: data
- mountPath: /data
- lifecycle:
- postStart:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- condayml="/data/.environment.yml";
- jupytertxt="/data/.jupexported.txt";
- if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
- if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
- preStop:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- conda env export > /data/.environment.yml -n base;
- jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
- cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
-ports:
- - name: jupyterlab
- port: 80
- protocol: TCP
- targetPort: 8888
- - name: tensorboard
- port: 6006
- protocol: TCP
- targetPort: 6006
-routes:
- - match:
- - uri:
- prefix: /tensorboard
- route:
- - destination:
- port:
- number: 6006
- - match:
- - uri:
- prefix: / #jupyter runs at the default route
- route:
- - destination:
- port:
- number: 80
-# DAG Workflow to be executed once a Workspace action completes (optional)
-#postExecutionWorkflow:
-# entrypoint: main
-# templates:
-# - name: main
-# dag:
-# tasks:
-# - name: slack-notify
-# template: slack-notify
-# - name: slack-notify
-# container:
-# image: technosophos/slack-notify
-# args:
-# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
-# command:
-# - sh
-# - -c
\ No newline at end of file
+metadata:
+ name: JupyterLab
+ kind: Workspace
+ version: 20201028145442
+ action: update
+ description: "Interactive development environment for notebooks"
+spec:
+ # Docker containers that are part of the Workspace
+ containers:
+ - name: jupyterlab
+ image: onepanel/jupyterlab:1.0.1
+ command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * \'self\'\"}}' --notebook-dir='/data' --allow-root"]
+ env:
+ - name: tornado
+ value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ \'self'\"}}'"
+ ports:
+ - containerPort: 8888
+ name: jupyterlab
+ - containerPort: 6006
+ name: tensorboard
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ condayml="/data/.environment.yml";
+ jupytertxt="/data/.jupexported.txt";
+ if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
+ if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
+ preStop:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ conda env export > /data/.environment.yml -n base;
+ jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
+ cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
+ ports:
+ - name: jupyterlab
+ port: 80
+ protocol: TCP
+ targetPort: 8888
+ - name: tensorboard
+ port: 6006
+ protocol: TCP
+ targetPort: 6006
+ routes:
+ - match:
+ - uri:
+ prefix: /tensorboard
+ route:
+ - destination:
+ port:
+ number: 6006
+ - match:
+ - uri:
+ prefix: / #jupyter runs at the default route
+ route:
+ - destination:
+ port:
+ number: 80
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/jupyterlab/20201031165106.yaml b/db/yaml/workspaces/jupyterlab/20201031165106.yaml
index 0ef89e8..af6ee41 100644
--- a/db/yaml/workspaces/jupyterlab/20201031165106.yaml
+++ b/db/yaml/workspaces/jupyterlab/20201031165106.yaml
@@ -1,79 +1,86 @@
-# Docker containers that are part of the Workspace
-containers:
- - name: jupyterlab
- image: onepanel/jupyterlab:1.0.1
- command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * \'self\'\"}}' --notebook-dir='/data' --allow-root"]
- env:
- - name: tornado
- value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ \'self'\"}}'"
- - name: TENSORBOARD_PROXY_URL
- value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
- ports:
- - containerPort: 8888
- name: jupyterlab
- - containerPort: 6006
- name: tensorboard
- volumeMounts:
- - name: data
- mountPath: /data
- lifecycle:
- postStart:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- condayml="/data/.environment.yml";
- jupytertxt="/data/.jupexported.txt";
- if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
- if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
- preStop:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- conda env export > /data/.environment.yml -n base;
- jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
- cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
-ports:
- - name: jupyterlab
- port: 80
- protocol: TCP
- targetPort: 8888
- - name: tensorboard
- port: 6006
- protocol: TCP
- targetPort: 6006
-routes:
- - match:
- - uri:
- prefix: /tensorboard
- route:
- - destination:
- port:
- number: 6006
- - match:
- - uri:
- prefix: / #jupyter runs at the default route
- route:
- - destination:
- port:
- number: 80
-# DAG Workflow to be executed once a Workspace action completes (optional)
-#postExecutionWorkflow:
-# entrypoint: main
-# templates:
-# - name: main
-# dag:
-# tasks:
-# - name: slack-notify
-# template: slack-notify
-# - name: slack-notify
-# container:
-# image: technosophos/slack-notify
-# args:
-# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
-# command:
-# - sh
-# - -c
\ No newline at end of file
+metadata:
+ name: JupyterLab
+ kind: Workspace
+ version: 20201031165106
+ action: update
+ description: "Interactive development environment for notebooks"
+spec:
+ # Docker containers that are part of the Workspace
+ containers:
+ - name: jupyterlab
+ image: onepanel/jupyterlab:1.0.1
+ command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * \'self\'\"}}' --notebook-dir='/data' --allow-root"]
+ env:
+ - name: tornado
+ value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ \'self'\"}}'"
+ - name: TENSORBOARD_PROXY_URL
+ value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
+ ports:
+ - containerPort: 8888
+ name: jupyterlab
+ - containerPort: 6006
+ name: tensorboard
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ condayml="/data/.environment.yml";
+ jupytertxt="/data/.jupexported.txt";
+ if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
+ if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
+ preStop:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ conda env export > /data/.environment.yml -n base;
+ jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
+ cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
+ ports:
+ - name: jupyterlab
+ port: 80
+ protocol: TCP
+ targetPort: 8888
+ - name: tensorboard
+ port: 6006
+ protocol: TCP
+ targetPort: 6006
+ routes:
+ - match:
+ - uri:
+ prefix: /tensorboard
+ route:
+ - destination:
+ port:
+ number: 6006
+ - match:
+ - uri:
+ prefix: / #jupyter runs at the default route
+ route:
+ - destination:
+ port:
+ number: 80
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/jupyterlab/20201214133458.yaml b/db/yaml/workspaces/jupyterlab/20201214133458.yaml
index 0af3dfb..dc5be7c 100644
--- a/db/yaml/workspaces/jupyterlab/20201214133458.yaml
+++ b/db/yaml/workspaces/jupyterlab/20201214133458.yaml
@@ -1,80 +1,87 @@
-# Docker containers that are part of the Workspace
-containers:
- - name: jupyterlab
- image: onepanel/jupyterlab:1.0.1
- command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * \'self\'\"}}' --notebook-dir='/data' --allow-root"]
- workingDir: /data
- env:
- - name: tornado
- value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ \'self'\"}}'"
- - name: TENSORBOARD_PROXY_URL
- value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
- ports:
- - containerPort: 8888
- name: jupyterlab
- - containerPort: 6006
- name: tensorboard
- volumeMounts:
- - name: data
- mountPath: /data
- lifecycle:
- postStart:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- condayml="/data/.environment.yml";
- jupytertxt="/data/.jupexported.txt";
- if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
- if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
- preStop:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- conda env export > /data/.environment.yml -n base;
- jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
- cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
-ports:
- - name: jupyterlab
- port: 80
- protocol: TCP
- targetPort: 8888
- - name: tensorboard
- port: 6006
- protocol: TCP
- targetPort: 6006
-routes:
- - match:
- - uri:
- prefix: /tensorboard
- route:
- - destination:
- port:
- number: 6006
- - match:
- - uri:
- prefix: / #jupyter runs at the default route
- route:
- - destination:
- port:
- number: 80
-# DAG Workflow to be executed once a Workspace action completes (optional)
-#postExecutionWorkflow:
-# entrypoint: main
-# templates:
-# - name: main
-# dag:
-# tasks:
-# - name: slack-notify
-# template: slack-notify
-# - name: slack-notify
-# container:
-# image: technosophos/slack-notify
-# args:
-# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
-# command:
-# - sh
-# - -c
+metadata:
+ name: JupyterLab
+ kind: Workspace
+ version: 20201214133458
+ action: update
+ description: "Interactive development environment for notebooks"
+spec:
+ # Docker containers that are part of the Workspace
+ containers:
+ - name: jupyterlab
+ image: onepanel/jupyterlab:1.0.1
+ command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * \'self\'\"}}' --notebook-dir='/data' --allow-root"]
+ workingDir: /data
+ env:
+ - name: tornado
+ value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ \'self'\"}}'"
+ - name: TENSORBOARD_PROXY_URL
+ value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
+ ports:
+ - containerPort: 8888
+ name: jupyterlab
+ - containerPort: 6006
+ name: tensorboard
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ condayml="/data/.environment.yml";
+ jupytertxt="/data/.jupexported.txt";
+ if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
+ if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
+ preStop:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ conda env export > /data/.environment.yml -n base;
+ jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
+ cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
+ ports:
+ - name: jupyterlab
+ port: 80
+ protocol: TCP
+ targetPort: 8888
+ - name: tensorboard
+ port: 6006
+ protocol: TCP
+ targetPort: 6006
+ routes:
+ - match:
+ - uri:
+ prefix: /tensorboard
+ route:
+ - destination:
+ port:
+ number: 6006
+ - match:
+ - uri:
+ prefix: / #jupyter runs at the default route
+ route:
+ - destination:
+ port:
+ number: 80
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
diff --git a/db/yaml/workspaces/jupyterlab/20201229205644.yaml b/db/yaml/workspaces/jupyterlab/20201229205644.yaml
index 521f24f..767bfc9 100644
--- a/db/yaml/workspaces/jupyterlab/20201229205644.yaml
+++ b/db/yaml/workspaces/jupyterlab/20201229205644.yaml
@@ -1,93 +1,100 @@
-# Docker containers that are part of the Workspace
-containers:
- - name: jupyterlab
- image: onepanel/dl:0.17.0
- command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"]
- workingDir: /data
- env:
- - name: tornado
- value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'"
- - name: TENSORBOARD_PROXY_URL
- value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
- ports:
- - containerPort: 8888
- name: jupyterlab
- - containerPort: 6006
- name: tensorboard
- - containerPort: 8080
- name: nni
- volumeMounts:
- - name: data
- mountPath: /data
- lifecycle:
- postStart:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- condayml="/data/.environment.yml";
- jupytertxt="/data/.jupexported.txt";
- if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
- if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
- preStop:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- conda env export > /data/.environment.yml -n base;
- jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
- cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
-ports:
- - name: jupyterlab
- port: 80
- protocol: TCP
- targetPort: 8888
- - name: tensorboard
- port: 6006
- protocol: TCP
- targetPort: 6006
- - name: nni
- port: 8080
- protocol: TCP
- targetPort: 8080
-routes:
- - match:
- - uri:
- prefix: /tensorboard
- route:
- - destination:
- port:
- number: 6006
- - match:
- - uri:
- prefix: /nni
- route:
- - destination:
- port:
- number: 8080
- - match:
- - uri:
- prefix: / #jupyter runs at the default route
- route:
- - destination:
- port:
- number: 80
-# DAG Workflow to be executed once a Workspace action completes (optional)
-#postExecutionWorkflow:
-# entrypoint: main
-# templates:
-# - name: main
-# dag:
-# tasks:
-# - name: slack-notify
-# template: slack-notify
-# - name: slack-notify
-# container:
-# image: technosophos/slack-notify
-# args:
-# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
-# command:
-# - sh
-# - -c
+metadata:
+ name: JupyterLab
+ kind: Workspace
+ version: 20201229205644
+ action: update
+ description: "Interactive development environment for notebooks"
+spec:
+ # Docker containers that are part of the Workspace
+ containers:
+ - name: jupyterlab
+ image: onepanel/dl:0.17.0
+ command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"]
+ workingDir: /data
+ env:
+ - name: tornado
+ value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'"
+ - name: TENSORBOARD_PROXY_URL
+ value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
+ ports:
+ - containerPort: 8888
+ name: jupyterlab
+ - containerPort: 6006
+ name: tensorboard
+ - containerPort: 8080
+ name: nni
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ condayml="/data/.environment.yml";
+ jupytertxt="/data/.jupexported.txt";
+ if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
+ if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
+ preStop:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ conda env export > /data/.environment.yml -n base;
+ jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
+ cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
+ ports:
+ - name: jupyterlab
+ port: 80
+ protocol: TCP
+ targetPort: 8888
+ - name: tensorboard
+ port: 6006
+ protocol: TCP
+ targetPort: 6006
+ - name: nni
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ routes:
+ - match:
+ - uri:
+ prefix: /tensorboard
+ route:
+ - destination:
+ port:
+ number: 6006
+ - match:
+ - uri:
+ prefix: /nni
+ route:
+ - destination:
+ port:
+ number: 8080
+ - match:
+ - uri:
+ prefix: / #jupyter runs at the default route
+ route:
+ - destination:
+ port:
+ number: 80
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
diff --git a/db/yaml/workspaces/jupyterlab/20210129142057.yaml b/db/yaml/workspaces/jupyterlab/20210129142057.yaml
index 2db78e7..3c92312 100644
--- a/db/yaml/workspaces/jupyterlab/20210129142057.yaml
+++ b/db/yaml/workspaces/jupyterlab/20210129142057.yaml
@@ -1,101 +1,108 @@
-containers:
- - name: jupyterlab
- image: onepanel/dl:0.17.0
- command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"]
- workingDir: /data
- env:
- - name: tornado
- value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'"
- - name: TENSORBOARD_PROXY_URL
- value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
- - name: ONEPANEL_MAIN_CONTAINER
- value: 'true'
- ports:
- - containerPort: 8888
- name: jupyterlab
- - containerPort: 6006
- name: tensorboard
- - containerPort: 8080
- name: nni
- volumeMounts:
- - name: data
- mountPath: /data
- lifecycle:
- postStart:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- condayml="/data/.environment.yml";
- jupytertxt="/data/.jupexported.txt";
- if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
- if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
- preStop:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- conda env export > /data/.environment.yml -n base;
- jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
- cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
- - name: sys-filesyncer
- image: onepanel/filesyncer:v0.18.0
- imagePullPolicy: Always
- args:
- - server
- - -host=localhost:8889
- - -server-prefix=/sys/filesyncer
- volumeMounts:
- - name: data
- mountPath: /data
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: jupyterlab
- port: 80
- protocol: TCP
- targetPort: 8888
- - name: tensorboard
- port: 6006
- protocol: TCP
- targetPort: 6006
- - name: nni
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: fs
- port: 8889
- protocol: TCP
- targetPort: 8889
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8889
- - match:
- - uri:
- prefix: /tensorboard
- route:
- - destination:
- port:
- number: 6006
- - match:
- - uri:
- prefix: /nni
- route:
- - destination:
- port:
- number: 8080
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 80
+metadata:
+ name: JupyterLab
+ kind: Workspace
+ version: 20210129142057
+ action: update
+ description: "Interactive development environment for notebooks"
+spec:
+ containers:
+ - name: jupyterlab
+ image: onepanel/dl:0.17.0
+ command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"]
+ workingDir: /data
+ env:
+ - name: tornado
+ value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'"
+ - name: TENSORBOARD_PROXY_URL
+ value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
+ - name: ONEPANEL_MAIN_CONTAINER
+ value: 'true'
+ ports:
+ - containerPort: 8888
+ name: jupyterlab
+ - containerPort: 6006
+ name: tensorboard
+ - containerPort: 8080
+ name: nni
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ condayml="/data/.environment.yml";
+ jupytertxt="/data/.jupexported.txt";
+ if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
+ if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
+ preStop:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ conda env export > /data/.environment.yml -n base;
+ jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
+ cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
+ - name: sys-filesyncer
+ image: onepanel/filesyncer:v0.18.0
+ imagePullPolicy: Always
+ args:
+ - server
+ - -host=localhost:8889
+ - -server-prefix=/sys/filesyncer
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: jupyterlab
+ port: 80
+ protocol: TCP
+ targetPort: 8888
+ - name: tensorboard
+ port: 6006
+ protocol: TCP
+ targetPort: 6006
+ - name: nni
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8889
+ protocol: TCP
+ targetPort: 8889
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8889
+ - match:
+ - uri:
+ prefix: /tensorboard
+ route:
+ - destination:
+ port:
+ number: 6006
+ - match:
+ - uri:
+ prefix: /nni
+ route:
+ - destination:
+ port:
+ number: 8080
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
diff --git a/db/yaml/workspaces/jupyterlab/20210224180017.yaml b/db/yaml/workspaces/jupyterlab/20210224180017.yaml
index 8a41110..c8bd19e 100644
--- a/db/yaml/workspaces/jupyterlab/20210224180017.yaml
+++ b/db/yaml/workspaces/jupyterlab/20210224180017.yaml
@@ -1,101 +1,108 @@
-containers:
- - name: jupyterlab
- image: onepanel/dl:0.17.0
- command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"]
- workingDir: /data
- env:
- - name: tornado
- value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'"
- - name: TENSORBOARD_PROXY_URL
- value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
- - name: ONEPANEL_MAIN_CONTAINER
- value: 'true'
- ports:
- - containerPort: 8888
- name: jupyterlab
- - containerPort: 6006
- name: tensorboard
- - containerPort: 8080
- name: nni
- volumeMounts:
- - name: data
- mountPath: /data
- lifecycle:
- postStart:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- condayml="/data/.environment.yml";
- jupytertxt="/data/.jupexported.txt";
- if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
- if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
- preStop:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- conda env export > /data/.environment.yml -n base;
- jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
- cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
- - name: sys-filesyncer
- image: onepanel/filesyncer:v0.19.0
- imagePullPolicy: Always
- args:
- - server
- - -host=localhost:8889
- - -server-prefix=/sys/filesyncer
- volumeMounts:
- - name: data
- mountPath: /data
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: jupyterlab
- port: 80
- protocol: TCP
- targetPort: 8888
- - name: tensorboard
- port: 6006
- protocol: TCP
- targetPort: 6006
- - name: nni
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: fs
- port: 8889
- protocol: TCP
- targetPort: 8889
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8889
- - match:
- - uri:
- prefix: /tensorboard
- route:
- - destination:
- port:
- number: 6006
- - match:
- - uri:
- prefix: /nni
- route:
- - destination:
- port:
- number: 8080
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 80
+metadata:
+ name: JupyterLab
+ kind: Workspace
+ version: 20210224180017
+ action: update
+ description: "Interactive development environment for notebooks"
+spec:
+ containers:
+ - name: jupyterlab
+ image: onepanel/dl:0.17.0
+ command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"]
+ workingDir: /data
+ env:
+ - name: tornado
+ value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'"
+ - name: TENSORBOARD_PROXY_URL
+ value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
+ - name: ONEPANEL_MAIN_CONTAINER
+ value: 'true'
+ ports:
+ - containerPort: 8888
+ name: jupyterlab
+ - containerPort: 6006
+ name: tensorboard
+ - containerPort: 8080
+ name: nni
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ condayml="/data/.environment.yml";
+ jupytertxt="/data/.jupexported.txt";
+ if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
+ if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
+ preStop:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ conda env export > /data/.environment.yml -n base;
+ jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
+ cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
+ - name: sys-filesyncer
+ image: onepanel/filesyncer:v0.19.0
+ imagePullPolicy: Always
+ args:
+ - server
+ - -host=localhost:8889
+ - -server-prefix=/sys/filesyncer
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: jupyterlab
+ port: 80
+ protocol: TCP
+ targetPort: 8888
+ - name: tensorboard
+ port: 6006
+ protocol: TCP
+ targetPort: 6006
+ - name: nni
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8889
+ protocol: TCP
+ targetPort: 8889
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8889
+ - match:
+ - uri:
+ prefix: /tensorboard
+ route:
+ - destination:
+ port:
+ number: 6006
+ - match:
+ - uri:
+ prefix: /nni
+ route:
+ - destination:
+ port:
+ number: 8080
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
diff --git a/db/yaml/workspaces/jupyterlab/20210323175655.yaml b/db/yaml/workspaces/jupyterlab/20210323175655.yaml
index ef5cdb1..ac69646 100644
--- a/db/yaml/workspaces/jupyterlab/20210323175655.yaml
+++ b/db/yaml/workspaces/jupyterlab/20210323175655.yaml
@@ -1,101 +1,108 @@
-containers:
- - name: jupyterlab
- image: onepanel/dl:v0.20.0
- command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"]
- workingDir: /data
- env:
- - name: tornado
- value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'"
- - name: TENSORBOARD_PROXY_URL
- value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
- - name: ONEPANEL_MAIN_CONTAINER
- value: 'true'
- ports:
- - containerPort: 8888
- name: jupyterlab
- - containerPort: 6006
- name: tensorboard
- - containerPort: 8080
- name: nni
- volumeMounts:
- - name: data
- mountPath: /data
- lifecycle:
- postStart:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- condayml="/data/.environment.yml";
- jupytertxt="/data/.jupexported.txt";
- if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
- if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
- preStop:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- conda env export > /data/.environment.yml -n base;
- jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
- cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
- - name: sys-filesyncer
- image: onepanel/filesyncer:v0.20.0
- imagePullPolicy: Always
- args:
- - server
- - -host=localhost:8889
- - -server-prefix=/sys/filesyncer
- volumeMounts:
- - name: data
- mountPath: /data
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: jupyterlab
- port: 80
- protocol: TCP
- targetPort: 8888
- - name: tensorboard
- port: 6006
- protocol: TCP
- targetPort: 6006
- - name: nni
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: fs
- port: 8889
- protocol: TCP
- targetPort: 8889
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8889
- - match:
- - uri:
- prefix: /tensorboard
- route:
- - destination:
- port:
- number: 6006
- - match:
- - uri:
- prefix: /nni
- route:
- - destination:
- port:
- number: 8080
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 80
+metadata:
+ name: JupyterLab
+ kind: Workspace
+ version: 20210323175655
+ action: update
+ description: "Interactive development environment for notebooks"
+spec:
+ containers:
+ - name: jupyterlab
+ image: onepanel/dl:v0.20.0
+ command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"]
+ workingDir: /data
+ env:
+ - name: tornado
+ value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'"
+ - name: TENSORBOARD_PROXY_URL
+ value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
+ - name: ONEPANEL_MAIN_CONTAINER
+ value: 'true'
+ ports:
+ - containerPort: 8888
+ name: jupyterlab
+ - containerPort: 6006
+ name: tensorboard
+ - containerPort: 8080
+ name: nni
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ condayml="/data/.environment.yml";
+ jupytertxt="/data/.jupexported.txt";
+ if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
+ if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
+ preStop:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ conda env export > /data/.environment.yml -n base;
+ jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
+ cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
+ - name: sys-filesyncer
+ image: onepanel/filesyncer:v0.20.0
+ imagePullPolicy: Always
+ args:
+ - server
+ - -host=localhost:8889
+ - -server-prefix=/sys/filesyncer
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: jupyterlab
+ port: 80
+ protocol: TCP
+ targetPort: 8888
+ - name: tensorboard
+ port: 6006
+ protocol: TCP
+ targetPort: 6006
+ - name: nni
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8889
+ protocol: TCP
+ targetPort: 8889
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8889
+ - match:
+ - uri:
+ prefix: /tensorboard
+ route:
+ - destination:
+ port:
+ number: 6006
+ - match:
+ - uri:
+ prefix: /nni
+ route:
+ - destination:
+ port:
+ number: 8080
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
diff --git a/db/yaml/workspaces/jupyterlab/20210719190719.yaml b/db/yaml/workspaces/jupyterlab/20210719190719.yaml
index fa15546..82881fe 100644
--- a/db/yaml/workspaces/jupyterlab/20210719190719.yaml
+++ b/db/yaml/workspaces/jupyterlab/20210719190719.yaml
@@ -1,101 +1,108 @@
-containers:
- - name: jupyterlab
- image: onepanel/dl:v0.20.0
- command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"]
- workingDir: /data
- env:
- - name: tornado
- value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'"
- - name: TENSORBOARD_PROXY_URL
- value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
- - name: ONEPANEL_MAIN_CONTAINER
- value: 'true'
- ports:
- - containerPort: 8888
- name: jupyterlab
- - containerPort: 6006
- name: tensorboard
- - containerPort: 8080
- name: nni
- volumeMounts:
- - name: data
- mountPath: /data
- lifecycle:
- postStart:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- condayml="/data/.environment.yml";
- jupytertxt="/data/.jupexported.txt";
- if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
- if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
- preStop:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- conda env export > /data/.environment.yml -n base;
- jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
- cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
- - name: sys-filesyncer
- image: onepanel/filesyncer:v1.0.0
- imagePullPolicy: Always
- args:
- - server
- - -host=localhost:8889
- - -server-prefix=/sys/filesyncer
- volumeMounts:
- - name: data
- mountPath: /data
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: jupyterlab
- port: 80
- protocol: TCP
- targetPort: 8888
- - name: tensorboard
- port: 6006
- protocol: TCP
- targetPort: 6006
- - name: nni
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: fs
- port: 8889
- protocol: TCP
- targetPort: 8889
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8889
- - match:
- - uri:
- prefix: /tensorboard
- route:
- - destination:
- port:
- number: 6006
- - match:
- - uri:
- prefix: /nni
- route:
- - destination:
- port:
- number: 8080
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 80
+metadata:
+ name: JupyterLab
+ kind: Workspace
+ version: 20210719190719
+ action: update
+ description: "Interactive development environment for notebooks"
+spec:
+ containers:
+ - name: jupyterlab
+ image: onepanel/dl:v0.20.0
+ command: ["/bin/bash", "-c", "pip install onepanel-sdk && start.sh LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 jupyter lab --LabApp.token='' --LabApp.allow_remote_access=True --LabApp.allow_origin=\"*\" --LabApp.disable_check_xsrf=True --LabApp.trust_xheaders=True --LabApp.base_url=/ --LabApp.tornado_settings='{\"headers\":{\"Content-Security-Policy\":\"frame-ancestors * 'self'\"}}' --notebook-dir='/data' --allow-root"]
+ workingDir: /data
+ env:
+ - name: tornado
+ value: "'{'headers':{'Content-Security-Policy':\"frame-ancestors\ *\ 'self'\"}}'"
+ - name: TENSORBOARD_PROXY_URL
+ value: '//$(ONEPANEL_RESOURCE_UID)--$(ONEPANEL_RESOURCE_NAMESPACE).$(ONEPANEL_DOMAIN)/tensorboard'
+ - name: ONEPANEL_MAIN_CONTAINER
+ value: 'true'
+ ports:
+ - containerPort: 8888
+ name: jupyterlab
+ - containerPort: 6006
+ name: tensorboard
+ - containerPort: 8080
+ name: nni
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ condayml="/data/.environment.yml";
+ jupytertxt="/data/.jupexported.txt";
+ if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
+ if [ -f "$jupytertxt" ]; then cat $jupytertxt | xargs -n 1 jupyter labextension install --no-build && jupyter lab build --minimize=False; fi;
+ preStop:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ conda env export > /data/.environment.yml -n base;
+ jupyter labextension list 1>/dev/null 2> /data/.jup.txt;
+ cat /data/.jup.txt | sed -n '2,$p' | awk 'sub(/v/,"@", $2){print $1$2}' > /data/.jupexported.txt;
+ - name: sys-filesyncer
+ image: onepanel/filesyncer:v1.0.0
+ imagePullPolicy: Always
+ args:
+ - server
+ - -host=localhost:8889
+ - -server-prefix=/sys/filesyncer
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: jupyterlab
+ port: 80
+ protocol: TCP
+ targetPort: 8888
+ - name: tensorboard
+ port: 6006
+ protocol: TCP
+ targetPort: 6006
+ - name: nni
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8889
+ protocol: TCP
+ targetPort: 8889
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8889
+ - match:
+ - uri:
+ prefix: /tensorboard
+ route:
+ - destination:
+ port:
+ number: 6006
+ - match:
+ - uri:
+ prefix: /nni
+ route:
+ - destination:
+ port:
+ number: 8080
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
diff --git a/db/yaml/workspaces/vnc/20210414165510.yaml b/db/yaml/workspaces/vnc/20210414165510.yaml
index 0d9dbf0..36f38db 100644
--- a/db/yaml/workspaces/vnc/20210414165510.yaml
+++ b/db/yaml/workspaces/vnc/20210414165510.yaml
@@ -1,57 +1,64 @@
-arguments:
- parameters:
- # parameter screen-resolution allows users to select screen resolution
- - name: screen-resolution
- value: 1680x1050
- type: select.select
- displayName: Screen Resolution
- options:
- - name: 1280x1024
- value: 1280x1024
- - name: 1680x1050
- value: 1680x1050
- - name: 2880x1800
- value: 2880x1800
-containers:
- - name: ubuntu
- image: onepanel/vnc:dl-vnc
- env:
- - name: VNC_PASSWORDLESS
- value: true
- - name: VNC_RESOLUTION
- value: '{{workflow.parameters.screen-resolution}}'
- ports:
- - containerPort: 6901
- name: vnc
- volumeMounts:
- - name: data
- mountPath: /data
-ports:
- - name: vnc
- port: 80
- protocol: TCP
- targetPort: 6901
-routes:
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 80
-# DAG Workflow to be executed once a Workspace action completes (optional)
-#postExecutionWorkflow:
-# entrypoint: main
-# templates:
-# - name: main
-# dag:
-# tasks:
-# - name: slack-notify
-# template: slack-notify
-# - name: slack-notify
-# container:
-# image: technosophos/slack-notify
-# args:
-# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
-# command:
-# - sh
\ No newline at end of file
+metadata:
+ name: "Deep Learning Desktop"
+ kind: Workspace
+ version: 20210414165510
+ action: create
+ description: "Deep learning desktop with VNC"
+spec:
+ arguments:
+ parameters:
+ # parameter screen-resolution allows users to select screen resolution
+ - name: screen-resolution
+ value: 1680x1050
+ type: select.select
+ displayName: Screen Resolution
+ options:
+ - name: 1280x1024
+ value: 1280x1024
+ - name: 1680x1050
+ value: 1680x1050
+ - name: 2880x1800
+ value: 2880x1800
+ containers:
+ - name: ubuntu
+ image: onepanel/vnc:dl-vnc
+ env:
+ - name: VNC_PASSWORDLESS
+ value: true
+ - name: VNC_RESOLUTION
+ value: '{{workflow.parameters.screen-resolution}}'
+ ports:
+ - containerPort: 6901
+ name: vnc
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ ports:
+ - name: vnc
+ port: 80
+ protocol: TCP
+ targetPort: 6901
+ routes:
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
\ No newline at end of file
diff --git a/db/yaml/workspaces/vnc/20210719190719.yaml b/db/yaml/workspaces/vnc/20210719190719.yaml
index 93b3f32..7a0a95a 100644
--- a/db/yaml/workspaces/vnc/20210719190719.yaml
+++ b/db/yaml/workspaces/vnc/20210719190719.yaml
@@ -1,81 +1,88 @@
-arguments:
- parameters:
- # parameter screen-resolution allows users to select screen resolution
- - name: screen-resolution
- value: 1680x1050
- type: select.select
- displayName: Screen Resolution
- options:
- - name: 1280x1024
- value: 1280x1024
- - name: 1680x1050
- value: 1680x1050
- - name: 2880x1800
- value: 2880x1800
-containers:
- - name: ubuntu
- image: onepanel/vnc:dl-vnc
- env:
- - name: VNC_PASSWORDLESS
- value: true
- - name: VNC_RESOLUTION
- value: '{{workflow.parameters.screen-resolution}}'
- ports:
- - containerPort: 6901
- name: vnc
- volumeMounts:
- - name: data
- mountPath: /data
- - name: sys-filesyncer
- image: onepanel/filesyncer:v1.0.0
- imagePullPolicy: Always
- args:
- - server
- - -host=localhost:8889
- - -server-prefix=/sys/filesyncer
- volumeMounts:
- - name: data
- mountPath: /data
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: vnc
- port: 80
- protocol: TCP
- targetPort: 6901
- - name: fs
- port: 8889
- protocol: TCP
- targetPort: 8889
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8889
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 80
-# DAG Workflow to be executed once a Workspace action completes (optional)
-#postExecutionWorkflow:
-# entrypoint: main
-# templates:
-# - name: main
-# dag:
-# tasks:
-# - name: slack-notify
-# template: slack-notify
-# - name: slack-notify
-# container:
-# image: technosophos/slack-notify
-# args:
-# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
-# command:
-# - sh
\ No newline at end of file
+metadata:
+ name: "Deep Learning Desktop"
+ kind: Workspace
+ version: 20210719190719
+ action: update
+ description: "Deep learning desktop with VNC"
+spec:
+ arguments:
+ parameters:
+ # parameter screen-resolution allows users to select screen resolution
+ - name: screen-resolution
+ value: 1680x1050
+ type: select.select
+ displayName: Screen Resolution
+ options:
+ - name: 1280x1024
+ value: 1280x1024
+ - name: 1680x1050
+ value: 1680x1050
+ - name: 2880x1800
+ value: 2880x1800
+ containers:
+ - name: ubuntu
+ image: onepanel/vnc:dl-vnc
+ env:
+ - name: VNC_PASSWORDLESS
+ value: true
+ - name: VNC_RESOLUTION
+ value: '{{workflow.parameters.screen-resolution}}'
+ ports:
+ - containerPort: 6901
+ name: vnc
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ - name: sys-filesyncer
+ image: onepanel/filesyncer:v1.0.0
+ imagePullPolicy: Always
+ args:
+ - server
+ - -host=localhost:8889
+ - -server-prefix=/sys/filesyncer
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: vnc
+ port: 80
+ protocol: TCP
+ targetPort: 6901
+ - name: fs
+ port: 8889
+ protocol: TCP
+ targetPort: 8889
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8889
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 80
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
\ No newline at end of file
diff --git a/db/yaml/workspaces/vscode/20200929144301.yaml b/db/yaml/workspaces/vscode/20200929144301.yaml
index 21d3177..00d3894 100644
--- a/db/yaml/workspaces/vscode/20200929144301.yaml
+++ b/db/yaml/workspaces/vscode/20200929144301.yaml
@@ -1,41 +1,48 @@
-# Docker containers that are part of the Workspace
-containers:
- - name: vscode
- image: onepanel/vscode:1.0.0
- command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."]
- ports:
- - containerPort: 8080
- name: vscode
- volumeMounts:
- - name: data
- mountPath: /data
-ports:
- - name: vscode
- port: 8080
- protocol: TCP
- targetPort: 8080
-routes:
- - match:
- - uri:
- prefix: / #vscode runs at the default route
- route:
- - destination:
- port:
- number: 8080
-# DAG Workflow to be executed once a Workspace action completes (optional)
-#postExecutionWorkflow:
-# entrypoint: main
-# templates:
-# - name: main
-# dag:
-# tasks:
-# - name: slack-notify
-# template: slack-notify
-# - name: slack-notify
-# container:
-# image: technosophos/slack-notify
-# args:
-# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
-# command:
-# - sh
-# - -c
\ No newline at end of file
+metadata:
+ name: "Visual Studio Code"
+ kind: Workspace
+ version: 20200929144301
+ action: create
+ description: "Open source code editor"
+spec:
+ # Docker containers that are part of the Workspace
+ containers:
+ - name: vscode
+ image: onepanel/vscode:1.0.0
+ command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."]
+ ports:
+ - containerPort: 8080
+ name: vscode
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ ports:
+ - name: vscode
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ routes:
+ - match:
+ - uri:
+ prefix: / #vscode runs at the default route
+ route:
+ - destination:
+ port:
+ number: 8080
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/vscode/20201028145443.yaml b/db/yaml/workspaces/vscode/20201028145443.yaml
index d57ba2b..810b396 100644
--- a/db/yaml/workspaces/vscode/20201028145443.yaml
+++ b/db/yaml/workspaces/vscode/20201028145443.yaml
@@ -1,60 +1,66 @@
-# Docker containers that are part of the Workspace
-containers:
- - name: vscode
- image: onepanel/vscode:1.0.0
- command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."]
- ports:
- - containerPort: 8080
- name: vscode
- volumeMounts:
- - name: data
- mountPath: /data
- lifecycle:
- postStart:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- condayml="/data/.environment.yml";
- vscodetxt="/data/.vscode-extensions.txt";
- if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
- if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi;
- preStop:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- conda env export > /data/.environment.yml -n base;
- code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt;
-ports:
- - name: vscode
- port: 8080
- protocol: TCP
- targetPort: 8080
-routes:
- - match:
- - uri:
- prefix: / #vscode runs at the default route
- route:
- - destination:
- port:
- number: 8080
-# DAG Workflow to be executed once a Workspace action completes (optional)
-#postExecutionWorkflow:
-# entrypoint: main
-# templates:
-# - name: main
-# dag:
-# tasks:
-# - name: slack-notify
-# template: slack-notify
-# - name: slack-notify
-# container:
-# image: technosophos/slack-notify
-# args:
-# - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
-# command:
-# - sh
-# - -c
\ No newline at end of file
+metadata:
+ name: "Visual Studio Code"
+ kind: Workspace
+ version: 20201028145443
+ action: update
+spec:
+ # Docker containers that are part of the Workspace
+ containers:
+ - name: vscode
+ image: onepanel/vscode:1.0.0
+ command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."]
+ ports:
+ - containerPort: 8080
+ name: vscode
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ condayml="/data/.environment.yml";
+ vscodetxt="/data/.vscode-extensions.txt";
+ if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
+ if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi;
+ preStop:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ conda env export > /data/.environment.yml -n base;
+ code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt;
+ ports:
+ - name: vscode
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ routes:
+ - match:
+ - uri:
+ prefix: / #vscode runs at the default route
+ route:
+ - destination:
+ port:
+ number: 8080
+ # DAG Workflow to be executed once a Workspace action completes (optional)
+ #postExecutionWorkflow:
+ # entrypoint: main
+ # templates:
+ # - name: main
+ # dag:
+ # tasks:
+ # - name: slack-notify
+ # template: slack-notify
+ # - name: slack-notify
+ # container:
+ # image: technosophos/slack-notify
+ # args:
+ # - SLACK_USERNAME=onepanel SLACK_TITLE="Your workspace is ready" SLACK_ICON=https://www.gravatar.com/avatar/5c4478592fe00878f62f0027be59c1bd SLACK_MESSAGE="Your workspace is now running" ./slack-notify
+ # command:
+ # - sh
+ # - -c
\ No newline at end of file
diff --git a/db/yaml/workspaces/vscode/20210129152427.yaml b/db/yaml/workspaces/vscode/20210129152427.yaml
index 9af2a05..b2cb55c 100644
--- a/db/yaml/workspaces/vscode/20210129152427.yaml
+++ b/db/yaml/workspaces/vscode/20210129152427.yaml
@@ -1,68 +1,74 @@
-containers:
- - name: vscode
- image: onepanel/vscode:1.0.0
- command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."]
- env:
- - name: ONEPANEL_MAIN_CONTAINER
- value: 'true'
- ports:
- - containerPort: 8080
- name: vscode
- volumeMounts:
- - name: data
- mountPath: /data
- lifecycle:
- postStart:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- condayml="/data/.environment.yml";
- vscodetxt="/data/.vscode-extensions.txt";
- if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
- if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi;
- preStop:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- conda env export > /data/.environment.yml -n base;
- code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt;
- - name: sys-filesyncer
- image: onepanel/filesyncer:v0.18.0
- imagePullPolicy: Always
- args:
- - server
- - -server-prefix=/sys/filesyncer
- volumeMounts:
- - name: data
- mountPath: /data
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: vscode
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: fs
- port: 8888
- protocol: TCP
- targetPort: 8888
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8888
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 8080
+metadata:
+ name: "Visual Studio Code"
+ kind: Workspace
+ version: 20210129152427
+ action: update
+spec:
+ containers:
+ - name: vscode
+ image: onepanel/vscode:1.0.0
+ command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."]
+ env:
+ - name: ONEPANEL_MAIN_CONTAINER
+ value: 'true'
+ ports:
+ - containerPort: 8080
+ name: vscode
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ condayml="/data/.environment.yml";
+ vscodetxt="/data/.vscode-extensions.txt";
+ if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
+ if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi;
+ preStop:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ conda env export > /data/.environment.yml -n base;
+ code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt;
+ - name: sys-filesyncer
+ image: onepanel/filesyncer:v0.18.0
+ imagePullPolicy: Always
+ args:
+ - server
+ - -server-prefix=/sys/filesyncer
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: vscode
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8888
+ protocol: TCP
+ targetPort: 8888
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8888
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 8080
diff --git a/db/yaml/workspaces/vscode/20210224180017.yaml b/db/yaml/workspaces/vscode/20210224180017.yaml
index 3327410..5779353 100644
--- a/db/yaml/workspaces/vscode/20210224180017.yaml
+++ b/db/yaml/workspaces/vscode/20210224180017.yaml
@@ -1,68 +1,74 @@
-containers:
- - name: vscode
- image: onepanel/vscode:1.0.0
- command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."]
- env:
- - name: ONEPANEL_MAIN_CONTAINER
- value: 'true'
- ports:
- - containerPort: 8080
- name: vscode
- volumeMounts:
- - name: data
- mountPath: /data
- lifecycle:
- postStart:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- condayml="/data/.environment.yml";
- vscodetxt="/data/.vscode-extensions.txt";
- if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
- if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi;
- preStop:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- conda env export > /data/.environment.yml -n base;
- code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt;
- - name: sys-filesyncer
- image: onepanel/filesyncer:v0.19.0
- imagePullPolicy: Always
- args:
- - server
- - -server-prefix=/sys/filesyncer
- volumeMounts:
- - name: data
- mountPath: /data
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: vscode
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: fs
- port: 8888
- protocol: TCP
- targetPort: 8888
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8888
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 8080
+metadata:
+ name: "Visual Studio Code"
+ kind: Workspace
+ version: 20210224180017
+ action: update
+spec:
+ containers:
+ - name: vscode
+ image: onepanel/vscode:1.0.0
+ command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."]
+ env:
+ - name: ONEPANEL_MAIN_CONTAINER
+ value: 'true'
+ ports:
+ - containerPort: 8080
+ name: vscode
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ condayml="/data/.environment.yml";
+ vscodetxt="/data/.vscode-extensions.txt";
+ if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
+ if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi;
+ preStop:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ conda env export > /data/.environment.yml -n base;
+ code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt;
+ - name: sys-filesyncer
+ image: onepanel/filesyncer:v0.19.0
+ imagePullPolicy: Always
+ args:
+ - server
+ - -server-prefix=/sys/filesyncer
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: vscode
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8888
+ protocol: TCP
+ targetPort: 8888
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8888
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 8080
diff --git a/db/yaml/workspaces/vscode/20210323175655.yaml b/db/yaml/workspaces/vscode/20210323175655.yaml
index aff126d..11efdee 100644
--- a/db/yaml/workspaces/vscode/20210323175655.yaml
+++ b/db/yaml/workspaces/vscode/20210323175655.yaml
@@ -1,68 +1,74 @@
-containers:
- - name: vscode
- image: onepanel/vscode:v0.20.0_code-server.3.9.1
- command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."]
- env:
- - name: ONEPANEL_MAIN_CONTAINER
- value: 'true'
- ports:
- - containerPort: 8080
- name: vscode
- volumeMounts:
- - name: data
- mountPath: /data
- lifecycle:
- postStart:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- condayml="/data/.environment.yml";
- vscodetxt="/data/.vscode-extensions.txt";
- if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
- if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi;
- preStop:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- conda env export > /data/.environment.yml -n base;
- code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt;
- - name: sys-filesyncer
- image: onepanel/filesyncer:v0.20.0
- imagePullPolicy: Always
- args:
- - server
- - -server-prefix=/sys/filesyncer
- volumeMounts:
- - name: data
- mountPath: /data
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: vscode
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: fs
- port: 8888
- protocol: TCP
- targetPort: 8888
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8888
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 8080
+metadata:
+ name: "Visual Studio Code"
+ kind: Workspace
+ version: 20210323175655
+ action: update
+spec:
+ containers:
+ - name: vscode
+ image: onepanel/vscode:v0.20.0_code-server.3.9.1
+ command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."]
+ env:
+ - name: ONEPANEL_MAIN_CONTAINER
+ value: 'true'
+ ports:
+ - containerPort: 8080
+ name: vscode
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ condayml="/data/.environment.yml";
+ vscodetxt="/data/.vscode-extensions.txt";
+ if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
+ if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi;
+ preStop:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ conda env export > /data/.environment.yml -n base;
+ code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt;
+ - name: sys-filesyncer
+ image: onepanel/filesyncer:v0.20.0
+ imagePullPolicy: Always
+ args:
+ - server
+ - -server-prefix=/sys/filesyncer
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: vscode
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8888
+ protocol: TCP
+ targetPort: 8888
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8888
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 8080
diff --git a/db/yaml/workspaces/vscode/20210719190719.yaml b/db/yaml/workspaces/vscode/20210719190719.yaml
index 464b009..e733c61 100644
--- a/db/yaml/workspaces/vscode/20210719190719.yaml
+++ b/db/yaml/workspaces/vscode/20210719190719.yaml
@@ -1,68 +1,74 @@
-containers:
- - name: vscode
- image: onepanel/vscode:v0.20.0_code-server.3.9.1
- command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."]
- env:
- - name: ONEPANEL_MAIN_CONTAINER
- value: 'true'
- ports:
- - containerPort: 8080
- name: vscode
- volumeMounts:
- - name: data
- mountPath: /data
- lifecycle:
- postStart:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- condayml="/data/.environment.yml";
- vscodetxt="/data/.vscode-extensions.txt";
- if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
- if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi;
- preStop:
- exec:
- command:
- - /bin/sh
- - -c
- - >
- conda env export > /data/.environment.yml -n base;
- code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt;
- - name: sys-filesyncer
- image: onepanel/filesyncer:v1.0.0
- imagePullPolicy: Always
- args:
- - server
- - -server-prefix=/sys/filesyncer
- volumeMounts:
- - name: data
- mountPath: /data
- - name: sys-namespace-config
- mountPath: /etc/onepanel
- readOnly: true
-ports:
- - name: vscode
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: fs
- port: 8888
- protocol: TCP
- targetPort: 8888
-routes:
- - match:
- - uri:
- prefix: /sys/filesyncer
- route:
- - destination:
- port:
- number: 8888
- - match:
- - uri:
- prefix: /
- route:
- - destination:
- port:
- number: 8080
+metadata:
+ name: "Visual Studio Code"
+ kind: Workspace
+ version: 20210719190719
+ action: update
+spec:
+ containers:
+ - name: vscode
+ image: onepanel/vscode:v0.20.0_code-server.3.9.1
+ command: ["/bin/bash", "-c", "pip install onepanel-sdk && /usr/bin/entrypoint.sh --bind-addr 0.0.0.0:8080 --auth none ."]
+ env:
+ - name: ONEPANEL_MAIN_CONTAINER
+ value: 'true'
+ ports:
+ - containerPort: 8080
+ name: vscode
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ condayml="/data/.environment.yml";
+ vscodetxt="/data/.vscode-extensions.txt";
+ if [ -f "$condayml" ]; then conda env update -f $condayml; fi;
+ if [ -f "$vscodetxt" ]; then cat $vscodetxt | xargs -n 1 code-server --install-extension; fi;
+ preStop:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - >
+ conda env export > /data/.environment.yml -n base;
+ code-server --list-extensions | tail -n +2 > /data/.vscode-extensions.txt;
+ - name: sys-filesyncer
+ image: onepanel/filesyncer:v1.0.0
+ imagePullPolicy: Always
+ args:
+ - server
+ - -server-prefix=/sys/filesyncer
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ - name: sys-namespace-config
+ mountPath: /etc/onepanel
+ readOnly: true
+ ports:
+ - name: vscode
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: fs
+ port: 8888
+ protocol: TCP
+ targetPort: 8888
+ routes:
+ - match:
+ - uri:
+ prefix: /sys/filesyncer
+ route:
+ - destination:
+ port:
+ number: 8888
+ - match:
+ - uri:
+ prefix: /
+ route:
+ - destination:
+ port:
+ number: 8080
diff --git a/pkg/data.go b/pkg/data.go
new file mode 100644
index 0000000..f7fbe0e
--- /dev/null
+++ b/pkg/data.go
@@ -0,0 +1,135 @@
+package v1
+
+import (
+ "github.com/onepanelio/core/pkg/util/data"
+ "github.com/onepanelio/core/pkg/util/extensions"
+)
+
+// createWorkspaceTemplateFromGenericManifest will create the workspace template named by the manifest file's
+// metadata, with the contents given by the manifest file's spec, for the input {{namespace}}
+func (c *Client) createWorkspaceTemplateFromGenericManifest(namespace string, manifestFile *data.ManifestFile) (err error) {
+ manifest, err := manifestFile.SpecString()
+ if err != nil {
+ return err
+ }
+ templateName := manifestFile.Metadata.Name
+ description := manifestFile.Metadata.Description
+
+ artifactRepositoryType, err := c.GetArtifactRepositoryType(namespace)
+ if err != nil {
+ return err
+ }
+
+ replaceMap := map[string]string{
+ "{{.ArtifactRepositoryType}}": artifactRepositoryType,
+ }
+ manifest = extensions.ReplaceMapValues(manifest, replaceMap)
+
+ workspaceTemplate, err := CreateWorkspaceTemplate(templateName)
+ if err != nil {
+ return err
+ }
+ workspaceTemplate.Manifest = manifest
+
+ if description != nil {
+ workspaceTemplate.Description = *description
+ }
+
+ _, err = c.CreateWorkspaceTemplate(namespace, workspaceTemplate)
+
+ return
+}
+
+// updateWorkspaceTemplateManifest will update the workspace template named by the manifest file's
+// metadata, with the contents given by the manifest file's spec
+func (c *Client) updateWorkspaceTemplateManifest(namespace string, manifestFile *data.ManifestFile) (err error) {
+ manifest, err := manifestFile.SpecString()
+ if err != nil {
+ return err
+ }
+ templateName := manifestFile.Metadata.Name
+
+ artifactRepositoryType, err := c.GetArtifactRepositoryType(namespace)
+ if err != nil {
+ return err
+ }
+
+ replaceMap := map[string]string{
+ "{{.ArtifactRepositoryType}}": artifactRepositoryType,
+ }
+ manifest = extensions.ReplaceMapValues(manifest, replaceMap)
+
+ workspaceTemplate, err := CreateWorkspaceTemplate(templateName)
+ if err != nil {
+ return err
+ }
+ workspaceTemplate.Manifest = manifest
+
+ _, err = c.UpdateWorkspaceTemplateManifest(namespace, workspaceTemplate.UID, workspaceTemplate.Manifest)
+
+ return
+}
+
+// createWorkflowTemplateFromGenericManifest will create the workflow template named by the manifest file's
+// metadata, with the contents given by the manifest file's spec
+func (c *Client) createWorkflowTemplateFromGenericManifest(namespace string, manifestFile *data.ManifestFile) (err error) {
+ manifest, err := manifestFile.SpecString()
+ if err != nil {
+ return err
+ }
+ templateName := manifestFile.Metadata.Name
+ labels := manifestFile.Metadata.Labels
+
+ artifactRepositoryType, err := c.GetArtifactRepositoryType(namespace)
+ if err != nil {
+ return err
+ }
+
+ replaceMap := map[string]string{
+ "{{.ArtifactRepositoryType}}": artifactRepositoryType,
+ }
+ manifest = extensions.ReplaceMapValues(manifest, replaceMap)
+
+ workflowTemplate, err := CreateWorkflowTemplate(templateName)
+ if err != nil {
+ return
+ }
+ workflowTemplate.Manifest = manifest
+ workflowTemplate.Labels = labels
+
+ _, err = c.CreateWorkflowTemplate(namespace, workflowTemplate)
+
+ return
+}
+
+// updateWorkflowTemplateManifest will update the workflow template named by the manifest file's
+// metadata, with the contents given by the manifest file's spec
+func (c *Client) updateWorkflowTemplateManifest(namespace string, manifestFile *data.ManifestFile) (err error) {
+ manifest, err := manifestFile.SpecString()
+ if err != nil {
+ return err
+ }
+ templateName := manifestFile.Metadata.Name
+ labels := manifestFile.Metadata.Labels
+
+ artifactRepositoryType, err := c.GetArtifactRepositoryType(namespace)
+ if err != nil {
+ return err
+ }
+
+ replaceMap := map[string]string{
+ "{{.ArtifactRepositoryType}}": artifactRepositoryType,
+ }
+ manifest = extensions.ReplaceMapValues(manifest, replaceMap)
+
+ workflowTemplate, err := CreateWorkflowTemplate(templateName)
+ if err != nil {
+ return
+ }
+ workflowTemplate.Manifest = manifest
+ workflowTemplate.Labels = labels
+
+ _, err = c.CreateWorkflowTemplateVersion(namespace, workflowTemplate)
+
+ return
+}
diff --git a/pkg/util/data/migration.go b/pkg/util/data/migration.go
new file mode 100644
index 0000000..19919e1
--- /dev/null
+++ b/pkg/util/data/migration.go
@@ -0,0 +1,49 @@
+package data
+
+import (
+ "gopkg.in/yaml.v3"
+ "io/ioutil"
+)
+
+// ManifestFile represents a file that contains information about a workflow or workspace template
+type ManifestFile struct {
+ Metadata ManifestFileMetadata `yaml:"metadata"`
+ Spec interface{} `yaml:"spec"`
+}
+
+// ManifestFileMetadata represents information about the template we are working with
+type ManifestFileMetadata struct {
+ Name string
+ Kind string // {Workflow, Workspace}
+ Version uint64
+ Action string // {create,update}
+ Description *string
+ Labels map[string]string
+ Deprecated *bool
+ Source *string
+}
+
+// SpecString returns the spec of a manifest file as a string
+func (m *ManifestFile) SpecString() (string, error) {
+ data, err := yaml.Marshal(m.Spec)
+ if err != nil {
+ return "", err
+ }
+
+ return string(data), err
+}
+
+// ManifestFileFromFile loads a manifest from a yaml file.
+func ManifestFileFromFile(path string) (*ManifestFile, error) {
+ fileData, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ manifest := &ManifestFile{}
+ if err := yaml.Unmarshal(fileData, manifest); err != nil {
+ return nil, err
+ }
+
+ return manifest, nil
+}
diff --git a/pkg/util/extensions/extensions.go b/pkg/util/extensions/extensions.go
index ae9e25b..a84e472 100644
--- a/pkg/util/extensions/extensions.go
+++ b/pkg/util/extensions/extensions.go
@@ -213,3 +213,17 @@ func DeleteNode(node *yaml.Node, key *YamlIndex) error {
return nil
}
+
+// ReplaceMapValues will replace strings that are keys in the input map with their values
+// the result is returned
+func ReplaceMapValues(value string, replaceMap map[string]string) string {
+ replacePairs := make([]string, 0)
+
+ for key, value := range replaceMap {
+ replacePairs = append(replacePairs, key)
+ replacePairs = append(replacePairs, value)
+ }
+
+ return strings.NewReplacer(replacePairs...).
+ Replace(value)
+}
diff --git a/pkg/workflow_template_types.go b/pkg/workflow_template_types.go
index 3f05131..ef884bb 100644
--- a/pkg/workflow_template_types.go
+++ b/pkg/workflow_template_types.go
@@ -55,6 +55,22 @@ func (wt *WorkflowTemplate) GenerateUID(name string) error {
return nil
}
+// CreateWorkflowTemplate creates a new workflow template with the given name.
+// All fields that can be generated in memory without external requests are filled out, such as the UID.
+func CreateWorkflowTemplate(name string) (*WorkflowTemplate, error) {
+ nameUID, err := uid2.GenerateUID(name, 30)
+ if err != nil {
+ return nil, err
+ }
+
+ workflowTemplate := &WorkflowTemplate{
+ Name: name,
+ UID: nameUID,
+ }
+
+ return workflowTemplate, nil
+}
+
// GetManifestBytes returns the manifest as []byte
func (wt *WorkflowTemplate) GetManifestBytes() []byte {
return []byte(wt.Manifest)
diff --git a/pkg/workspace_template_types.go b/pkg/workspace_template_types.go
index 88a8cde..92d13f5 100644
--- a/pkg/workspace_template_types.go
+++ b/pkg/workspace_template_types.go
@@ -51,6 +51,22 @@ func (wt *WorkspaceTemplate) GenerateUID(name string) error {
return nil
}
+// CreateWorkspaceTemplate creates a new workspace template with the given name.
+// All fields that can be generated in memory without external requests are filled out, such as the UID.
+func CreateWorkspaceTemplate(name string) (*WorkspaceTemplate, error) {
+ nameUID, err := uid2.GenerateUID(name, 30)
+ if err != nil {
+ return nil, err
+ }
+
+ workspaceTemplate := &WorkspaceTemplate{
+ Name: name,
+ UID: nameUID,
+ }
+
+ return workspaceTemplate, nil
+}
+
// InjectRuntimeParameters will inject all runtime variables into the WorkflowTemplate's manifest.
func (wt *WorkspaceTemplate) InjectRuntimeParameters(config SystemConfig) error {
if wt.WorkflowTemplate == nil {