Mirror of https://github.com/onepanelio/onepanel.git (synced 2025-11-01 09:12:32 +08:00)

Compare commits: v0.13.0-rc...release-v0 (22 commits)
| SHA1 |
|---|
| 03f8f47664 |
| c85496d216 |
| 5f6415548d |
| c641c17a8c |
| 83a2543b13 |
| e8dae0f2e9 |
| b85bf4d688 |
| 7fe0ab2654 |
| dfa6eb2fe6 |
| cc2c51ace5 |
| 897462ede7 |
| 4e3c24fd89 |
| 276e105f20 |
| 656026ac84 |
| 95bea11e43 |
| c6f65510d8 |
| d6e279dde5 |
| e99b0e943d |
| 22a7c31f1d |
| b6c0f24170 |
| 9c04ee066d |
| 29c3e808e1 |
.github/workflows/push_tag.yaml (vendored, new file, 29 lines)

@@ -0,0 +1,29 @@
+name: Publish docker image on tag push
+on:
+  push:
+    tags:
+      - '*'
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@master
+      - uses: olegtarasov/get-tag@v2
+        id: tagName
+      - name: Publish to Registry
+        uses: elgohr/Publish-Docker-Github-Action@master
+        with:
+          name: onepanel/core
+          username: ${{ secrets.DOCKER_HUB_USERNAME }}
+          password: ${{ secrets.DOCKER_HUB_TOKEN }}
+          tags: "${{ env.GIT_TAG_NAME }}"
+      - name: Set Slack Message
+        run: echo "::set-env name=SLACK_MESSAGE::Tag $GIT_TAG_NAME. Docker Tag onepanel/core:$GIT_TAG_NAME"
+      - name: Notify Slack Channels
+        uses: rtCamp/action-slack-notify@v2.0.0
+        env:
+          SLACK_CHANNEL: dev
+          SLACK_ICON: https://avatars1.githubusercontent.com/u/30390575?s=48&v=4
+          SLACK_TITLE: New Core Version
+          SLACK_USERNAME: opBot
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
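For context: `olegtarasov/get-tag` exports the pushed tag as the `GIT_TAG_NAME` environment variable, which the publish step reuses as the Docker image tag and the `Set Slack Message` step forwards via the `::set-env` workflow command (GitHub Actions parses such commands from a step's stdout; `::set-env` was later restricted for security reasons). A minimal Go sketch of what that `echo` emits, assuming `GIT_TAG_NAME` is set:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// GIT_TAG_NAME is exported by the get-tag step above.
	tag := os.Getenv("GIT_TAG_NAME")
	// GitHub Actions reads "::set-env name=KEY::value" from stdout and
	// injects KEY into the environment of subsequent steps.
	fmt.Printf("::set-env name=SLACK_MESSAGE::Tag %s. Docker Tag onepanel/core:%s\n", tag, tag)
}
```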
@@ -155,6 +155,10 @@ func printMarkDown(issues []*issue, version *string) {
 	fmt.Println("# Contributors")
 	contributors := make([]user, 0)
 	for _, contributor := range contributorsMap {
+		// Sorry, no bots.
+		if contributor.Login == "dependabot[bot]" {
+			continue
+		}
 		contributors = append(contributors, contributor)
 	}
 	sort.Slice(contributors, func(i, j int) bool { return contributors[i].ContributionsCount > contributors[j].ContributionsCount })
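This hunk skips bot accounts before sorting contributors by contribution count, descending. A runnable sketch of the same filter-then-sort pattern, with a stand-in `user` type and sample data (the real type lives elsewhere in this file):

```go
package main

import (
	"fmt"
	"sort"
)

// user is a stand-in for the changelog tool's contributor type.
type user struct {
	Login              string
	ContributionsCount int
}

func main() {
	contributorsMap := map[string]user{
		"alice":           {Login: "alice", ContributionsCount: 12},
		"dependabot[bot]": {Login: "dependabot[bot]", ContributionsCount: 40},
		"bob":             {Login: "bob", ContributionsCount: 3},
	}

	contributors := make([]user, 0)
	for _, contributor := range contributorsMap {
		// Sorry, no bots.
		if contributor.Login == "dependabot[bot]" {
			continue
		}
		contributors = append(contributors, contributor)
	}
	// Highest contribution count first.
	sort.Slice(contributors, func(i, j int) bool {
		return contributors[i].ContributionsCount > contributors[j].ContributionsCount
	})
	fmt.Println(contributors) // [{alice 12} {bob 3}]
}
```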
@@ -105,13 +105,14 @@ func Up20200821162630(tx *sql.Tx) error {
 	if err != nil {
 		return err
 	}
+
+	for _, namespace := range namespaces {
 		workspaceTemplate := &v1.WorkspaceTemplate{
 			UID:      uid,
 			Name:     jupyterLabTemplateName,
 			Manifest: jupyterWorkspaceTemplate2,
 		}
 
-	for _, namespace := range namespaces {
 		if _, err := client.UpdateWorkspaceTemplate(namespace.Name, workspaceTemplate); err != nil {
 			return err
 		}
@@ -2,7 +2,6 @@ package migration
 
 import (
 	"database/sql"
-	v1 "github.com/onepanelio/core/pkg"
 	uid2 "github.com/onepanelio/core/pkg/util/uid"
 	"github.com/pressly/goose"
 )
@@ -102,14 +101,8 @@ func Up20200929153931(tx *sql.Tx) error {
 	if err != nil {
 		return err
 	}
-	workspaceTemplate := &v1.WorkspaceTemplate{
-		UID:      uid,
-		Name:     jupyterLabTemplateName,
-		Manifest: jupyterWorkspaceTemplate3,
-	}
-
 	for _, namespace := range namespaces {
-		if _, err := client.UpdateWorkspaceTemplate(namespace.Name, workspaceTemplate); err != nil {
+		if _, err := client.UpdateWorkspaceTemplateManifest(namespace.Name, uid, jupyterWorkspaceTemplate3); err != nil {
 			return err
 		}
 	}
@@ -144,14 +137,9 @@ func Down20200929153931(tx *sql.Tx) error {
 	if err != nil {
 		return err
 	}
-	workspaceTemplate := &v1.WorkspaceTemplate{
-		UID:      uid,
-		Name:     jupyterLabTemplateName,
-		Manifest: jupyterWorkspaceTemplate2,
-	}
-
 	for _, namespace := range namespaces {
-		if _, err := client.UpdateWorkspaceTemplate(namespace.Name, workspaceTemplate); err != nil {
+		if _, err := client.UpdateWorkspaceTemplateManifest(namespace.Name, uid, jupyterWorkspaceTemplate2); err != nil {
 			return err
 		}
 	}
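Across these three hunks the migrations stop hand-building a `v1.WorkspaceTemplate` (which is why the `v1` import is dropped) and instead call the new `UpdateWorkspaceTemplateManifest` helper defined later in this diff. A self-contained sketch of the fetch-modify-update shape that helper follows, using stand-in types rather than the real client:

```go
package main

import "fmt"

// Stand-in types: a minimal model of the fetch-modify-update pattern
// that UpdateWorkspaceTemplateManifest applies to real templates.
type WorkspaceTemplate struct {
	UID      string
	Name     string
	Manifest string
}

type store map[string]*WorkspaceTemplate

func (s store) get(uid string) *WorkspaceTemplate { return s[uid] }
func (s store) update(t *WorkspaceTemplate)       { s[t.UID] = t }

// updateManifest fetches the existing template, swaps only the manifest,
// and saves it back — the shape of the new helper.
func (s store) updateManifest(uid, manifest string) *WorkspaceTemplate {
	existing := s.get(uid)
	existing.Manifest = manifest
	s.update(existing)
	return existing
}

func main() {
	s := store{"jupyterlab": {UID: "jupyterlab", Name: "JupyterLab", Manifest: "v2"}}
	fmt.Println(s.updateManifest("jupyterlab", "v3").Manifest) // v3
}
```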
@@ -18,7 +18,6 @@ import (
 	"gopkg.in/yaml.v2"
 	"io"
 	"io/ioutil"
-	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/watch"
 	"net/http"
 	"strconv"
@@ -197,93 +196,21 @@ func injectArtifactRepositoryConfig(artifact *wfv1.Artifact, namespaceConfig *Na
 	}
 }
 
-// injectContainerResourceQuotas adds resource requests and limits if they exist
-// Code grabs the resource request information from the nodeSelector, compared against running nodes.
-// If the running node is not present, no resource information is retrieved.
-func (c *Client) injectContainerResourceQuotas(wf *wfv1.Workflow, template *wfv1.Template, systemConfig SystemConfig) error {
+// injectHostPortToContainer adds a hostPort to the template container, if a nodeSelector is present.
+// Kubernetes will ensure that multiple containers with the same hostPort do not share the same node.
+func (c *Client) injectHostPortToContainer(template *wfv1.Template) error {
 	if template.NodeSelector == nil {
 		return nil
 	}
 
-	supportedNodePoolLabels := []string{"beta.kubernetes.io/instance-type", "node.kubernetes.io/instance-type"}
-	nodePoolLabel := ""
-	var value string
-	for k, v := range template.NodeSelector {
-		for _, supportedNodePoolLabel := range supportedNodePoolLabels {
-			if k == supportedNodePoolLabel {
-				nodePoolLabel = k
-				value = v
-				break
-			}
-		}
-	}
-	if value == "" {
-		return nil
-	}
-	if strings.Contains(value, "{{workflow.") {
-		parts := strings.Split(strings.Replace(value, "}}", "", -1), ".")
-		paramName := parts[len(parts)-1]
-		for _, param := range wf.Spec.Arguments.Parameters {
-			if param.Name == paramName && param.Value != nil {
-				value = *param.Value
-				break
-			}
-		}
-	}
-
-	runningNodes, err := c.Interface.CoreV1().Nodes().List(ListOptions{})
-	if err != nil {
-		return err
-	}
-	var cpu string
-	var memory string
-	var gpu int64
-	gpuManufacturer := ""
-	for _, node := range runningNodes.Items {
-		if node.Labels[nodePoolLabel] == value {
-			cpuInt := node.Status.Allocatable.Cpu().MilliValue()
-			cpu = strconv.FormatFloat(float64(cpuInt)*.9, 'f', 0, 64) + "m"
-			memoryInt := node.Status.Allocatable.Memory().MilliValue()
-			kiBase := 1024.0
-			ninetyPerc := float64(memoryInt) * .9
-			toKi := ninetyPerc / kiBase / kiBase
-			memory = strconv.FormatFloat(toKi, 'f', 0, 64) + "Ki"
-			//Check for Nvidia
-			gpuQuantity := node.Status.Allocatable["nvidia.com/gpu"]
-			if gpuQuantity.IsZero() == false {
-				gpu = gpuQuantity.Value()
-				gpuManufacturer = "nvidia.com/gpu"
-			}
-
-			//Check for AMD
-			//Source: https://github.com/RadeonOpenCompute/k8s-device-plugin/blob/master/example/pod/alexnet-gpu.yaml
-			gpuQuantity = node.Status.Allocatable["amd.com/gpu"]
-			if gpuQuantity.IsZero() == false {
-				gpu = gpuQuantity.Value()
-				gpuManufacturer = "amd.com/gpu"
-			}
-		}
-	}
-
-	if cpu != "" && memory != "" {
-		resourceList := corev1.ResourceRequirements{
-			Limits: nil,
-			Requests: map[corev1.ResourceName]resource.Quantity{
-				corev1.ResourceCPU:    resource.MustParse(cpu),
-				corev1.ResourceMemory: resource.MustParse(memory),
-			},
-		}
-		if gpu > 0 {
-			stringGpu := strconv.FormatInt(gpu, 10)
-			resourceList.Limits = make(map[corev1.ResourceName]resource.Quantity)
-			resourceList.Limits[corev1.ResourceName(gpuManufacturer)] = resource.MustParse(stringGpu)
-		}
+	ports := []corev1.ContainerPort{
+		{Name: "node-capturer", HostPort: 80, ContainerPort: 80},
 	}
 	if template.Container != nil {
-		template.Container.Resources = resourceList
+		template.Container.Ports = ports
 	}
 	if template.Script != nil {
-		template.Script.Container.Resources = resourceList
+		template.Script.Container.Ports = ports
 	}
-	}
 	return nil
 }
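The rewrite relies on a Kubernetes scheduling guarantee: two pods that both request the same `hostPort` can never be placed on the same node, so attaching `hostPort: 80` to any template that carries a nodeSelector reserves a node per pod without computing CPU/memory requests at all. A minimal sketch of the port being attached, using the same `k8s.io/api/core/v1` types:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Identical to the ports slice built by injectHostPortToContainer:
	// the scheduler refuses to co-locate two pods that both claim
	// hostPort 80, so each such pod captures its own node.
	ports := []corev1.ContainerPort{
		{Name: "node-capturer", HostPort: 80, ContainerPort: 80},
	}

	container := corev1.Container{
		Name:  "main",
		Image: "alpine:latest",
		Ports: ports,
	}
	fmt.Printf("%+v\n", container.Ports)
}
```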
@@ -355,7 +282,7 @@ func (c *Client) injectAutomatedFields(namespace string, wf *wfv1.Workflow, opts
 		Name:      "sys-dshm",
 		MountPath: "/dev/shm",
 	})
-	err = c.injectContainerResourceQuotas(wf, template, systemConfig)
+	err = c.injectHostPortToContainer(template)
 	if err != nil {
 		return err
 	}
@@ -363,7 +290,7 @@ func (c *Client) injectAutomatedFields(namespace string, wf *wfv1.Workflow, opts
 	}
 
 	if template.Script != nil {
-		err = c.injectContainerResourceQuotas(wf, template, systemConfig)
+		err = c.injectHostPortToContainer(template)
 		if err != nil {
 			return err
 		}
pkg/workspace.go (110 lines changed)
@@ -15,7 +15,6 @@ import (
 	"github.com/onepanelio/core/pkg/util/request"
 	log "github.com/sirupsen/logrus"
 	"google.golang.org/grpc/codes"
-	"strconv"
 	"strings"
 	"time"
 )
@@ -286,32 +285,15 @@ func (c *Client) addResourceRequestsAndLimitsToWorkspaceTemplate(t wfv1.Template
 	if !ok {
 		return nil, errors.New("unable to type check statefulset manifest")
 	}
-	//Get node selected
-	labelKey := "sys-node-pool-label"
-	labelKeyVal := ""
-	for _, parameter := range argoTemplate.Spec.Arguments.Parameters {
-		if parameter.Name == labelKey {
-			labelKeyVal = *parameter.Value
-		}
-	}
-
-	nodePoolKey := "sys-node-pool"
-	nodePoolVal := ""
-	for _, parameter := range workspace.Parameters {
-		if parameter.Name == nodePoolKey {
-			nodePoolVal = *parameter.Value
-		}
-	}
-
-	extraContainer, err := generateExtraContainerWithResources(c, labelKeyVal, nodePoolVal)
-	if err != nil {
-		return nil, err
-	}
+	extraContainer := generateExtraContainerWithHostPortToSequesterNode()
+	if extraContainer != nil {
 		containers, ok := templateSpec["containers"].([]interface{})
 		if !ok {
 			return nil, errors.New("unable to type check statefulset manifest")
 		}
 
 		templateSpec["containers"] = append([]interface{}{extraContainer}, containers...)
+	}
 	resultManifest, err := yaml.Marshal(statefulSet)
 	if err != nil {
 		return nil, err
@@ -319,78 +301,25 @@ func (c *Client) addResourceRequestsAndLimitsToWorkspaceTemplate(t wfv1.Template
 	return resultManifest, nil
 }
 
-// generateExtraContainerWithResources will add an extra container to a workspace.
-// The extra container will have the calculated resource request for the node selected by the workspace.
+// generateExtraContainerWithHostPortToSequesterNode will add an extra container to a workspace.
+// The extra container have a hostPort set. Kubernetes will ensure the hostPort does not get conflict
+// between containers, scheduling a new node as needed.
 // The container will sleep once started, and generally consume negligible resources.
-//
-// The node that was selected has to be already running, in order to get the resource request correct.
-func generateExtraContainerWithResources(c *Client, labelKeyVal string, nodePoolVal string) (map[string]interface{}, error) {
-	runningNodes, err := c.Interface.CoreV1().Nodes().List(ListOptions{})
-	if err != nil {
-		return nil, err
-	}
-	var cpu string
-	var memory string
-	var gpu int64
-	gpuManufacturer := ""
-	for _, node := range runningNodes.Items {
-		if node.Labels[labelKeyVal] == nodePoolVal {
-			cpuInt := node.Status.Allocatable.Cpu().MilliValue()
-			cpu = strconv.FormatFloat(float64(cpuInt)*.9, 'f', 0, 64) + "m"
-			memoryInt := node.Status.Allocatable.Memory().MilliValue()
-			kiBase := 1024.0
-			ninetyPerc := float64(memoryInt) * .9
-			toKi := ninetyPerc / kiBase / kiBase
-			memory = strconv.FormatFloat(toKi, 'f', 0, 64) + "Ki"
-			//Check for Nvidia
-			gpuQuantity := node.Status.Allocatable["nvidia.com/gpu"]
-			if gpuQuantity.IsZero() == false {
-				gpu = gpuQuantity.Value()
-				gpuManufacturer = "nvidia.com/gpu"
-			}
-
-			//Check for AMD
-			//Source: https://github.com/RadeonOpenCompute/k8s-device-plugin/blob/master/example/pod/alexnet-gpu.yaml
-			gpuQuantity = node.Status.Allocatable["amd.com/gpu"]
-			if gpuQuantity.IsZero() == false {
-				gpu = gpuQuantity.Value()
-				gpuManufacturer = "amd.com/gpu"
-			}
-		}
-	}
+func generateExtraContainerWithHostPortToSequesterNode() map[string]interface{} {
 	extraContainer := map[string]interface{}{
 		"image":   "alpine:latest",
-		"name":    "resource-requester",
+		"name":    "node-capturer",
 		"command": []interface{}{"/bin/sh"},
 		"args":    []interface{}{"-c", "while :; do sleep 2073600; done"},
-		"resources": map[string]interface{}{
-			"requests": map[string]interface{}{
-				"cpu":    cpu,
-				"memory": memory,
+		"ports": []interface{}{
+			map[string]interface{}{
+				"name":          "node-capturer",
+				"hostPort":      80,
+				"containerPort": 80,
 			},
-			"limits": map[string]interface{}{},
 		},
 	}
-	if gpu > 0 {
-		res, ok := extraContainer["resources"].(map[string]interface{})
-		if !ok {
-			return nil, errors.New("unable to type check extraContainer")
-		}
-		reqs, ok := res["requests"].(map[string]interface{})
-		if !ok {
-			return nil, errors.New("unable to type check extraContainer")
-		}
-		reqs[gpuManufacturer] = gpu
-
-		limits, ok := res["limits"].(map[string]interface{})
-		if !ok {
-			return nil, errors.New("unable to type check extraContainer")
-		}
-		limits[gpuManufacturer] = gpu
-	}
-	return extraContainer, err
+	return extraContainer
 }
 
 // startWorkspace starts a workspace and related resources. It assumes a DB record already exists
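For reference, the container map returned above serializes into the StatefulSet manifest roughly as below; the sleep of 2073600 seconds is 24 days, so the container idles at negligible cost. A sketch that rebuilds the same map and marshals it with the `gopkg.in/yaml.v2` package seen elsewhere in this diff:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Same shape as the extraContainer map built in the hunk above.
	extraContainer := map[string]interface{}{
		"image":   "alpine:latest",
		"name":    "node-capturer",
		"command": []interface{}{"/bin/sh"},
		"args":    []interface{}{"-c", "while :; do sleep 2073600; done"},
		"ports": []interface{}{
			map[string]interface{}{
				"name":          "node-capturer",
				"hostPort":      80,
				"containerPort": 80,
			},
		},
	}

	out, err := yaml.Marshal(extraContainer)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // the container entry prepended to spec.containers
}
```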
@@ -441,6 +370,17 @@ func (c *Client) startWorkspace(namespace string, parameters []byte, workspace *
 		}
 	}
 
+	templates := argoTemplate.Spec.Templates
+	for i, t := range templates {
+		if t.Name == WorkspaceStatefulSetResource {
+			resultManifest, err := c.addResourceRequestsAndLimitsToWorkspaceTemplate(t, argoTemplate, workspace)
+			if err != nil {
+				return nil, err
+			}
+			templates[i].Resource.Manifest = string(resultManifest)
+		}
+	}
+
 	_, err = c.CreateWorkflowExecution(namespace, &WorkflowExecution{
 		Parameters: workspace.Parameters,
 	}, workflowTemplate)
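The added block walks the Argo templates and swaps in the rewritten manifest for the StatefulSet resource template. A self-contained sketch of that find-and-replace loop, with stand-in types and an illustrative value for `WorkspaceStatefulSetResource` (not taken from this diff):

```go
package main

import "fmt"

// Stand-ins for the Argo types used by startWorkspace.
type resource struct{ Manifest string }

type template struct {
	Name     string
	Resource resource
}

const workspaceStatefulSetResource = "sys-statefulset" // illustrative name

func main() {
	templates := []template{
		{Name: "other", Resource: resource{Manifest: "unchanged"}},
		{Name: workspaceStatefulSetResource, Resource: resource{Manifest: "old"}},
	}

	// Index-based assignment mutates the slice element rather than the
	// loop copy — the same reason the hunk writes templates[i].
	for i, t := range templates {
		if t.Name == workspaceStatefulSetResource {
			templates[i].Resource.Manifest = "rewritten"
		}
	}
	fmt.Println(templates)
}
```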
@@ -1158,6 +1158,19 @@ func (c *Client) UpdateWorkspaceTemplate(namespace string, workspaceTemplate *Wo
 	return workspaceTemplate, nil
 }
 
+// UpdateWorkspaceTemplateManifest updates a workspace template by creating a new version where the only difference is the manifest
+func (c *Client) UpdateWorkspaceTemplateManifest(namespace, uid string, manifest string) (*WorkspaceTemplate, error) {
+	existingTemplate, err := c.GetWorkspaceTemplate(namespace, uid, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	existingTemplate.UID = uid
+	existingTemplate.Manifest = manifest
+
+	return c.UpdateWorkspaceTemplate(namespace, existingTemplate)
+}
+
 // ListWorkspaceTemplates returns a list of workspace templates that are not archived, sorted by most recent created first
 func (c *Client) ListWorkspaceTemplates(namespace string, request *request.Request) (workspaceTemplates []*WorkspaceTemplate, err error) {
 	sb := c.workspaceTemplatesSelectBuilder(namespace).