Commit 9f1b099e authored by derailed's avatar derailed

Merge branch '5_14_20'

parents 13096b86 e04d7da4
Showing with 287 additions and 91 deletions
......@@ -15,7 +15,7 @@ FROM alpine:3.10.0
COPY --from=build /k9s/execs/k9s /bin/k9s
ENV KUBE_LATEST_VERSION="v1.18.1"
RUN apk add --update ca-certificates \
    && apk add --update -t deps curl \
    && apk add --update -t deps curl vim \
    && curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBE_LATEST_VERSION}/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl \
    && chmod +x /usr/local/bin/kubectl \
    && apk del --purge deps \
......
<img src="https://raw.githubusercontent.com/derailed/k9s/master/assets/k9s_small.png" align="right" width="200" height="auto"/>
# Release v0.19.5
## Notes
Thank you to all who contributed by flushing out issues and enhancements for K9s! I'll try to mark some of these issues as fixed. But if you don't mind, grab the latest rev and see if we're happier with some of the fixes! If you've filed an issue, please help me verify and close it. Your support, kindness, and awesome suggestions to make K9s better are, as ever, very much noticed and appreciated!
Also, if you dig this tool, consider joining our [sponsorship program](https://github.com/sponsors/derailed) and/or making some noise on social! [@kitesurfer](https://twitter.com/kitesurfer)
On Slack? Please join us [K9slackers](https://join.slack.com/t/k9sers/shared_invite/enQtOTA5MDEyNzI5MTU0LWQ1ZGI3MzliYzZhZWEyNzYxYzA3NjE0YTk1YmFmNzViZjIyNzhkZGI0MmJjYzhlNjdlMGJhYzE2ZGU1NjkyNTM)
---
## A Word From Our Sponsors...
First off, I would like to send a `Big Thank You` to the following generous K9s friends for joining our sponsorship program and supporting this project!
* [Tommy Dejbjerg Pedersen](https://github.com/tpedersen123)
* [Matt Welke](https://github.com/mattwelke)
## Disruption In The Force
During this drop, I got totally slammed by other forces ;( I've had so many disruptions to my `quasi` normal flow that this drop might be a bit wonky ;( So please proceed with caution!!
As always, please help me flush out/report issues and I'll address them promptly! Thank you so much for your understanding and patience!! 🙏👨‍❤️‍👨😍
## Improved Node Shell Usability
In this drop, we've changed the configuration of the node shell action that lets you shell into nodes. Big thanks to [Patrick Decat](https://github.com/pdecat) for helping us flesh out this beta feature! The new configuration lets you customize not only the image but also the namespace and resource limits of the pods K9s runs on your clusters. It is set at the cluster scope level.
Here is an example of the new pod shell config options:
```yaml
# $HOME/.k9s/config.yml
k9s:
  clusters:
    blee:
      featureGates:
        # You must enable the nodeShell feature gate to enable shelling into nodes
        nodeShell: true
      # NEW! You can now tune the pod specification: currently image, namespace and resources
      shellPod:
        image: cool_kid_admin:42
        namespace: blee
        limits:
          cpu: 100m
          memory: 100Mi
```
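For the curious, the `limits` entries above are plain strings that get parsed into Kubernetes resource quantities when the shell pod spec is built (see the `asResource` helper further down this diff). Here is a minimal, self-contained sketch of that conversion; note that `resource.MustParse` panics on malformed values, which is why the config layer backfills sane defaults first:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// Limits mirrors the new config.Limits type introduced in this commit.
type Limits map[v1.ResourceName]string

// asResource converts configured limit strings into a pod resource spec,
// following the helper added to internal/view/exec.go below.
func asResource(l Limits) v1.ResourceRequirements {
	return v1.ResourceRequirements{
		Limits: v1.ResourceList{
			// MustParse panics on bad quantities; Validate guarantees sane values.
			v1.ResourceCPU:    resource.MustParse(l[v1.ResourceCPU]),
			v1.ResourceMemory: resource.MustParse(l[v1.ResourceMemory]),
		},
	}
}

func main() {
	fmt.Printf("%+v\n", asResource(Limits{v1.ResourceCPU: "100m", v1.ResourceMemory: "100Mi"}))
}
```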
## Resolved Bugs/Features/PRs
* [Issue #714](https://github.com/derailed/k9s/issues/714)
* [Issue #713](https://github.com/derailed/k9s/issues/713)
* [Issue #708](https://github.com/derailed/k9s/issues/708)
* [Issue #707](https://github.com/derailed/k9s/issues/707)
* [Issue #705](https://github.com/derailed/k9s/issues/705)
* [Issue #704](https://github.com/derailed/k9s/issues/704)
* [Issue #702](https://github.com/derailed/k9s/issues/702)
* [Issue #700](https://github.com/derailed/k9s/issues/700) Fingers and toes crossed ;)
* [Issue #694](https://github.com/derailed/k9s/issues/694)
* [Issue #663](https://github.com/derailed/k9s/issues/663) Partially resolved - launching in a given namespace (e.g. `k9s -n fred`) should now behave better.
* [PR #709](https://github.com/derailed/k9s/pull/709) All credit goes to [Namco](https://github.com/namco1992)!!
* [PR #706](https://github.com/derailed/k9s/pull/706) Big Thanks to [M. Tarık Yurt](https://github.com/mtyurt)!
* [PR #704](https://github.com/derailed/k9s/pull/704) Atta Boy!! [psvo](https://github.com/psvo)
* [PR #696](https://github.com/derailed/k9s/pull/696) Thank you! Credits to [Christian Köhn](https://github.com/ckoehn)
* [PR #691](https://github.com/derailed/k9s/pull/691) Mega Thanks To [Pavel Tumik](https://github.com/sagor999)!
---
<img src="https://raw.githubusercontent.com/derailed/k9s/master/assets/imhotep_logo.png" width="32" height="auto"/> © 2020 Imhotep Software LLC. All materials licensed under [Apache v2.0](http://www.apache.org/licenses/LICENSE-2.0)
......@@ -105,7 +105,6 @@ func loadConfiguration() *config.Config {
		log.Warn().Msg("Unable to locate K9s config. Generating new configuration...")
	}
	log.Debug().Msgf("DEMO MODE %#v", demoMode)
	if demoMode != nil {
		k9sCfg.SetDemoMode(*demoMode)
	}
......
......@@ -4,7 +4,7 @@ go 1.13
require (
	github.com/atotto/clipboard v0.1.2
	github.com/derailed/popeye v0.8.1
	github.com/derailed/popeye v0.8.2
	github.com/derailed/tview v0.3.10
	github.com/drone/envsubst v1.0.2 // indirect
	github.com/fatih/color v1.9.0
......
......@@ -9,6 +9,7 @@ type Cluster struct {
	Namespace    *Namespace    `yaml:"namespace"`
	View         *View         `yaml:"view"`
	FeatureGates *FeatureGates `yaml:"featureGates"`
	ShellPod     *ShellPod     `yaml:"shellPod"`
}

// NewCluster creates a new cluster configuration.
......@@ -17,6 +18,7 @@ func NewCluster() *Cluster {
		Namespace:    NewNamespace(),
		View:         NewView(),
		FeatureGates: NewFeatureGates(),
		ShellPod:     NewShellPod(),
	}
}
......@@ -34,4 +36,9 @@ func (c *Cluster) Validate(conn client.Connection, ks KubeSettings) {
		c.View = NewView()
	}
	c.View.Validate()
	if c.ShellPod == nil {
		c.ShellPod = NewShellPod()
	}
	c.ShellPod.Validate(conn, ks)
}
......@@ -84,13 +84,13 @@ func (c *Config) Refine(flags *genericclioptions.ConfigFlags) error {
	if c.K9s.CurrentContext == "" {
		return errors.New("Invalid kubeconfig context detected")
	}
	ctx, ok := cfg.Contexts[c.K9s.CurrentContext]
	context, ok := cfg.Contexts[c.K9s.CurrentContext]
	if !ok {
		return fmt.Errorf("The specified context %q does not exist in kubeconfig", c.K9s.CurrentContext)
	}
	c.K9s.CurrentCluster = ctx.Cluster
	if len(ctx.Namespace) != 0 {
		if err := c.SetActiveNamespace(ctx.Namespace); err != nil {
	c.K9s.CurrentCluster = context.Cluster
	if len(context.Namespace) != 0 {
		if err := c.SetActiveNamespace(context.Namespace); err != nil {
			return err
		}
	}
......@@ -124,13 +124,6 @@ func (c *Config) CurrentCluster() *Cluster {
// ActiveNamespace returns the active namespace in the current cluster.
func (c *Config) ActiveNamespace() string {
	if c.client != nil {
		ns := c.client.ActiveNamespace()
		if client.IsNamespaced(ns) {
			return ns
		}
	}
	if cl := c.CurrentCluster(); cl != nil {
		if cl.Namespace != nil {
			return cl.Namespace.Active
......@@ -162,9 +155,9 @@ func (c *Config) SetActiveNamespace(ns string) error {
	if c.K9s.ActiveCluster() != nil {
		return c.K9s.ActiveCluster().Namespace.SetActive(ns, c.settings)
	}
	err := errors.New("no active cluster. unable to set active namespace")
	log.Error().Err(err).Msg("SetActiveNamespace")
	return err
}
......
......@@ -261,7 +261,6 @@ func TestSetup(t *testing.T) {
var expectedConfig = `k9s:
  refreshRate: 100
  dockerShellImage: busybox:1.31
  headless: false
  readOnly: true
  noIcons: false
......@@ -284,6 +283,12 @@ var expectedConfig = `k9s:
        active: po
      featureGates:
        nodeShell: false
      shellPod:
        image: busybox:1.31
        namespace: default
        limits:
          cpu: 100m
          memory: 100Mi
    fred:
      namespace:
        active: default
......@@ -297,6 +302,12 @@ var expectedConfig = `k9s:
        active: po
      featureGates:
        nodeShell: false
      shellPod:
        image: busybox:1.31
        namespace: default
        limits:
          cpu: 100m
          memory: 100Mi
    minikube:
      namespace:
        active: kube-system
......@@ -310,6 +321,12 @@ var expectedConfig = `k9s:
        active: ctx
      featureGates:
        nodeShell: false
      shellPod:
        image: busybox:1.31
        namespace: default
        limits:
          cpu: 100m
          memory: 100Mi
  thresholds:
    cpu:
      critical: 90
......@@ -321,7 +338,6 @@ var expectedConfig = `k9s:
var resetConfig = `k9s:
  refreshRate: 2
  dockerShellImage: busybox:1.31
  headless: false
  readOnly: false
  noIcons: false
......@@ -344,6 +360,12 @@ var resetConfig = `k9s:
        active: po
      featureGates:
        nodeShell: false
      shellPod:
        image: busybox:1.31
        namespace: default
        limits:
          cpu: 100m
          memory: 100Mi
  thresholds:
    cpu:
      critical: 90
......
......@@ -2,16 +2,11 @@ package config
import "github.com/derailed/k9s/internal/client"
const (
defaultRefreshRate = 2
// DefaultDockerShellImage specifies the docker image and tag for shelling into nodes.
DefaultDockerShellImage = "busybox:1.31"
)
const defaultRefreshRate = 2
// K9s tracks K9s configuration options.
type K9s struct {
RefreshRate int `yaml:"refreshRate"`
DockerShellImage string `yaml:"dockerShellImage"`
Headless bool `yaml:"headless"`
ReadOnly bool `yaml:"readOnly"`
NoIcons bool `yaml:"noIcons"`
......@@ -29,11 +24,10 @@ type K9s struct {
// NewK9s create a new K9s configuration.
func NewK9s() *K9s {
return &K9s{
RefreshRate: defaultRefreshRate,
DockerShellImage: DefaultDockerShellImage,
Logger: NewLogger(),
Clusters: make(map[string]*Cluster),
Thresholds: NewThreshold(),
RefreshRate: defaultRefreshRate,
Logger: NewLogger(),
Clusters: make(map[string]*Cluster),
Thresholds: NewThreshold(),
}
}
......@@ -104,9 +98,6 @@ func (k *K9s) validateDefaults() {
if k.RefreshRate <= 0 {
k.RefreshRate = defaultRefreshRate
}
if k.DockerShellImage == "" {
k.DockerShellImage = DefaultDockerShellImage
}
}
func (k *K9s) validateClusters(c client.Connection, ks KubeSettings) {
......
package config

import (
	"github.com/derailed/k9s/internal/client"
	v1 "k8s.io/api/core/v1"
)

const defaultDockerShellImage = "busybox:1.31"

// Limits represents resource limits.
type Limits map[v1.ResourceName]string

// ShellPod represents k9s shell configuration.
type ShellPod struct {
	Image     string `yaml:"image"`
	Namespace string `yaml:"namespace"`
	Limits    Limits `yaml:"limits,omitempty"`
}

// NewShellPod returns a new instance.
func NewShellPod() *ShellPod {
	return &ShellPod{
		Image:     defaultDockerShellImage,
		Namespace: "default",
		Limits:    defaultLimits(),
	}
}

// Validate validates the configuration.
func (s *ShellPod) Validate(client.Connection, KubeSettings) {
	if s.Image == "" {
		s.Image = defaultDockerShellImage
	}
	if len(s.Limits) == 0 {
		s.Limits = defaultLimits()
	}
}

func defaultLimits() Limits {
	return Limits{
		v1.ResourceCPU:    "100m",
		v1.ResourceMemory: "100Mi",
	}
}
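Because internal/config is an internal package, it cannot be imported from outside the k9s module, so here is a self-contained sketch using stand-in types mirroring the file above; it shows how the `shellPod` section unmarshals and how validation backfills the defaults:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// ShellPod is a stand-in mirroring the config type above (the real one
// lives in internal/config and is not importable from here).
type ShellPod struct {
	Image     string            `yaml:"image"`
	Namespace string            `yaml:"namespace"`
	Limits    map[string]string `yaml:"limits,omitempty"`
}

// validate backfills defaults, as ShellPod.Validate does above.
func (s *ShellPod) validate() {
	if s.Image == "" {
		s.Image = "busybox:1.31"
	}
	if len(s.Limits) == 0 {
		s.Limits = map[string]string{"cpu": "100m", "memory": "100Mi"}
	}
}

func main() {
	var sp ShellPod
	// Limits omitted on purpose: validate() fills in the defaults.
	_ = yaml.Unmarshal([]byte("image: cool_kid_admin:42\nnamespace: blee"), &sp)
	sp.validate()
	fmt.Printf("%+v\n", sp)
}
```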
......@@ -88,8 +88,13 @@ func (a *App) Init(version string, rate int) error {
		return errors.New("No client connection detected")
	}
	ns, err := a.Conn().Config().CurrentNamespaceName()
	log.Debug().Msgf("CURRENT-NS %q -- %v", ns, err)
	if err != nil {
		log.Info().Msg("No namespace specified using all namespaces")
		log.Info().Msg("No namespace specified, using cluster default namespace")
	} else {
		if err := a.Config.SetActiveNamespace(ns); err != nil {
			log.Error().Err(err).Msgf("Failed to set active namespace to %q", ns)
		}
	}

	a.factory = watch.NewFactory(a.Conn())
......@@ -111,6 +116,13 @@ func (a *App) Init(version string, rate int) error {
	a.CmdBuff().SetSuggestionFn(a.suggestCommand())
	a.CmdBuff().AddListener(a)
	a.layout(ctx, version)
	a.initSignals()

	return nil
}

func (a *App) layout(ctx context.Context, version string) {
	flash := ui.NewFlash(a.App)
	go flash.Watch(ctx, a.Flash().Channel())
......@@ -123,10 +135,6 @@ func (a *App) Init(version string, rate int) error {
	a.Main.AddPage("main", main, true, false)
	a.Main.AddPage("splash", ui.NewSplash(a.Styles, version), true, true)
	a.toggleHeader(!a.Config.K9s.GetHeadless())
	a.initSignals()

	return nil
}

func (a *App) initSignals() {
......@@ -134,8 +142,12 @@ func (a *App) initSignals() {
	signal.Notify(sig, syscall.SIGABRT, syscall.SIGINT, syscall.SIGHUP, syscall.SIGQUIT)

	go func(sig chan os.Signal) {
		<-sig
		nukeK9sShell(a.Conn())
		signal := <-sig
		// SIGHUP (terminal hangup) exits outright; other signals tear down the shell pod first.
		if signal == syscall.SIGHUP {
			a.BailOut()
			return
		}
		nukeK9sShell(a)
	}(sig)
}
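The reworked handler distinguishes SIGHUP (the terminal went away, so just bail) from interrupt-style signals (clean up the shell pod first). A minimal standalone sketch of that dispatch, with print statements standing in for `BailOut` and `nukeK9sShell`:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGABRT, syscall.SIGINT, syscall.SIGHUP, syscall.SIGQUIT)

	// Mirrors initSignals above: SIGHUP bails out immediately, anything
	// else tears down the k9s shell pod before exiting.
	s := <-sig
	if s == syscall.SIGHUP {
		fmt.Println("bailing out")
		return
	}
	fmt.Println("nuking shell pod, then exiting")
}
```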
......@@ -375,7 +387,7 @@ func (a *App) BailOut() {
		}
	}()

	nukeK9sShell(a.Conn())
	nukeK9sShell(a)
	a.factory.Terminate()
	a.App.BailOut()
}
......
......@@ -158,7 +158,18 @@ func (c *Command) run(cmd, path string, clearStack bool) error {
}

func (c *Command) defaultCmd() error {
	if err := c.run(c.app.Config.ActiveView(), "", true); err != nil {
	view := c.app.Config.ActiveView()
	if view == "" {
		return c.run("pod", "", true)
	}

	tokens := strings.Split(view, " ")
	cmd := view
	ns, err := c.app.Conn().Config().CurrentNamespaceName()
	if err == nil {
		// Re-target the saved command at the kubeconfig's current namespace.
		cmd = tokens[0] + " " + ns
	}

	if err := c.run(cmd, "", true); err != nil {
		log.Error().Err(err).Msgf("Saved command load failed. Loading default view")
		return c.run("pod", "", true)
	}
......
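In other words, a saved command like `deploy fred` is re-targeted at whatever namespace the kubeconfig currently points to. A tiny sketch of that re-scoping rule (the helper name is hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

// rescope re-targets a saved view command at the current kubeconfig
// namespace, mirroring defaultCmd above; empty views fall back to pods.
func rescope(saved, ns string) string {
	if saved == "" {
		return "pod"
	}
	return strings.Split(saved, " ")[0] + " " + ns
}

func main() {
	fmt.Println(rescope("deploy fred", "blee")) // deploy blee
	fmt.Println(rescope("", "blee"))            // pod
}
```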
......@@ -50,15 +50,10 @@ func (c *Container) bindKeys(aa ui.KeyActions) {
	}

	aa.Add(ui.KeyActions{
		ui.KeyShiftF:   ui.NewKeyAction("PortForward", c.portFwdCmd, true),
		ui.KeyShiftT:   ui.NewKeyAction("Sort Restart", c.GetTable().SortColCmd("RESTARTS", false), false),
		ui.KeyShiftC:   ui.NewKeyAction("Sort CPU", c.GetTable().SortColCmd(cpuCol, false), false),
		ui.KeyShiftM:   ui.NewKeyAction("Sort MEM", c.GetTable().SortColCmd(memCol, false), false),
		ui.KeyShiftX:   ui.NewKeyAction("Sort %CPU (REQ)", c.GetTable().SortColCmd("%CPU/R", false), false),
		ui.KeyShiftZ:   ui.NewKeyAction("Sort %MEM (REQ)", c.GetTable().SortColCmd("%MEM/R", false), false),
		tcell.KeyCtrlX: ui.NewKeyAction("Sort %CPU (LIM)", c.GetTable().SortColCmd("%CPU/L", false), false),
		tcell.KeyCtrlQ: ui.NewKeyAction("Sort %MEM (LIM)", c.GetTable().SortColCmd("%MEM/L", false), false),
		ui.KeyShiftF:   ui.NewKeyAction("PortForward", c.portFwdCmd, true),
		ui.KeyShiftT:   ui.NewKeyAction("Sort Restart", c.GetTable().SortColCmd("RESTARTS", false), false),
	})
	aa.Add(resourceSorters(c.GetTable()))
}

func (c *Container) k9sEnv() Env {
......
......@@ -129,27 +129,33 @@ func clearScreen() {
const (
	k9sShell           = "k9s-shell"
	k9sShellNS         = "default"
	k9sShellRetryCount = 10
	k9sShellRetryDelay = 500 * time.Millisecond
)

func ssh(a *App, node string) error {
	nukeK9sShell(a.Conn())
	defer nukeK9sShell(a.Conn())
	nukeK9sShell(a)
	defer nukeK9sShell(a)
	if err := launchShellPod(a, node); err != nil {
		return err
	}
	shellIn(a, client.FQN(k9sShellNS, k9sShellPodName()), k9sShell)
	ns := a.Config.K9s.ActiveCluster().ShellPod.Namespace
	shellIn(a, client.FQN(ns, k9sShellPodName()), k9sShell)

	return nil
}

func nukeK9sShell(c client.Connection) {
func nukeK9sShell(a *App) {
	cl := a.Config.K9s.CurrentCluster
	if !a.Config.K9s.Clusters[cl].FeatureGates.NodeShell {
		return
	}
	ns := a.Config.K9s.ActiveCluster().ShellPod.Namespace
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()
	err := c.DialOrDie().CoreV1().Pods(k9sShellNS).Delete(ctx, k9sShellPodName(), metav1.DeleteOptions{})
	err := a.Conn().DialOrDie().CoreV1().Pods(ns).Delete(ctx, k9sShellPodName(), metav1.DeleteOptions{})
	if kerrors.IsNotFound(err) {
		return
	}
......@@ -159,20 +165,17 @@ func nukeK9sShell(c client.Connection) {
}

func launchShellPod(a *App, node string) error {
	img := a.Config.K9s.DockerShellImage
	if img == "" {
		img = config.DefaultDockerShellImage
	}
	spec := k9sShellPod(node, img)
	ns := a.Config.K9s.ActiveCluster().ShellPod.Namespace
	spec := k9sShellPod(node, a.Config.K9s.ActiveCluster().ShellPod)
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()
	dial := a.Conn().DialOrDie().CoreV1().Pods(k9sShellNS)
	dial := a.Conn().DialOrDie().CoreV1().Pods(ns)
	if _, err := dial.Create(ctx, &spec, metav1.CreateOptions{}); err != nil {
		return err
	}

	for i := 0; i < k9sShellRetryCount; i++ {
		o, err := a.factory.Get("v1/pods", client.FQN(k9sShellNS, k9sShellPodName()), true, labels.Everything())
		o, err := a.factory.Get("v1/pods", client.FQN(ns, k9sShellPodName()), true, labels.Everything())
		if err != nil {
			time.Sleep(k9sShellRetryDelay)
			continue
......@@ -194,14 +197,14 @@ func k9sShellPodName() string {
	return fmt.Sprintf("%s-%d", k9sShell, os.Getpid())
}

func k9sShellPod(node, image string) v1.Pod {
func k9sShellPod(node string, cfg *config.ShellPod) v1.Pod {
	var grace int64
	var priv bool = true

	return v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      k9sShellPodName(),
			Namespace: k9sShellNS,
			Namespace: cfg.Namespace,
		},
		Spec: v1.PodSpec{
			NodeName: node,
......@@ -222,7 +225,7 @@ func k9sShellPod(node, image string) v1.Pod {
			Containers: []v1.Container{
				{
					Name:  k9sShell,
					Image: image,
					Image: cfg.Image,
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "root-vol",
......@@ -230,13 +233,8 @@ func k9sShellPod(node, image string) v1.Pod {
							ReadOnly:  true,
						},
					},
					Resources: v1.ResourceRequirements{
						Limits: v1.ResourceList{
							v1.ResourceCPU:    resource.MustParse("200m"),
							v1.ResourceMemory: resource.MustParse("100Mi"),
						},
					},
					Stdin: true,
					Resources: asResource(cfg.Limits),
					Stdin:     true,
					SecurityContext: &v1.SecurityContext{
						Privileged: &priv,
					},
......@@ -245,3 +243,12 @@ func k9sShellPod(node, image string) v1.Pod {
		},
	}
}

func asResource(r config.Limits) v1.ResourceRequirements {
	return v1.ResourceRequirements{
		Limits: v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse(r[v1.ResourceCPU]),
			v1.ResourceMemory: resource.MustParse(r[v1.ResourceMemory]),
		},
	}
}
......@@ -52,18 +52,13 @@ func (p *Pod) bindKeys(aa ui.KeyActions) {
	}

	aa.Add(ui.KeyActions{
		ui.KeyShiftR:   ui.NewKeyAction("Sort Ready", p.GetTable().SortColCmd(readyCol, true), false),
		ui.KeyShiftT:   ui.NewKeyAction("Sort Restart", p.GetTable().SortColCmd("RESTARTS", false), false),
		ui.KeyShiftS:   ui.NewKeyAction("Sort Status", p.GetTable().SortColCmd(statusCol, true), false),
		ui.KeyShiftC:   ui.NewKeyAction("Sort CPU", p.GetTable().SortColCmd(cpuCol, false), false),
		ui.KeyShiftM:   ui.NewKeyAction("Sort MEM", p.GetTable().SortColCmd(memCol, false), false),
		ui.KeyShiftX:   ui.NewKeyAction("Sort %CPU (REQ)", p.GetTable().SortColCmd("%CPU", false), false),
		ui.KeyShiftZ:   ui.NewKeyAction("Sort %MEM (REQ)", p.GetTable().SortColCmd("%MEM", false), false),
		tcell.KeyCtrlX: ui.NewKeyAction("Sort %CPU (LIM)", p.GetTable().SortColCmd("%CPU/L", false), false),
		tcell.KeyCtrlQ: ui.NewKeyAction("Sort %MEM (LIM)", p.GetTable().SortColCmd("%MEM/L", false), false),
		ui.KeyShiftI:   ui.NewKeyAction("Sort IP", p.GetTable().SortColCmd("IP", true), false),
		ui.KeyShiftO:   ui.NewKeyAction("Sort Node", p.GetTable().SortColCmd("NODE", true), false),
		ui.KeyShiftR:   ui.NewKeyAction("Sort Ready", p.GetTable().SortColCmd(readyCol, true), false),
		ui.KeyShiftT:   ui.NewKeyAction("Sort Restart", p.GetTable().SortColCmd("RESTARTS", false), false),
		ui.KeyShiftS:   ui.NewKeyAction("Sort Status", p.GetTable().SortColCmd(statusCol, true), false),
		ui.KeyShiftI:   ui.NewKeyAction("Sort IP", p.GetTable().SortColCmd("IP", true), false),
		ui.KeyShiftO:   ui.NewKeyAction("Sort Node", p.GetTable().SortColCmd("NODE", true), false),
	})
	aa.Add(resourceSorters(p.GetTable()))
}

func (p *Pod) selectedContainer() string {
......@@ -318,3 +313,14 @@ func podIsRunning(f dao.Factory, path string) bool {
	log.Debug().Msgf("Phase %#v", re.Phase(po))
	return re.Phase(po) == render.Running
}

func resourceSorters(t *Table) ui.KeyActions {
	return ui.KeyActions{
		ui.KeyShiftC:   ui.NewKeyAction("Sort CPU", t.SortColCmd(cpuCol, false), false),
		ui.KeyShiftM:   ui.NewKeyAction("Sort MEM", t.SortColCmd(memCol, false), false),
		ui.KeyShiftX:   ui.NewKeyAction("Sort %CPU (REQ)", t.SortColCmd("%CPU/R", false), false),
		ui.KeyShiftZ:   ui.NewKeyAction("Sort %MEM (REQ)", t.SortColCmd("%MEM/R", false), false),
		tcell.KeyCtrlX: ui.NewKeyAction("Sort %CPU (LIM)", t.SortColCmd("%CPU/L", false), false),
		tcell.KeyCtrlQ: ui.NewKeyAction("Sort %MEM (LIM)", t.SortColCmd("%MEM/L", false), false),
	}
}
......@@ -53,8 +53,13 @@ func (p *Popeye) decorateRows(data render.TableData) render.TableData {
		}
		sum += n
	}

	score := sum / len(data.RowEvents)
	p.GetTable().Extras = fmt.Sprintf("Score %d -- %s", score, grade(score))
	score, letter := 0, render.NAValue
	if len(data.RowEvents) > 0 {
		score = sum / len(data.RowEvents)
		letter = grade(score)
	}
	p.GetTable().Extras = fmt.Sprintf("Score %d -- %s", score, letter)

	return data
}
......
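The guard above keeps the Popeye score average from dividing by zero when a report comes back empty. A standalone illustration of the same pattern (the "n/a" value and grade cutoffs here are made up for the example):

```go
package main

import "fmt"

// average returns 0 and "n/a" for an empty report instead of panicking
// on a division by zero, mirroring the decorateRows fix above.
func average(scores []int) (int, string) {
	if len(scores) == 0 {
		return 0, "n/a"
	}
	sum := 0
	for _, s := range scores {
		sum += s
	}
	avg := sum / len(scores)
	return avg, grade(avg)
}

// grade maps a score to a letter; cutoffs are illustrative only.
func grade(score int) string {
	if score >= 90 {
		return "A"
	}
	return "B"
}

func main() {
	fmt.Println(average([]int{95, 91})) // 93 A
	fmt.Println(average(nil))           // 0 n/a
}
```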
package view

import (
	"github.com/derailed/k9s/internal/client"
	"github.com/derailed/k9s/internal/render"
	"github.com/derailed/k9s/internal/ui"
)

// PersistentVolumeClaim represents a PVC custom viewer.
type PersistentVolumeClaim struct {
	ResourceViewer
}

// NewPersistentVolumeClaim returns a new viewer.
func NewPersistentVolumeClaim(gvr client.GVR) ResourceViewer {
	d := PersistentVolumeClaim{
		ResourceViewer: NewBrowser(gvr),
	}
	d.SetBindKeysFn(d.bindKeys)
	d.GetTable().SetColorerFn(render.PersistentVolumeClaim{}.ColorerFunc())

	return &d
}

func (d *PersistentVolumeClaim) bindKeys(aa ui.KeyActions) {
	aa.Add(ui.KeyActions{
		ui.KeyShiftS: ui.NewKeyAction("Sort Status", d.GetTable().SortColCmd("STATUS", true), false),
		ui.KeyShiftV: ui.NewKeyAction("Sort Volume", d.GetTable().SortColCmd("VOLUME", true), false),
		ui.KeyShiftO: ui.NewKeyAction("Sort StorageClass", d.GetTable().SortColCmd("STORAGECLASS", true), false),
		ui.KeyShiftC: ui.NewKeyAction("Sort Capacity", d.GetTable().SortColCmd("CAPACITY", true), false),
	})
}
......@@ -45,6 +45,9 @@ func coreViewers(vv MetaViewers) {
	vv[client.NewGVR("v1/secrets")] = MetaViewer{
		viewerFn: NewSecret,
	}
	vv[client.NewGVR("v1/persistentvolumeclaims")] = MetaViewer{
		viewerFn: NewPersistentVolumeClaim,
	}
}

func miscViewers(vv MetaViewers) {
......
......@@ -163,13 +163,6 @@ func (f *Factory) isClusterWide() bool {
// CanForResource returns an informer if the user has access.
func (f *Factory) CanForResource(ns, gvr string, verbs []string) (informers.GenericInformer, error) {
	// If the user can access the resource cluster-wide, prefer the cluster-wide factory.
	if !client.IsClusterWide(ns) {
		auth, err := f.Client().CanI(client.AllNamespaces, gvr, verbs)
		if auth && err == nil {
			return f.ForResource(client.AllNamespaces, gvr), nil
		}
	}

	auth, err := f.Client().CanI(ns, gvr, verbs)
	if err != nil {
		return nil, err
......
......@@ -3,6 +3,8 @@ package main
import (
	"os"

	_ "net/http/pprof"

	"github.com/derailed/k9s/cmd"
	"github.com/derailed/k9s/internal/config"
	"github.com/rs/zerolog"
......