
We’re trying to enable Vault audit using the Pulumi Vault package with Kubernetes, but we’re getting this error:

Diagnostics:
  pulumi:pulumi:Stack (workspace-local-local-vault-audit):
    error: update failed

  vault:index:Audit (vault-audit):
    error: 1 error occurred:
        * error enabling audit backend: Error making API request.
    
    URL: PUT https://localhost:8443/v1/sys/audit/file
    Code: 400. Errors:
    
    * file sink creation failed for path "/Users/.../vault-audit.log": event.NewFileSink: sanity check failed; unable to open "/Users/.../vault-audit.log" for writing: event.(FileSink).open: unable to create file "/Users/.../vault-audit.log": mkdir /Users: permission denied

Here’s the function we wrote:

import (
    "os"
    "path/filepath"

    "github.com/pulumi/pulumi-vault/sdk/v5/go/vault"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    ...
)

// Vault provider args
// vault.ProviderArgs{
//    Address:       pulumi.String("https://localhost:8443"),
//    SkipTlsVerify: pulumi.Bool(true),
//
//    Root token so probably has permissions for everything
//    Token:         pulumi.String("hvs..."),
// }

func (v Vault) EnableAudit(environment string) pulumi.RunFunc {
    program := func(ctx *pulumi.Context) error {

        // Build the log path relative to the current working directory.
        cwd, err := os.Getwd()
        if err != nil {
            return err
        }
        logPath := filepath.Join(cwd, "vault-audit.log")

        // Pre-create the log file with permissive permissions.
        if err := os.WriteFile(logPath, []byte(""), 0777); err != nil {
            return err
        }

        provider, err := vault.NewProvider(ctx, "vaultprovider", &v.Vaultprovider)
        if err != nil {
            return err
        }

        _, err = vault.NewAudit(ctx, "vault-audit", &vault.AuditArgs{
            Options: pulumi.StringMap{
                "file_path": pulumi.String(logPath),
            },
            Type:  pulumi.String("file"),
            Local: pulumi.Bool(true),
        }, pulumi.Provider(provider))

        return err
    }

    return program
}

The vault-audit.log file is created successfully, and its permissions (0777) seem permissive enough.

Additionally, we’re running this on Rancher Desktop with Traefik turned off in favour of Nginx, port-forwarding 8080:80 and 8443:443 to reach Vault, following these docs. We’re using the default containerd runtime. We don’t think this is the issue, though.

If we try doing it directly in the k8s pod without Pulumi:

kubectl exec -it vault-0 -n vault -- /bin/sh -c "VAULT_TOKEN=hvs... vault audit enable file file_path=/var/log/vault-audit.log"

We get a very similar error:

Error enabling audit device: Error making API request.

URL: PUT http://127.0.0.1:8200/v1/sys/audit/file
Code: 400. Errors:

* file sink creation failed for path "./vault-audit.log": event.NewFileSink: sanity check failed; unable to open "./vault-audit.log" for writing: event.(FileSink).open: unable to open file for sink: open ./vault-audit.log: permission denied

What are we missing? The Vault docs make it seem straightforward, with no prerequisites, but do we need to create a policy or something first?

2 Answers


  1. Chosen as BEST ANSWER

Ron's solution pointed me in the right direction. I should mention that my code above was based partly on a misunderstanding of the Pulumi Vault package: the NewAudit file_path option is a path inside the Kubernetes pod, not on the machine running Pulumi. So using cwd, _ := os.Getwd() as I did above won't work, since it resolves to a path on my local machine.
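In other words, the value of file_path must resolve inside the Vault pod's filesystem, not on the workstation:

    // Wrong: Getwd runs on the machine executing Pulumi, so this resolves to
    // a workstation path that doesn't exist inside the Vault container.
    cwd, _ := os.Getwd()
    badPath := filepath.Join(cwd, "vault-audit.log") // e.g. /Users/me/dev/vault-audit.log

    // Right: a path inside the pod, e.g. the auditStorage mount used below.
    goodPath := "/vault/audit/vault-audit.log"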

In the end I enabled Vault's auditStorage when installing from the Helm chart with Pulumi:

    import (
        "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3"
    )

    _, err := helm.NewChart(ctx, "vault", helm.ChartArgs{
        Chart: pulumi.String("vault"),
        FetchArgs: helm.FetchArgs{
            Repo: pulumi.String("https://helm.releases.hashicorp.com"),
        },
        Namespace: pulumi.String("vault"),
        Values: pulumi.Map{
            "server": pulumi.Map{
                "affinity": pulumi.String(""),
                "replicas": pulumi.Int(3),
                "ha": pulumi.Map{
                    "enabled": pulumi.Bool(true),
                    "raft": pulumi.Map{
                        "enabled": pulumi.Bool(true),
                    },
                    "config": pulumi.String(`
                        ui = true

                        listener "tcp" {
                          tls_disable = 1
                          address = "[::]:8200"
                        }`),
                },
                "logLevel": pulumi.String("debug"),

                // Configures the Vault StatefulSet to create a PVC for audit logs.
                // Adapted from Ron's example and from https://github.com/hashicorp/vault-helm/blob/main/values.yaml
                "auditStorage": pulumi.Map{
                    "enabled":    pulumi.Bool(true),
                    "size":       pulumi.String("1Gi"),
                    "mountPath":  pulumi.String("/vault/audit"),
                    "accessMode": pulumi.String("ReadWriteOnce"),
                },
            },
            "csi": pulumi.Map{
                "enabled": pulumi.Bool(true),
            },
        },
    }, pulumi.Provider(vaultProvider)) // vaultProvider: the Kubernetes provider for the target cluster
    if err != nil {
        return err
    }
    
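Since the audit resource lives in a different stack, one way to pass the mount path across stacks is to export it here and read it with a StackReference. A rough sketch; the stack name org/vault-infra/local and the output key auditMountPath are made-up placeholders:

    // In the stack that installs the chart: export the mount path.
    ctx.Export("auditMountPath", pulumi.String("/vault/audit"))

    // In the stack that enables auditing: read it back via a StackReference.
    infra, err := pulumi.NewStackReference(ctx, "org/vault-infra/local", nil)
    if err != nil {
        return err
    }
    auditMountPath := infra.GetStringOutput(pulumi.String("auditMountPath"))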

    Then created the Vault audit resource in another stack:

    import (
        "github.com/pulumi/pulumi-vault/sdk/v5/go/vault"
    )

    _, err := vault.NewAudit(ctx, "vault-audit", &vault.AuditArgs{
        Options: pulumi.StringMap{
            "file_path": pulumi.String("/vault/audit/vault-audit.log"),
        },
        Type: pulumi.String("file"),

        // Path is the audit device's mount path in Vault's audit table (it
        // doesn't have to match a filesystem path); here it mirrors the
        // auditStorage mountPath set in the Helm chart above.
        Path:  pulumi.String("/vault/audit"),
        Local: pulumi.Bool(true),
    }, pulumi.Provider(vaultprovider))
    if err != nil {
        return err
    }
    
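For completeness, the vaultprovider passed above is the Vault provider built from the same ProviderArgs shown in the question; a minimal sketch, where the address and token are placeholders for your environment:

    vaultprovider, err := vault.NewProvider(ctx, "vaultprovider", &vault.ProviderArgs{
        Address:       pulumi.String("https://localhost:8443"),
        SkipTlsVerify: pulumi.Bool(true),
        Token:         pulumi.String("hvs..."), // root token, as in the question
    })
    if err != nil {
        return err
    }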

Now I can run kubectl exec --stdin --tty -n vault vault-0 -- /bin/sh and then cat /vault/audit/vault-audit.log to see the audit logs. (The pod is vault-0 because the chart is running in HA mode with multiple replicas.)

It's probably better to create the PVC in the same stack where I create the audit resource, rather than in the Helm chart, but I'll go with this for now.


2. Below is a sample program you can try:

    package main
    
    import (
        corev1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/core/v1"
        metav1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/meta/v1"
        "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
        pulumi.Run(func(ctx *pulumi.Context) error {
            // Define a Kubernetes namespace for the Vault server.
            vaultNamespace, err := corev1.NewNamespace(ctx, "vaultNamespace", &corev1.NamespaceArgs{
                Metadata: &metav1.ObjectMetaArgs{
                    Name: pulumi.String("vault-ns"),
                },
            })
            if err != nil {
                return err
            }
    
            // Create a Kubernetes PersistentVolumeClaim for the Vault server to use for audit logs.
            _, err = corev1.NewPersistentVolumeClaim(ctx, "vaultAuditLogsPvc", &corev1.PersistentVolumeClaimArgs{
                Metadata: &metav1.ObjectMetaArgs{
                    Name:      pulumi.String("vault-audit-logs"),
                    Namespace: vaultNamespace.Metadata.Name(),
                },
                Spec: &corev1.PersistentVolumeClaimSpecArgs{
                    AccessModes: pulumi.StringArray{
                        pulumi.String("ReadWriteOnce"),
                    },
                    Resources: &corev1.ResourceRequirementsArgs{
                        Requests: pulumi.StringMap{
                            "storage": pulumi.String("1Gi"),
                        },
                    },
                },
            })
            if err != nil {
                return err
            }
    
            // Define the Vault server Pod.
            _, err = corev1.NewPod(ctx, "vaultPod", &corev1.PodArgs{
                Metadata: &metav1.ObjectMetaArgs{
                    Name:      pulumi.String("vault-server"),
                    Namespace: vaultNamespace.Metadata.Name(),
                },
                Spec: &corev1.PodSpecArgs{
                    Containers: corev1.ContainerArray{
                        &corev1.ContainerArgs{
                            Name:  pulumi.String("vault"),
                            Image: pulumi.String("vault:1.7.0"),
                            Args: pulumi.StringArray{
                                pulumi.String("server"),
                            },
                            Env: corev1.EnvVarArray{
                                &corev1.EnvVarArgs{
                                    Name:  pulumi.String("VAULT_LOCAL_CONFIG"),
                                    Value: pulumi.String(`
                                        listener "tcp" {
                                          address = "0.0.0.0:8200"
                                          tls_disable = "true"
                                        }
                                        backend "file" {
                                          path = "/vault/logs"
                                        }
                                    `),
                                },
                            },
                            VolumeMounts: corev1.VolumeMountArray{
                                &corev1.VolumeMountArgs{
                                    Name:      pulumi.String("vault-audit-logs"),
                                    MountPath: pulumi.String("/vault/logs"),
                                },
                            },
                        },
                    },
                    Volumes: corev1.VolumeArray{
                        &corev1.VolumeArgs{
                            Name: pulumi.String("vault-audit-logs"),
                            PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSourceArgs{
                                ClaimName: pulumi.String("vault-audit-logs"),
                            },
                        },
                    },
                },
            })
            if err != nil {
                return err
            }
    
            return nil
        })
    }
    
    
    

This assumes you have a Kubernetes cluster running and configured for use with Pulumi, and that Vault is set up appropriately to run in that cluster.
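If the cluster isn't already configured via stack config or an ambient kubeconfig, a minimal sketch of wiring up an explicit Kubernetes provider (the kubeconfig path is a placeholder):

    import (
        "os"

        "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes"
    )

    // Sketch: an explicit Kubernetes provider; attach it to resources with
    // pulumi.Provider(k8sProvider). The kubeconfig path is a placeholder.
    kubeconfig, err := os.ReadFile("/path/to/kubeconfig")
    if err != nil {
        return err
    }
    k8sProvider, err := kubernetes.NewProvider(ctx, "k8s", &kubernetes.ProviderArgs{
        Kubeconfig: pulumi.String(string(kubeconfig)),
    })
    if err != nil {
        return err
    }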

Adjust the Vault server configuration, the Docker image version, and the storage size to your requirements, and make sure the Vault configuration (VAULT_LOCAL_CONFIG) matches your use case.
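Once the server is running and unsealed, the audit device itself can be enabled against the mounted volume, roughly as follows. This is a sketch that assumes a vault.Provider already configured for this server, here called vaultProvider:

    // Sketch: enable a file audit device writing to the PVC-backed mount.
    // vaultProvider is an assumed, already-configured vault.Provider.
    _, err := vault.NewAudit(ctx, "vault-audit", &vault.AuditArgs{
        Type: pulumi.String("file"),
        Options: pulumi.StringMap{
            "file_path": pulumi.String("/vault/logs/vault-audit.log"),
        },
    }, pulumi.Provider(vaultProvider))
    if err != nil {
        return err
    }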
