# deployment-kubernetes
Bartosz
Hi all, did anyone bump into an issue with Dagit GraphQL API response times on an instance deployed with Helm? Pages load for 20-50 seconds, and most of that is spent on "Waiting for server response". We are using EKS on AWS.
ben
Hi Bartosz, would you be able to share any custom config/values you applied alongside the Helm chart? What are the specs of the cluster you are deploying to?
Bartosz
@Szymon Piskorz Could you share the config and cluster spec with Ben?
Szymon Piskorz
@ben Here are the values for our Helm chart:
```yaml
global:
  serviceAccountName: dagster
dagit:
  workspace:
    enabled: true

    # List of servers to include in the workspace file. When set,
    # `externalConfigmap` must be empty.
    servers:
      - host: "k8s-example-user-code-3"
        port: 3030
        name: "user-code-example"
      # - host: "k8s-dagster-poc-simon"
      #   port: 3030
      #   name: "k8s-dagster-poc-simon-name"
      - host: "dagster-poc-s3-io"
        port: 3030
        name: "dagster-poc-s3-io"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: topology.kubernetes.io/zone
                operator: In
                values:
                  - eu-central-1a
                  - eu-central-1b
  tolerations:
    - key: "deployments-control-plane"
      operator: "Exists"
      effect: "NoSchedule"
  nodeSelector:
    deployments-control-plane: "true"
  resources:
    limits:
      cpu: 4000m
      memory: 8Gi
    requests:
      cpu: 1000m
      memory: 2Gi

dagster-user-deployments:
  deployments:
    # - name: "k8s-dagster-poc-simon"
    #   image:
    #     repository: "083749379286.dkr.ecr.eu-central-1.amazonaws.com/common/dagster_poc/refactored_project_repo"
    #     tag: 75fb843fe418bf84c0f28a40c54f137c2547efe8 # pragma: allowlist secret
    #     pullPolicy: Always
    #   dagsterApiGrpcArgs:
    #     - "--python-file"
    #     - "./repo.py"
    #   port: 3030
    #   envSecrets:
    #     - name: personify-redshift-dbname
    #     - name: personify-redshift-user
    #     - name: personify-redshift-password
    #     - name: personify-redshift-endpoint
    #   affinity:
    #     nodeAffinity:
    #       requiredDuringSchedulingIgnoredDuringExecution:
    #         nodeSelectorTerms:
    #           - matchExpressions:
    #               - key: topology.kubernetes.io/zone
    #                 operator: In
    #                 values:
    #                   - eu-central-1a
    #                   - eu-central-1b
    #   tolerations:
    #     - key: "deployments-control-plane"
    #       operator: "Exists"
    #       effect: "NoSchedule"
    #   nodeSelector:
    #     deployments-control-plane: "true"
    #   resources:
    #     limits:
    #       cpu: 1000m
    #       memory: 2Gi
    #     requests:
    #       cpu: 100m
    #       memory: 128Mi
    - name: "dagster-poc-s3-io"
      image:
        repository: "<http://083749379286.dkr.ecr.eu-central-1.amazonaws.com/common/dagster_poc/refactored_project_repo|083749379286.dkr.ecr.eu-central-1.amazonaws.com/common/dagster_poc/refactored_project_repo>"
        tag: 3a02912b916be140ec9fbd4e1906db211b40b289 # pragma: allowlist secret
        pullPolicy: Always
      dagsterApiGrpcArgs:
        - "--python-file"
        - "./repo.py"
      port: 3030
      envSecrets:
        - name: personify-redshift-dbname
        - name: personify-redshift-user
        - name: personify-redshift-password
        - name: personify-redshift-endpoint
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: topology.kubernetes.io/zone
                    operator: In
                    values:
                      - eu-central-1a
                      - eu-central-1b
      tolerations:
        - key: "deployments-control-plane"
          operator: "Exists"
          effect: "NoSchedule"
      nodeSelector:
        deployments-control-plane: "true"
      resources:
        limits:
          cpu: 4000m
          memory: 8Gi
        requests:
          cpu: 1000m
          memory: 2Gi

    - name: "k8s-example-user-code-3"
      image:
        repository: "<http://docker.io/dagster/user-code-example|docker.io/dagster/user-code-example>"
        tag: latest
        pullPolicy: Always
      dagsterApiGrpcArgs:
        - "--python-file"
        - "/example_project/example_repo/repo.py"
      port: 3030
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: topology.kubernetes.io/zone
                    operator: In
                    values:
                      - eu-central-1a
                      - eu-central-1b
      tolerations:
        - key: "deployments-control-plane"
          operator: "Exists"
          effect: "NoSchedule"
      nodeSelector:
        deployments-control-plane: "true"

dagsterDaemon:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: topology.kubernetes.io/zone
                operator: In
                values:
                  - eu-central-1a
                  - eu-central-1b
  tolerations:
    - key: "deployments-control-plane"
      operator: "Exists"
      effect: "NoSchedule"
  nodeSelector:
    deployments-control-plane: "true"
  runMonitoring:
    enabled: true
postgresql:
  master:
    tolerations:
      - key: "deployments-control-plane"
        operator: "Exists"
        effect: "NoSchedule"
    nodeSelector:
      deployments-control-plane: "true"
runLauncher:
  type: K8sRunLauncher
  config:
    k8sRunLauncher:
      resources:
        limits:
          cpu: 4000m
          memory: 12Gi
        requests:
          cpu: 1000m
          memory: 4Gi
      runK8sConfig:
        podTemplateSpecMetadata:
          annotations:
            argocd.argoproj.io/sync-options: Prune=false
        jobMetadata:
          annotations:
            argocd.argoproj.io/sync-options: Prune=false
        containerConfig: # raw config for the pod's main container
          resources:
            limits:
              cpu: 2000m
              memory: 8Gi
            requests:
              cpu: 1000m
              memory: 4Gi
```
We are using m5.xlarge instances on EKS for our node group.
daniel
What kind of database are you using? Is it possible that it needs more resources? For production use cases we generally recommend running Postgres outside of the k8s cluster (for example, in AWS RDS).
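For reference, pointing the chart at an external database usually means disabling the bundled subchart and supplying the connection details in the values. A minimal sketch, assuming a chart version that exposes these keys; the hostname, database name, and secret name below are placeholders:
```yaml
postgresql:
  enabled: false                    # don't deploy the in-cluster Postgres
  postgresqlHost: "dagster-db.xxxxxxxx.eu-central-1.rds.amazonaws.com"  # placeholder RDS endpoint
  postgresqlUsername: "dagster"
  postgresqlDatabase: "dagster"
  service:
    port: 5432
# Password is read from a pre-created secret instead of a generated one.
generatePostgresqlPasswordSecret: false
global:
  postgresqlSecretName: "dagster-postgresql-secret"   # placeholder secret name
```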
Szymon Piskorz
@daniel We are using the default DB without any configuration added, and the default K8sRunLauncher. We are doing a PoC, so we didn't really go with a full production-grade deployment. Will try adding more resources.
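If you stay on the bundled database for the PoC, its resources can be raised through the postgresql subchart values. A rough sketch, assuming the Bitnami subchart version in use reads `resources` at this level (check the subchart's own values for the exact placement); the numbers are just examples:
```yaml
postgresql:
  resources:
    requests:
      cpu: 500m
      memory: 1Gi
    limits:
      cpu: 2000m
      memory: 4Gi
```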