登录
首页 >  Golang >  Go问答

Docker 构建 LakeFS 失败

来源:stackoverflow

时间:2024-02-08 08:06:25 171浏览 收藏

哈喽!今天心血来潮给大家带来了《Docker 构建 LakeFS 失败》,想必大家应该对Golang都不陌生吧,那么阅读本文就都不会很困难,以下内容主要涉及到,若是你正在学习Golang,千万别错过这篇文章~希望能帮助到你!

问题内容

我正在尝试搭建一个“本地”数据处理生态系统，其中包括 Presto、Spark、Hive、lakeFS 以及其他一些组件。

我的 docker-compose.yml 看起来像这样:

version: "3.5"
services:

  lakefs:
    image: treeverse/lakefs:latest
    container_name: lakefs
    depends_on:
      - minio-setup
    ports:
      - "8000:8000"
    environment:
      # Environment variable names are case-sensitive; lakeFS/lakectl only read
      # the uppercase LAKEFS_* / LAKECTL_* names (the lowercase forms are ignored).
      - LAKEFS_DATABASE_TYPE=local
      - LAKEFS_BLOCKSTORE_TYPE=s3
      - LAKEFS_BLOCKSTORE_S3_FORCE_PATH_STYLE=true
      - LAKEFS_BLOCKSTORE_S3_ENDPOINT=http://minio:9000
      - LAKEFS_BLOCKSTORE_S3_CREDENTIALS_ACCESS_KEY_ID=minioadmin
      - LAKEFS_BLOCKSTORE_S3_CREDENTIALS_SECRET_ACCESS_KEY=minioadmin
      - LAKEFS_AUTH_ENCRYPT_SECRET_KEY=some random secret string
      # No value: passed through from the host environment when set there.
      - LAKEFS_STATS_ENABLED
      - LAKEFS_LOGGING_LEVEL
      # AWS documentation example credentials; must match the ones used by
      # `lakefs setup` in the command below.
      - LAKECTL_CREDENTIALS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE
      - LAKECTL_CREDENTIALS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
      - LAKECTL_SERVER_ENDPOINT_URL=http://localhost:8000
    entrypoint: ["/bin/sh", "-c"]
    command:
      # Literal block scalar: initialize lakeFS, start it in the background,
      # then create the example repository once the API answers.
      - |
        lakefs setup --local-settings --user-name docker --access-key-id AKIAIOSFODNN7EXAMPLE --secret-access-key wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY || true
        lakefs run --local-settings &
        wait-for -t 60 lakefs:8000 -- lakectl repo create lakefs://example s3://example || true
        wait

  minio-setup:
    # One-shot job: creates the "example" bucket used as the lakeFS blockstore.
    image: minio/mc
    container_name: minio-setup
    environment:
      # mc resolves host aliases from MC_HOST_<alias>; the prefix must be
      # uppercase, and the alias ("lakefs") must match the `mb` target below.
      - MC_HOST_lakefs=http://minioadmin:minioadmin@minio:9000
    depends_on:
      - minio
    command: ["mb", "lakefs/example"]

  minio:
    # S3-compatible object store backing lakeFS.
    image: minio/minio
    container_name: minio
    ports:
      - "9000:9000"  # S3 API
      - "9001:9001"  # web console
    entrypoint:
      - minio
      - server
      - /data
      - --console-address
      - ":9001"

  mariadb:
    # Relational database backing the Hive metastore.
    image: mariadb:10
    container_name: mariadb
    environment:
      # The mariadb image entrypoint only honours the uppercase MYSQL_* names;
      # lowercase variants are silently ignored.
      MYSQL_ROOT_PASSWORD: admin
      MYSQL_USER: admin
      MYSQL_PASSWORD: admin
      MYSQL_DATABASE: metastore_db

  hive-metastore:
    build: hive
    container_name: hive
    depends_on:
      - mariadb
    ports:
      - "9083:9083"  # thrift metastore port
    environment:
      # Env var names are case-sensitive; the locally-built hive image expects
      # the uppercase DB_URI.
      - DB_URI=mariadb:3306
    volumes:
      - ./etc/hive-site.xml:/opt/apache-hive-bin/conf/hive-site.xml
    ulimits:
      nofile:
        soft: 65536
        hard: 65536

  hive-server:
    build: hive
    container_name: hiveserver2
    ports:
      - "10001:10000"
    depends_on:
      - hive-metastore
    environment:
      # Uppercase name required; see hive-metastore above.
      - DB_URI=mariadb:3306
    volumes:
      - ./etc/hive-site.xml:/opt/apache-hive-bin/conf/hive-site.xml
    ulimits:
      nofile:
        soft: 65536
        hard: 65536
    # Wait for the metastore (container_name "hive") before starting hiveserver2.
    entrypoint: [
      "wait-for-it", "-t", "60", "hive:9083", "--",
      "hive", "--service", "hiveserver2", "--hiveconf", "hive.root.logger=INFO,console"]

  hive-client:
    # On-demand beeline shell; only started with `docker compose --profile client`.
    build: hive
    profiles: ["client"]
    entrypoint:
      - beeline
      - "-u"
      - jdbc:hive2://hiveserver2:10000

  trino:
    # Trino query engine; the mounted s3.properties catalog points it at lakeFS/MinIO.
    image: trinodb/trino:358
    volumes:
      - ./etc/s3.properties:/etc/trino/catalog/s3.properties
    ports:
      - "48080:8080"

  trino-client:
    # On-demand Trino CLI against the trino service; `--profile client` only.
    image: trinodb/trino:358
    profiles: ["client"]
    entrypoint:
      - trino
      - --server
      - trino:8080
      - --catalog
      - s3
      - --schema
      - default

  spark:
    # Spark master. The Bitnami image is configured exclusively through the
    # uppercase SPARK_* environment variables; lowercase names do nothing.
    image: docker.io/bitnami/spark:3
    container_name: spark
    environment:
      - SPARK_MODE=master
      - SPARK_MASTER_HOST=spark
      - SPARK_RPC_AUTHENTICATION_ENABLED=no
      - SPARK_RPC_ENCRYPTION_ENABLED=no
      - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no
      - SPARK_SSL_ENABLED=no
    ports:
      - "18080:8080"  # master web UI
    volumes:
      - ./etc/hive-site.xml:/opt/bitnami/spark/conf/hive-site.xml

  spark-worker:
    # Spark workers (3 replicas); no container_name so replicas can coexist.
    image: docker.io/bitnami/spark:3
    ports:
      - "8081"  # worker UI, host port assigned dynamically per replica
    environment:
      # Bitnami image reads the uppercase SPARK_* names only.
      - SPARK_MODE=worker
      - SPARK_MASTER_URL=spark://spark:7077
      - SPARK_WORKER_MEMORY=1G
      - SPARK_WORKER_CORES=1
      - SPARK_RPC_AUTHENTICATION_ENABLED=no
      - SPARK_RPC_ENCRYPTION_ENABLED=no
      - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no
      - SPARK_SSL_ENABLED=no
    deploy:
      replicas: 3
    volumes:
      - ./etc/hive-site.xml:/opt/bitnami/spark/conf/hive-site.xml

  spark-submit:
    # On-demand spark-submit wrapper (`--profile client`); mounts the project
    # directory at /local so jobs can be submitted from the host.
    image: docker.io/bitnami/spark:3
    profiles: ["client"]
    entrypoint: /opt/bitnami/spark/bin/spark-submit
    environment:
      # Bitnami image reads the uppercase SPARK_* names only.
      - SPARK_MODE=worker
      - SPARK_MASTER_URL=spark://spark:7077
      - SPARK_WORKER_MEMORY=1G
      - SPARK_WORKER_CORES=1
      - SPARK_RPC_AUTHENTICATION_ENABLED=no
      - SPARK_RPC_ENCRYPTION_ENABLED=no
      - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no
      - SPARK_SSL_ENABLED=no
    volumes:
      - ./:/local
      - ./etc/hive-site.xml:/opt/bitnami/spark/conf/hive-site.xml

  spark-sql:
    # On-demand spark-sql shell against the master (`--profile client`).
    image: docker.io/bitnami/spark:3
    profiles: ["client"]
    environment:
      # Bitnami image reads the uppercase SPARK_* names only.
      - SPARK_MODE=worker
      - SPARK_MASTER_URL=spark://spark:7077
      - SPARK_WORKER_MEMORY=1G
      - SPARK_WORKER_CORES=1
      - SPARK_RPC_AUTHENTICATION_ENABLED=no
      - SPARK_RPC_ENCRYPTION_ENABLED=no
      - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no
      - SPARK_SSL_ENABLED=no
    volumes:
      - ./:/local
      - ./etc/hive-site.xml:/opt/bitnami/spark/conf/hive-site.xml
    command: ["spark-sql", "--master", "spark://spark:7077"]


  spark-thrift:
    # Spark thrift server, started via the mounted custom entrypoint script.
    image: docker.io/bitnami/spark:3
    container_name: spark-thrift
    command: ["bash", "-c", "/opt/bitnami/entrypoint.sh"]
    depends_on:
      - spark
    environment:
      # Bitnami image reads the uppercase SPARK_* names only.
      # The original listed SPARK_MODE twice (master, then worker); with list-form
      # env the last entry wins, so the effective value — worker — is kept explicitly.
      - SPARK_MODE=worker
      - SPARK_MASTER_URL=spark://spark:7077
      - SPARK_RPC_AUTHENTICATION_ENABLED=no
      - SPARK_RPC_ENCRYPTION_ENABLED=no
      - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no
    volumes:
      - ./etc/spark-thrift-entrypoint.sh:/opt/bitnami/entrypoint.sh
      - ./etc/hive-site.xml:/opt/bitnami/spark/conf/hive-site.xml


  create-dbt-schema-main:
    # One-shot client job: (re)creates the dbt_main schema in the s3 catalog.
    image: trinodb/trino:358
    profiles: ["client"]
    entrypoint:
      - trino
      - --server
      - trino:8080
      - --catalog
      - s3
      - --execute
      - "drop schema if exists dbt_main  ;create schema dbt_main with (location = 's3://example/main/dbt' )"

  dbt:
    # On-demand dbt CLI container (`--profile client`), built from ./dbt.
    build: dbt
    profiles: ["client"]
    volumes:
      - ./dbt/dbt-project:/usr/app
      - ./dbt/profiles.yml:/root/.dbt/profiles.yml
    entrypoint: ["dbt"]

  notebook:
    # To log in to the Jupyter notebook, use password: lakefs
    build: jupyter
    container_name: notebook
    ports:
      # Quoted: unquoted colon-separated digits risk YAML's sexagesimal
      # number parsing; port mappings should always be strings.
      - "8888:8888"
    volumes:
      - ./etc/jupyter_notebook_config.py:/home/jovyan/.jupyter/jupyter_notebook_config.py
      - ./etc/hive-site.xml:/usr/local/spark/conf/hive-site.xml

networks:
  # All services share one user-defined bridge network named "bagel".
  default:
    name: bagel

当我运行“docker compose up”时,出现此错误:

 => ERROR [build 7/8] RUN --mount=type=cache,target=/root/.cache/go-build     --mount=type=cache,target=/go/pkg     GOOS=linux GOARCH=amd64      0.4s
 => CACHED [lakefs 2/8] RUN apk add -U --no-cache ca-certificates                                                                                0.0s
 => CACHED [lakefs 3/8] RUN apk add netcat-openbsd                                                                                               0.0s
 => CACHED [lakefs 4/8] WORKDIR /app                                                                                                             0.0s
 => CACHED [lakefs 5/8] COPY ./scripts/wait-for ./                                                                                               0.0s
------                                                                                                                                                
 > [build 7/8] RUN --mount=type=cache,target=/root/.cache/go-build     --mount=type=cache,target=/go/pkg     GOOS=linux GOARCH=amd64     go build -ldflags "-X github.com/treeverse/lakefs/pkg/version.Version=dev" -o lakefs ./cmd/lakefs:
#0 0.407 webui/content.go:7:12: pattern dist: no matching files found
------
failed to solve: executor failed running [/bin/sh -c GOOS=$TARGETOS GOARCH=$TARGETARCH     go build -ldflags "-X github.com/treeverse/lakefs/pkg/version.Version=${VERSION}" -o lakefs ./cmd/lakefs]: exit code: 1

我的操作系统是:

Linux b460mds3hacy1 5.15.0-58-generic #64~20.04.1-Ubuntu SMP Fri Jan 6 16:42:31 UTC 2023 x86_64 x86_64 x86_64 GNU/Linux

我的 Go 版本是：

go version go1.16.7 linux/amd64

我应该怎样做才能克服这个错误?


正确答案


奇怪 - docker-compose 使用一个镜像,它应该只是拉取它而不是尝试构建一个 docker 镜像。 您可以验证工作目录是否包含您的 docker-compose 吗? 您还可以在调用 docker-compose up 之前通过 docker-compose pull 验证您使用的是最新的镜像。

理论要掌握,实操不能落!以上关于《Docker 构建 LakeFS 失败》的详细介绍,大家都掌握了吧!如果想要继续提升自己的能力,那么就来关注golang学习网公众号吧!

声明:本文转载于:stackoverflow 如有侵犯,请联系study_golang@163.com删除
相关阅读
更多>
最新阅读
更多>
课程推荐
更多>