Linux 정보기록

컨테이너&가상화

컨테이너&가상화

CRI-O기반의 k8s 설치

사전사항

  1. OS환경설정
    $> swapoff -a
    
    $> cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
    br_netfilter
    EOF
    
    $> cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    EOF
    
    $> sudo sysctl --system
  2. crio / kubernetes 패키지 리포지터리 구성
    $> cat /etc/yum.repos.d/libcontainers.repo
    [devel_kubic_libcontainers_stable]
    name=Stable Releases of Upstream github.com/containers packages (CentOS_8)
    type=rpm-md
    baseurl=https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_8/
    gpgcheck=1
    gpgkey=https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_8/repodata/repomd.xml.key
    enabled=1


    $> cat /etc/yum.repos.d/cri-o-1.23.repo
    [devel_kubic_libcontainers_stable_cri-o_1.23]
    name=devel:kubic:libcontainers:stable:cri-o:1.23 (CentOS_8)
    type=rpm-md
    baseurl=https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/1.23/CentOS_8/
    gpgcheck=1
    gpgkey=https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/1.23/CentOS_8/repodata/repomd.xml.key
    enabled=1
    

    $> cat /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-$basearch
    enabled=1
    gpgcheck=1
    repo_gpgcheck=1
    gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg

  3. 패키지 설치
    $> yum install kubelet kubeadm kubectl libcgroup cri-o cri-tools -y
    $> systemctl enable crio --now
    $> systemctl enable kubelet

클러스터 생성 (control plane 1번에서만 수행)

  1. kubeadm 클러스터 생성
    $> kubeadm init --control-plane-endpoint 172.21.107.238:6443 --pod-network-cidr 10.250.0.0/16 --ignore-preflight-errors=all --upload-certs
    
    ## 결과값중에 control / worker 노드별 join 명령이 다르기 때문에 별도로 복사해놓아야 함
    


    ##Control node용
    $> kubeadm join 172.21.107.238:6443 --token abcd \
           --discovery-token-ca-cert-hash sha256:yyy \
           --control-plane --certificate-key zzz
    

    ## Worker Node용
    $> kubeadm join 172.21.107.238:6443 --token abcd \
           --discovery-token-ca-cert-hash sha256:yyyy \
     --ignore-preflight-errors=all

  2. 인증서 정보 복사
    $> mkdir -p $HOME/.kube
    $> /bin/cp /etc/kubernetes/admin.conf $HOME/.kube/config
    $> chown $(id -u):$(id -g) $HOME/.kube/config
    $> export KUBECONFIG=/etc/kubernetes/admin.conf
  3. CNI 설치(Calico)
    $> curl https://projectcalico.docs.tigera.io/manifests/calico.yaml -O
    $> kubectl apply -f calico.yaml

클러스터 연동

  1. 타 Control plane 연동 (Control Plane 한대씩 순차 작업 수행)
    $> kubeadm join 172.21.107.238:6443 --token abcd \
           --discovery-token-ca-cert-hash sha256:yyy \
           --control-plane --certificate-key zzz
  2. 노드 연동 확인 (Control plane에서 수행)
    $> kubectl get no
    NAME                         STATUS   ROLES                  AGE    VERSION
    k8stesttx-k8s-master-dev01   Ready    control-plane,master   3h6m   v1.23.5
    k8stesttx-k8s-master-dev02   Ready    control-plane,master   3h6m   v1.23.5
    k8stesttx-k8s-master-dev03   Ready    control-plane,master   3h6m   v1.23.5
  3. Worker Node  연동
    $> kubeadm join 172.21.107.238:6443 --token abcd \
           --discovery-token-ca-cert-hash sha256:yyyy \
     --ignore-preflight-errors=all
  4. 노드 연동 확인 (Control plane에서 수행)
    $> kubectl get no
    NAME                         STATUS   ROLES                  AGE    VERSION
    ...
    k8stesttx-k8s-worker-dev01   Ready    <none>                 40m    v1.23.5
    k8stesttx-k8s-worker-dev02   Ready    <none>                 40m    v1.23.5
    k8stesttx-k8s-worker-dev03   Ready    <none>                 40m    v1.23.5

k8s 인증서 10년으로 연장


  1. 인증서 연장 스크립트
    #!/usr/bin/env bash
    # Renew kubeadm-managed certificates in place (validity set by CERT_DAYS in main).
    # Usage: ./update-kubeadm-cert.sh {all|master|check}

    set -o errexit
    set -o pipefail
    # set -o xtrace
    # NOTE(review): 'set -o nounset' is not enabled, so unset variables expand to
    # empty strings — confirm this is intentional before tightening it.

    # set output color (ANSI escape sequences; expanded by printf via %b / format strings)
    NC='\033[0m'
    RED='\033[31m'
    GREEN='\033[32m'
    YELLOW='\033[33m'
    BLUE='\033[34m'
    
    log::err() {
      # Print each argument as its own timestamped ERROR line (red label) on stdout.
      local prefix
      prefix="[$(date +'%Y-%m-%dT%H:%M:%S.%2N%z')][${RED}ERROR${NC}]"
      printf "${prefix} %b\n" "$@"
    }
    
    log::info() {
      # Print each argument as its own timestamped INFO line on stdout (%b interprets escapes).
      local prefix
      prefix="[$(date +'%Y-%m-%dT%H:%M:%S.%2N%z')][INFO]"
      printf "${prefix} %b\n" "$@"
    }
    
    log::warning() {
      # Print each argument as its own timestamped WARNING line (yellow label,
      # followed by an explicit color reset before the message body).
      local prefix
      prefix="[$(date +'%Y-%m-%dT%H:%M:%S.%2N%z')][${YELLOW}WARNING${NC}]"
      printf "${prefix} \033[0m%b\n" "$@"
    }
    
    check_file() {
      # Abort the whole script when the given path is missing or unreadable;
      # return 0 (implicitly) when it is readable.
      local path=${1}
      if [[ -r "${path}" ]]; then
        return 0
      fi
      log::err "can not find ${path}"
      exit 1
    }
    
    # Get the X509v3 Subject Alternative Name value from the existing certificate
    # ${1}.crt so a renewed certificate can reuse it.
    # Output (stdout): the SAN list line from 'openssl x509 -text'.
    # The sed strips "<spaces>Address" so openssl's display form "IP Address:x"
    # becomes "IP:x", the form accepted back in a CSR subjectAltName.
    cert::get_subject_alt_name() {
     local cert=${1}.crt
     local alt_name

      check_file "${cert}"
     # grep -A1 keeps the 'Alternative Name' header plus its value line; tail keeps the value.
     alt_name=$(openssl x509 -text -noout -in "${cert}" | grep -A1 'Alternative' | tail -n1 | sed 's/[[:space:]]*Address//g')
     printf "%s\n" "${alt_name}"
    }
    
    # Get the subject from the existing certificate ${1}.crt and reformat
    # "Subject: O=x, CN=y" into "/O=x/CN=y" for 'openssl req -subj'.
    # NOTE(review): the comma substitution 's/\,/\//' has no /g flag, so only the
    # FIRST comma becomes '/'; a subject with three or more RDNs would keep later
    # commas — confirm against the certificates actually in use.
    cert::get_subj() {
     local cert=${1}.crt
     local subj

      check_file "${cert}"
     subj=$(openssl x509 -text -noout -in "${cert}" | grep "Subject:" | sed 's/Subject:/\//g;s/\,/\//;s/[[:space:]]//g')
     printf "%s\n" "${subj}"
    }
    
    cert::backup_file() {
      # Copy ${1} to ${1}.old-YYYYMMDD once per day; warn and skip when that
      # day's backup already exists (never overwrites an earlier backup).
      # Fix: compute the dated backup name once instead of forking 'date' four
      # times, so the test/copy/log always agree even across a midnight rollover.
      local file=${1}
      local backup
      backup=${file}.old-$(date +%Y%m%d)
      if [[ ! -e ${backup} ]]; then
        cp -rp "${file}" "${backup}"
        log::info "backup ${file} to ${backup}"
      else
        log::warning "does not backup, ${backup} already exists"
      fi
    }
    
    # Print the 'Not After' (expiration) date of certificate ${1}.crt to stdout.
    # awk splits on ": " so the full date string after "Not After : " is kept.
    cert::check_cert_expiration() {
     local cert=${1}.crt
     local cert_expires

     cert_expires=$(openssl x509 -text -noout -in "${cert}" | awk -F ": " '/Not After/{print$2}')
     printf "%s\n" "${cert_expires}"
    }
    
    # Print the expiration date of the client certificate embedded in kubeconfig
    # ${1}.conf: the base64 'client-certificate-data' value is decoded and fed to
    # openssl via process substitution (no temp file on disk).
    cert::check_kubeconfig_expiration() {
     local config=${1}.conf
     local cert
     local cert_expires

     cert=$(grep "client-certificate-data" "${config}" | awk '{print$2}' | base64 -d)
     cert_expires=$(openssl x509 -text -noout -in <(printf "%s" "${cert}") | awk -F ": " '/Not After/{print$2}')
     printf "%s\n" "${cert_expires}"
    }
    
    # check etcd certificates expiration
    # Prints "<path>.crt  <Not After date>" for each etcd-related certificate.
    # Relies on the ETCD_CERT_* globals assigned in main().
    cert::check_etcd_certs_expiration() {
     local cert
     local certs

     certs=(
       "${ETCD_CERT_CA}"
       "${ETCD_CERT_SERVER}"
       "${ETCD_CERT_PEER}"
       "${ETCD_CERT_HEALTHCHECK_CLIENT}"
       "${ETCD_CERT_APISERVER_ETCD_CLIENT}"
     )

     for cert in "${certs[@]}"; do
       # NOTE(review): ${cert} carries no extension, so this tests an extensionless
       # path that never exists and '! -r' is effectively always true (every entry
       # is printed). The intended guard was presumably on "${cert}.crt" — confirm
       # before changing, since "always print" is the current observable behavior.
       if [[ ! -r ${cert} ]]; then
         printf "%-50s%-30s\n" "${cert}.crt" "$(cert::check_cert_expiration "${cert}")"
       fi
     done
    }
    
    # check master certificates expiration
    # Prints a two-column table (CERTIFICATE / EXPIRES): first the kubeconf-embedded
    # client certs (controller-manager, scheduler, admin), then the PKI certs.
    # Relies on the CERT_* / CONF_* / FRONT_PROXY_* globals assigned in main().
    cert::check_master_certs_expiration() {
     local certs
     local kubeconfs
     local cert
     local conf

     certs=(
       "${CERT_CA}"
       "${CERT_APISERVER}"
       "${CERT_APISERVER_KUBELET_CLIENT}"
       "${FRONT_PROXY_CA}"
       "${FRONT_PROXY_CLIENT}"
     )

     kubeconfs=(
       "${CONF_CONTROLLER_MANAGER}"
       "${CONF_SCHEDULER}"
       "${CONF_ADMIN}"
     )

     printf "%-50s%-30s\n" "CERTIFICATE" "EXPIRES"

     for conf in "${kubeconfs[@]}"; do
       # NOTE(review): ${conf} has no extension, so '! -r' on the extensionless
       # path is effectively always true; also the label prints a '.config'
       # suffix although the files on disk are '.conf' — cosmetic only.
       if [[ ! -r ${conf} ]]; then
         printf "%-50s%-30s\n" "${conf}.config" "$(cert::check_kubeconfig_expiration "${conf}")"
       fi
     done

     for cert in "${certs[@]}"; do
       # Same always-true pattern as above (extensionless path test).
       if [[ ! -r ${cert} ]]; then
         printf "%-50s%-30s\n" "${cert}.crt" "$(cert::check_cert_expiration "${cert}")"
       fi
     done
    }
    
    # check all certificates expiration
    # Convenience wrapper: control-plane certs/kubeconfs first, then etcd certs.
    cert::check_all_expiration() {
      cert::check_master_certs_expiration
      cert::check_etcd_certs_expiration
    }
    
    # generate certificate with client, server or peer usage
    # Args:
    #   $1 (the name of certificate)
    #   $2 (the type of certificate, must be one of client, server, peer)
    #   $3 (the subject of certificates)
    #   $4 (the validity of certificates) (days)
    #   $5 (the name of ca)
    #   $6 (the x509v3 subject alternative name of certificate when the type of certificate is server or peer)
    # Notes:
    #   - Reuses the EXISTING private key ${1}.key: the check_file loop requires the
    #     CA cert/key AND the old cert/key to already exist; only the cert is reissued.
    #   - csr_conf is assigned without 'local', so it leaks into the global scope.
    #   - openssl stderr/stdout is discarded; errexit still aborts on failure.
    cert::gen_cert() {
     local cert_name=${1}
     local cert_type=${2}
     local subj=${3}
     local cert_days=${4}
     local ca_name=${5}
     local alt_name=${6}
     local ca_cert=${ca_name}.crt
     local ca_key=${ca_name}.key
     local cert=${cert_name}.crt
     local key=${cert_name}.key
     local csr=${cert_name}.csr
     local common_csr_conf='distinguished_name = dn\n[dn]\n[v3_ext]\nkeyUsage = critical, digitalSignature, keyEncipherment\n'

     for file in "${ca_cert}" "${ca_key}" "${cert}" "${key}"; do
        check_file "${file}"
     done

     # Build the [v3_ext] section for the chosen certificate role.
     case "${cert_type}" in
      client)
       csr_conf=$(printf "%bextendedKeyUsage = clientAuth\n" "${common_csr_conf}")
        ;;
      server)
       csr_conf=$(printf "%bextendedKeyUsage = serverAuth\nsubjectAltName = %b\n" "${common_csr_conf}" "${alt_name}")
        ;;
      peer)
       csr_conf=$(printf "%bextendedKeyUsage = serverAuth, clientAuth\nsubjectAltName = %b\n" "${common_csr_conf}" "${alt_name}")
        ;;
      *)
        log::err "unknow, unsupported certs type: ${YELLOW}${cert_type}${NC}, supported type: client, server, peer"
       exit 1
        ;;
     esac

     # gen csr (config supplied inline through process substitution)
     openssl req -new -key "${key}" -subj "${subj}" -reqexts v3_ext \
       -config <(printf "%b" "${csr_conf}") \
       -out "${csr}" >/dev/null 2>&1
     # gen cert (signed by the CA; -CAcreateserial writes/updates the .srl file)
     openssl x509 -in "${csr}" -req -CA "${ca_cert}" -CAkey "${ca_key}" -CAcreateserial -extensions v3_ext \
       -extfile <(printf "%b" "${csr_conf}") \
       -days "${cert_days}" -out "${cert}" >/dev/null 2>&1

      rm -f "${csr}"
    }
    
    # Renew the client certificate embedded in kubeconf ${1}.conf: extract the old
    # key and cert to temporary ${1}.key/${1}.crt files, reissue the cert with the
    # same subject via cert::gen_cert, write the new base64 value back in place
    # with sed -i, then remove the temporaries. The private key is REUSED, not
    # rotated. Uses the CERT_DAYS / CERT_CA globals set in main().
    cert::update_kubeconf() {
     local cert_name=${1}
     local kubeconf_file=${cert_name}.conf
     local cert=${cert_name}.crt
     local key=${cert_name}.key
     local subj
     local cert_base64

      check_file "${kubeconf_file}"
     # get the key from the old kubeconf
     grep "client-key-data" "${kubeconf_file}" | awk '{print$2}' | base64 -d >"${key}"
     # get the old certificate from the old kubeconf
     grep "client-certificate-data" "${kubeconf_file}" | awk '{print$2}' | base64 -d >"${cert}"
     # get subject from the old certificate
     subj=$(cert::get_subj "${cert_name}")
      cert::gen_cert "${cert_name}" "client" "${subj}" "${CERT_DAYS}" "${CERT_CA}"
     # get certificate base64 code (-w 0 keeps it on one line for the YAML value)
     cert_base64=$(base64 -w 0 "${cert}")

     # set certificate base64 code to kubeconf
     sed -i 's/client-certificate-data:.*/client-certificate-data: '"${cert_base64}"'/g' "${kubeconf_file}"

      rm -f "${cert}"
      rm -f "${key}"
    }
    
    # Reissue the etcd certificates (server, peer, healthcheck-client,
    # apiserver-etcd-client) with CERT_DAYS validity, keeping each certificate's
    # existing key, subject and SANs, then restart the etcd container.
    cert::update_etcd_cert() {
     local subj
     local subject_alt_name
     local cert

     # generate etcd server,peer certificate
     # /etc/kubernetes/pki/etcd/server
     # /etc/kubernetes/pki/etcd/peer
     for cert in ${ETCD_CERT_SERVER} ${ETCD_CERT_PEER}; do
       subj=$(cert::get_subj "${cert}")
       subject_alt_name=$(cert::get_subject_alt_name "${cert}")
        cert::gen_cert "${cert}" "peer" "${subj}" "${CERT_DAYS}" "${ETCD_CERT_CA}" "${subject_alt_name}"
        # fix: the artifact reissued here is the certificate (.crt), not a kubeconf (.conf)
        log::info "${GREEN}updated ${BLUE}${cert}.crt${NC}"
     done

     # generate etcd healthcheck-client,apiserver-etcd-client certificate
     # /etc/kubernetes/pki/etcd/healthcheck-client
     # /etc/kubernetes/pki/apiserver-etcd-client
     for cert in ${ETCD_CERT_HEALTHCHECK_CLIENT} ${ETCD_CERT_APISERVER_ETCD_CLIENT}; do
       subj=$(cert::get_subj "${cert}")
        cert::gen_cert "${cert}" "client" "${subj}" "${CERT_DAYS}" "${ETCD_CERT_CA}"
        log::info "${GREEN}updated ${BLUE}${cert}.crt${NC}"
     done

     # restart etcd
     # NOTE(review): this runbook installs a CRI-O based cluster, where 'docker ps'
     # finds nothing and the '|| true' turns this into a silent no-op — restart
     # etcd via crictl/systemd manually on CRI-O nodes.
     docker ps | awk '/k8s_etcd/{print$1}' | xargs -r -I '{}' docker restart {} >/dev/null 2>&1 || true
      log::info "restarted etcd"
    }
    
    # Reissue all control-plane certificates and kubeconf-embedded client certs
    # (apiserver, apiserver-kubelet-client, controller-manager/scheduler/admin/
    # kubelet kubeconfs, front-proxy-client), refresh ~/.kube/config from the new
    # admin.conf, then restart the static-pod containers and kubelet.
    cert::update_master_cert() {
     local subj
     local subject_alt_name
     local conf

     # generate apiserver server certificate
     # /etc/kubernetes/pki/apiserver
     subj=$(cert::get_subj "${CERT_APISERVER}")
     subject_alt_name=$(cert::get_subject_alt_name "${CERT_APISERVER}")
      cert::gen_cert "${CERT_APISERVER}" "server" "${subj}" "${CERT_DAYS}" "${CERT_CA}" "${subject_alt_name}"
      log::info "${GREEN}updated ${BLUE}${CERT_APISERVER}.crt${NC}"

     # generate apiserver-kubelet-client certificate
     # /etc/kubernetes/pki/apiserver-kubelet-client
     subj=$(cert::get_subj "${CERT_APISERVER_KUBELET_CLIENT}")
      cert::gen_cert "${CERT_APISERVER_KUBELET_CLIENT}" "client" "${subj}" "${CERT_DAYS}" "${CERT_CA}"
      log::info "${GREEN}updated ${BLUE}${CERT_APISERVER_KUBELET_CLIENT}.crt${NC}"

     # generate kubeconf for controller-manager,scheduler and kubelet
     # /etc/kubernetes/controller-manager,scheduler,admin,kubelet.conf
     for conf in ${CONF_CONTROLLER_MANAGER} ${CONF_SCHEDULER} ${CONF_ADMIN} ${CONF_KUBELET}; do
       if [[ ${conf##*/} == "kubelet" ]]; then
         # Skip kubelet.conf when it points at kubelet-client-current.pem: the
         # kubelet then rotates its own client cert and must not be overwritten.
         # https://github.com/kubernetes/kubeadm/issues/1753
         # set +e/-e guard: grep's non-zero "no match" must not trip errexit.
         set +e
          grep kubelet-client-current.pem /etc/kubernetes/kubelet.conf >/dev/null 2>&1
         kubelet_cert_auto_update=$?
         set -e
         if [[ "$kubelet_cert_auto_update" == "0" ]]; then
            log::info "does not need to update kubelet.conf"
           continue
         fi
       fi

       # update kubeconf
       cert::update_kubeconf "${conf}"
        log::info "${GREEN}updated ${BLUE}${conf}.conf${NC}"

       # copy admin.conf to ${HOME}/.kube/config
       if [[ ${conf##*/} == "admin" ]]; then
          mkdir -p "${HOME}/.kube"
         local config=${HOME}/.kube/config
         local config_backup
         config_backup=${HOME}/.kube/config.old-$(date +%Y%m%d)
         # Back up the current kubectl config once per day before replacing it.
         if [[ -f ${config} ]] && [[ ! -f ${config_backup} ]]; then
            cp -fp "${config}" "${config_backup}"
            log::info "backup ${config} to ${config_backup}"
         fi
          cp -fp "${conf}.conf" "${HOME}/.kube/config"
          log::info "copy the admin.conf to ${HOME}/.kube/config"
       fi
     done

     # generate front-proxy-client certificate
     # /etc/kubernetes/pki/front-proxy-client
     subj=$(cert::get_subj "${FRONT_PROXY_CLIENT}")
      cert::gen_cert "${FRONT_PROXY_CLIENT}" "client" "${subj}" "${CERT_DAYS}" "${FRONT_PROXY_CA}"
      log::info "${GREEN}updated ${BLUE}${FRONT_PROXY_CLIENT}.crt${NC}"

     # restart apiserver, controller-manager, scheduler and kubelet
     # NOTE(review): 'docker ps' restarts are a no-op on this runbook's CRI-O
     # cluster ('|| true' hides it) — restart the static pods via crictl if needed.
     for item in "apiserver" "controller-manager" "scheduler"; do
        docker ps | awk '/k8s_kube-'${item}'/{print$1}' | xargs -r -I '{}' docker restart {} >/dev/null 2>&1 || true
        log::info "restarted ${item}"
     done
      systemctl restart kubelet || true
      log::info "restarted kubelet"
    }
    
    # Entry point. $1 selects the scope:
    #   master - renew control-plane certs/kubeconfs only
    #   all    - renew etcd certs plus control-plane certs/kubeconfs
    #   check  - only print current expiration dates (no changes)
    # All CERT_*/CONF_*/ETCD_* path globals used by the cert:: helpers are set here.
    main() {
     local node_type=$1

     CERT_DAYS=3650

     KUBE_PATH=/etc/kubernetes
     PKI_PATH=${KUBE_PATH}/pki

     # master certificates path
     # apiserver
     CERT_CA=${PKI_PATH}/ca
     CERT_APISERVER=${PKI_PATH}/apiserver
     CERT_APISERVER_KUBELET_CLIENT=${PKI_PATH}/apiserver-kubelet-client
     CONF_CONTROLLER_MANAGER=${KUBE_PATH}/controller-manager
     CONF_SCHEDULER=${KUBE_PATH}/scheduler
     CONF_ADMIN=${KUBE_PATH}/admin
     CONF_KUBELET=${KUBE_PATH}/kubelet
     # front-proxy
     FRONT_PROXY_CA=${PKI_PATH}/front-proxy-ca
     FRONT_PROXY_CLIENT=${PKI_PATH}/front-proxy-client

     # etcd certificates path
     ETCD_CERT_CA=${PKI_PATH}/etcd/ca
     ETCD_CERT_SERVER=${PKI_PATH}/etcd/server
     ETCD_CERT_PEER=${PKI_PATH}/etcd/peer
     ETCD_CERT_HEALTHCHECK_CLIENT=${PKI_PATH}/etcd/healthcheck-client
     ETCD_CERT_APISERVER_ETCD_CLIENT=${PKI_PATH}/apiserver-etcd-client

     case ${node_type} in
     # etcd)
     # # update etcd certificates
     #   cert::update_etcd_cert
     # ;;
     master)
       # check certificates expiration
       cert::check_master_certs_expiration
       # backup $KUBE_PATH to $KUBE_PATH.old-$(date +%Y%m%d)
       cert::backup_file "${KUBE_PATH}"
       # update master certificates and kubeconf
       log::info "${GREEN}updating...${NC}"
        cert::update_master_cert
        log::info "${GREEN}done!!!${NC}"
       # check certificates expiration after certificates updated
       cert::check_master_certs_expiration
        ;;
      all)
       # check certificates expiration
       cert::check_all_expiration
       # backup $KUBE_PATH to $KUBE_PATH.old-$(date +%Y%m%d)
       cert::backup_file "${KUBE_PATH}"
       # update etcd certificates
       log::info "${GREEN}updating...${NC}"
        cert::update_etcd_cert
       # update master certificates and kubeconf
       cert::update_master_cert
        log::info "${GREEN}done!!!${NC}"
       # check certificates expiration after certificates updated
       cert::check_all_expiration
        ;;
      check)
       # check certificates expiration
       cert::check_all_expiration
        ;;
      *)
        # fix: 'check' was missing from the supported-type list in this message
        log::err "unknown, unsupported cert type: ${node_type}, supported type: \"all\", \"master\", \"check\""
       printf "Documentation: https://github.com/yuyicai/update-kube-cert
      example:
        '\033[32m./update-kubeadm-cert.sh all\033[0m' update all etcd certificates, master certificates and kubeconf
          /etc/kubernetes
          ├── admin.conf
          ├── controller-manager.conf
          ├── scheduler.conf
          ├── kubelet.conf
          └── pki
              ├── apiserver.crt
              ├── apiserver-etcd-client.crt
              ├── apiserver-kubelet-client.crt
              ├── front-proxy-client.crt
              └── etcd
                  ├── healthcheck-client.crt
                  ├── peer.crt
                  └── server.crt

        '\033[32m./update-kubeadm-cert.sh master\033[0m' update only master certificates and kubeconf
          /etc/kubernetes
          ├── admin.conf
          ├── controller-manager.conf
          ├── scheduler.conf
          ├── kubelet.conf
          └── pki
              ├── apiserver.crt
              ├── apiserver-kubelet-client.crt
              └── front-proxy-client.crt
    "
       exit 1
        ;;
     esac
    }

    main "$@"
  2. 업데이트 전 인증서 정보 확인
    $> kubeadm certs check-expiration
    [check-expiration] Reading configuration from the cluster...
    [check-expiration] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
    
    CERTIFICATE                EXPIRES                  RESIDUAL TIME   CERTIFICATE AUTHORITY   EXTERNALLY MANAGED
    admin.conf                 Apr 17, 2023 06:09 UTC   364d            ca                      no
    apiserver                  Apr 17, 2023 06:09 UTC   364d            ca                      no
    apiserver-etcd-client      Apr 17, 2023 06:09 UTC   364d            etcd-ca                 no
    apiserver-kubelet-client   Apr 17, 2023 06:09 UTC   364d            ca                      no
    controller-manager.conf    Apr 17, 2023 06:09 UTC   364d            ca                      no
    etcd-healthcheck-client    Apr 17, 2023 06:09 UTC   364d            etcd-ca                 no
    etcd-peer                  Apr 17, 2023 06:09 UTC   364d            etcd-ca                 no
    etcd-server                Apr 17, 2023 06:09 UTC   364d            etcd-ca                 no
    front-proxy-client         Apr 17, 2023 06:09 UTC   364d            front-proxy-ca          no
    scheduler.conf             Apr 17, 2023 06:09 UTC   364d            ca                      no
    
    CERTIFICATE AUTHORITY   EXPIRES                  RESIDUAL TIME   EXTERNALLY MANAGED
    ca                      Apr 17, 2032 04:17 UTC   9y              no
    etcd-ca                 Apr 17, 2032 04:17 UTC   9y              no
    front-proxy-ca          Apr 17, 2032 04:17 UTC   9y              no
  3. 인증서 업데이트 (Control Plane 1대씩 순차 작업 수행, 서버단위로 30초 가량 대기 필요)
    $> chmod +x cert_update.sh
    $> ./cert_update.sh
    ...
  4. 인증서 갱신정보 확인
    $> kubeadm certs check-expiration
    [check-expiration] Reading configuration from the cluster...
    [check-expiration] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
    
    CERTIFICATE                EXPIRES                  RESIDUAL TIME   CERTIFICATE AUTHORITY   EXTERNALLY MANAGED
    admin.conf                 Apr 17, 2032 06:09 UTC   9y              ca                      no
    apiserver                  Apr 17, 2032 06:09 UTC   9y              ca                      no
    apiserver-etcd-client      Apr 17, 2032 06:09 UTC   9y              etcd-ca                 no
    apiserver-kubelet-client   Apr 17, 2032 06:09 UTC   9y              ca                      no
    controller-manager.conf    Apr 17, 2032 06:09 UTC   9y              ca                      no
    etcd-healthcheck-client    Apr 17, 2032 06:09 UTC   9y              etcd-ca                 no
    etcd-peer                  Apr 17, 2032 06:09 UTC   9y              etcd-ca                 no
    etcd-server                Apr 17, 2032 06:09 UTC   9y              etcd-ca                 no
    front-proxy-client         Apr 17, 2032 06:09 UTC   9y              front-proxy-ca          no
    scheduler.conf             Apr 17, 2032 06:09 UTC   9y              ca                      no
    
    CERTIFICATE AUTHORITY   EXPIRES                  RESIDUAL TIME   EXTERNALLY MANAGED
    ca                      Apr 17, 2032 04:17 UTC   9y              no
    etcd-ca                 Apr 17, 2032 04:17 UTC   9y              no
    front-proxy-ca          Apr 17, 2032 04:17 UTC   9y              no

Reference

컨테이너&가상화

docker기반의 nexus설치

Nexus 소개

  1. NeXus Repository Manager (약어;NXRM)

  2. 지원 OS : Windows / Linux / Unix / OSX 

  3. 설치버전 :  3.29.2 OSS ('21.01.10 기준)

  4. License : Eclipse Public License-v 1.0

  5. Pro버전 추가 기능 : Atlassian Crowd지원, Staging / Build 분리 / 태깅 / 계정별 토큰 / HA / 아티팩트별 OSS보안 취약점 진단 / 기술지원 / 그룹 저장소 관리 기능 / SAML / SSO 기능

Nexus 시스템 요구사항

  1. 스펙정보

     

    리소스정보 최소사양 권장사양
    CPU 4Core 8Core
    Memory Host  : 8Gb
    JAVA Heap : 2Gb
    Host : 8Gb
    Java Heap : 4GB
    Disk 330M 이상 330M 이상
    • 사용가능 디스크 공간이 4GB이하일 경우 Readonly모드로 작동
    • Java Directory 메모리 설정공식은 (호스트메모리 * 0.6 ) - java heap

     

  2. 지원 가능 파일시스템

     

    파일시스템 종류 Embbed Blob 비 고
    XFS O O
    NFS V4 O O
    NFS V3 X X 오류발생
    AWS - EBS O O
    AWS - EFS X O Embbed 응답 이슈
    AWS - S3 X O 임베디드 데이터 적용불가
    AWS- S3호환 X X S3 호환 저장소는 지원 불가
    Samba/CIFS X O 임베디드 데이터 적용불가
    GlusterFS X X Split-brain 발생, 응답속도 지연
    Fuse X X 데이터 오류 발생

     

    * Embbed : 응답성이 빠른 스토리지 (DB / Elastic Search)
    * Blob : binary 저장소

  3. 디렉토리 구조

     

    디렉토리명 설  명
    bin 시작스크립트 / 시작관련 구성파일
    etc nxrm 구성파일 /런타임 구성파일
    lib nxrm 라이브러리 파일
    public nxrm 공개 리소스데이터
    system 프로그램 구성 데이터 / 플러그인
    blobs blob 저장소
    cache 캐시된 데이터
    db orientDB 데이터 저장
    elasticsearch 현재 구성된 elasticsearch 데이터
    health-check 저장소 상태 확인 기능 보고서
    keystores 저장소 관리자를 식별하는 키 데이터
    log 로그데이터
    nexus.log → 관리자 프로그램 로그,
    request.log → http요청로그
    jvm.log → jvm stdout, stderror 및 덤프데이터
    karaf.log → apache karaf 컨테이너 로그 파일
    audit → 감사 활성시 감사로그 데이터
    staks → 작업별 수행한 로그
    tmp → 임시저장소

Nexus 기능

  1. 리포지터리 종류
    • hosted : 서버에 저장되는 리포지터리
    • proxy : hosted와 연결하여 사용하는 cache용 리포지터리
    • group : 여러개의 hosted or proxy를 그룹으로 묶은 논리개념의 리포지터리
  2. 리포지터리 버전관리 방식
    • release : 정식릴리즈되는 아티팩트 저장소 / 동일버전으로 한번만 업로드 가능(필요시 삭제 후 업로드해야 함)
    • snapshot : 수시로 릴리즈 되는 아티팩트 저장소 / 같은 버전으로 여러번 업로드 가능
  3. 리포지터리 유형
    • maven2 : 메이븐에서 제공하는 메타데이터로 버전관리 (group / artifact id / version의 메타데이터로 관리함)
    • raw : 별도의 메타데이터는 없으며 파일 위치와 파일이름만 생성
    • yum : yum 리포지터리 저장소
  4. 아티팩트 관리 방식
    • curl을 이용한 바이너리 관리
    • test_repo에 있는 img-1.png 파일 다운로드
      $ curl -u 'test:test123' http://192.168.10.10:11000/repository/test_repo/img-1.png -o img-1.png

       

    • test.tar.gz 파일을 test_repo로 업로드

      $ curl -u 'test:test123' --upload-file ./test.tar.gz http://192.168.10.10:11000/repository/test_repo/test.tar.gz
    • 다중파일 업로드시
      $ find ./ -exec curl -u 'test:test123' -T ./{} http://192.168.10.10:11000/repository/test_repo/{} \;
    • test_repo에 있는 test.tar.gz 파일 삭제
      $> curl --request DELETE -u 'test:test123' http://192.168.10.10:11000/repository/test_repo/test.tar.gz

Nexus 설치 (Docker 기반)

  1. container pull 하기
    $> docker pull sonatype/nexus3:3.29.2     #2.x을 사용할 경우 sonatype/nexus를 사용하면 됨
  2. nexus 저장용 데이터 생성하기
    $> mkdir /data/nexus-repo
    $> chown -R 200 /data/nexus-repo/
  3. container 실행하기
    $ docker run -d -p 88081:8081 \
    -e TZ=Asia/Seoul  \
    -v /etc/localtime:/etc/localtime:ro \
    -v /data/nexus-repo/:/nexus-data \
    --name nexus-repo sonatype/nexus3:3.29.2
  4. Container log 확인 (started sonatype 메시지가 출력되면 정상구동 완료)
    $ docker logs -f nexus-repo
    ...
    2021-01-19 23:32:02,793+0900 INFO  [jetty-main-1] *SYSTEM org.sonatype.nexus.repository.httpbridge.internal.ViewServlet - Initialized
    2021-01-19 23:32:02,870+0900 INFO  [jetty-main-1] *SYSTEM org.eclipse.jetty.server.handler.ContextHandler - Started o.e.j.w.WebAppContext@637c45d9{Sonatype Nexus,/,file:~/~//opt/sonatype/nexus/public/,AVAILABLE}
    2021-01-19 23:32:02,928+0900 INFO  [jetty-main-1] *SYSTEM org.eclipse.jetty.server.AbstractConnector - Started ServerConnector@45bb642d{HTTP/1.1, (http/1.1)}{0.0.0.0:8081}
    2021-01-19 23:32:02,929+0900 INFO  [jetty-main-1] *SYSTEM org.eclipse.jetty.server.Server - Started @41677ms
    2021-01-19 23:32:02,930+0900 INFO  [jetty-main-1] *SYSTEM org.sonatype.nexus.bootstrap.jetty.JettyServer -
    ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~--
    \\Started Sonatype Nexus OSS 3.29.2-02
    \\~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~--
  5. web-ui 접속하기  – http://{{서버IP}}:88081/
  6. 초기 접속 화면
  7. 로그인 패스워드 확인
    $ docker exec -it nexus-repo cat /nexus-data/admin.password
    1123-aad-xxxx-xxxx-
  8. 로그인 계정 ( admin / 패스워드는 b.절차에서 확인된 패스워드 입력)
  9. admin 패스워드 설정 (변경할 패스워드 입력 후 Next)
  10. 마침 

Repository Cleanup 정책관리

  1. cleanup policies 정책 수립하기

    • 상단 톱니바퀴 모양  → Repository → Cleanup Policies → Create Cleanup Policy

    • Cleanup policy 규칙 메뉴 구성

    • 항목별 세부구성 (예시는 90일 보관 후 삭제하는 정책)

       

      항 목 설 명 설정값 비 고
      Name 규칙이름 설정 proxy_cleanup_{{보관일}}d 고유의 값 설정
      Format Repository format 설정 raw 모든 유형의 repository로 할 경우 all formats 선택
      notes 규칙에 대한 설명정보 
      옵션
      Component Age 저장된 데이터의 보관 주기 90 체크박스 선택 후 보관일 입력
       
      Component Usage 다운로드 되지 않은 데이터 보관주기 90 체크박스 선택 후 보관일 입력
      preview repository 해당 정책을 수립시 삭제되는 데이터 리스트가 보임

Repository 관리

  1. Hosted 기반의 리포지터리 생성
    • 상단 톱니바퀴 모양  → Repository → Create Repository → RAW (hosted)
    • repository 생성 메뉴구성
    • 항목별 세부설명
    항 목 설 명 설정값 비 고
    Name Repository 고유 이름 repo이름 입력 문자, 숫자로 구성되어야 하고 특수문자는( -, _ )만 허용되고 마침표로 마칠수 없음  
    Online repository 활성화 여부 체크 기본값은 체크되어 있음
    Content Disposition 컨텐츠 관리 방법 attachment Attachment / inline 중 택1, Attachment로 구성해야 파일 전송이 유용함
    Blob store 바이너리 저장 경로 default 별도 바이너리 저장소를 설정하지 않는 경우 default로 구성
    Deployment Policy 아티팩트 배포 / 업데이트 허용 여부 Allow redeploy Allow redeploy  : 재 배포허용
    Deny redeploy : 재배포 불가
    read only : 업데이트 불가, 읽기전용으로 구성
    Cleanup policies Repository 오래된 파일 삭제
    Cleanup 정책 수립했을때 적용
  2. Proxy 기반의 리포지터리 생성
    • 상단 톱니바퀴 모양  → Repository → Create Repository → RAW (Proxy)
    • repository 생성 메뉴구성
    • 항목별 세부설명
    항 목 설 명 설정값 비 고
    Name Repository 고유 이름
    문자, 숫자로 구성되어야 하고 특수문자는( -, _ )만 허용되고 마침표로 마칠수 없음  
    Online repository 활성화 여부
    기본값음 체크되어 있음
    Remote storage hosted 리포지터리 주소 입력

    Blocked 하위 Proxy 연결을 차단할 경우 체크 체크수행 
    Auto blocking enabled Remote Storage에 연결이 되지 않을 경우 하위 Proxy 연결 차단 체크수행
    Maximum component age cache된  바이너리의 최대 보관 주기 (분단위) 60
    Maximum metadata age cache된  메타데이터 최대 보관 주기 (분단위) 60
    Blob store 바이너리 저장 경로 default 별도 Storage로 구성할 경우 구성한Stoage 이름 선택
    Strict Content Type Validation 바이너리 저장시 MIME유형여부 확인 체크 해제




    Not found cache enabled 원격 저장소에 없는 컨텐츠 요청시 캐쉬로 응답 체크 수행
    Not found cache TTL 원격 저장소에 파일없다는 캐쉬 정보 보관주기 (분단위) 60
    Cleanup policies Repository 오래된 파일 삭제
    Cleanup 정책 수립했을때 적용
    HTTP authentication 원격저장소의 계정명 입력 host용 nexus에 등록한 계정정보 입력
  3. Proxy  Repo에 cache 정보 강제 삭제

    • 톱니바퀴 → Repository → Proxy타입의 Repository 선택 → Invalidate cache 선택

       

Proxy 기능 테스트

  1. 보관 주기별 Proxy 데이터 전송
    보관주기 Proxy 반영시간
    -1 수동으로 캐쉬 삭제전까지 반영 안됨
    0 즉시
    1 1분뒤 Proxy repo에 반영
    2 2분위 Proxy Repo에 반영
  2. Proxy 전달 Traffic 정보
    • 테스트정보 : Proxy에 1차버전 caching 된 상태에서 host에 새로운 바이너리가 업데이트 된 경우, caching flush 후 다시 caching하는 과정 확인 (flush되기 전까지 지속적 호출)

       

    • 패킷 정보 (Hosted → Proxy), 192.168.10.10 (Proxy), 192.168.100.10 (Hosted)

      15:42:31.554851 IP 192.168.10.10.42224 > 192.168.100.10.88081: Flags [S], seq 1521618240, win 29200, options [mss 1460,nop,nop,sackOK], length 0
      15:42:31.555029 IP 192.168.100.10.88081 > 192.168.10.10.42224: Flags [S.], seq 2385004980, ack 1521618241, win 29200, options [mss 1460,nop,nop,sackOK], length 0
      15:42:31.555223 IP 192.168.10.10.42224 > 192.168.100.10.88081: Flags [.], ack 1, win 29200, length 0
      15:42:31.555234 IP 192.168.10.10.42224 > 192.168.100.10.88081: Flags [P.], seq 1:262, ack 1,
컨테이너&가상화

onlyoffice를 컨테이너 기반에서 설치하기

MSoffice를 대체할 수 있는 offic중에 onlyoffice가 있어서 한번 설치해봤습니다.

 

  1. Onlyoffice용 데이터 디렉토리 생성
    $ mkdir /data/office/CommunityServer/{logs,data,lib,db}
    $ mkdir /data/office/ControlPanel/{logs,data,lib,db}
  2. onlyoffice용 DB구성 저는 기존에 실행중인 DB용 컨테이너가 있어서 기존 DB컨테이너에 DB인스턴스와 계정만 추가할께요
    mysql> CREATE USER 'onlyoffice_user'@'localhost' IDENTIFIED BY 'onlyoffice_pass';
    mysql> CREATE USER 'mail_admin'@'localhost' IDENTIFIED BY 'Isadmin123';
    mysql> GRANT ALL PRIVILEGES ON * . * TO 'onlyoffice_user'@'%' IDENTIFIED BY 'onlyoffice_pass';
    mysql> GRANT ALL PRIVILEGES ON * . * TO 'mail_admin'@'%' IDENTIFIED BY 'Isadmin123';
    mysql> FLUSH PRIVILEGES;
  3. Control Panel 띄우기
    $ docker run -d --net=test --restart=always --dns 219.250.36.130 -d \
    -v /var/run/docker.sock:/var/run/docker.sock \
    -v /data/office/CommunityServer/data:/app/onlyoffice/CommunityServer/data \
    -v /data/office/ControlPanel/data:/var/www/onlyoffice-controlpanel/Data \
    -v /data/office/ControlPanel/logs:/var/log/onlyoffice-controlpanel \
    --name office_panel onlyoffice/controlpanel
  4. 커뮤니티용 onlyoffice 서버 Container 띄우기
    $ docker run -d --net=test --restart=always --dns 219.250.36.130 -p 80:80 -p 443:443 -p 5222:5222 \
    -e MYSQL_SERVER_ROOT_PASSWORD=dbpass \
    -e MYSQL_SERVER_DB_NAME=onlyoffice \
    -e MYSQL_SERVER_HOST=db \
    -e MYSQL_SERVER_USER=onlyoffice_user \
    -e MYSQL_SERVER_PASS='onlyoffice_pass' \
    -e CONTROL_PANEL_PORT_80_TCP=80 \
    -e CONTROL_PANEL_PORT_80_TCP_ADDR=office_panel \
    -v /data/office/CommunityServer/letsencrypt:/etc/letsencrypt \
    -v /sys/fs/cgroup:/sys/fs/cgroup:ro \
    -v /data/office/CommunityServer/data:/var/www/onlyoffice/Data \
    -v /data/office/CommunityServer/logs:/var/log/onlyoffice \
    -v /data/office/DocumentServer/data:/var/www/onlyoffice/DocumentServerData \
    --name office onlyoffice/communityserver
  5. 브라우져에서 접속하기 http://{{서버IP}}

Reference

 

컨테이너&가상화

pip로 docker-compose 설치 오류

  1. 환경정보
    • os : centos7
    • pip : pip2 / pip3
  2. 작업사항
    • pip2가 설치되어 있는 서버에서 새로운 pip를 올리려고 했더니 python3에서 사용해야 한다함
    • pip 커맨드가 pip2로 심볼릭 링크 걸려있는 상태에서 pip3로 교체
      $> ls -l pip
      /usr/bin/pip -> /usr/bin/pip2
      $> ln -snf /usr/bin/pip3 /usr/bin/pip
      $> pip -V
      pip 21.0.1 from /usr/local/lib/python3.6/site-packages/pip (python 3.6)
  3. pip를 통해 docker-compose 설치

    $ pip install docker-compose
    ...
    Collecting cryptography>=2.5 (from paramiko>=2.4.2; extra == "ssh"->docker[ssh]<5,>=4.4.3->docker-compose)
       Downloading http://python.org/pypi/+f/2d3/2223e5b0ee029/cryptography-3.4.6.tar.gz (546kB)
        100% |████████████████████████████████| 552kB 43.7MB/s
         Complete output from command python setup.py egg_info:
         WARNING: The wheel package is not available.
         ERROR: 'pip wheel' requires the 'wheel' package. To fix this, run: pip install wheel
         Traceback (most recent call last):
           File "/usr/local/lib/python3.6/site-packages/setuptools/installer.py", line 75, in fetch_build_egg
             subprocess.check_call(cmd)
           File "/usr/lib64/python3.6/subprocess.py", line 311, in check_call
             raise CalledProcessError(retcode, cmd)
         subprocess.CalledProcessError: Command '['/usr/bin/python3', '-m', 'pip', '--disable-pip-version-check', 'wheel', '--no-deps', '-w', '/tmp/tmpn5ys3xpr', '--quiet', 'cffi>=1.12']' returned non-zero exit status 1.
     The above exception was the direct cause of the following exception: Traceback (most recent call last):   File "<string>", line 1, in <module>   File "/tmp/pip-build-0pwkcfz1/cryptography/setup.py", line 152, in <module>     rust_extensions=rust_extensions,   File "/usr/local/lib/python3.6/site-packages/setuptools/__init__.py", line 152, in setup     _install_setup_requires(attrs)   File "/usr/local/lib/python3.6/site-packages/setuptools/__init__.py", line 147, in _install_setup_requires     dist.fetch_build_eggs(dist.setup_requires)   File "/usr/local/lib/python3.6/site-packages/setuptools/dist.py", line 689, in fetch_build_eggs     replace_conflicting=True,   File "/usr/local/lib/python3.6/site-packages/pkg_resources/__init__.py", line 768, in resolve     replace_conflicting=replace_conflicting   File "/usr/local/lib/python3.6/site-packages/pkg_resources/__init__.py", line 1051, in best_match     return self.obtain(req, installer)   File "/usr/local/lib/python3.6/site-packages/pkg_resources/__init__.py", line 1063, in obtain     return installer(requirement)   File "/usr/local/lib/python3.6/site-packages/setuptools/dist.py", line 745, in fetch_build_egg     return fetch_build_egg(self, req)   File "/usr/local/lib/python3.6/site-packages/setuptools/installer.py", line 77, in fetch_build_egg     raise DistutilsError(str(e)) from e distutils.errors.DistutilsError: Command '['/usr/bin/python3', '-m', 'pip', '--disable-pip-version-check', 'wheel', '--no-deps', '-w', '/tmp/tmpn5ys3xpr', '--quiet', 'cffi>=1.12']' returned non-zero exit status 1.     =============================DEBUG ASSISTANCE=============================     If you are seeing a compilation error please try the following steps to     successfully install cryptography:     1) Upgrade to the latest pip and try again. This will fix errors for most        users. 
See: https://pip.pypa.io/en/stable/installing/#upgrading-pip     2) Read https://cryptography.io/en/latest/installation.html for specific        instructions for your platform.     3) Check our frequently asked questions for more information:        https://cryptography.io/en/latest/faq.html     4) Ensure you have a recent Rust toolchain installed:        https://cryptography.io/en/latest/installation.html#rust     5) If you are experiencing issues with Rust for *this release only* you may        set the environment variable `CRYPTOGRAPHY_DONT_BUILD_RUST=1`.     =============================DEBUG ASSISTANCE============================= ---------------------------------------- Command "python setup.py egg_info" failed with error code 1 in /tmp/pip-build-0pwkcfz1/cryptography/
    • 하지만 에러…????


  4. 발생원인
    • pip모듈을 2버전에서 3버전으로 단순히 심볼릭 링크만 바꾸었던게 문제인듯…
    • python모듈이 2버전으로 설치가 되어 있었을텐데….. 그래서 python3용으로 패키지 강제 재설치


  5. 추가작업
    $ pip install --upgrade setuptools pip --ignore-installed
     WARNING: Running pip install with root privileges is generally not a good idea. Try pip install --user instead. Collecting setuptools Downloading http://python.org/pypi/+f/0e8/6620d658c5ca8/setuptools-53.0.0-py3-none-any.whl (784kB) 100% |████████████████████████████████| 788kB 58.3MB/s Collecting pip Downloading http://python.org/pypi/+f/37f/d50e056e2aed6/pip-21.0.1-py3-none-any.whl (1.5MB) 100% |████████████████████████████████| 1.5MB 36.2MB/s Installing collected packages: setuptools, pip
  6. 다시 docker-compose 설치
    $ pip install docker-compose
     WARNING: pip is being invoked by an old script wrapper. This will fail in a future version of pip.
     Please see https://github.com/pypa/pip/issues/5599 for advice on fixing the underlying issue.
     To avoid this problem you can invoke Python with '-m pip' instead of running pip directly.
     Looking in indexes: http://python.org/pypi/simple/
     Collecting docker-compose
    ...
    Successfully installed PyYAML-5.4.1 attrs-20.3.0 bcrypt-3.2.0 cached-property-1.5.2 certifi-2020.12.5 cffi-1.14.5 chardet-4.0.0 cryptography-3.4.6 distro-1.5.0 docker-4.4.3 docker-compose-1.28.4 dockerpty-0.4.1 docopt-0.6.2 idna-2.10 importlib-metadata-3.6.0 jsonschema-3.2.0 paramiko-2.7.2 pycparser-2.20 pynacl-1.4.0 pyrsistent-0.17.3 python-dotenv-0.15.0 requests-2.25.1 six-1.15.0 texttable-1.6.3 typing-extensions-3.7.4.3 urllib3-1.26.3 websocket-client-0.57.0 zipp-3.4.0
  7. 설치 완료!
    $ pip list| grep docker
     docker             4.4.3
     docker-compose     1.28.4
     dockerpty          0.4.1
컨테이너&가상화

QEMU tcp원격접속 허용하기

  1. libvirt.conf 수정 (주석해제)
    $> vi /etc/libvirt/libvirtd.conf
    listen_tls = 0 (기본값은 TLS통신)
    listen_tcp = 1 (기본값은 비활성화)
    tcp_port = "16509" (TCP열었을때 사용하는 포트)
    listen_addr = "0.0.0.0" 허용할 IP (기본값은 모두 허용)
    mdns_name = "VMHOST-001" (MDNS환경에서 해당정보, 이 값은 같은 네트워크 안에 있는 서버들과 달라야 함)
    unix_sock_group = "libvirt" (Qemu관리그룹, 기본은 root이기 때문에 보안상 위험;…)
    unix_sock_rw_perms = "0770" (TCP통신시 적용받는 퍼미션값)
  2. /etc/sysconfig/libvirtd 수정 (주석해제)
    $> vi /etc/sysconfig/libvirtd
    ...
    LIBVIRTD_ARGS="--listen"
    ...
  3. TLS통신을 안할경우 인증정보가 없다는 오류를 회피하기 위한 방법
    $> vi /etc/sysconfig/libvirtd
    #KRB5_KTNAME=/etc/libvirt/krb5.tab
    #KRB5_KTNAME값 주석처리
    
  4. 사용자 인증구성
    $> groupadd libvirt
    $> saslpasswd2 -a libvirt svc
  5. 서비스 시작
    $> /etc/init.d/libvirtd start
컨테이너&가상화

VMware GuestOS에서 마우스 5버튼 작동안될 경우 조치방법

vmware guestOS에서 마우스 5버튼 작동이 안될 경우 조치방법 (일반적인 마우스 버튼은 작동하고 있고, 마우스 자체가 앞으로, 뒤로 등 5버튼이 있는 마우스에 한함.)

1. host에서 해당 VM이 설치된 경로에서 *.vmx 파일을 메모장으로 오픈

2. 내용중에 아래 내용 추가

...
usb.generic.allowHID = "TRUE"
mouse.vusb.enable = "TRUE"
mouse.vusb.useBasicMouse = "FALSE"
...

3. GuestOS종료 후 재실행 한다음에 마우스 5버튼 작동 여부 확인

reference

컨테이너&가상화

rancher 패스워드 초기화 방법

rancher를 설치하고 나서 root사용자 패스워드 초기화가 필요한 경우 수행하면 됩니다.

작업방법

  1. rancher pod 정보 확인
    $> kubectl get pod -A | grep rancher
    cattle-system                  rancher-c56764479-nt7nb                             1/1     Running   3 (3d2h ago)    45d
    

  2. rancher pod가 어느노드에서 떴는지 확인
    $> kubectl describe pod rancher-c56764479-nt7nb -n cattle-system
    Name:                 rancher-c56764479-nt7nb
    Namespace:            cattle-system
    Priority:             1000000000
    Priority Class Name:  rancher-critical
    Node:                 test2/192.168.0.25
    Start Time:           Thu, 21 Sep 2023 00:51:03 +0900
    Labels:               app=rancher
                          pod-template-hash=c56764479
                          release=rancher
    Annotations:          cni.projectcalico.org/containerID: f44aa8d23b68a7376bc29e0bc66605670dc5a87daa9c668bd4f241aa4b63b492
                          cni.projectcalico.org/podIP: 10.233.64.107/32
                          cni.projectcalico.org/podIPs: 10.233.64.107/32
    Status:               Running
    IP:                   10.233.64.107
    IPs:
      IP:           10.233.64.107
    Controlled By:  ReplicaSet/rancher-c56764479
    

  3. rancher 컨테이너 정보 확인 (test2노드에서 실행)
    $> crictl ps | grep rancher
    a68dc79ee1d8c       81ee0878ffcdc       26 minutes ago      Running             rancher                              3                   f44aa8d23b68a       rancher-c56764479-nt7nb
    

  4. rancher 컨테이너로 패스워드 초기화 (test2노드에서 실행)
    $> crictl exec a68dc79ee1d8c reset-password
    W1104 17:53:31.241158     386 client_config.go:617] Neither --kubeconfig nor --master was specified.  Using the inClusterConfig.  This might not work.
    New password for default admin user (user-q2kl4):
    123123

reference

pip로 패키지 설치시 requests메시지 출력하면서 에러나는 경우

Centos7버전에서 pip로 docker-compose 패키지를 설치할때 경험했던 현상입니다.

docker를 rpm으로 설치할 경우 dependency 패키지로 설치되는 경우도 있는데, 그렇지 않은 경우 pip를 통해서 docker-compose 설치가 가능합니다.

물론 이건 docker-compose 를 설치할때 확인했던 현상이였고, 원인은 python-requests패키지 충돌로 인한 문제였으니 pip를 통해 다른 패키지를 설치하려고 하면 동일한 현상이 발생할것 같아요.

 

  1. 에러메시지 경험
    $> pip install docker-compose
    ...
    ...
    ERROR: Cannot uninstall 'requests'. It is a distutils installed project and thus we cannot accurately determine which files belong to it which would lead to only a partial uninstall.

  2. 우회방안 설치되어 있는 패키지는 추가 설치 없이 구성하도록 설치
    $> pip install docker-compose --ignore-installed
    DEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained. pip 21.0 will drop support for Python 2.7 in January 2021. More details about Python 2 support in pip can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support
    WARNING: Running pip install with root privileges is generally not a good idea. Try `pip install --user` instead.
    Collecting docker-compose
      Using cached docker_compose-1.26.2-py2.py3-none-any.whl (139 kB)
    Collecting awscli
      Downloading awscli-1.19.112-py2.py3-none-any.whl (3.6 MB)
         |████████████████████████████████| 3.6 MB 2.2 MB/s
    Collecting botocore
      Downloading botocore-1.20.112-py2.py3-none-any.whl (7.7 MB)
         |████████████████████████████████| 7.7 MB 61.8 MB/s
    Collecting distro<2,>=1.5.0
      Using cached distro-1.6.0-py2.py3-none-any.whl (19 kB)
    Collecting backports.ssl-match-hostname<4,>=3.5; python_version < "3.5"
      Using cached backports.ssl_match_hostname-3.7.0.1.tar.gz (5.7 kB)
    Collecting docopt<1,>=0.6.1
      Using cached docopt-0.6.2.tar.gz (25 kB)
    Collecting dockerpty<1,>=0.4.1
      Using cached dockerpty-0.4.1.tar.gz (13 kB)
    Collecting texttable<2,>=0.9.0
      Using cached texttable-1.6.4-py2.py3-none-any.whl (10 kB)
    Collecting cached-property<2,>=1.2.0
      Using cached cached_property-1.5.2-py2.py3-none-any.whl (7.6 kB)
    Collecting PyYAML<6,>=3.10
      Using cached PyYAML-5.4.1-cp27-cp27mu-manylinux1_x86_64.whl (574 kB)
    Collecting subprocess32<4,>=3.5.4; python_version < "3.2"
      Using cached subprocess32-3.5.4-cp27-cp27mu-manylinux2014_x86_64.whl (69 kB)
    Collecting backports.shutil-get-terminal-size==1.0.0; python_version < "3.3"
      Using cached backports.shutil_get_terminal_size-1.0.0-py2.py3-none-any.whl (6.5 kB)
    Collecting enum34<2,>=1.0.4; python_version < "3.4"
      Using cached enum34-1.1.10-py2-none-any.whl (11 kB)
    Collecting requests<3,>=2.20.0
      Using cached requests-2.26.0-py2.py3-none-any.whl (62 kB)
    Collecting websocket-client<1,>=0.32.0
      Using cached websocket_client-0.59.0-py2.py3-none-any.whl (67 kB)
    Collecting jsonschema<4,>=2.5.1
      Using cached jsonschema-3.2.0-py2.py3-none-any.whl (56 kB)
    Collecting python-dotenv<1,>=0.13.0
      Using cached python_dotenv-0.18.0-py2.py3-none-any.whl (18 kB)
    Collecting six<2,>=1.3.0
      Using cached six-1.16.0-py2.py3-none-any.whl (11 kB)
    Collecting ipaddress<2,>=1.0.16; python_version < "3.3"
      Using cached ipaddress-1.0.23-py2.py3-none-any.whl (18 kB)
    Collecting docker[ssh]<5,>=4.2.2
      Using cached docker-4.4.4-py2.py3-none-any.whl (147 kB)
    Collecting rsa<=4.5.0,>=3.1.2; python_version == "2.7"
      Downloading rsa-4.5-py2.py3-none-any.whl (36 kB)
    Collecting colorama<0.4.4,>=0.2.5
      Downloading colorama-0.4.3-py2.py3-none-any.whl (15 kB)
    Collecting docutils<0.16,>=0.10
      Downloading docutils-0.15.2-py2-none-any.whl (548 kB)
         |████████████████████████████████| 548 kB 59.7 MB/s
    Collecting s3transfer<0.5.0,>=0.4.0
      Downloading s3transfer-0.4.2-py2.py3-none-any.whl (79 kB)
         |████████████████████████████████| 79 kB 13.1 MB/s
    Collecting jmespath<1.0.0,>=0.7.1
      Downloading jmespath-0.10.0-py2.py3-none-any.whl (24 kB)
    Collecting python-dateutil<3.0.0,>=2.1
      Downloading python_dateutil-2.8.2-py2.py3-none-any.whl (247 kB)
         |████████████████████████████████| 247 kB 68.0 MB/s
    Collecting urllib3<1.27,>=1.25.4
      Using cached urllib3-1.26.7-py2.py3-none-any.whl (138 kB)
    Collecting certifi>=2017.4.17
      Using cached certifi-2021.10.8-py2.py3-none-any.whl (149 kB)
    Collecting idna<3,>=2.5; python_version < "3"
      Using cached idna-2.10-py2.py3-none-any.whl (58 kB)
    Collecting chardet<5,>=3.0.2; python_version < "3"
      Using cached chardet-4.0.0-py2.py3-none-any.whl (178 kB)
    Collecting functools32; python_version < "3"
      Using cached functools32-3.2.3-2.tar.gz (31 kB)
    Collecting pyrsistent>=0.14.0
      Using cached pyrsistent-0.16.1.tar.gz (108 kB)
    Collecting setuptools
      Using cached setuptools-44.1.1-py2.py3-none-any.whl (583 kB)
    Collecting importlib-metadata; python_version < "3.8"
      Using cached importlib_metadata-2.1.2-py2.py3-none-any.whl (10 kB)
    Collecting attrs>=17.4.0
      Using cached attrs-21.4.0-py2.py3-none-any.whl (60 kB)
    Collecting typing; python_version < "3.5"
      Using cached typing-3.10.0.0-py2-none-any.whl (26 kB)
    Collecting paramiko>=2.4.2; extra == "ssh"
      Using cached paramiko-2.9.1-py2.py3-none-any.whl (210 kB)
    Collecting pyasn1>=0.1.3
      Downloading pyasn1-0.4.8-py2.py3-none-any.whl (77 kB)
         |████████████████████████████████| 77 kB 11.8 MB/s
    Collecting futures<4.0.0,>=2.2.0; python_version == "2.7"
      Downloading futures-3.3.0-py2-none-any.whl (16 kB)
    Collecting contextlib2; python_version < "3"
      Using cached contextlib2-0.6.0.post1-py2.py3-none-any.whl (9.8 kB)
    Collecting pathlib2; python_version < "3"
      Using cached pathlib2-2.3.6-py2.py3-none-any.whl (17 kB)
    Collecting zipp>=0.5
      Using cached zipp-1.2.0-py2.py3-none-any.whl (4.8 kB)
    Collecting configparser>=3.5; python_version < "3"
      Using cached configparser-4.0.2-py2.py3-none-any.whl (22 kB)
    Collecting cryptography>=2.5
      Using cached cryptography-3.3.2-cp27-cp27mu-manylinux2010_x86_64.whl (2.6 MB)
    Collecting pynacl>=1.0.1
      Using cached PyNaCl-1.4.0-cp27-cp27mu-manylinux1_x86_64.whl (964 kB)
    Collecting bcrypt>=3.1.3
      Using cached bcrypt-3.1.7-cp27-cp27mu-manylinux1_x86_64.whl (59 kB)
    Collecting scandir; python_version < "3.5"
      Using cached scandir-1.10.0.tar.gz (33 kB)
    Collecting cffi>=1.12
      Using cached cffi-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl (393 kB)
    Collecting pycparser
      Using cached pycparser-2.21-py2.py3-none-any.whl (118 kB)
    Using legacy 'setup.py install' for backports.ssl-match-hostname, since package 'wheel' is not installed.
    Using legacy 'setup.py install' for docopt, since package 'wheel' is not installed.
    Using legacy 'setup.py install' for dockerpty, since package 'wheel' is not installed.
    Using legacy 'setup.py install' for functools32, since package 'wheel' is not installed.
    Using legacy 'setup.py install' for pyrsistent, since package 'wheel' is not installed.
    Using legacy 'setup.py install' for scandir, since package 'wheel' is not installed.
    Installing collected packages: distro, backports.ssl-match-hostname, docopt, six, dockerpty, texttable, cached-property, PyYAML, subprocess32, backports.shutil-get-terminal-size, enum34, certifi, urllib3, idna, chardet, requests, websocket-client, functools32, pyrsistent, setuptools, contextlib2, scandir, pathlib2, zipp, configparser, importlib-metadata, attrs, jsonschema, typing, python-dotenv, ipaddress, pycparser, cffi, cryptography, pynacl, bcrypt, paramiko, docker, docker-compose, pyasn1, rsa, colorama, docutils, jmespath, python-dateutil, botocore, futures, s3transfer, awscli
        Running setup.py install for backports.ssl-match-hostname ... done
        Running setup.py install for docopt ... done
        Running setup.py install for dockerpty ... done
        Running setup.py install for functools32 ... done
        Running setup.py install for pyrsistent ... done
        Running setup.py install for scandir ... done
    Successfully installed PyYAML-5.4.1 attrs-21.4.0 awscli-1.19.112 backports.shutil-get-termin