diff --git a/.github/workflows/AWS-EC2-Tests-Old.yaml b/.github/workflows/AWS-EC2-Tests-Old.yaml new file mode 100644 index 00000000..7f6a96f2 --- /dev/null +++ b/.github/workflows/AWS-EC2-Tests-Old.yaml @@ -0,0 +1,232 @@ +run-name: AWS EC2 - ${{ github.event_name }} by @${{ github.actor }} +# name: Fabric Build +concurrency: aws-workflow +on: + workflow_dispatch: + inputs: + this_repo_branch: + type: string + description: Select the branch to use from this repo + default: main + windows_repo_branch: + type: string + description: Select the branch to use from windows-host-configuration repo + default: main + terraform_run_destroy: + type: choice + options: + - true + - false + fail_first_test: + type: choice + options: + - false + - true + fail_second_test: + type: choice + options: + - false + - true + push: + branches: + - 'arthur/secret-manager' + # pull_request: + + schedule: + # only runs on default branch + # * is a special character in YAML so you have to quote this string + - cron: '15 */12 * * *' + +jobs: + + Run-Test-Build: + strategy: + max-parallel: 30 + fail-fast: false + matrix: + test_groups: ['base_defaults'] + runs-on: ubuntu-latest + permissions: + id-token: write + contents: write + pull-requests: write + issues: read + checks: write + env: + TF_VAR_REGION: "us-west-2" + THIS_REPO_BRANCH: main + WINDOWS_REPO_BRANCH: main + TERRAFORM_RUN_DESTROY: true + FAIL_FIRST_TEST: false + FAIL_SECOND_TEST: false + WORK_DIR: test_code + CLOUD: aws + MODULE: aws_machines + # TF_LOG: DEBUG + + steps: + # GCP Login + # This is key generated in GCP console for service account + - id: 'auth' + uses: 'google-github-actions/auth@v0' + with: + credentials_json: ${{ secrets.GCP_CREDENTIALS }} + + - name: 'Set up Cloud SDK' + uses: 'google-github-actions/setup-gcloud@v0' + with: + project_id: ${{ secrets.GCP_PROJECT_ID }} + + - id: 'secrets' + uses: 'google-github-actions/get-secretmanager-secrets@v1' + with: + secrets: |- + TF_VAR_PUBLIC_KEY:projects/896946759488/secrets/TF_VAR_PUBLIC_KEY + THUNDERDOME_AWS_ROLE:projects/896946759488/secrets/THUNDERDOME_AWS_ROLE + PRIVATE_KEY:projects/896946759488/secrets/PRIVATE_KEY + STAGE_CUSTOMER_ID:projects/896946759488/secrets/STAGE_CUSTOMER_ID + STAGE_DATASTREAM_TOKEN:projects/896946759488/secrets/STAGE_DATASTREAM_TOKEN + STAGE_DOMAIN:projects/896946759488/secrets/STAGE_DOMAIN + STAGE_USER_EMAIL:projects/896946759488/secrets/STAGE_USER_EMAIL + STAGE_USER_PASSWORD:projects/896946759488/secrets/STAGE_USER_PASSWORD + + + + # AWS Login - orig role - has to occur before checkout + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ steps.secrets.outputs.THUNDERDOME_AWS_ROLE }} + aws-region: ${{ env.TF_VAR_REGION }} + + - name: Set code repo #Set branches based on via Workflow Dispatch or Pull Request + run: | + if ${{ github.event.inputs.this_repo_branch != '' }}; then + echo "THIS_REPO_BRANCH=refs/heads/${{ github.event.inputs.this_repo_branch }}" >> $GITHUB_ENV + echo "WINDOWS_REPO_BRANCH=refs/heads/${{ github.event.inputs.windows_repo_branch }}" >> $GITHUB_ENV + elif ${{ github.event_name == 'pull_request' }}; then + echo "THIS_REPO_BRANCH=refs/heads/${{ github.head_ref }}" >> $GITHUB_ENV + echo "WINDOWS_REPO_BRANCH=refs/heads/${{ env.WINDOWS_REPO_BRANCH}}" >> $GITHUB_ENV + fi + + - name: Set env var + run: | + echo "TF_VAR_PUBLIC_KEY=${{ steps.secrets.outputs.TF_VAR_PUBLIC_KEY }}" >> $GITHUB_ENV + + - name: Check out repository code + uses: actions/checkout@v3 + with: + ref: ${{ 
env.THIS_REPO_BRANCH }} + + - name: Set contexts + run: | + mkdir context + echo '${{ toJSON(github) }}' > context/github_context.json + echo '${{ toJSON(matrix) }}' > context/matrix_context.json + echo '${{ steps.secrets.outputs.PRIVATE_KEY }}' > context/private_key + + working-directory: "${{ env.WORK_DIR }}/python_scripts" + + - name: workflow helper + run: | + python3 -c "from workflow_tasks import set_custom_vars; set_custom_vars(context_dir='context')" + + python3 -c "from workflow_tasks import tf_override_file; tf_override_file(cloud=\"${{ env.CLOUD }}\", test_group=\"${{ matrix.test_groups }}\")" + + # !!! vvvvv THIS OVERWRITES MAIN.TF FILE for specific cloud module vvvvv !!! + python3 -c "from workflow_tasks import tf_main_file; tf_main_file(module=\"${{ env.MODULE }}\")" + + python3 -c "from workflow_tasks import tf_output_file; tf_output_file(module=\"${{ env.MODULE }}\")" + + python3 -c "from workflow_tasks import config_ini; config_ini(custid=\"${{ steps.secrets.outputs.STAGE_CUSTOMER_ID }}\", domain=\"${{ steps.secrets.outputs.STAGE_DOMAIN }}\", token=\"${{ steps.secrets.outputs.STAGE_DATASTREAM_TOKEN }}\",user_email=\"${{ steps.secrets.outputs.STAGE_USER_EMAIL }}\",user_password=\"${{ steps.secrets.outputs.STAGE_USER_PASSWORD }}\")" + + working-directory: "${{ env.WORK_DIR }}/python_scripts" + + - name: Print Environment Variables - troubleshooting + run: | + env | sort -f + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v2 + with: + terraform_wrapper: false + + - name: terraform tasks + run: | + terraform version + + terraform init + + terraform validate + working-directory: "${{ env.WORK_DIR }}" + + - name: terraform apply + run: | + terraform apply -auto-approve + working-directory: "${{ env.WORK_DIR }}" + + # Run tests + - name: run fabric tests python script + run: | + # create output directory for archive files + mkdir file_outputs + mkdir log_outputs + + # install dependencies + pip3 install -r requirements.txt + + # run tests + fab test -a ${{ env.FAIL_FIRST_TEST }} -b ${{ env.THIS_REPO_BRANCH }} -w ${{ env.WINDOWS_REPO_BRANCH }} -o "1: run fabric tests python script" + working-directory: "${{ env.WORK_DIR }}/python_scripts" + + - name: Retry tests + if: ${{ env.TEST_RESULT == 'FAIL' }} + run: | + # run tests + fab test -a ${{ env.FAIL_SECOND_TEST }} -o "2: Retry tests" -w ${{ env.WINDOWS_REPO_BRANCH }} -b ${{ env.THIS_REPO_BRANCH }} + + working-directory: "${{ env.WORK_DIR }}/python_scripts" + + - name: cleanup + if: always() + run: | + rm -f python_scripts/config.ini + + sed -i 's/${{ steps.secrets.outputs.STAGE_DATASTREAM_TOKEN }}/******/g' ./python_scripts/file_outputs/* + sed -i 's/${{ steps.secrets.outputs.STAGE_CUSTOMER_ID }}/******/g' ./python_scripts/file_outputs/* + sed -i 's/${{ steps.secrets.outputs.STAGE_USER_EMAIL}}/******/g' ./python_scripts/file_outputs/* + sed -i 's/${{ steps.secrets.outputs.STAGE_USER_PASSWORD}}/******/g' ./python_scripts/file_outputs/* + + + sed -i 's/${{ steps.secrets.outputs.STAGE_DATASTREAM_TOKEN }}/******/g' ./python_scripts/log_outputs/* + sed -i 's/${{ steps.secrets.outputs.STAGE_CUSTOMER_ID }}/******/g' ./python_scripts/log_outputs/* + sed -i 's/${{ steps.secrets.outputs.STAGE_USER_EMAIL }}/******/g' ./python_scripts/log_outputs/* + sed -i 's/${{ steps.secrets.outputs.STAGE_USER_PASSWORD }}/******/g' ./python_scripts/log_outputs/* + + working-directory: "${{ env.WORK_DIR }}" + + - name: Archive test results + uses: actions/upload-artifact@v3 + with: + name: file_outputs + path: | + 
/home/runner/work/linux-host-configuration-scripts/linux-host-configuration-scripts/test_code/python_scripts/file_outputs/ + /home/runner/work/linux-host-configuration-scripts/linux-host-configuration-scripts/test_code/python_scripts/log_outputs/ + retention-days: 1 + + - name: terraform destroy + if: always() + run: | + echo "Value of input ${{ env.TERRAFORM_RUN_DESTROY == 'true' }}" + + if ${{ env.TERRAFORM_RUN_DESTROY == 'true' }}; then + terraform destroy -auto-approve + fi + working-directory: "${{ env.WORK_DIR }}" + + - name: Fail Check + if: ${{ env.TEST_RESULT == 'FAIL' }} + uses: actions/github-script@v3 + with: + script: | + core.setFailed('Fabric tests failed') diff --git a/.github/workflows/AWS-EC2-Tests.yaml b/.github/workflows/AWS-EC2-Tests.yaml index 932ac049..396169d5 100644 --- a/.github/workflows/AWS-EC2-Tests.yaml +++ b/.github/workflows/AWS-EC2-Tests.yaml @@ -1,36 +1,9 @@ run-name: AWS EC2 - ${{ github.event_name }} by @${{ github.actor }} -# name: Fabric Build +name: PR Tests V2 concurrency: aws-workflow on: workflow_dispatch: - inputs: - this_repo_branch: - type: string - description: Select the branch to use from this repo - default: main - windows_repo_branch: - type: string - description: Select the branch to use from windows-host-configuration repo - default: main - terraform_run_destroy: - type: choice - options: - - true - - false - fail_first_test: - type: choice - options: - - false - - true - fail_second_test: - type: choice - options: - - false - - true - push: - branches: - - 'arthur/secret-manager' - pull_request: + # pull_request: schedule: # only runs on default branch @@ -54,179 +27,45 @@ jobs: checks: write env: TF_VAR_REGION: "us-west-2" - THIS_REPO_BRANCH: main - WINDOWS_REPO_BRANCH: main - TERRAFORM_RUN_DESTROY: true - FAIL_FIRST_TEST: false - FAIL_SECOND_TEST: false - WORK_DIR: test_code - CLOUD: aws - MODULE: aws_machines + WORK_DIR: test_code_v2 # TF_LOG: DEBUG steps: - # GCP Login - # This is key generated in GCP console for service account - - id: 'auth' - uses: 'google-github-actions/auth@v0' - with: - credentials_json: ${{ secrets.GCP_CREDENTIALS }} - - - name: 'Set up Cloud SDK' - uses: 'google-github-actions/setup-gcloud@v0' - with: - project_id: ${{ secrets.GCP_PROJECT_ID }} - - - id: 'secrets' - uses: 'google-github-actions/get-secretmanager-secrets@v1' - with: - secrets: |- - TF_VAR_PUBLIC_KEY:projects/896946759488/secrets/TF_VAR_PUBLIC_KEY - THUNDERDOME_AWS_ROLE:projects/896946759488/secrets/THUNDERDOME_AWS_ROLE - PRIVATE_KEY:projects/896946759488/secrets/PRIVATE_KEY - STAGE_CUSTOMER_ID:projects/896946759488/secrets/STAGE_CUSTOMER_ID - STAGE_DATASTREAM_TOKEN:projects/896946759488/secrets/STAGE_DATASTREAM_TOKEN - STAGE_DOMAIN:projects/896946759488/secrets/STAGE_DOMAIN - STAGE_USER_EMAIL:projects/896946759488/secrets/STAGE_USER_EMAIL - STAGE_USER_PASSWORD:projects/896946759488/secrets/STAGE_USER_PASSWORD - - - # AWS Login - orig role - has to occur before checkout - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v1 with: - role-to-assume: ${{ steps.secrets.outputs.THUNDERDOME_AWS_ROLE }} + role-to-assume: ${{ secrets.THUNDERDOME_AWS_ROLE }} aws-region: ${{ env.TF_VAR_REGION }} - - name: Set code repo #Set branches based on via Workflow Dispatch or Pull Request - run: | - if ${{ github.event.inputs.this_repo_branch != '' }}; then - echo "THIS_REPO_BRANCH=refs/heads/${{ github.event.inputs.this_repo_branch }}" >> $GITHUB_ENV - echo "WINDOWS_REPO_BRANCH=refs/heads/${{ github.event.inputs.windows_repo_branch }}" 
>> $GITHUB_ENV - elif ${{ github.event_name == 'pull_request' }}; then - echo "THIS_REPO_BRANCH=refs/heads/${{ github.head_ref }}" >> $GITHUB_ENV - echo "WINDOWS_REPO_BRANCH=refs/heads/${{ env.WINDOWS_REPO_BRANCH}}" >> $GITHUB_ENV - fi - - - name: Set env var - run: | - echo "TF_VAR_PUBLIC_KEY=${{ steps.secrets.outputs.TF_VAR_PUBLIC_KEY }}" >> $GITHUB_ENV - name: Check out repository code - uses: actions/checkout@v3 + uses: actions/checkout@v4 + + - name: Setup Python + uses: actions/setup-python@v5 with: - ref: ${{ env.THIS_REPO_BRANCH }} - - - name: Set contexts - run: | - mkdir context - echo '${{ toJSON(github) }}' > context/github_context.json - echo '${{ toJSON(matrix) }}' > context/matrix_context.json - echo '${{ steps.secrets.outputs.PRIVATE_KEY }}' > context/private_key - - working-directory: "${{ env.WORK_DIR }}/python_scripts" + python-version: '3.10' - - name: workflow helper + - name: Run Tests run: | - python3 -c "from workflow_tasks import set_custom_vars; set_custom_vars(context_dir='context')" - - python3 -c "from workflow_tasks import tf_override_file; tf_override_file(cloud=\"${{ env.CLOUD }}\", test_group=\"${{ matrix.test_groups }}\")" - - # !!! vvvvv THIS OVERWRITES MAIN.TF FILE for specific cloud module vvvvv !!! - python3 -c "from workflow_tasks import tf_main_file; tf_main_file(module=\"${{ env.MODULE }}\")" - - python3 -c "from workflow_tasks import tf_output_file; tf_output_file(module=\"${{ env.MODULE }}\")" - - python3 -c "from workflow_tasks import config_ini; config_ini(custid=\"${{ steps.secrets.outputs.STAGE_CUSTOMER_ID }}\", domain=\"${{ steps.secrets.outputs.STAGE_DOMAIN }}\", token=\"${{ steps.secrets.outputs.STAGE_DATASTREAM_TOKEN }}\",user_email=\"${{ steps.secrets.outputs.STAGE_USER_EMAIL }}\",user_password=\"${{ steps.secrets.outputs.STAGE_USER_PASSWORD }}\")" - - working-directory: "${{ env.WORK_DIR }}/python_scripts" + pip install -r requirements.txt + python main.py + working-directory: "${{ env.WORK_DIR }}/python" + env: + OBSERVE_CUSTOMER: ${{ secrets.TERRAFORM_MODULES_TEST_OBSERVE_CUSTOMER }} + OBSERVE_DOMAIN: ${{ secrets.TERRAFORM_MODULES_TEST_OBSERVE_DOMAIN }} + OBSERVE_USER_EMAIL: ${{ secrets.TERRAFORM_MODULES_TEST_OBSERVE_USER_EMAIL }} + OBSERVE_USER_PASSWORD: ${{ secrets.TERRAFORM_MODULES_TEST_OBSERVE_USER_PASSWORD }} + O2_OBSERVE_TOKEN: ${{ secrets.O2_OBSERVE_TOKEN }} - name: Print Environment Variables - troubleshooting run: | env | sort -f - - name: Setup Terraform - uses: hashicorp/setup-terraform@v2 - with: - terraform_wrapper: false - - - name: terraform tasks - run: | - terraform version - - terraform init - - terraform validate - working-directory: "${{ env.WORK_DIR }}" - - - name: terraform apply - run: | - terraform apply -auto-approve - working-directory: "${{ env.WORK_DIR }}" - - # Run tests - - name: run fabric tests python script - run: | - # create output directory for archive files - mkdir file_outputs - mkdir log_outputs - - # install dependencies - pip3 install -r requirements.txt - - # run tests - fab test -a ${{ env.FAIL_FIRST_TEST }} -b ${{ env.THIS_REPO_BRANCH }} -w ${{ env.WINDOWS_REPO_BRANCH }} -o "1: run fabric tests python script" - working-directory: "${{ env.WORK_DIR }}/python_scripts" - - - name: Retry tests - if: ${{ env.TEST_RESULT == 'FAIL' }} - run: | - # run tests - fab test -a ${{ env.FAIL_SECOND_TEST }} -o "2: Retry tests" -w ${{ env.WINDOWS_REPO_BRANCH }} -b ${{ env.THIS_REPO_BRANCH }} - - working-directory: "${{ env.WORK_DIR }}/python_scripts" - - - name: cleanup - if: always() - run: | - rm -f 
python_scripts/config.ini - - sed -i 's/${{ steps.secrets.outputs.STAGE_DATASTREAM_TOKEN }}/******/g' ./python_scripts/file_outputs/* - sed -i 's/${{ steps.secrets.outputs.STAGE_CUSTOMER_ID }}/******/g' ./python_scripts/file_outputs/* - sed -i 's/${{ steps.secrets.outputs.STAGE_USER_EMAIL}}/******/g' ./python_scripts/file_outputs/* - sed -i 's/${{ steps.secrets.outputs.STAGE_USER_PASSWORD}}/******/g' ./python_scripts/file_outputs/* - - - sed -i 's/${{ steps.secrets.outputs.STAGE_DATASTREAM_TOKEN }}/******/g' ./python_scripts/log_outputs/* - sed -i 's/${{ steps.secrets.outputs.STAGE_CUSTOMER_ID }}/******/g' ./python_scripts/log_outputs/* - sed -i 's/${{ steps.secrets.outputs.STAGE_USER_EMAIL }}/******/g' ./python_scripts/log_outputs/* - sed -i 's/${{ steps.secrets.outputs.STAGE_USER_PASSWORD }}/******/g' ./python_scripts/log_outputs/* - - working-directory: "${{ env.WORK_DIR }}" - - - name: Archive test results - uses: actions/upload-artifact@v3 - with: - name: file_outputs - path: | - /home/runner/work/linux-host-configuration-scripts/linux-host-configuration-scripts/test_code/python_scripts/file_outputs/ - /home/runner/work/linux-host-configuration-scripts/linux-host-configuration-scripts/test_code/python_scripts/log_outputs/ - retention-days: 1 - - - name: terraform destroy - if: always() - run: | - echo "Value of input ${{ env.TERRAFORM_RUN_DESTROY == 'true' }}" - - if ${{ env.TERRAFORM_RUN_DESTROY == 'true' }}; then - terraform destroy -auto-approve - fi - working-directory: "${{ env.WORK_DIR }}" - - name: Fail Check if: ${{ env.TEST_RESULT == 'FAIL' }} uses: actions/github-script@v3 with: script: | - core.setFailed('Fabric tests failed') + core.setFailed('Integration tests failed') diff --git a/.github/workflows/Azure-Compute-Tests.yaml b/.github/workflows/Azure-Compute-Tests.yaml index b14fc80e..711d48af 100644 --- a/.github/workflows/Azure-Compute-Tests.yaml +++ b/.github/workflows/Azure-Compute-Tests.yaml @@ -26,7 +26,7 @@ on: # push: # branches: # - 'arthur/secret-manager' - pull_request: + # pull_request: schedule: # only runs on default branch diff --git a/.github/workflows/GCP-Compute-Tests.yaml b/.github/workflows/GCP-Compute-Tests.yaml index f9896e55..e4eb5030 100644 --- a/.github/workflows/GCP-Compute-Tests.yaml +++ b/.github/workflows/GCP-Compute-Tests.yaml @@ -26,7 +26,7 @@ on: push: branches: - 'arthur/secret-manager' - pull_request: + # pull_request: schedule: # only runs on default branch diff --git a/.gitignore b/.gitignore index 1b335b73..054f9ab5 100644 --- a/.gitignore +++ b/.gitignore @@ -3,8 +3,7 @@ override.tf *.tfvars .terraform* -terraform.tfstate -terraform.tfstate.backup +terraform.tfstate* tf_hosts.json __pycache__ /test_code/python_scripts/file_outputs @@ -13,4 +12,9 @@ test_code/python_scripts/testenv log_outputs /test_code/python_scripts/context /test_code/python_scripts/.idea -/test_code/outputs.tf \ No newline at end of file +/test_code/outputs.tf +/test_code_v2/python/.*env +observe_datastream_token_hostmon +observe_name_format_hostmon +aws_machines* +ephemeral_key \ No newline at end of file diff --git a/test_code_v2/aws/README.md b/test_code_v2/aws/README.md new file mode 100644 index 00000000..6eb0abd3 --- /dev/null +++ b/test_code_v2/aws/README.md @@ -0,0 +1,2 @@ + +[Creating ephemeral account and logging in ](aws_helper/SETUP_README.md) diff --git a/test_code_v2/aws/aws_helper/SETUP_README.md b/test_code_v2/aws/aws_helper/SETUP_README.md new file mode 100644 index 00000000..ccda6c15 --- /dev/null +++ b/test_code_v2/aws/aws_helper/SETUP_README.md 
@@ -0,0 +1,72 @@
+# FOR OBSERVE INTERNAL USERS
+
+## One Time Setup for Terraform AWS Sample Infrastructure
+Clone or download this repository to your local machine.
+
+Observe uses Britive for employees to check out credentials to use against AWS accounts, and uses dce to create ephemeral AWS accounts in our Blunderdome Account.
+
+There are several dependencies you need to set up the first time you use this sample infrastructure.
+
+### One Time Setup for macOS
+Upgrade bash if you don't have version 5
+```
+brew install bash
+```
+
+Install pybritive
+
+```
+pip3 install click pybritive urllib3==1.26.6
+
+# if this gives you an error, remove the `urllib3==1.26.6` from the command
+```
+
+Install DCE
+```
+# Download the zip file - works on intel chip too
+wget https://github.com/Optum/dce-cli/releases/download/v0.5.0/dce_darwin_amd64.zip
+
+# Unzip to a directory on your path
+unzip dce_darwin_amd64.zip -d /usr/local/bin
+```
+Initialize DCE
+```
+dce init
+```
+
+Update the config file
+```
+cat << EOF > ~/.dce/config.yaml
+api:
+  host: playground.observe-blunderdome.com
+  basepath: /
+  token:
+region: us-west-2
+terraform:
+  bin: null
+  source: null
+EOF
+```
+
+
+## Login
+Run the script to get a sample interactive login command
+```
+./make_me_env.sh
+```
+It will produce a command for your local user that looks like:
+```
+./make_me_env.sh --principal_id YOU@observeinc.com --email YOU@observeinc.com
+```
+
+Run that command for interactive login. You should see a screen like this (enter your email):
+
+![Login Screen](./image/LoginScreen.png)
+
+When you see the success screen, return to the terminal:
+
+![Login Success Screen](./image/CLI_success.png)
+
+Your terminal should look like:
+
+![CLI Success Screen](./image/terminal_success.png)
\ No newline at end of file
diff --git a/test_code_v2/aws/aws_helper/aws-creds b/test_code_v2/aws/aws_helper/aws-creds
new file mode 100755
index 00000000..5725b113
--- /dev/null
+++ b/test_code_v2/aws/aws_helper/aws-creds
@@ -0,0 +1,283 @@
+#!/usr/bin/env bash
+# helper script for managing awscli profiles with pybritive
+# https://www.notion.so/observeinc/How-to-Use-Britive-Access-Management-36393b713cbf41ada73a846ddabfea21
+
+set -eu
+set -o pipefail
+OBSERVE_ROOT=$(realpath "$(dirname "$0")"/..)
+
+parent_path=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
+
+
+# shellcheck source=/home/dev/observe/s/functions.sh
+# source "$parent_path/functions.sh"
+
+# format: alias|pybritive profile name
+# a pybritive profile can be listed multiple times to have multiple aliases for 'checkout'.
+# checking out any of the aliases will check out creds to all aliases for a given account.
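+# e.g. "default" and "eng" in the table below share one Britive profile, so
+# checking out either alias also refreshes the other awscli profile.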
+aliases=" +prod-cap1|AWS Cap1 Account/238922581007 (AWS Cap1)/BritiveCap1-FullAWSAdmin +prod|AWS Observe Organization/158067661102 (Observe Prod)/BritiveProd-FullAWSAdmin +prod-ap-1|AWS Observe Organization/368861046847 (observe-prod-ap-1)/BritiveAP1-FullAWSAdmin +sockshop|AWS Observe Organization/384876807807 (sockshop)/BritiveSockshop-FullAWSAdmin +o2|AWS Observe Organization/623960370597 (O2)/BritiveO2-FullAWSAdmin +default|AWS Observe Organization/723346149663 (observe)/BritiveEng-FullAWSAdmin +eng|AWS Observe Organization/723346149663 (observe)/BritiveEng-FullAWSAdmin +thunderdome|AWS Observe Organization/739672403694 (observe-thunderdome)/BritiveThunderdome-FullAWSAdmin +staging|AWS Observe Organization/802757454165 (Observe Staging)/BritiveStaging-FullAWSAdmin +tf-account|AWS Observe Organization/989541196905 (observe-tf-account)/Britivetf-FullAWSAdmin +ops|AWS Observe Organization/677999842303 (observe-ops)/BritiveOps-FullAWSAdmin +marketplace|AWS Observe Organization/608198493184 (observe-marketplace)/BritiveMarketplace-FullAWSAdmin +costbench|AWS Observe Organization/864418771691 (observe-costbench)/BritiveCostbench-FullAWSAdmin +demo|AWS Observe Organization/039787680367 (observe-demo)/BritiveDemo-FullAWSAdmin +blunderdome|AWS Blunderdome Organization/460044344528 (observe-blunderdome)/BritiveBlunderdome-FullAWSAdmin +blunderdome-user|AWS Blunderdome Organization/460044344528 (observe-blunderdome)/BritiveBlunderdome-User +" + +MODES=( + aliases + checkin + checkout + console + list +) + +export BRITIVE_TENANT=observe + +function debug() { + if [[ -v DEBUG && "${DEBUG}" == "true" ]]; then + echo -e "DEBUG - $*" >&2 + fi +} + +# Make sure pybritive is installed, configured for the observe tenant, and logged in. +function britive_setup() { + # virtualenv users do not need help with PATH + if [[ -z "${VIRTUAL_ENV:-}" ]]; then + local py3_user_bin + py3_user_bin="$(python3 -m site --user-base)/bin" + if [[ -d $py3_user_bin ]]; then + export PATH="${py3_user_bin}:${PATH}" + fi + fi + + if ! command -v pybritive >/dev/null; then + echo -n "pybritive is not installed; try running: " >&2 + + if [[ "$(uname -o)" = "Darwin" ]]; then + # OS X default python has some oddity with both the bundled and latest urllib3 version, + # so pin a working one for now. + echo "pip3 install click pybritive urllib3==1.26.6" >&2 + else + # we have to upgrade system pyopenssl because of https://github.com/pyca/pyopenssl/issues/1143 + echo "sudo pip3 install -U pyopenssl && pip3 install -U click pybritive" >&2 + fi + exit 1 + fi + + # Ensure we have a proper tenant configured. We use the alias in $BRITIVE_TENANT to ensure + # what we configure here is what gets used in later commands. + pybritive configure tenant -P -t "${BRITIVE_TENANT}.britive-app.com" -a "${BRITIVE_TENANT}" -f list + + # Ensure we have a valid login session. If we're not logged in (or our previous token has + # expired) pybritive opens a browser, this commands lets the user auth and grabs new tokens. + pybritive login + + # There's a bug where 'pybritive login' exits 0 but we're not actually logged in. Try to catch + # that and run logout/login. 
+ if pybritive ls profiles 2>&1 | grep -q -e 'You have logged out of Britive' -e 'Please login again'; then + pybritive logout || true + pybritive cache clear || true + pybritive login + fi +} + +# translate from alias -> Britive profile +function lookup_alias() { + local alias="$1" + awk -F'|' '$1 == "'"${alias}"'" {print $2; exit;}' <(echo "${aliases}") +} + +# translate from Britive profile -> main awscli profile +# if a profile is listed more than once in $aliases, we only return the first one. +function lookup_profile() { + local profile="$1" + awk -F'|' '$2 == "'"${profile}"'" {print $1; exit;}' <(echo "${aliases}") +} + +# translate from Britive profile -> list of awscli profile aliases +# if a profile is listed more than once in $aliases, we return all of the names excluding the first one. +function lookup_profile_aliases() { + local profile="$1" + awk -F'|' '$2 == "'"${profile}"'" {print $1;}' <(echo "${aliases}") | sed -e 1d +} + +# find the right Britive profile to use given user input. If the input does not +# match an alias (in $aliases), match against all available pybritive profiles. +function find_profile() { + local account="$1" + + # see if we were given an alias + local profile + profile=$(lookup_alias "${account}") + + # if we didn't match an alias, try to match a pybritive profile + if [[ -z $profile ]]; then + debug "No alias match, matching against pybritive profiles" + profile=$(pybritive ls profiles | grep -i -F "${account}" || true) + local match_count + match_count=$(echo "${profile}" | wc -l) + if [[ -z $profile ]]; then + echo "No profiles match ${account}" >&2 + exit 2 + elif [[ $match_count -gt 1 ]]; then + debug "Matching profiles (${match_count}):\n${profile}" + echo "Too many profiles match ${account}" >&2 + exit 2 + fi + fi + + debug "matched input '${account}' to profile '${profile}'" + echo "${profile}" +} + +function mode_aliases() { + echo -e "alias|profile\n${aliases}" | column -t -s"|" +} + +function usage_aliases() { + echo "aws-creds aliases" + echo -e "\t Display Britive profile aliases" +} + +function mode_checkin() { + [[ $# -eq 0 ]] && set -- default + for account; do + pybritive_checkin "$account" + done +} + +function pybritive_checkin() { + local profile + profile=$(find_profile "$1") + pybritive checkin "${profile}" + echo "Checked in profile ${profile}" +} + +function usage_checkin() { + echo "aws-creds checkin profile1 [profile2 ... profileN]" + echo -e "\t Checkin the given profiles. Same arguments as checkout." +} + +function mode_checkout() { + [[ $# -eq 0 ]] && set -- default + for account; do + pybritive_checkout "$account" + done +} + +# copy credentials & default region to another profile (called when we have aliases) +function profile_copy() { + local src="$1" + local dst="$2" + local val + + for var in aws_access_key_id aws_secret_access_key aws_session_token aws_expiration region; do + val=$(aws --profile="${src}" configure get "${var}") + aws --profile="${dst}" configure set "${var}" "${val}" + done +} + +function pybritive_checkout() { + local profile + profile=$(find_profile "$1") + + local awscli_profile awscli_profile_aliases + awscli_profile=$(lookup_profile "${profile}") + + # pybritive "-m integrate" will write credentials directly to ~/.aws/credentials under the alias + # specified by "-a". 
+    pybritive checkout -s -m integrate -a "${awscli_profile}" "${profile}"
+
+    # populate the default region for the new awscli profile
+    local region
+    region=$(aws --profile=default configure get region || true)
+    [[ -n "$region" ]] && aws --profile="${awscli_profile}" configure set region "$region"
+
+    echo "Checked out '${profile}' into awscli profile '${awscli_profile}'"
+    for profile_alias in $(lookup_profile_aliases "${profile}"); do
+        profile_copy "${awscli_profile}" "${profile_alias}"
+        echo "Checked out '${profile}' into awscli profile '${profile_alias}'"
+    done
+}
+
+function usage_checkout() {
+    echo "aws-creds checkout profile1 [profile2 ... profileN]"
+    echo -e "\t Check out the given profile. profile can be one of:"
+    echo -e "\t - a predefined alias (run 'aws-creds aliases' to see a list)"
+    echo -e "\t - anything matching a pybritive profile name (e.g. an account number)"
+    echo -e "\t If no profile is provided, check out the 'default' alias."
+}
+
+function mode_console() {
+    local awscli_profile
+    local profile
+
+    [[ $# -eq 0 ]] && set -- default
+    profile=$(find_profile "$1")
+    awscli_profile=$(lookup_profile "${profile}")
+
+    # make sure the profile exists & is not expired
+    if ! aws --profile="${awscli_profile}" sts get-caller-identity >/dev/null 2>&1; then
+        mode_checkout "${profile}"
+    fi
+
+    pybritive aws console -p "${awscli_profile}"
+}
+
+function usage_console() {
+    echo "aws-creds console profile"
+    echo -e "\t Load the AWS console for the given profile. profile can be one of:"
+    echo -e "\t - a predefined alias (run 'aws-creds aliases' to see a list)"
+    echo -e "\t - anything matching a pybritive profile name (e.g. an account number)"
+    echo -e "\t If no profile is provided, loads the console for the 'default' alias."
+}
+
+function mode_list() {
+    pybritive ls profiles | grep '^AWS'
+}
+
+function usage_list() {
+    echo "aws-creds list"
+    echo -e "\t Lists all accounts/roles available in Britive"
+}
+
+function usage() {
+    [[ $# -gt 0 ]] && echo "$@"
+    echo "aws-creds is a helper script to manage AWS credentials through Britive"
+    echo ""
+    echo "aws-creds [options]"
+    echo ""
+    for runmode in "${MODES[@]}"; do
+        usage_"${runmode}"
+    done
+    exit 1
+}
+
+function needle_in_haystack() {
+    local needle="$1"
+    shift
+    for arg in "$@"; do
+        [[ "${needle}" = "${arg}" ]] && return 0
+    done
+    return 1
+}
+
+[[ $# == 0 ]] && usage
+runmode="$1"
+shift
+needle_in_haystack "${runmode}" "${MODES[@]}" || usage "unknown mode: ${runmode}"
+
+britive_setup
+
+mode_"${runmode}" "$@"
diff --git a/test_code_v2/aws/aws_helper/image/CLI_success.png b/test_code_v2/aws/aws_helper/image/CLI_success.png
new file mode 100644
index 00000000..11c67787
Binary files /dev/null and b/test_code_v2/aws/aws_helper/image/CLI_success.png differ
diff --git a/test_code_v2/aws/aws_helper/image/LoginScreen.png b/test_code_v2/aws/aws_helper/image/LoginScreen.png
new file mode 100644
index 00000000..a0885b29
Binary files /dev/null and b/test_code_v2/aws/aws_helper/image/LoginScreen.png differ
diff --git a/test_code_v2/aws/aws_helper/image/terminal_success.png b/test_code_v2/aws/aws_helper/image/terminal_success.png
new file mode 100644
index 00000000..35912da5
Binary files /dev/null and b/test_code_v2/aws/aws_helper/image/terminal_success.png differ
diff --git a/test_code_v2/aws/aws_helper/make_me_env.sh b/test_code_v2/aws/aws_helper/make_me_env.sh
new file mode 100755
index 00000000..0e9e3b1c
--- /dev/null
+++ b/test_code_v2/aws/aws_helper/make_me_env.sh
@@ -0,0 +1,130 @@
+#!/usr/bin/env bash
+
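+# Checks out Britive creds for the blunderdome-user profile, then finds an active
+# dce lease for --principal_id (creating one if none exists) and logs into it.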
+SPACER="########################################"
+END_OUTPUT="END_OF_OUTPUT"
+
+parent_path=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
+
+budget_amount=100.0
+budget_currency=USD
+expiry=7d
+aws_creds_path="./aws-creds"
+
+log ()
+{
+    echo "$1"
+}
+
+printHelp(){
+    log "$SPACER"
+    log "## HELP CONTENT"
+    log "$SPACER"
+    log "### Required inputs"
+    log "- Required --principal_id - feel free to use your email address"
+    log "- Required --email - your email address"
+    log "## Optional inputs"
+    log "- Optional --budget_amount - Defaults to 100"
+    log "- Optional --budget_currency - Defaults to USD"
+    log "- Optional --expiry - Defaults to 7d"
+    log "- Optional --aws_creds_path - Defaults to ./aws-creds"
+    log "***************************"
+    log "### Sample command:"
+    log "\`\`\` ./make_me_env.sh --principal_id $USER@observeinc.com --email $USER@observeinc.com \`\`\`"
+    log "***************************"
+}
+
+if [ "$1" == "--help" ]; then
+    printHelp
+    log "$SPACER"
+    log "$END_OUTPUT"
+    log "$SPACER"
+    exit 0
+fi
+
+requiredInputs(){
+    log "$SPACER"
+    log "* Error: Invalid argument.*"
+    log "$SPACER"
+    printVariables
+    printHelp
+    log "$SPACER"
+    log "$END_OUTPUT"
+    log "$SPACER"
+    exit 1
+}
+
+printVariables(){
+    log "$SPACER"
+    log "* VARIABLES *"
+    log "$SPACER"
+    log "principal_id: $principal_id"
+    log "email: $email"
+    log "aws_creds_path: $aws_creds_path"
+    log "$SPACER"
+}
+
+if [ $# -lt 2 ]; then
+    requiredInputs
+fi
+
+# Parse inputs
+while [ $# -gt 0 ]; do
+    echo "required inputs $1 $2 $# "
+    case "$1" in
+        --principal_id)
+            principal_id="$2"
+            ;;
+        --email)
+            email="$2"
+            ;;
+        --budget_amount)
+            budget_amount="$2"
+            ;;
+        --budget_currency)
+            budget_currency="$2"
+            ;;
+        --expiry)
+            expiry="$2"
+            ;;
+        --aws_creds_path)
+            aws_creds_path="$2"
+            ;;
+        *)
+
+    esac
+    shift
+    shift
+done
+
+PRINCIPAL_ID=$principal_id
+BUDGET_AMOUNT=$budget_amount
+BUDGET_CURRENCY=$budget_currency
+EMAIL=$email
+EXPIRY=$expiry
+AWS_CREDS_PATH=$aws_creds_path
+
+$AWS_CREDS_PATH checkout blunderdome-user
+
+export AWS_PROFILE=blunderdome-user
+
+lease_id=$(dce leases list --status Active --principal-id $PRINCIPAL_ID | jq -r '.[0].id')
+if [[ "$lease_id" == "null" ]]; then
+    echo "Creating a new lease..."
+    lease_id=$(dce leases create --budget-amount $BUDGET_AMOUNT --budget-currency $BUDGET_CURRENCY --email $EMAIL --principal-id $PRINCIPAL_ID -E $EXPIRY | jq -r '.id')
+    if [[ "$lease_id" == "null" ]]; then
+        echo "Failed to create a lease. Exiting."
+        exit 1
+    fi
+fi
+
+echo "Logging into lease $lease_id..."
+# Execute the credentials script to set the AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and AWS_SESSION_TOKEN environment variables
+eval $(dce leases login $lease_id -p dce)
+
+echo "To open a browser run [ export AWS_PROFILE=blunderdome-user; dce leases login $lease_id -p dce --open-browser ]"
+
+echo "If you get an error message, try running pybritive logout"
+
diff --git a/test_code_v2/aws/host_monitoring/ec2/TERRAFORM_README.md b/test_code_v2/aws/host_monitoring/ec2/TERRAFORM_README.md
new file mode 100644
index 00000000..6529cf9c
--- /dev/null
+++ b/test_code_v2/aws/host_monitoring/ec2/TERRAFORM_README.md
@@ -0,0 +1,172 @@
+# FOR OBSERVE INTERNAL USERS
+
+## Terraform AWS Sample Infrastructure
+Clone or download this repository to your local machine.
+
+Assumptions - you have Docker Desktop installed.
+
+Within this directory the terraform folder provides a sample deployment for Linux and Windows with the otel collector installed and pushing data to your Observe environment.
+
+You will need:
+- observe_collection_endpoint - example "https://REPLACE_WITH_YOUR_CUSTOMER_ID.collect.observe.com"
+- observe_token - example "REPLACE_WITH_DATASTREAM_TOKEN"
+
+### Create an auto vars file for terraform to read
+
+You will need to create a file for providing variables to terraform. Use the following commands within the ***terraform*** directory.
+Be sure to replace OBSERVE_ENDPOINT and OBSERVE_TOKEN with your values:
+
+```
+cat << EOF > observe_vars.auto.tfvars
+name_format = "host-explorer-test-%s"
+OBSERVE_ENDPOINT = "https://[123456789].collect.[observe-staging].com"
+OBSERVE_CUSTOMER_DOMAIN = "[123456789].observe-staging.com"
+OBSERVE_TOKEN_OTEL = "[gobbly:gook]"
+OBSERVE_TOKEN_HOST_MONITORING = "[gobbly:gook]"
+OBSERVE_CUSTOMER = "[123456789]"
+EOF
+```
+
+For simplicity these instructions assume you have Docker Desktop installed and can therefore run terraform with the commands below.
+
+To pull the image from Docker Hub:
+```
+docker pull hashicorp/terraform:1.6
+```
+
+[Login to Britive using the script](../../aws_helper/SETUP_README.md)
+
+Look at the test_machines local variable in the variables.tf file for a list of machines you can create. Comment out as needed.
+
+To run terraform init - notice the mapping of volumes (-v) and environment variables (-e)
+```
+docker run -i -t \
+-v $(pwd)/:/workspace \
+-v $HOME/.aws:/aws_creds \
+-e AWS_SHARED_CREDENTIALS_FILE=/aws_creds/credentials \
+-e AWS_PROFILE=dce -w /workspace \
+-e AWS_REGION=us-west-2 \
+hashicorp/terraform:1.6 \
+init
+```
+
+To create the ec2 instances within an ephemeral account, run the following:
+```
+docker run -i -t \
+-v $(pwd)/:/workspace \
+-v $HOME/.aws:/aws_creds \
+-e AWS_SHARED_CREDENTIALS_FILE=/aws_creds/credentials \
+-e AWS_PROFILE=dce -w /workspace \
+-e AWS_REGION=us-west-2 \
+hashicorp/terraform:1.6 \
+apply -auto-approve;
+```
+
+To see what terraform will do, run a plan:
+```
+docker run -i -t \
+-v $(pwd)/:/workspace \
+-v $HOME/.aws:/aws_creds \
+-e AWS_SHARED_CREDENTIALS_FILE=/aws_creds/credentials \
+-e AWS_PROFILE=dce -w /workspace \
+-e AWS_REGION=us-west-2 \
+hashicorp/terraform:1.6 \
+plan
+```
+
+After the create process completes, you should start to see data flowing to your Observe account.
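+
+As a quick sanity check (a sketch - the `ephemeral_key` path and `ubuntu` user come from the example output shown later in this README, and `<host from outputs>` is a placeholder), you can ssh to one of the created instances and tail the cloud-init log to confirm the user data script ran:
+```
+ssh -i keypair_module/keys/ephemeral_key ubuntu@<host from outputs> \
+  'tail -n 50 /var/log/cloud-init-output.log'
+```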
+
+To delete the ec2 instances within an ephemeral account, run the following:
+```
+docker run -i -t \
+-v $(pwd)/:/workspace \
+-v $HOME/.aws:/aws_creds \
+-e AWS_SHARED_CREDENTIALS_FILE=/aws_creds/credentials \
+-e AWS_PROFILE=dce -w /workspace \
+-e AWS_REGION=us-west-2 \
+hashicorp/terraform:1.6 \
+destroy -auto-approve
+```
+
+To destroy and re-create in one step:
+
+```
+docker run -i -t \
+-v $(pwd)/:/workspace \
+-v $HOME/.aws:/aws_creds \
+-e AWS_SHARED_CREDENTIALS_FILE=/aws_creds/credentials \
+-e AWS_PROFILE=dce -w /workspace \
+-e AWS_REGION=us-west-2 \
+hashicorp/terraform:1.6 \
+destroy -auto-approve;
+docker run -i -t \
+-v $(pwd)/:/workspace \
+-v $HOME/.aws:/aws_creds \
+-e AWS_SHARED_CREDENTIALS_FILE=/aws_creds/credentials \
+-e AWS_PROFILE=dce -w /workspace \
+-e AWS_REGION=us-west-2 \
+hashicorp/terraform:1.6 \
+apply -auto-approve;
+```
+
+If you have terraform installed locally - and you don't want to use docker - run the following before running terraform commands:
+```
+export AWS_PROFILE=dce; export AWS_REGION=us-west-2
+```
+
+## Other commands you can run (replace `destroy -auto-approve` in the commands above)
+```
+plan
+
+output -json | jq -cr '.hosts_aws.value'
+
+```
+
+Terraform will create ephemeral key files for you that you can use to log in to your vm. You should see a list of created vms in the outputs when apply completes, looking like this:
+```
+  "host-explorer-test-otel_0-UBUNTU_20_04_LTS_MKjlCd" = {
+    "host" = "34.209.148.230"
+    "instance_id" = "i-063135d0980a0120f"
+    "machine" = "AWS_UBUNTU_20_04_LTS"
+    "public_ssh_link" = "ssh -i keypair_module/keys/ephemeral_key ubuntu@34.209.148.230"
+    "sleep" = 120
+    "user" = "ubuntu"
+  }
+```
+If you want to log in to your vm for any reason, use the public_ssh_link value to ssh to your machine.
+
+The following assumes you have jq installed:
+```
+brew install jq
+```
+To get a list of outputs at any time, use the following command:
+```
+docker run -i -t \
+-v $(pwd)/:/workspace \
+-v $HOME/.aws:/aws_creds \
+-e AWS_SHARED_CREDENTIALS_FILE=/aws_creds/credentials \
+-e AWS_PROFILE=dce -w /workspace \
+-e AWS_REGION=us-west-2 \
+hashicorp/terraform:1.6 \
+output -json | jq -cr '.hosts_aws.value'
+```
+
+
+# References
+### Setup Britive
+https://www.notion.so/observeinc/How-to-Use-Britive-Access-Management-36393b713cbf41ada73a846ddabfea21?pvs=4#e51bfe23dc8a4dc3a8e51aaeda4ee4fe
+
+
+### create an ephemeral account - you will need the dce utility
+[Setup dce](https://dce.readthedocs.io/en/latest/howto.html)
+
+### Misc snippets
+List running services on a host:
+```
+systemctl --type=service --state=running
+```
+Taint all linux machines in state so the next apply re-creates them:
+```
+while IFS= read -r line; do
+  echo "Processing line: $line"
+  # Perform actions on each line here
+  tf taint $line
+done < <(tf state list | grep linux)
+```
\ No newline at end of file
diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/02-module/main.tf b/test_code_v2/aws/host_monitoring/ec2/terraform/02-module/main.tf
new file mode 100644
index 00000000..a57d362d
--- /dev/null
+++ b/test_code_v2/aws/host_monitoring/ec2/terraform/02-module/main.tf
@@ -0,0 +1,58 @@
+variable "OBSERVE_TOKEN_OTEL" {
+  description = "A datastream token"
+  nullable    = true
+  default     = "TOKEN"
+  type        = string
+}
+
+module "o2" {
+  source             = "../"
+  OBSERVE_CUSTOMER   = "102"
+  OBSERVE_DOMAIN     = "observe-o2.com"
+  OBSERVE_TOKEN_OTEL = var.OBSERVE_TOKEN_OTEL
+  CREATE_INTEGRATION = {
+    host_mon = false,
+    otel     = true,
+    no_agent = false
+  }
+
+  debian_machines = {
+
+    UBUNTU_22_04_INSTALLED = {
+      # ami used in testing
+      ami_instance_type = "t3.small"
+      ami_id            = "ami-008fe2fc65df48dac"
+      ami_description   = "Canonical, Ubuntu, 22.04 LTS, 
amd64 jammy image build on 2023-05-16" + default_user = "ubuntu" + sleep = 120 + host_mon_user_data_path = "user_data/aptbased_linux_configuration_script_repo.sh" + otel_user_data_path = "user_data/aptbased_otel_repo.sh" + no_agent_user_data_path = "user_data/aptbased_otel_repo_noagentinstall.sh" + } + + UBUNTU_20_04_LTS_INSTALLED = { + # ami used in testing + ami_instance_type = "t3.small" + ami_id = "ami-0892d3c7ee96c0bf7" + ami_description = "Canonical, Ubuntu, 20.04 LTS, amd64 focal image build on 2021-11-29" + default_user = "ubuntu" + sleep = 120 + host_mon_user_data_path = "user_data/aptbased_linux_configuration_script_repo.sh" + otel_user_data_path = "user_data/aptbased_otel_repo.sh" + no_agent_user_data_path = "user_data/aptbased_otel_repo_noagentinstall.sh" + } + + UBUNTU_18_04_LTS_INSTALLED = { + ami_instance_type = "t3.small" + ami_id = "ami-0cfa91bdbc3be780c" + ami_description = "Canonical, Ubuntu, 18.04 LTS, amd64 bionic image build on 2022-04-11" + default_user = "ubuntu" + sleep = 120 + host_mon_user_data_path = "user_data/aptbased_linux_configuration_script_repo.sh" + otel_user_data_path = "user_data/aptbased_otel_repo.sh" + no_agent_user_data_path = "user_data/aptbased_otel_repo_noagentinstall.sh" + } + + } +} + diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/02-module/versions.tf b/test_code_v2/aws/host_monitoring/ec2/terraform/02-module/versions.tf new file mode 100644 index 00000000..69648545 --- /dev/null +++ b/test_code_v2/aws/host_monitoring/ec2/terraform/02-module/versions.tf @@ -0,0 +1,16 @@ +# https://www.terraform.io/language/expressions/version-constraints +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 4.11" + } + + random = { + source = "hashicorp/random" + version = ">= 3.4.3" + } + } + required_version = ">= 1.2" +} + diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/ec2_module/data.tf b/test_code_v2/aws/host_monitoring/ec2/terraform/ec2_module/data.tf new file mode 100644 index 00000000..7b31ba77 --- /dev/null +++ b/test_code_v2/aws/host_monitoring/ec2/terraform/ec2_module/data.tf @@ -0,0 +1,12 @@ +# The Canonical User ID data source allows access to the canonical user ID for the effective account in which Terraform is working. 
+# tflint-ignore: terraform_unused_declarations +data "aws_canonical_user_id" "current_user" { +} + + +# # # rando value for filtering output and validating results +# resource "random_string" "output" { +# for_each = var.AWS_MACHINE_CONFIGS +# length = 6 +# special = false +# } diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/ec2_module/main.tf b/test_code_v2/aws/host_monitoring/ec2/terraform/ec2_module/main.tf new file mode 100644 index 00000000..666e7f45 --- /dev/null +++ b/test_code_v2/aws/host_monitoring/ec2/terraform/ec2_module/main.tf @@ -0,0 +1,41 @@ +locals { + # machine_loop = { for key, value in var.AWS_MACHINE_CONFIGS : key => value if key == var.AWS_MACHINE_FILTER || var.AWS_MACHINE_FILTER == true } + + # compute_instances = { for key, value in var.AWS_MACHINE_CONFIGS } + # : + # key => value if contains(var.AWS_MACHINE_FILTER, key) || length(var.AWS_MACHINE_FILTER) == 0 } + +} + + +# EC2 instance for linux host +resource "aws_instance" "linux_host_integration" { + # for_each = var.AWS_MACHINE_CONFIGS + + ami = var.AWS_MACHINE_CONFIGS.ami_id + instance_type = var.AWS_MACHINE_CONFIGS.ami_instance_type + + associate_public_ip_address = true + + subnet_id = var.subnet_public_id + + vpc_security_group_ids = [var.aws_security_group_public_id] + key_name = var.aws_key_pair_name + + user_data = var.USERDATA + get_password_data = can(regex("WINDOWS", var.name)) ? true : false + + root_block_device { + volume_size = 100 + } + + tags = merge( + var.BASE_TAGS, + { + Name = var.name + # OS_KEY = each.key + }, + ) +} + + diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/ec2_module/outputs.tf b/test_code_v2/aws/host_monitoring/ec2/terraform/ec2_module/outputs.tf new file mode 100644 index 00000000..9043b784 --- /dev/null +++ b/test_code_v2/aws/host_monitoring/ec2/terraform/ec2_module/outputs.tf @@ -0,0 +1,18 @@ +output "instance" { + value = { + # tflint-ignore: terraform_deprecated_interpolation + "${aws_instance.linux_host_integration.tags["Name"]}" = { + "host" = aws_instance.linux_host_integration.public_ip + "instance_id" = aws_instance.linux_host_integration.id + "user" = var.AWS_MACHINE_CONFIGS.default_user + "public_ssh_link" = "ssh -i ${var.PRIVATE_KEY_PATH} ${var.AWS_MACHINE_CONFIGS.default_user}@${aws_instance.linux_host_integration.public_ip}" + # "sleep" : var.AWS_MACHINE_CONFIGS[key].sleep + "private_ip" = aws_instance.linux_host_integration.private_ip + "password_data" : aws_instance.linux_host_integration.password_data + "password_decrypted" : aws_instance.linux_host_integration.password_data == "" ? 
null : rsadecrypt(aws_instance.linux_host_integration.password_data, file(var.PRIVATE_KEY_PATH)) + } + } +} +# output "instance" { +# value = aws_instance.linux_host_integration +# } diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/ec2_module/variables.tf b/test_code_v2/aws/host_monitoring/ec2/terraform/ec2_module/variables.tf new file mode 100644 index 00000000..26d0b1b7 --- /dev/null +++ b/test_code_v2/aws/host_monitoring/ec2/terraform/ec2_module/variables.tf @@ -0,0 +1,210 @@ +# your local key path (assumes it exists) - this will allow you to access ec2 instances +# tflint-ignore: terraform_naming_convention,terraform_unused_declarations +variable "PUBLIC_KEY_PATH" { + description = "Public key path" + nullable = true + default = null + type = string +} + +# tflint-ignore: terraform_naming_convention,terraform_unused_declarations +variable "PRIVATE_KEY_PATH" { + description = "Private key path" + nullable = true + default = null + type = string +} + +# where to deploy +# tflint-ignore: terraform_naming_convention +# variable "REGION" { +# default = "us-west-2" +# description = "Where resources will be deployed" +# type = string +# } + +# appended to resource names so you can find your stuff +variable "name" { + description = "name" + type = string +} + +# tflint-ignore: terraform_naming_convention +variable "BASE_TAGS" { + description = "base resource tags" + type = map(string) + default = { + owner = "Observe" + createdBy = "terraform" + team = "content" + purpose = "test auto configuration script" + git_repo_url = "https://github.com/observeinc/linux-host-configuration-scripts" + } +} + +# tflint-ignore: terraform_naming_convention +# variable "USE_BRANCH_NAME" { +# default = "main" +# description = "git repository branch to use" +# type = string +# } + +# tflint-ignore: terraform_naming_convention +# variable "CI" { +# type = bool +# default = false +# description = "This variable is set to true by github actions to tell us we are running in ci" +# } + +# # tflint-ignore: terraform_naming_convention +# variable "PUBLIC_KEY" { +# description = "This value comes from a variable in github actions" +# nullable = true +# default = null +# type = string +# } + +# tflint-ignore: terraform_naming_convention +variable "AWS_MACHINE_CONFIGS" { + type = map(any) + description = "variables for supported OS" + default = { + + # UBUNTU_22_04_LTS = { + # # ami used in testing + # ami_instance_type = "t3.small" + # ami_id = " /dev/null << EOT +192.168.%%{randombetween(0, 99)}%%.%%{randombetween(0, 99)}%% - - [%%{utcnow()}%%] "GET / HTTP/1.1" 200 396 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36" +EOT +# nginx error +tee /home/ubuntu/templates/nginx_error.template > /dev/null << EOT +[%%{utcnow()}%%] - [%%{randomitem(INFO ,WARN ,ERROR)}%%] - I am a log for request with id: %%{uuid}%% +EOT +# apache access +tee /home/ubuntu/templates/apache_access.template > /dev/null << EOT +192.168.%%{randombetween(0, 99)}%%.%%{randombetween(0, 99)}%% - - [%%{utcnow()}%%] "GET %%{randomitem(/cgi-bin/try/, ,/hidden/)}%% HTTP/1.0" %%{randomitem(200,400,401,403,404,405,500,502,503)}%% 3395 +EOT +# apache error +tee /home/ubuntu/templates/apache_error.template > /dev/null << EOT +[%%{utcnow()}%%] [error] [client 1.2.3.4] %%{randomitem(Directory index forbidden by rule: /home/test/,Directory index forbidden by rule: /apache/web-data/test2,Client sent malformed Host header,user test: authentication failure for "/~dcid/test1": 
Password Mismatch)}%%
+EOT
+
+# create script to generate logs using templates
+tee /home/ubuntu/genlogs.sh > /dev/null << EOT
+#!/bin/bash
+/usr/local/bin/lignator -t /home/ubuntu/templates --token-opening "%%{" --token-closing "}%%" -l 50 -o /var/log/
+EOT
+
+sudo chmod +x /home/ubuntu/genlogs.sh
+
+# create cron jobs to generate logs and system stress
+(crontab -l 2>/dev/null; echo "* * * * * /home/ubuntu/genlogs.sh >> /var/log/cron_gen.log 2>&1") | crontab -
+(crontab -l 2>/dev/null; echo "*/2 * * * * /usr/bin/stress-ng --matrix 0 -t 1m >> /var/log/cron_stress.log 2>&1") | crontab -
+
+${SCRIPT}
+# Get the current timestamp
+timestamp_date=$(date +"%Y-%m-%d %H:%M:%S")
+timestamp_timedatectl=$(timedatectl status)
+
+# Specify the filename
+filename="/tmp/hostmon_install_complete.log"
+
+# Write the timestamp to the file
+echo "$timestamp_date" > "$filename"
+echo "$timestamp_timedatectl" >> "$filename"
+
+input="${OBSERVE_ENDPOINT}"
+
+# Remove "https://" from the input string
+OBSERVE_ENVIRONMENT="$${input#https://}"
+
+sudo curl -o "/etc/fluent-bit/observe-monitoring-all-platforms.conf" "https://raw.githubusercontent.com/observeinc/linux-host-configuration-scripts/${BRANCH}/other_configs/fluent_monitoring/observe-monitoring-all-platforms.conf"
+sudo curl -o "/etc/fluent-bit/observe-monitoring-linux-only.conf" "https://raw.githubusercontent.com/observeinc/linux-host-configuration-scripts/${BRANCH}/other_configs/fluent_monitoring/observe-monitoring-linux-only.conf"
+
+sudo sed -i "s/REPLACE_WITH_CUSTOMER_INGEST_TOKEN/${OBSERVE_TOKEN}/g" /etc/fluent-bit/*
+
+sudo sed -i "s/REPLACE_WITH_OBSERVE_ENVIRONMENT/$${OBSERVE_ENVIRONMENT}/g" /etc/fluent-bit/*
+
+sudo sed -i "s/http_server Off/http_server On/g" /etc/fluent-bit/fluent-bit.conf
+
+# create fluent-bit config that tags records and ships the install-complete marker file
+sudo tee /etc/fluent-bit/observe-timestamp.conf > /dev/null << EOT
+[FILTER]
+    Name record_modifier
+    Match *
+# if you want to group your servers into an application group
+# [e.g. Proxy nodes] so you can have custom alert levels for them
+# uncomment this next line
+    #REPLACE_WITH_OBSERVE_APP_GROUP_OPTION
+    Record host $${HOSTNAME}
+    Record datacenter hostmon_test
+    Record obs_ver 20230412
+    Remove_key _MACHINE_ID
+[INPUT]
+    name tail
+    tag tail_hostmon_install_complete
+    Path_Key path
+    path /tmp/hostmon_install_complete.log
+    Read_from_Head true
+[OUTPUT]
+    name http
+    match tail_hostmon_install_complete
+    host ${trimprefix("${OBSERVE_ENDPOINT}", "https://")}
+    port 443
+    URI /v1/http/fluentbit/hostmon_install_complete
+    Format msgpack
+    Header X-Observe-Decoder fluent
+    Header Authorization Bearer ${OBSERVE_TOKEN}
+    Compress gzip
+    tls on
+EOT
+
+sudo service fluent-bit restart
\ No newline at end of file
diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/aptbased_otel.sh b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/aptbased_otel.sh
new file mode 100644
index 00000000..6758e2d2
--- /dev/null
+++ b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/aptbased_otel.sh
@@ -0,0 +1,277 @@
+#!/bin/bash
+
+################################
+# WARNING: THIS IS USED AS A TEMPLATE FILE FOR TERRAFORM
+# Percent signs are doubled to escape them
+################################
+apt-get update -y
+
+apt-get install wget curl sed nano uuid-runtime ca-certificates apt-utils stress-ng cron -y
+
+# need to add a line for high precision timestamp
+# make syslog use precise timestamp
+sudo sed -i "s/\$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat/#\$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat/g" /etc/rsyslog.conf
+sudo systemctl restart syslog
+
+# install otel collector
+wget "https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.88.0/otelcol-contrib_0.88.0_linux_amd64.deb"
+dpkg -i otelcol-contrib_0.88.0_linux_amd64.deb
+
+# create otel config
+cp /etc/otelcol-contrib/config.yaml /etc/otelcol-contrib/config.OLD
+rm /etc/otelcol-contrib/config.yaml
+
+# add collector user to syslog group
+sudo usermod -a -G syslog otelcol-contrib
+
+# install lignator log generator
+# https://github.com/microsoft/lignator
+# sample commands
+## lignator -t "timestamp: %%{utcnow()}%%" --token-opening "%%{" --token-closing "}%%" -o /home/ubuntu/testlogs
+## lignator -t "[%%{utcnow()}%%] - [%%{randomitem(INFO ,WARN ,ERROR)}%%] - I am a log for request with id: %%{uuid}%%" --token-opening "%%{" --token-closing "}%%" -o /home/ubuntu/testlogs
+
+wget https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb
+sudo dpkg -i packages-microsoft-prod.deb
+rm packages-microsoft-prod.deb
+
+sudo apt-get update
+sudo apt-get install -y dotnet-sdk-7.0
+
+
+wget https://github.com/microsoft/lignator/archive/v0.8.0.tar.gz \
+&& tar xvzf v0.8.0.tar.gz \
+&& cd ./lignator-0.8.0/src \
+&& sudo dotnet publish -r linux-x64 -c Release -o /usr/local/bin/ -p:PublishSingleFile=true --self-contained true -p:InformationalVersion=0.8.0 \
+&& lignator --version
+
+sudo su ubuntu
+
+mkdir 
/home/ubuntu/templates + +# create lignator templates +# nginx access +tee /home/ubuntu/templates/nginx_access.template > /dev/null << EOT +192.168.%%{randombetween(0, 99)}%%.%%{randombetween(0, 99)}%% - - [%%{utcnow()}%%] "GET / HTTP/1.1" 200 396 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36" +EOT +# nginx error +tee /home/ubuntu/templates/nginx_error.template > /dev/null << EOT +[%%{utcnow()}%%] - [%%{randomitem(INFO ,WARN ,ERROR)}%%] - I am a log for request with id: %%{uuid}%% +EOT +# apache access +tee /home/ubuntu/templates/apache_access.template > /dev/null << EOT +192.168.%%{randombetween(0, 99)}%%.%%{randombetween(0, 99)}%% - - [%%{utcnow()}%%] "GET %%{randomitem(/cgi-bin/try/, ,/hidden/)}%% HTTP/1.0" %%{randomitem(200,400,401,403,404,405,500,502,503)}%% 3395 +EOT +# apache error +tee /home/ubuntu/templates/apache_error.template > /dev/null << EOT +[%%{utcnow()}%%] [error] [client 1.2.3.4] %%{randomitem(Directory index forbidden by rule: /home/test/,Directory index forbidden by rule: /apache/web-data/test2,Client sent malformed Host header,user test: authentication failure for "/~dcid/test1": Password Mismatch)}%% +EOT + +# create script to generate logs using templates +tee /home/ubuntu/genlogs.sh > /dev/null << EOT +#!/bin/bash +/usr/local/bin/lignator -t /home/ubuntu/templates --token-opening "%%{" --token-closing "}%%" -l 50 -o /home/ubuntu/logs +EOT + +sudo chmod +x /home/ubuntu/genlogs.sh + +# create cron jobs to generate logs and system stress +(crontab -l 2>/dev/null; echo "* * * * * /home/ubuntu/genlogs.sh >> /home/ubuntu/cron_gen.log 2>&1") | crontab - +(crontab -l 2>/dev/null; echo "*/2 * * * * /usr/bin/stress-ng --matrix 0 -t 1m >> /home/ubuntu/cron_stress.log 2>&1") | crontab - + +tee /etc/otelcol-contrib/config.yaml > /dev/null << EOT +exporters: + logging: + loglevel: + otlphttp: + endpoint: "${OBSERVE_ENDPOINT}/v1/otel" + headers: + authorization: "Bearer ${OBSERVE_TOKEN}" +processors: + batch: + resource: + attributes: + - key: OBSERVE_GUID + value: $${HOSTNAME} + action: upsert + resourcedetection: + detectors: [env, ec2, eks, system] + system: + hostname_sources: ["lookup", "cname", "dns", "os"] +receivers: + filestats: + include: /var/log/dpkg.log + collection_interval: 5m + initial_delay: 1s + + filelog/base: + include: [/var/log/*.log, /root/*.log, /root/logs/*.log] + include_file_path: true + #start_at: beginning + operators: + #- type: regex_parser + #regex: (?P\w+ \d+ \d+:\d+:\d+) + - type: filter + expr: 'body matches "otel-contrib"' + # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/hostmetricsreceiver + hostmetrics: + root_path: / + collection_interval: 60s + scrapers: + cpu: + metrics: + # Default + system.cpu.time: + enabled: "${DEFAULT_METRICS_ENABLED}" + # Optional + # system.cpu.frequency: + # enabled: "${OPTIONAL_METRICS_ENABLED}" + system.cpu.logical.count: + enabled: "${OPTIONAL_METRICS_ENABLED}" + system.cpu.physical.count: + enabled: "${OPTIONAL_METRICS_ENABLED}" + system.cpu.utilization: + enabled: "${OPTIONAL_METRICS_ENABLED}" + disk: + metrics: + # Default + system.disk.io: + enabled: "${DEFAULT_METRICS_ENABLED}" + system.disk.io_time: + enabled: "${DEFAULT_METRICS_ENABLED}" + system.disk.merged: + enabled: "${DEFAULT_METRICS_ENABLED}" + system.disk.operation_time: + enabled: "${DEFAULT_METRICS_ENABLED}" + system.disk.operations: + enabled: "${DEFAULT_METRICS_ENABLED}" + system.disk.pending_operations: + enabled: 
"${DEFAULT_METRICS_ENABLED}" + system.disk.weighted_io_time: + enabled: "${DEFAULT_METRICS_ENABLED}" + load: + metrics: + # Default + system.cpu.load_average.15m: + enabled: "${DEFAULT_METRICS_ENABLED}" + system.cpu.load_average.1m: + enabled: "${DEFAULT_METRICS_ENABLED}" + system.cpu.load_average.5m: + enabled: "${DEFAULT_METRICS_ENABLED}" + # Config - divide by cpus + cpu_average: true + filesystem: + metrics: + # Default + system.filesystem.inodes.usage: + enabled: "${DEFAULT_METRICS_ENABLED}" + system.filesystem.usage: + enabled: "${DEFAULT_METRICS_ENABLED}" + # Optional + system.filesystem.utilization: + enabled: "${OPTIONAL_METRICS_ENABLED}" + memory: + metrics: + # Default + system.memory.usage: + enabled: "${DEFAULT_METRICS_ENABLED}" + # Optional + system.memory.utilization: + enabled: "${OPTIONAL_METRICS_ENABLED}" + network: + metrics: + # Default + system.network.connections: + enabled: "${DEFAULT_METRICS_ENABLED}" + system.network.dropped: + enabled: "${DEFAULT_METRICS_ENABLED}" + system.network.errors: + enabled: "${DEFAULT_METRICS_ENABLED}" + system.network.io: + enabled: "${DEFAULT_METRICS_ENABLED}" + system.network.packets: + enabled: "${DEFAULT_METRICS_ENABLED}" + # Optional + system.network.conntrack.count: + enabled: "${OPTIONAL_METRICS_ENABLED}" + system.network.conntrack.max: + enabled: "${OPTIONAL_METRICS_ENABLED}" + paging: + metrics: + # Default + system.paging.faults: + enabled: "${DEFAULT_METRICS_ENABLED}" + system.paging.operations: + enabled: "${DEFAULT_METRICS_ENABLED}" + system.paging.usage: + enabled: "${DEFAULT_METRICS_ENABLED}" + # Optional + system.paging.utilization: + enabled: "${OPTIONAL_METRICS_ENABLED}" + processes: + metrics: + # Default + system.processes.count: + enabled: "${DEFAULT_METRICS_ENABLED}" + system.processes.created: + enabled: "${DEFAULT_METRICS_ENABLED}" + process: + metrics: + # Default + process.cpu.time: + enabled: "${PROCESS_DEFAULT_METRICS_ENABLED}" + process.disk.io: + enabled: "${PROCESS_DEFAULT_METRICS_ENABLED}" + process.memory.usage: + enabled: "${PROCESS_DEFAULT_METRICS_ENABLED}" + process.memory.virtual: + enabled: "${PROCESS_DEFAULT_METRICS_ENABLED}" + # Optional + process.context_switches: + enabled: "${PROCESS_OPTIONAL_METRICS_ENABLED}" + process.cpu.utilization: + enabled: "${PROCESS_OPTIONAL_METRICS_ENABLED}" + process.disk.operations: + enabled: "${PROCESS_OPTIONAL_METRICS_ENABLED}" + process.handles: + enabled: "${PROCESS_OPTIONAL_METRICS_ENABLED}" + process.open_file_descriptors: + enabled: "${PROCESS_OPTIONAL_METRICS_ENABLED}" + process.paging.faults: + enabled: "${PROCESS_OPTIONAL_METRICS_ENABLED}" + process.signals_pending: + enabled: "${PROCESS_OPTIONAL_METRICS_ENABLED}" + process.threads: + enabled: "${PROCESS_OPTIONAL_METRICS_ENABLED}" + otlp: + protocols: + grpc: + include_metadata: true + http: + include_metadata: true +service: + pipelines: + traces: + receivers: [otlp] + processors: [resource, batch] + exporters: [otlphttp, logging] + logs/base: + receivers: [filelog/base] + processors: [resource,batch] + exporters: [otlphttp] + metrics/two: + receivers: [filestats] + processors: [resource,resourcedetection,batch] + exporters: [otlphttp] + metrics: + receivers: [hostmetrics, otlp] + processors: [resource] + exporters: [logging, otlphttp] +EOT + +sudo service otelcol-contrib restart \ No newline at end of file diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/aptbased_otel_repo.sh b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/aptbased_otel_repo.sh new file mode 100644 index 
00000000..074081c1 --- /dev/null +++ b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/aptbased_otel_repo.sh @@ -0,0 +1,117 @@ +#!/bin/bash + +################################ +# WARNING: THIS IS USED AS A TEMPLATE FILE FOR TERRAFORM +# Percent signs are doubled to escape them +################################ +sudo apt-get update -y + +sudo apt-get install wget curl sed nano uuid-runtime ca-certificates apt-utils stress-ng cron acl -y + +# make syslog use a high-precision timestamp +sudo sed -i "s/\$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat/#\$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat/g" /etc/rsyslog.conf +sudo systemctl restart syslog + +# install lignator log generator +# https://github.com/microsoft/lignator +# sample commands +## lignator -t "timestamp: %%{utcnow()}%%" --token-opening "%%{" --token-closing "}%%" -o /home/ubuntu/testlogs +## lignator -t "[%%{utcnow()}%%] - [%%{randomitem(INFO ,WARN ,ERROR)}%%] - I am a log for request with id: %%{uuid}%%" --token-opening "%%{" --token-closing "}%%" -o /home/ubuntu/testlogs + +# identify OS and architecture +OS=$(lsb_release -is | tr '[:upper:]' '[:lower:]') +VERSION=$(lsb_release -rs | tr '[:upper:]' '[:lower:]') +CODENAME=$(lsb_release -cs) + +SYS_ARCH=$(uname -m) +if [[ $SYS_ARCH = "aarch64" ]]; then + ARCH="arm64" +else + ARCH="amd64" +fi + +# NOTE: ARCH is detected for arm64 hosts, but the libssl1.1 fallback below is amd64-only +if [[ $VERSION == "22.04" ]]; then + # download package with wget + wget http://archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.0g-2ubuntu4_amd64.deb + + # install package locally + sudo dpkg -i libssl1.1_1.1.0g-2ubuntu4_amd64.deb +fi + +wget https://packages.microsoft.com/config/ubuntu/"$${VERSION}"/packages-microsoft-prod.deb -O packages-microsoft-prod.deb +sudo dpkg -i packages-microsoft-prod.deb +rm packages-microsoft-prod.deb + +sudo apt-get update +sudo apt-get install -y dotnet-sdk-8.0 + + +wget https://github.com/microsoft/lignator/archive/v0.8.0.tar.gz \ +&& tar xvzf v0.8.0.tar.gz \ +&& cd ./lignator-0.8.0/src \ +&& sudo dotnet publish -r linux-x64 -c Release -o /usr/local/bin/ -p:PublishSingleFile=true --self-contained true -p:InformationalVersion=0.8.0 \ +&& lignator --version + +sudo setfacl -Rm u:ubuntu:rwX /var/log + +sudo su ubuntu + +mkdir /home/ubuntu/templates + +# create lignator templates +# nginx access +tee /home/ubuntu/templates/nginx_access.template > /dev/null << EOT +192.168.%%{randombetween(0, 99)}%%.%%{randombetween(0, 99)}%% - - [%%{utcnow()}%%] "GET / HTTP/1.1" 200 396 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36" +EOT +# nginx error +tee /home/ubuntu/templates/nginx_error.template > /dev/null << EOT +[%%{utcnow()}%%] - [%%{randomitem(INFO ,WARN ,ERROR)}%%] - I am a log for request with id: %%{uuid}%% +EOT +# apache access +tee /home/ubuntu/templates/apache_access.template > /dev/null << EOT +192.168.%%{randombetween(0, 99)}%%.%%{randombetween(0, 99)}%% - - [%%{utcnow()}%%] "GET %%{randomitem(/cgi-bin/try/, ,/hidden/)}%% HTTP/1.0" 
%%{randomitem(200,400,401,403,404,405,500,502,503)}%% 3395 +EOT +# apache error +tee /home/ubuntu/templates/apache_error.template > /dev/null << EOT +[%%{utcnow()}%%] [error] [client 1.2.3.4] %%{randomitem(Directory index forbidden by rule: /home/test/,Directory index forbidden by rule: /apache/web-data/test2,Client sent malformed Host header,user test: authentication failure for "/~dcid/test1": Password Mismatch)}%% +EOT + +# create script to generate logs using templates +tee /home/ubuntu/genlogs.sh > /dev/null << EOT +#!/bin/bash +/usr/local/bin/lignator -t /home/ubuntu/templates --token-opening "%%{" --token-closing "}%%" -l 50 -o /var/log/ +EOT + +sudo chmod +x /home/ubuntu/genlogs.sh + +# create cron jobs to generate logs and system stress +(crontab -l 2>/dev/null; echo "* * * * * /home/ubuntu/genlogs.sh >> /var/log/cron_gen.log 2>&1") | crontab - +(crontab -l 2>/dev/null; echo "*/2 * * * * /usr/bin/stress-ng --matrix 0 -t 1m >> /var/log/cron_stress.log 2>&1") | crontab - + +${SCRIPT} + +# set the CAP_DAC_READ_SEARCH Linux capability on the collector binary so it can read host metrics from the /proc directory; without it the collector misses some host metrics. Remove it with: sudo setcap -r "/usr/bin/otelcol-contrib" +sudo setcap 'cap_dac_read_search=ep' "/usr/bin/otelcol-contrib" + + +# Get the current timestamp +timestamp_date=$(date +"%Y-%m-%d %H:%M:%S") +timestamp_timedatectl=$(timedatectl status) + +# Specify the filename +filename="/tmp/quickstart_install_complete.log" + +# Write the timestamp to the file +echo "$timestamp_date" > "$filename" +echo "$timestamp_timedatectl" >> "$filename" \ No newline at end of file diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/aptbased_otel_repo_noagentinstall.sh b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/aptbased_otel_repo_noagentinstall.sh new file mode 100644 index 00000000..8a164f28 --- /dev/null +++ b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/aptbased_otel_repo_noagentinstall.sh @@ -0,0 +1,102 @@ +#!/bin/bash + +################################ +# WARNING: THIS IS USED AS A TEMPLATE FILE FOR TERRAFORM +# Percent signs are doubled to escape them +################################ +sudo apt-get update -y + +sudo apt-get install wget curl sed nano uuid-runtime ca-certificates apt-utils stress-ng cron acl -y + +sudo apt-get install gnupg2 -y +sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 97A80C63C9D8B80B +sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 9F9DDC083888C1CD + +# make syslog use a high-precision timestamp +sudo sed -i "s/\$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat/#\$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat/g" /etc/rsyslog.conf +sudo systemctl restart syslog + +# install lignator log generator +# https://github.com/microsoft/lignator +# sample commands +## lignator -t "timestamp: %%{utcnow()}%%" --token-opening "%%{" --token-closing "}%%" -o /home/ubuntu/testlogs +## lignator -t "[%%{utcnow()}%%] - [%%{randomitem(INFO ,WARN ,ERROR)}%%] - I am a log for request with id: %%{uuid}%%" --token-opening "%%{" --token-closing "}%%" -o /home/ubuntu/testlogs + 
https://github.com/microsoft/lignator +# sample commands +## lignator -t "timestamp: %%{utcnow()}%%" --token-opening "%%{" --token-closing "}%%" -o /home/ubuntu/testlogs +## lignator -t "[%%{utcnow()}%%] - [%%{randomitem(INFO ,WARN ,ERROR)}%%] - I am a log for request with id: %%{uuid}%%" --token-opening "%%{" --token-closing "}%%" -o /home/ubuntu/testlogs +# identify OS and architecture +OS=$(lsb_release -is | tr '[:upper:]' '[:lower:]') +VERSION=$(lsb_release -rs | tr '[:upper:]' '[:lower:]') +lsb_release -rs +CODENAME=$(lsb_release -cs) + +SYS_ARCH=$(uname -m) +if [[ $SYS_ARCH = "aarch64" ]]; then + ARCH="arm64" +else + ARCH="amd64" +fi + +if [[ $VERSION == "22.04" ]]; then + # download package with wget + wget http://archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.0g-2ubuntu4_amd64.deb + + # install package locally + sudo dpkg -i libssl1.1_1.1.0g-2ubuntu4_amd64.deb +fi + +wget https://packages.microsoft.com/config/ubuntu/"$${VERSION}"/packages-microsoft-prod.deb -O packages-microsoft-prod.deb +sudo dpkg -i packages-microsoft-prod.deb +rm packages-microsoft-prod.deb + +sudo apt-get update +sudo apt-get install -y dotnet-sdk-8.0 + + +wget https://github.com/microsoft/lignator/archive/v0.8.0.tar.gz \ +&& tar xvzf v0.8.0.tar.gz \ +&& cd ./lignator-0.8.0/src \ +&& sudo dotnet publish -r linux-x64 -c Release -o /usr/local/bin/ -p:PublishSingleFile=true --self-contained true -p:InformationalVersion=0.8.0 \ +&& lignator --version + +sudo setfacl -Rm u:ubuntu:rwX /var/log + +sudo su ubuntu + +mkdir /home/ubuntu/templates + +# create lignator templates +# nginx access +tee /home/ubuntu/templates/nginx_access.template > /dev/null << EOT +192.168.%%{randombetween(0, 99)}%%.%%{randombetween(0, 99)}%% - - [%%{utcnow()}%%] "GET / HTTP/1.1" 200 396 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36" +EOT +# nginx error +tee /home/ubuntu/templates/nginx_error.template > /dev/null << EOT +[%%{utcnow()}%%] - [%%{randomitem(INFO ,WARN ,ERROR)}%%] - I am a log for request with id: %%{uuid}%% +EOT +# apache access +tee /home/ubuntu/templates/apache_access.template > /dev/null << EOT +192.168.%%{randombetween(0, 99)}%%.%%{randombetween(0, 99)}%% - - [%%{utcnow()}%%] "GET %%{randomitem(/cgi-bin/try/, ,/hidden/)}%% HTTP/1.0" %%{randomitem(200,400,401,403,404,405,500,502,503)}%% 3395 +EOT +# apache error +tee /home/ubuntu/templates/apache_error.template > /dev/null << EOT +[%%{utcnow()}%%] [error] [client 1.2.3.4] %%{randomitem(Directory index forbidden by rule: /home/test/,Directory index forbidden by rule: /apache/web-data/test2,Client sent malformed Host header,user test: authentication failure for "/~dcid/test1": Password Mismatch)}%% +EOT + +# create script to generate logs using templates +tee /home/ubuntu/genlogs.sh > /dev/null << EOT +#!/bin/bash +/usr/local/bin/lignator -t /home/ubuntu/templates --token-opening "%%{" --token-closing "}%%" -l 50 -o /var/log/ +EOT + +sudo chmod +x /home/ubuntu/genlogs.sh + +# create cron jobs to generate logs and system stress +(crontab -l 2>/dev/null; echo "* * * * * /home/ubuntu/genlogs.sh >> /var/log/cron_gen.log 2>&1") | crontab - +(crontab -l 2>/dev/null; echo "*/2 * * * * /usr/bin/stress-ng --matrix 0 -t 1m >> /var/log/cron_stress.log 2>&1") | crontab - + +# curl https://raw.githubusercontent.com/observeinc/host-config-scripts/main/opentelemetry/linux/observe_otel_install.sh | bash -s -- --observe_collection_endpoint ${OBSERVE_ENDPOINT} --observe_token 
diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/garbage/aptbased_fluent.sh b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/garbage/aptbased_fluent.sh new file mode 100644 index 00000000..1ad90fc0 --- /dev/null +++ b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/garbage/aptbased_fluent.sh @@ -0,0 +1,84 @@ +#!/bin/bash + +################################ +# WARNING: THIS IS USED AS A TEMPLATE FILE FOR TERRAFORM +# Percent signs are doubled to escape them +################################ +apt-get update -y + +apt-get install wget curl sed nano uuid-runtime ca-certificates apt-utils stress-ng cron -y + +# make syslog use a high-precision timestamp +sudo sed -i "s/\$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat/#\$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat/g" /etc/rsyslog.conf +sudo systemctl restart syslog + +# add collector user to syslog group +# sudo usermod -a -G syslog otelcol-contrib + +# install lignator log generator +# https://github.com/microsoft/lignator +# sample commands +## lignator -t "timestamp: %%{utcnow()}%%" --token-opening "%%{" --token-closing "}%%" -o /home/ubuntu/testlogs +## lignator -t "[%%{utcnow()}%%] - [%%{randomitem(INFO ,WARN ,ERROR)}%%] - I am a log for request with id: %%{uuid}%%" --token-opening "%%{" --token-closing "}%%" -o /home/ubuntu/testlogs + +wget https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb +sudo dpkg -i packages-microsoft-prod.deb +rm packages-microsoft-prod.deb + +sudo apt-get update +sudo apt-get install -y dotnet-sdk-7.0 + + +wget https://github.com/microsoft/lignator/archive/v0.8.0.tar.gz \ +&& tar xvzf v0.8.0.tar.gz \ +&& cd ./lignator-0.8.0/src \ +&& sudo dotnet publish -r linux-x64 -c Release -o /usr/local/bin/ -p:PublishSingleFile=true --self-contained true -p:InformationalVersion=0.8.0 \ +&& lignator --version + +sudo su ubuntu + +mkdir /home/ubuntu/templates + +# create lignator templates +# nginx access +tee /home/ubuntu/templates/nginx_access.template > /dev/null << EOT +192.168.%%{randombetween(0, 99)}%%.%%{randombetween(0, 99)}%% - - [%%{utcnow()}%%] "GET / HTTP/1.1" 200 396 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36" +EOT +# nginx error +tee /home/ubuntu/templates/nginx_error.template > /dev/null << EOT +[%%{utcnow()}%%] - [%%{randomitem(INFO ,WARN ,ERROR)}%%] - I am a log for request with id: %%{uuid}%% +EOT +# apache access +tee /home/ubuntu/templates/apache_access.template > /dev/null << EOT +192.168.%%{randombetween(0, 99)}%%.%%{randombetween(0, 99)}%% - - [%%{utcnow()}%%] "GET %%{randomitem(/cgi-bin/try/, ,/hidden/)}%% HTTP/1.0" %%{randomitem(200,400,401,403,404,405,500,502,503)}%% 3395 +EOT +# apache error +tee /home/ubuntu/templates/apache_error.template > /dev/null << EOT +[%%{utcnow()}%%] [error] [client 1.2.3.4] %%{randomitem(Directory index forbidden by rule: /home/test/,Directory index forbidden by rule: /apache/web-data/test2,Client sent malformed Host header,user test: authentication failure for "/~dcid/test1": Password Mismatch)}%% +EOT
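+# (illustrative) after terraform rendering and a genlogs run, a generated apache_error line looks roughly like the following -- the exact utcnow() output format may differ: +# [2023-04-12 10:15:42] [error] [client 1.2.3.4] Client sent malformed Host header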
+ +# create script to generate logs using templates +tee /home/ubuntu/genlogs.sh > /dev/null << EOT +#!/bin/bash +/usr/local/bin/lignator -t /home/ubuntu/templates --token-opening "%%{" --token-closing "}%%" -l 50 -o /home/ubuntu/logs +EOT + +sudo chmod +x /home/ubuntu/genlogs.sh + +# create cron jobs to generate logs and system stress +(crontab -l 2>/dev/null; echo "* * * * * /home/ubuntu/genlogs.sh >> /home/ubuntu/cron_gen.log 2>&1") | crontab - +(crontab -l 2>/dev/null; echo "*/2 * * * * /usr/bin/stress-ng --matrix 0 -t 1m >> /home/ubuntu/cron_stress.log 2>&1") | crontab - + +sudo tee /etc/default/fluent-bit > /dev/null << EOT +OBSERVE_HOST=${OBSERVE_ENDPOINT} +OBSERVE_TOKEN=${OBSERVE_TOKEN} +EOT + +curl https://raw.githubusercontent.com/yasar-observe/host-configuration-scripts/yasar/init/fluent-bit/linux/observe_fluent_install.sh | bash -s -- --observe_host "${OBSERVE_ENDPOINT}" --observe_token "${OBSERVE_TOKEN}" \ No newline at end of file diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/garbage/aptbased_telegraf.sh b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/garbage/aptbased_telegraf.sh new file mode 100644 index 00000000..505513c4 --- /dev/null +++ b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/garbage/aptbased_telegraf.sh @@ -0,0 +1,79 @@ +#!/bin/bash + +################################ +# WARNING: THIS IS USED AS A TEMPLATE FILE FOR TERRAFORM +# Percent signs are doubled to escape them +################################ +apt-get update -y + +apt-get install wget curl sed nano uuid-runtime ca-certificates apt-utils stress-ng cron -y + +# make syslog use a high-precision timestamp +sudo sed -i "s/\$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat/#\$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat/g" /etc/rsyslog.conf +sudo systemctl restart syslog + +# add collector user to syslog group +# sudo usermod -a -G syslog otelcol-contrib + +# install lignator log generator +# https://github.com/microsoft/lignator +# sample commands +## lignator -t "timestamp: %%{utcnow()}%%" --token-opening "%%{" --token-closing "}%%" -o /home/ubuntu/testlogs +## lignator -t "[%%{utcnow()}%%] - [%%{randomitem(INFO ,WARN ,ERROR)}%%] - I am a log for request with id: %%{uuid}%%" --token-opening "%%{" --token-closing "}%%" -o /home/ubuntu/testlogs + +wget https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb +sudo dpkg -i packages-microsoft-prod.deb +rm packages-microsoft-prod.deb + +sudo apt-get update +sudo apt-get install -y dotnet-sdk-7.0 + + +wget https://github.com/microsoft/lignator/archive/v0.8.0.tar.gz \ +&& tar xvzf v0.8.0.tar.gz \ +&& cd ./lignator-0.8.0/src \ +&& sudo dotnet publish -r linux-x64 -c Release -o /usr/local/bin/ -p:PublishSingleFile=true --self-contained true -p:InformationalVersion=0.8.0 \ +&& lignator --version + +sudo su ubuntu
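+# NOTE: in a non-interactive user_data run, `sudo su ubuntu` spawns a shell that exits immediately, so the commands below still execute as the invoking user; to run a single step as ubuntu, something like the following works instead (illustrative): +#   sudo -u ubuntu mkdir -p /home/ubuntu/templates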
+ +mkdir /home/ubuntu/templates + +# create lignator templates +# nginx access +tee /home/ubuntu/templates/nginx_access.template > /dev/null << EOT +192.168.%%{randombetween(0, 99)}%%.%%{randombetween(0, 99)}%% - - [%%{utcnow()}%%] "GET / HTTP/1.1" 200 396 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36" +EOT +# nginx error +tee /home/ubuntu/templates/nginx_error.template > /dev/null << EOT +[%%{utcnow()}%%] - [%%{randomitem(INFO ,WARN ,ERROR)}%%] - I am a log for request with id: %%{uuid}%% +EOT +# apache access +tee /home/ubuntu/templates/apache_access.template > /dev/null << EOT +192.168.%%{randombetween(0, 99)}%%.%%{randombetween(0, 99)}%% - - [%%{utcnow()}%%] "GET %%{randomitem(/cgi-bin/try/, ,/hidden/)}%% HTTP/1.0" %%{randomitem(200,400,401,403,404,405,500,502,503)}%% 3395 +EOT +# apache error +tee /home/ubuntu/templates/apache_error.template > /dev/null << EOT +[%%{utcnow()}%%] [error] [client 1.2.3.4] %%{randomitem(Directory index forbidden by rule: /home/test/,Directory index forbidden by rule: /apache/web-data/test2,Client sent malformed Host header,user test: authentication failure for "/~dcid/test1": Password Mismatch)}%% +EOT + +# create script to generate logs using templates +tee /home/ubuntu/genlogs.sh > /dev/null << EOT +#!/bin/bash +/usr/local/bin/lignator -t /home/ubuntu/templates --token-opening "%%{" --token-closing "}%%" -l 50 -o /home/ubuntu/logs +EOT + +sudo chmod +x /home/ubuntu/genlogs.sh + +# create cron jobs to generate logs and system stress +(crontab -l 2>/dev/null; echo "* * * * * /home/ubuntu/genlogs.sh >> /home/ubuntu/cron_gen.log 2>&1") | crontab - +(crontab -l 2>/dev/null; echo "*/2 * * * * /usr/bin/stress-ng --matrix 0 -t 1m >> /home/ubuntu/cron_stress.log 2>&1") | crontab - + +curl https://raw.githubusercontent.com/yasar-observe/host-configuration-scripts/yasar/init/telegraf/linux/observe_telegraf_install.sh | bash -s -- --observe_host ${OBSERVE_ENDPOINT} --observe_token ${OBSERVE_TOKEN} \ No newline at end of file diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/windows.ps b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/windows.ps new file mode 100644 index 00000000..a73a501e Binary files /dev/null and b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/windows.ps differ diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/windows_noagentinstall.ps b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/windows_noagentinstall.ps new file mode 100644 index 00000000..7a759c56 Binary files /dev/null and b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/windows_noagentinstall.ps differ diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/windows_otel.ps b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/windows_otel.ps new file mode 100644 index 00000000..92113652 Binary files /dev/null and b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/windows_otel.ps differ diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/yum_based_linux_configuration_script_repo.sh b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/yum_based_linux_configuration_script_repo.sh new file mode 100644 index 00000000..7af163d8 --- /dev/null +++ b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/yum_based_linux_configuration_script_repo.sh @@ -0,0 +1,55 @@ +#!/bin/bash +yum update -y + +yum install curl -y + +yum install wget -y + +yum install ca-certificates -y + +${SCRIPT} + +# Get the 
current timestamp +timestamp_date=$(date +"%Y-%m-%d %H:%M:%S") +timestamp_timedatectl=$(timedatectl status) + +# Specify the filename +filename="/tmp/hostmon_install_complete.log" + +# Write the timestamp to the file +echo "$timestamp_date" > "$filename" +echo "$timestamp_timedatectl" >> "$filename" + +# create fluent-bit config that tags and ships the install-complete marker log +sudo tee /etc/fluent-bit/observe-timestamp.conf > /dev/null << EOT +[FILTER] + Name record_modifier + Match * +# if you want to group your servers into an application group +# [e.g. Proxy nodes] so you can have custom alert levels for them +# uncomment this next line + #REPLACE_WITH_OBSERVE_APP_GROUP_OPTION + Record host $${HOSTNAME} + Record datacenter hostmon_test + Record obs_ver 20230412 + Remove_key _MACHINE_ID +[INPUT] + name tail + tag tail_hostmon_install_complete + Path_Key path + path /tmp/hostmon_install_complete.log + Read_from_Head true +[OUTPUT] + name http + match tail_hostmon_install_complete + host ${trimprefix("${OBSERVE_ENDPOINT}", "https://")} + port 443 + URI /v1/http/fluentbit/hostmon_install_complete + Format msgpack + Header X-Observe-Decoder fluent + Header Authorization Bearer ${OBSERVE_TOKEN} + Compress gzip + tls on +EOT + +sudo service fluent-bit restart \ No newline at end of file diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/yum_based_noagentinstall.sh b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/yum_based_noagentinstall.sh new file mode 100644 index 00000000..fcdc20f7 --- /dev/null +++ b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/yum_based_noagentinstall.sh @@ -0,0 +1,8 @@ +#!/bin/bash +yum update -y + +yum install curl -y + +yum install wget -y + +yum install ca-certificates -y \ No newline at end of file diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/yum_based_otel_repo.sh b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/yum_based_otel_repo.sh new file mode 100644 index 00000000..c91d44cd --- /dev/null +++ b/test_code_v2/aws/host_monitoring/ec2/terraform/user_data/yum_based_otel_repo.sh @@ -0,0 +1,10 @@ +#!/bin/bash +yum update -y + +yum install curl -y + +yum install wget -y + +yum install ca-certificates -y + +${SCRIPT} \ No newline at end of file diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/variables.tf b/test_code_v2/aws/host_monitoring/ec2/terraform/variables.tf new file mode 100644 index 00000000..48e00c5b --- /dev/null +++ b/test_code_v2/aws/host_monitoring/ec2/terraform/variables.tf @@ -0,0 +1,305 @@ +variable "name_format" { + description = "name format string" + type = string + default = "blunderdome-%s" +} + +# GitHub Actions uppercases all of their vars +# tflint-ignore: terraform_naming_convention +variable "WORKFLOW_MATRIX_VALUE" { + type = string + description = "Value used for naming resources" + default = "base" +} + +# tflint-ignore: terraform_naming_convention +variable "CI" { + type = bool + default = false + description = "This variable is set to true by GitHub Actions to tell us we are running in ci" +} + +# tflint-ignore: terraform_naming_convention +variable "FULL_PATH" { + description = "Full path used when running in ci" + nullable = true + default = null + type = string +} + +# tflint-ignore: terraform_naming_convention +variable "OBSERVE_TOKEN_OTEL" { + description = "A datastream token" + nullable = true + default = "TOKEN" + type = string +} + +# tflint-ignore: terraform_naming_convention +variable "OBSERVE_TOKEN_HOST_MONITORING" { + description = "A datastream token" + nullable = true + default = "TOKEN" + type = string +} + 
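+# These datastream tokens are interpolated into the user_data templates via +# templatefile() in locals.merged_map further down, roughly: +#   templatefile("${path.module}/user_data/yum_based_otel_repo.sh", { +#     OBSERVE_ENDPOINT = local.OBSERVE_ENDPOINT, OBSERVE_TOKEN = local.TOKENS["otel"], ... })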
+variable "OBSERVE_TOKEN_NOAGENT" { + description = "A datastream token" + nullable = true + default = "TOKEN" + type = string +} + + +# tflint-ignore: terraform_naming_convention +variable "OBSERVE_CUSTOMER" { + description = "Observe customer id" + nullable = true + default = null + type = string +} + +# tflint-ignore: terraform_naming_convention,terraform_unused_declarations +variable "OBSERVE_DOMAIN" { + description = "Observe customer domain" + nullable = true + default = null + type = string +} + + +variable "debian_machines" { + description = "Map of Debian machines to create" + type = map(any) + default = { + + DEBIAN_12_INSTALLED = { + # ami used in testing + ami_instance_type = "t3.small" + ami_id = "ami-0c2644caf041bb6de" + ami_description = "Debian 12 (HVM), EBS General Purpose (SSD) Volume Type. Community developed free GNU/Linux distribution. https://www.debian.org/" + default_user = "admin" + sleep = 120 + host_mon_user_data_path = "user_data/aptbased_linux_configuration_script_repo.sh" + otel_user_data_path = "user_data/aptbased_otel_repo.sh" + no_agent_user_data_path = "user_data/aptbased_otel_repo_noagentinstall.sh" + } + + UBUNTU_22_04_INSTALLED = { + # ami used in testing + ami_instance_type = "t3.small" + ami_id = "ami-008fe2fc65df48dac" + ami_description = "Canonical, Ubuntu, 22.04 LTS, amd64 jammy image build on 2023-05-16" + default_user = "ubuntu" + sleep = 120 + host_mon_user_data_path = "user_data/aptbased_linux_configuration_script_repo.sh" + otel_user_data_path = "user_data/aptbased_otel_repo.sh" + no_agent_user_data_path = "user_data/aptbased_otel_repo_noagentinstall.sh" + } + + UBUNTU_20_04_LTS_INSTALLED = { + # ami used in testing + ami_instance_type = "t3.small" + ami_id = "ami-0892d3c7ee96c0bf7" + ami_description = "Canonical, Ubuntu, 20.04 LTS, amd64 focal image build on 2021-11-29" + default_user = "ubuntu" + sleep = 120 + host_mon_user_data_path = "user_data/aptbased_linux_configuration_script_repo.sh" + otel_user_data_path = "user_data/aptbased_otel_repo.sh" + no_agent_user_data_path = "user_data/aptbased_otel_repo_noagentinstall.sh" + } + + UBUNTU_18_04_LTS_INSTALLED = { + ami_instance_type = "t3.small" + ami_id = "ami-0cfa91bdbc3be780c" + ami_description = "Canonical, Ubuntu, 18.04 LTS, amd64 bionic image build on 2022-04-11" + default_user = "ubuntu" + sleep = 120 + host_mon_user_data_path = "user_data/aptbased_linux_configuration_script_repo.sh" + otel_user_data_path = "user_data/aptbased_otel_repo.sh" + no_agent_user_data_path = "user_data/aptbased_otel_repo_noagentinstall.sh" + } + + } +} + +variable "rhel_machines" { + description = "Map of RHEL machines to create" + type = map(any) + default = { + RHEL_8_4_0_NO_AGENT_INSTALLED = { + ami_instance_type = "t3.small" + ami_id = "ami-0b28dfc7adc325ef4" + ami_description = "Red Hat Enterprise Linux 8 (HVM), SSD Volume Type" + default_user = "ec2-user" + sleep = 120 + host_mon_user_data_path = "user_data/yum_based_linux_configuration_script_repo.sh" + otel_user_data_path = "user_data/yum_based_otel_repo.sh" + no_agent_user_data_path = "user_data/yum_based_noagentinstall.sh" + } + + CENT_OS_7_INSTALLED = { + # https://wiki.centos.org/Cloud/AWS + # Have to run install script on this machine manually + ami_instance_type = "t3.small" + ami_id = "ami-0686851c4e7b1a8e1" + ami_description = "CentOS 7.9.2009 x86_64 ami-0686851c4e7b1a8e1" + default_user = "centos" + sleep = 120 + host_mon_user_data_path = "user_data/yum_based_linux_configuration_script_repo.sh" + otel_user_data_path = 
"user_data/yum_based_otel_repo.sh" + no_agent_user_data_path = "user_data/yum_based_noagentinstall.sh" + + } + } +} + +variable "windows_machines" { + description = "Map of Windows machines to create" + type = map(any) + default = { + WINDOWS_SERVER_2022_BASE_OTEL_AGENT_INSTALLED = { + ami_instance_type = "t3.small" + ami_id = "ami-091f300417a06d788" + ami_description = "Microsoft Windows Server 2022 Full Locale English AMI provided by Amazon" + default_user = "Administrator" + sleep = 120 + + host_mon_user_data_path = "user_data/windows.ps" + otel_user_data_path = "user_data/windows_otel.ps" + no_agent_user_data_path = "user_data/windows_noagentinstall.ps" + } + + WINDOWS_SERVER_2019_BASE_NO_AGENT_INSTALLED = { + ami_instance_type = "t3.small" + ami_id = "ami-01baa2562e8727c9d" + ami_description = "Microsoft Windows Server 2022 Full Locale English AMI provided by Amazon" + default_user = "Administrator" + sleep = 120 + + host_mon_user_data_path = "user_data/windows.ps" + otel_user_data_path = "user_data/windows_otel.ps" + no_agent_user_data_path = "user_data/windows_noagentinstall.ps" + } + } +} + +variable "INTEGRATION" { + default = { + host_mon = "host_mon_user_data_path", + otel = "otel_user_data_path", + no_agent = "no_agent_user_data_path" + } + +} + +variable "MACHINES_TO_CREATE" { + default = ["rhel", "debian", "windows"] + type = list(string) +} + + +locals { + name_format = var.CI == true ? "gha-lht-${var.WORKFLOW_MATRIX_VALUE}-%s" : var.name_format + + OBSERVE_ENDPOINT = "https://${var.OBSERVE_CUSTOMER}.collect.${var.OBSERVE_DOMAIN}" + + CREATE_INTEGRATION = { + host_mon = var.CREATE_HOST_MON, + otel = var.CREATE_OTEL, + no_agent = var.CREATE_NOAGENT, + } + + TOKENS = { + host_mon = var.OBSERVE_TOKEN_HOST_MONITORING + otel = var.OBSERVE_TOKEN_OTEL + no_agent = var.OBSERVE_TOKEN_NOAGENT + + } + BRANCH = { + host_mon = var.OBSERVE_BRANCH_HOSTMON + otel = var.OBSERVE_BRANCH_OTEL + no_agent = var.OBSERVE_BRANCH_NOAGENT + } + + PRE = "https://raw.githubusercontent.com/observeinc" + SCRIPTS = { + host_mon = < { for key, value in local.machines[machine] : key => value } } + test_machines = merge({}, values(local.list_of_machines)...) 
+ + merged_map = { + for name, integration in var.INTEGRATION : + name => { for key, value in local.test_machines : + key => merge(value, { USERDATA = templatefile("${path.module}/${value[integration]}", { + OBSERVE_ENDPOINT = local.OBSERVE_ENDPOINT + OBSERVE_TOKEN = local.TOKENS[name] + SCRIPT = local.SCRIPTS[name] + BRANCH = local.BRANCH[name] + }) }) + } } + +} + +variable "OBSERVE_BRANCH_HOSTMON" { + description = "GitHub branch name" + nullable = true + default = "main" + type = string +} + +variable "OBSERVE_BRANCH_OTEL" { + description = "GitHub branch name" + nullable = true + default = "main" + type = string +} + +variable "OBSERVE_BRANCH_NOAGENT" { + description = "GitHub branch name" + nullable = true + default = "main" + type = string +} + +variable "enable_app_content" { + description = "Enable creation of Observe app content" + default = false + type = bool +} + +variable "CREATE_HOST_MON" { + description = "Enable creation of the host monitoring integration machines" + default = false + type = bool +} + +variable "CREATE_OTEL" { + description = "Enable creation of the OTel integration machines" + default = false + type = bool +} + +variable "CREATE_NOAGENT" { + description = "Enable creation of the no-agent machines" + default = false + type = bool +} diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/versions.tf b/test_code_v2/aws/host_monitoring/ec2/terraform/versions.tf new file mode 100644 index 00000000..f0021a7c --- /dev/null +++ b/test_code_v2/aws/host_monitoring/ec2/terraform/versions.tf @@ -0,0 +1,21 @@ +# https://www.terraform.io/language/expressions/version-constraints +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 4.11" + } + + random = { + source = "hashicorp/random" + version = ">= 3.4.3" + } + } + required_version = ">= 1.2" +} + +provider "aws" { + region = "us-west-2" + profile = "dce" +} + diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/vpc_module/main.tf b/test_code_v2/aws/host_monitoring/ec2/terraform/vpc_module/main.tf new file mode 100644 index 00000000..4d389436 --- /dev/null +++ b/test_code_v2/aws/host_monitoring/ec2/terraform/vpc_module/main.tf @@ -0,0 +1,55 @@ +# data "aws_canonical_user_id" "current_user" { +# } + +#Create VPC +resource "aws_vpc" "vpc_public" { + cidr_block = "10.0.0.0/16" + enable_dns_hostnames = true + tags = merge( + var.BASE_TAGS, + { Name = format(var.name_format, "vpc") }, + ) +} + +#Create Internet Gateway attached to VPC +resource "aws_internet_gateway" "gateway_public" { + vpc_id = aws_vpc.vpc_public.id # vpc_id will be generated after we create VPC + tags = merge( + var.BASE_TAGS, + { Name = format(var.name_format, "gateway") } + ) +} + +#Create public subnet within our VPC +resource "aws_subnet" "subnet_public" { + vpc_id = aws_vpc.vpc_public.id + cidr_block = "10.0.0.0/24" + + tags = merge( + var.BASE_TAGS, + { Name = format(var.name_format, "subnet") } + ) +} + +#Create route table on our VPC +resource "aws_route_table" "rt_public" { + vpc_id = aws_vpc.vpc_public.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.gateway_public.id + } + + + tags = merge( + var.BASE_TAGS, + { Name = format(var.name_format, "route-table") } + ) +} + + +#Associate public subnet with the above route table +resource "aws_route_table_association" "rt_public_to_subnet" { + subnet_id = aws_subnet.subnet_public.id + route_table_id = aws_route_table.rt_public.id +}
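+# e.g. with the default name_format "blunderdome-%s", format(var.name_format, "vpc") renders "blunderdome-vpc"; in CI the root module's locals swap in "gha-lht-<matrix value>-%s".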
diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/vpc_module/outputs.tf b/test_code_v2/aws/host_monitoring/ec2/terraform/vpc_module/outputs.tf new file mode 100644 index 00000000..84fef38a --- /dev/null +++ b/test_code_v2/aws/host_monitoring/ec2/terraform/vpc_module/outputs.tf @@ -0,0 +1,11 @@ +output "vpc_id" { + value = aws_vpc.vpc_public.id +} + +output "subnet_public_id" { + value = aws_subnet.subnet_public.id +} + +output "aws_security_group_public_id" { + value = aws_security_group.ec2_public.id +} diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/vpc_module/security_group.tf b/test_code_v2/aws/host_monitoring/ec2/terraform/vpc_module/security_group.tf new file mode 100644 index 00000000..f459de90 --- /dev/null +++ b/test_code_v2/aws/host_monitoring/ec2/terraform/vpc_module/security_group.tf @@ -0,0 +1,31 @@ +resource "aws_security_group" "ec2_public" { + name = format(var.name_format, "ec2_sg") + vpc_id = aws_vpc.vpc_public.id + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + description = "Allow incoming SSH connections" + } + + ingress { + from_port = 3389 + to_port = 3389 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + description = "Allow incoming RDP connections" + } + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + tags = merge( + var.BASE_TAGS, + { + Name = format(var.name_format, "_ec2") + }, + ) +} diff --git a/test_code_v2/aws/host_monitoring/ec2/terraform/vpc_module/variables.tf b/test_code_v2/aws/host_monitoring/ec2/terraform/vpc_module/variables.tf new file mode 100644 index 00000000..413a1832 --- /dev/null +++ b/test_code_v2/aws/host_monitoring/ec2/terraform/vpc_module/variables.tf @@ -0,0 +1,182 @@ +# your local key path (assumes it exists) - this will allow you to access ec2 instances +# tflint-ignore: terraform_naming_convention +# variable "PUBLIC_KEY_PATH" { +# description = "Public key path" +# nullable = true +# default = null +# type = string +# } + +# # tflint-ignore: terraform_naming_convention +# variable "PRIVATE_KEY_PATH" { +# description = "Private key path" +# nullable = true +# default = null +# type = string +# } + +# where to deploy +# tflint-ignore: terraform_naming_convention +# variable "REGION" { +# default = "us-west-2" +# description = "Where resources will be deployed" +# type = string +# } + +# appended to resource names so you can find your stuff +variable "name_format" { + description = "Common prefix for resource names" + type = string +} + +# tflint-ignore: terraform_naming_convention +variable "BASE_TAGS" { + description = "base resource tags" + type = map(string) + default = { + owner = "Observe" + createdBy = "terraform" + team = "content" + purpose = "test auto configuration script" + git_repo_url = "https://github.com/observeinc/linux-host-configuration-scripts" + } +} + +# tflint-ignore: terraform_naming_convention +# variable "USE_BRANCH_NAME" { +# default = "main" +# description = "git repository branch to use" +# type = string +# } + +# tflint-ignore: terraform_naming_convention +# variable "CI" { +# type = bool +# default = false +# description = "This variable is set to true by github actions to tell us we are running in ci" +# } + +# # tflint-ignore: terraform_naming_convention +# variable "PUBLIC_KEY" { +# description = "This value comes from a variable in github actions" +# nullable = true +# default = null +# type = string +# } + +# tflint-ignore: terraform_naming_convention +# variable "AWS_MACHINE_CONFIGS" { +# type = map(any) +# description = "variables for supported OS" +# default = { + +# # UBUNTU_22_04_LTS = { +# # # ami used in testing +# # 
ami_instance_type = "t3.small" +# # ami_id = "