From c8471f0604038bd9040a2f8676d6ddbd11eee2c2 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Mon, 13 Jan 2025 18:24:11 +0000 Subject: [PATCH] Deploy static HTML files --- _site/Agile/README.html | 216 ++++++ _site/Cloud/0-linux/README.html | 245 +++++++ _site/Cloud/0-linux/lab0/README.html | 387 ++++++++++ _site/Cloud/0-linux/lab1/README.html | 400 ++++++++++ _site/Cloud/0-linux/lab2/README.html | 378 ++++++++++ _site/Cloud/0-linux/lab3/README.html | 334 +++++++++ _site/Cloud/0-linux/lab4/README.html | 612 ++++++++++++++++ _site/Cloud/0-linux/lab5/README.html | 378 ++++++++++ _site/Cloud/0-linux/lab6/README.html | 541 ++++++++++++++ _site/Cloud/0-linux/lab7/README.html | 303 ++++++++ _site/Cloud/0-linux/lab8/README.html | 513 +++++++++++++ _site/Cloud/0-linux/lab9/README.html | 357 +++++++++ _site/Cloud/1-terraform/README.html | 368 ++++++++++ _site/Cloud/1-terraform/lab0/README.html | 257 +++++++ _site/Cloud/1-terraform/lab1/README.html | 251 +++++++ _site/Cloud/1-terraform/lab2/README.html | 247 +++++++ _site/Cloud/1-terraform/lab3/README.html | 241 +++++++ _site/Cloud/1-terraform/lab4/README.html | 211 ++++++ _site/Cloud/1-terraform/lab5/README.html | 220 ++++++ _site/Cloud/1-terraform/lab6/README.html | 222 ++++++ _site/Cloud/1-terraform/lab7/README.html | 223 ++++++ _site/Cloud/1-terraform/lab8/README.html | 223 ++++++ _site/Cloud/1-terraform/lab9/README.html | 221 ++++++ _site/Cloud/1-terraform/lab_10/README.html | 223 ++++++ _site/Cloud/1-terraform/lab_11/README.html | 386 ++++++++++ _site/Cloud/1-terraform/lab_12/README.html | 287 ++++++++ _site/Cloud/1-terraform/lab_13/README.html | 289 ++++++++ _site/Cloud/1-terraform/lab_14/README.html | 285 ++++++++ _site/Cloud/1-terraform/lab_15/README.html | 318 ++++++++ _site/Cloud/1-terraform/lab_16/README.html | 282 ++++++++ _site/Cloud/1-terraform/lab_17/README.html | 332 +++++++++ _site/Cloud/1-terraform/lab_18/README.html | 337 +++++++++ _site/Cloud/1-terraform/lab_19/README.html | 264 +++++++ 
_site/Cloud/1-terraform/lab_20/README.html | 307 ++++++++ .../0_golang/README.html | 202 ++++++ .../0_golang/src/README.html | 201 ++++++ .../3_countryinfo_travelers/README.html | 682 ++++++++++++++++++ .../1_api_automations/README.html | 320 ++++++++ .../Cloud/2-automation_principles/README.html | 259 +++++++ .../Cloud/3-kubernetes_principles/README.html | 345 +++++++++ _site/Cloud/README.html | 578 +++++++++++++++ _site/DevOps/README.html | 545 ++++++++++++++ _site/GitHub/README.html | 255 +++++++ _site/GitHub/demos/0_GithubAImodels.html | 609 ++++++++++++++++ _site/GitHub/demos/1_GitHubPagesOverview.html | 425 +++++++++++ _site/Network/README.html | 264 +++++++ _site/README.html | 329 +++++---- 47 files changed, 15516 insertions(+), 156 deletions(-) create mode 100644 _site/Agile/README.html create mode 100644 _site/Cloud/0-linux/README.html create mode 100644 _site/Cloud/0-linux/lab0/README.html create mode 100644 _site/Cloud/0-linux/lab1/README.html create mode 100644 _site/Cloud/0-linux/lab2/README.html create mode 100644 _site/Cloud/0-linux/lab3/README.html create mode 100644 _site/Cloud/0-linux/lab4/README.html create mode 100644 _site/Cloud/0-linux/lab5/README.html create mode 100644 _site/Cloud/0-linux/lab6/README.html create mode 100644 _site/Cloud/0-linux/lab7/README.html create mode 100644 _site/Cloud/0-linux/lab8/README.html create mode 100644 _site/Cloud/0-linux/lab9/README.html create mode 100644 _site/Cloud/1-terraform/README.html create mode 100644 _site/Cloud/1-terraform/lab0/README.html create mode 100644 _site/Cloud/1-terraform/lab1/README.html create mode 100644 _site/Cloud/1-terraform/lab2/README.html create mode 100644 _site/Cloud/1-terraform/lab3/README.html create mode 100644 _site/Cloud/1-terraform/lab4/README.html create mode 100644 _site/Cloud/1-terraform/lab5/README.html create mode 100644 _site/Cloud/1-terraform/lab6/README.html create mode 100644 _site/Cloud/1-terraform/lab7/README.html create mode 100644 
_site/Cloud/1-terraform/lab8/README.html create mode 100644 _site/Cloud/1-terraform/lab9/README.html create mode 100644 _site/Cloud/1-terraform/lab_10/README.html create mode 100644 _site/Cloud/1-terraform/lab_11/README.html create mode 100644 _site/Cloud/1-terraform/lab_12/README.html create mode 100644 _site/Cloud/1-terraform/lab_13/README.html create mode 100644 _site/Cloud/1-terraform/lab_14/README.html create mode 100644 _site/Cloud/1-terraform/lab_15/README.html create mode 100644 _site/Cloud/1-terraform/lab_16/README.html create mode 100644 _site/Cloud/1-terraform/lab_17/README.html create mode 100644 _site/Cloud/1-terraform/lab_18/README.html create mode 100644 _site/Cloud/1-terraform/lab_19/README.html create mode 100644 _site/Cloud/1-terraform/lab_20/README.html create mode 100644 _site/Cloud/2-automation_principles/0_golang/README.html create mode 100644 _site/Cloud/2-automation_principles/0_golang/src/README.html create mode 100644 _site/Cloud/2-automation_principles/1_api_automations/3_countryinfo_travelers/README.html create mode 100644 _site/Cloud/2-automation_principles/1_api_automations/README.html create mode 100644 _site/Cloud/2-automation_principles/README.html create mode 100644 _site/Cloud/3-kubernetes_principles/README.html create mode 100644 _site/Cloud/README.html create mode 100644 _site/DevOps/README.html create mode 100644 _site/GitHub/README.html create mode 100644 _site/GitHub/demos/0_GithubAImodels.html create mode 100644 _site/GitHub/demos/1_GitHubPagesOverview.html create mode 100644 _site/Network/README.html diff --git a/_site/Agile/README.html b/_site/Agile/README.html new file mode 100644 index 0000000..21ab9e9 --- /dev/null +++ b/_site/Agile/README.html @@ -0,0 +1,216 @@ + + + + + + + README + + + + +

Agile Methodology Overview

+

Costa Rica

+

brown9804

+

Last updated: 2024-12-13

+
+

Wiki

+
+ +Table of Wiki (Click to expand) + + +
+

image

+
+

+Total Visitors +

+

Visitor Count

+
+ + diff --git a/_site/Cloud/0-linux/README.html b/_site/Cloud/0-linux/README.html new file mode 100644 index 0000000..b5dbafe --- /dev/null +++ b/_site/Cloud/0-linux/README.html @@ -0,0 +1,245 @@ + + + + + + + README + + + + +

Linux Overview

+ +++ + + + + + + + + + + + + + + + + + +
Costa Rica
Belinda Brown, +belindabrownr04@gmail.com
brown9804
March, 2022
+

Content

+ + + + + diff --git a/_site/Cloud/0-linux/lab0/README.html b/_site/Cloud/0-linux/lab0/README.html new file mode 100644 index 0000000..a78a1dd --- /dev/null +++ b/_site/Cloud/0-linux/lab0/README.html @@ -0,0 +1,387 @@ + + + + + + + README + + + + +

Working with Users and +Permissions

+ +++ + + + + + + + + + + + + + + + + + +
Costa Rica
Belinda Brown, +belindabrownr04@gmail.com
brown9804
March, 2022
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Become root:

+

sudo -i
Enter the cloud_user password at the +prompt.

+

Add the Users to the +Server:

+
useradd tstark 
+useradd cdanvers
+useradd dprince
+

Create the new group:

+

groupadd superhero
Set wheel Group as the the +tstark Account’s Primary Group. The usermod command will change which +group a user is in. Change tstark: usermod -g wheel tstark +
Make sure it worked:
id tstark
The +command’s output should show his primary group is now wheel.

+

Supplementary Group +on All Three Users:

+

Run the usermod command for each user:

+
usermod -aG superhero tstark
+usermod -aG superhero dprince
+usermod -aG superhero cdanvers
+

Check with any of the users to make sure it worked:
+id <USERNAME>

+

Lock a specific Account:

+

usermod -L dprince

+

Create New Users:

+

Create a gfreeman user on the system:
+sudo useradd -m gfreeman

+

Create an avance user, and assign it to the wheel supplemental group: +
sudo useradd -G wheel -m avance

+

Set the password for both accounts to LASudo321:

+
sudo passwd gfreeman
+sudo passwd avance
+

Verify the +/etc/sudoers File and Test Access:

+
    +
  1. Verify that the /etc/sudoers file will allow the wheel group access +to run all commands with sudo:
    sudo visudo
  2. +
  3. Note that there should not be a comment (#) on this line of the +file:
    %wheel ALL=(ALL) ALL
  4. +
  5. Switch to the avance account, and use the dash (-) to utilize a +login shell:
    sudo su - avance
  6. +
  7. Attempt to read the /etc/shadow file at the console:
    +cat /etc/shadow
  8. +
  9. Rerun the command with the sudo command:
    +sudo cat /etc/shadow
  10. +
  11. After you have verified avance can read the /etc/shadow file, log +out of that account:
    exit
  12. +
+

Set Up the Web +Administrator:

+
    +
  1. Create a new sudoers file in the /etc/sudoers.d directory that will +contain a standalone entry for webmasters:
    +sudo visudo -f /etc/sudoers.d/web_admin
  2. +
  3. Enter in the following at the top of the file:
    +Cmnd_Alias WEB = /bin/systemctl restart httpd.service, /bin/systemctl reload httpd.service
  4. +
  5. Add another line in the file for gfreeman to be able to use the sudo +command in conjunction with any commands listed in the WEB alias: +gfreeman ALL=WEB
  6. +
  7. Save and close the file with :wq!.
  8. +
  9. Next, log in to the gfreeman account:
    +sudo su - gfreeman
  10. +
  11. Attempt to restart the web service:
    +sudo systemctl restart httpd.service
  12. +
  13. Try to read the new web_admin sudoers file:
    +sudo cat /etc/sudoers.d/web_admin
    Since the cat +command is not listed in the command alias group for WEB, gfreeman +cannot use sudo to read this file.
  14. +
+

Enable +SSH to Connect Without a Password from the dev User on server1 to the +dev User on server2:

+
    +
  1. Generate an SSH key:
    +[dev@server1]$ ssh-keygen
  2. +
  3. Press Enter three times to accept the defaults.
  4. +
  5. Then copy it over to the private IP of the other server:
    +[dev@server1]$ ssh-copy-id <server2_PRIVATE_IP>
  6. +
  7. Now if we try to log into server2 without a password, it should +work. Try it:
    +[dev@server1]$ ssh <server2_PRIVATE_IP>
  8. +
  9. Log out to get back to server1:
    +[dev@server2]$ logout
  10. +
+

Copy +All tar Files from /home/dev/ on server1 to /home/dev/ on +server2:

+
    +
  1. Copy the files:
    +[dev@server1]$ scp *.gz <server2_PRIVATE_IP>:~/
  2. +
  3. Connect to server2 again:
    +[dev@server1]$ ssh <server2_PRIVATE_IP>
  4. +
  5. Make sure they’re there:
    [dev@server2]$ ll
    +It should show the two files.
  6. +
+

Extract +the Files, Making Sure the Output is Redirected:

+
    +
  1. Extract the files:
  2. +
+
[dev@server2]$ tar -xvf deploy_content.tar.gz >> tar-output.log
+[dev@server2]$ tar -xvf deploy_script.tar.gz >> tar-output.log
+

2.Take a look at what’s in the directory now:
ll +
We’ll see the new files and their permissions.

+

Set +the Umask So New Files Are Only Readable and Writeable by the +Owner:

+
    +
  1. We need to make new files with 0600 (-rw-------) +permissions. Since the default is 0666, and we want it to be 0600, run +the following:
    [dev@server2]$ umask 0066
  2. +
+

Verify +the /home/dev/deploy.sh Script Is Executable and Run It:

+
    +
  1. Check permissions on deploy.sh:
    +[dev@server2]$ ls -l deploy.sh
  2. +
  3. Make the script executable:
    +[dev@server2]$ chmod +x deploy.sh
  4. +
  5. Run it:
    [dev@server2]$ ./deploy.sh
  6. +
+

References

+

https://learn.acloud.guru/course/cad92c58-0fd2-4657-98f7-79268b4ff2db/dashboard

+ + diff --git a/_site/Cloud/0-linux/lab1/README.html b/_site/Cloud/0-linux/lab1/README.html new file mode 100644 index 0000000..b5f24d2 --- /dev/null +++ b/_site/Cloud/0-linux/lab1/README.html @@ -0,0 +1,400 @@ + + + + + + + README + + + + +

System +Service Management, Runlevels and Boot Targets

+ +++ + + + + + + + + + + + + + + + + + +
Costa Rica
Belinda Brown, +belindabrownr04@gmail.com
brown9804
March, 2022
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Configuring a Default Boot +Target:

+

The LPIC-1 exam expects the candidate to know how to change a default +target for a Linux computer using systemd. This exercise will assist you +in your practice of determining what the default target is, and changing +it to a new one.

+

Check the Default +Target:

+

The current default target is set to multi-user.target. Use the +appropriate command to verify this:
+systemctl get-default

+

Change the Default +Target:

+

The administrator will need to change the default target so that the +computer boots into a graphical desktop:
+sudo systemctl set-default graphical.target

+

Check the Default Target +again:

+

Now verify that the system is set for a graphical boot:
+systemctl get-default

+

Scheduling a +Systemd Service Job With Timer Units:

+

During the time that our development team has spent working on the +new Web-based API for our organization, there have been several +instances of mistaken keystrokes or processes that have necessitated the +restoration of the site directory from backup.
We have been asked +to run periodic backups of the website directory and, given that the +development environment does not have access to the backup network, we +have decided to write a custom service that will do so.
Previously +we wrote systemd unit files to back up the site and have been provided a +file called web-backup.sh in our /root directory. Using that file and +the associated web-backup.service, we will create a systemd timer unit +file that will control the schedule of our service.
After we have +all three components ready, we’ll stage the files in their appropriate +locations and start the service for our team and turn it back over for +their use.

+

Sign in root:

+

[user@$host ~]$ sudo su - When prompted, enter the +provided lab credentials to finish logging in.

+

Considerations: Part of this job is already done. In a previous lab +(lab0), we wrote the systemd unit file, web-backup.service in the /root +directory, which we will use alongside the web-backup.sh file that the +Dev team gave us. To use it, we need to make sure we are in /root and +that the file is there.

+

Checking path:

+

[root@$host ~]# pwd

+

Checking content:

+

[root@$host ~]# ls Output:

+
web-backup.sh   web-backup.service
+

Create a Timer Unit +File:

+
    +
  1. With our items sourced, we are ready to create the timer unit file. +To do so, use the vi command along with web-backup.timer: +[root@$host ~]# vi web-backup.timer
  2. +
  3. Fill out the information as follows:
  4. +
+
[Unit]
+Description=Fire off the backup
+
+[Timer]
+OnCalendar=*-*-* 23:00:00
+Persistent=true
+Unit=web-backup.service
+
+[Install]
+WantedBy=multi-user.target
+

Note that 23:00:00 can be set to anything. We’re just picking 11:00 +PM here as an example.
When the file is correct, remember to write +and quit properly from vi using the Esc key, : (the colon), then w, then +q.

+

Putting Files Where They +Belong:

+
    +
  1. With everything ready, we now need to make sure our files are in the +correct locations. The shell script needs to be copied into +/usr/local/sbin with the cp command:
    +[root@$host ~]# cp web-backup.sh /usr/local/sbin/
  2. +
  3. Then, using the cp command again, copy both the service and timer +files into /etc/systemd/:
    +[root@$host ~]# cp web-backup.{service,timer} /etc/systemd/system/
  4. +
+

Tell systemd to Run The +Files:

+
    +
  1. With our files in place, we need to reload the systemd daemon so +that it can calculate the service dependencies:
    +[root@$host ~]# systemctl daemon-reload
  2. +
  3. Now enable the services to run at boot:
  4. +
+
[root@$host ~]# systemctl enable web-backup.service
+    
+[root@$host ~]# systemctl enable web-backup.timer
+
    +
  1. Set the permissions for the file to be executable.
    +[root@$host ~]# chmod +x /usr/local/sbin/web-backup.sh
  2. +
  3. Once the symlinks are created, go ahead and start the services +manually:
    +[root@$host ~]# systemctl start web-backup.timer web-backup.service
  4. +
  5. Then check on the statuses of both the timer and the service: +
  6. +
+
[root@$host ~]# systemctl status web-backup.timer
+ 
+[root@$host ~]# systemctl status web-backup.service
+

They both show as running, meaning this server is ready to go back to +the Dev team.

+

Working +with System Service Log Files Using the Journal Control:

+

This is to understand how to use the built-in journalctl utility to +view and troubleshoot system services.

+

Check the Web +Server Configuration File:

+
    +
  1. Change to the root account:
    sudo su -
  2. +
  3. Check the status of the web service:
    +systemctl status httpd.service 3.Attempt to start the web +service:
    systemctl start httpd.service
  4. +
  5. After the service fails to start, check the journal:
    +journalctl -u httpd.service
  6. +
  7. Check the directory where the httpd configuration file should be: +
    ls /etc/httpd/conf
  8. +
  9. Restore the original httpd configuration file:
    +mv /etc/httpd/conf/httpd.conf.bkup /etc/httpd/conf/httpd.conf
  10. +
  11. Restart the service:
    +systemctl restart httpd.service
  12. +
+

Verify That +the Web Server Service Is Running:

+
    +
  1. Check the status of the service:
    +systemctl status httpd.service
  2. +
  3. Navigate to the local web page:
    +elinks http://localhost
  4. +
+

References

+

https://learn.acloud.guru/course/cad92c58-0fd2-4657-98f7-79268b4ff2db/dashboard

+ + diff --git a/_site/Cloud/0-linux/lab2/README.html b/_site/Cloud/0-linux/lab2/README.html new file mode 100644 index 0000000..a5cf7e1 --- /dev/null +++ b/_site/Cloud/0-linux/lab2/README.html @@ -0,0 +1,378 @@ + + + + + + + README + + + + +

Securely Accessing Your +System

+ +++ + + + + + + + + + + + + + + + + + +
Costa Rica
Belinda Brown, +belindabrownr04@gmail.com
brown9804
March, 2022
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Generating +and Exchanging SSH Keys for Secure Access

+

Understand how to create secure keys for remote access, how to +exchange them, and where to store them on each system involved in the +chain.

+

Create the Key on Server +1:

+
    +
  1. In your terminal, log in to Server 1:
    +ssh cloud_user@&lt;SERVER1_PUBLIC_IP&gt;
  2. +
  3. List the contents of the current directory:
    +ls -la
  4. +
  5. Change to the .ssh directory:
    cd .ssh
  6. +
  7. List the contents of the .ssh directory:
    +ls -la
  8. +
  9. Generate a key for Server 1:
    ssh-keygen
  10. +
  11. Press Enter at the next three prompts.
  12. +
  13. List the contents of the .ssh directory again:
    +ls -la
  14. +
  15. List the contents of the id_rsa.pub file:
    +cat id_rsa.pub
  16. +
  17. Copy the output of this command to your clipboard.
  18. +
+

Create the Key on Server +2:

+
    +
  1. Log in to Server 2:
    +ssh cloud_user@&lt;SERVER2_PUBLIC_IP&gt;
  2. +
  3. Change to the .ssh directory.
  4. +
  5. List the contents of the .ssh directory:
    +ls -la
  6. +
  7. Install the nano text editor: +sudo yum install nano
  8. +
  9. Enter your password at the prompt.
  10. +
  11. Open the authorized_keys file in nano: +nano authorized_keys
  12. +
  13. Add the key we just generated to the file.
  14. +
  15. Press Ctrl + X.
  16. +
  17. Press Y then Enter to save the changes.
  18. +
+

Exchange +the SSH Keys between Server 1 and Server 2:

+
    +
  1. In your Server 2 terminal window, create a new key:
    +ssh-keygen
  2. +
  3. Press Enter for the next three prompts.
  4. +
  5. List the contents of the current directory:
    +ls -la
  6. +
  7. List the contents of the id_rsa.pub file: +cat id_rsa.pub
  8. +
  9. Copy the output of this command to your clipboard.
  10. +
  11. Type exit to log out of Server 2.
  12. +
  13. Install nano:
    sudo yum install nano
  14. +
  15. Type y to continue.
  16. +
  17. List the contents of the current directory: ls -la
  18. +
  19. Open the authorized_keys file in nano: +nano authorized_keys
  20. +
  21. Add the key we just generated to the file.
  22. +
  23. Press Ctrl + X.
  24. +
  25. Press Y then Enter to save the changes.
  26. +
+

Test the Configuration:

+
    +
  1. Attempt to log in to Server 2 from Server 1 without a password: +ssh cloud_user@&lt;SERVER2PUBLIC_IP&gt;
  2. +
  3. Attempt to log in to Server 1 from Server 2 without a password: +ssh cloud_user@&lt;SERVER1PUBLIC_IP&gt;
  4. +
+

Create and Use +an SSH Tunnel for Network Traffic:

+

Generating ssh key:

+

ssh-keygen
Enter the following parameters

+

Copying the key:

+

ssh-copy-id <user_name>@<IPadress>

+

Login the key:

+

ssh <user_name>@<IPadress>

+

Creating the tunnel:

+

ssh -f <user_name>@<IPadress> -L port:<IPadress> -N

+

Testing:

+

curl localhost:2000
Output:
+Webpage worked

+

Configure sshd to use +Sockets:

+
    +
  1. Verify that the sshd.socket unit is not active: +systemctl status sshd.socket
  2. +
  3. Set up an at job that stops the sshd.service unit and starts +sshd.socket: sudo at now + 3 minutes
  4. +
  5. Enter your password at the prompt.
  6. +
  7. Add the following:
  8. +
+
at> systemctl stop sshd.service
+at> systemctl start sshd.socket
+
    +
  1. Press Ctrl + D to end the at job configuration.
  2. +
  3. Verify that the sshd.socket unit is active and running.
    +systemctl status sshd.socket
  4. +
  5. Enable the socket for SSH and disable the service for SSH: +
  6. +
+
sudo systemctl enable sshd.socket
+sudo systemctl disable sshd.service
+

Set Up TCP Wrappers to +Only Allow SSH

+
    +
  1. Verify that the sshd server has been compiled to use TCP wrappers: +
    ldd /usr/sbin/sshd | grep libwrap
  2. +
  3. Edit the /etc/hosts.allow file:
    +sudo vim /etc/hosts.allow
  4. +
  5. Add the following line to the file:
    +sshd2 sshd : ALL
  6. +
  7. Edit the /etc/hosts.deny file:
    +sudo vim /etc/hosts.deny
  8. +
  9. Add the following line to the file:
    +ALL : ALL
  10. +
  11. Exit the SSH session:
    exit
  12. +
  13. Reconnect to the secure shell session:
    +ssh cloud_user@&lt;PUBLIC_IP&gt;
    Enter your +password at the prompt.
  14. +
+

References

+

https://learn.acloud.guru/course/cad92c58-0fd2-4657-98f7-79268b4ff2db/dashboard

+ + diff --git a/_site/Cloud/0-linux/lab3/README.html b/_site/Cloud/0-linux/lab3/README.html new file mode 100644 index 0000000..f3ab370 --- /dev/null +++ b/_site/Cloud/0-linux/lab3/README.html @@ -0,0 +1,334 @@ + + + + + + + README + + + + +

Package Management and +Troubleshooting

+ +++ + + + + + + + + + + + + + + + + + +
Costa Rica
Belinda Brown, +belindabrownr04@gmail.com
brown9804
March, 2022
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Installing +and Managing Packages on Debian/Ubuntu Systems

+

Understanding how to use package manager and installation utility apt +to manage packages on Ubuntu/Debian Linux distributions.

+

Install the Apache +Web Server Package:

+
+

Note: Ubuntu/Debian systems will usually start a service +automatically, once its package is installed. You may need to update the +package manager.

+
+

Update package:

+

sudo apt update

+

Install the packages:

+

sudo apt install apache2 wget

+

Verify +the Server is Running and Capture the Result:

+
    +
  1. Checking Apache server:
    +curl http://localhost
  2. +
  3. If that’s working, we use the wget package to capture the output of +an http request. We’ll point the output to a file in our home directory +called local_index.response
    +wget --output-document=local_index.response http://localhost
  4. +
+

Installing +and Managing Packages on Red Hat/CentOS Systems

+
    +
  1. Attempt to install the RPM to determine what dependencies are +required:
  2. +
+
cd Downloads
+sudo rpm -i elinks-0.12-0.37.pre6.el7.0.1.x86_64.rpm
+
+

We’ll get some dependency errors (version numbers may vary). 2. Use +the package manager to determine which packages provide the +dependencies:
sudo yum provides libmozjs185*
+The output shows that the js package provides libmozjs185. 3. Install +the packages that provide those dependencies:
+sudo yum install js 4. All of our dependencies were not +resolved with that one package installation. Attempt to install the RPM +again. If any other dependencies are needed, repeat steps 3 and 4 +(substituting libmozjs185 with whatever dependency is still missing) to +resolve that issue:
+sudo rpm -i elinks-0.12-0.37.pre6.el7.0.1.x86_64.rpm 5. +Once the RPM is installed successfully, run elinks to ensure the +application is working properly:
elinks 6. Attempt to +open a website by providing a URL:
+http://www.amazon.com

+
+

Troubleshooting RPM Issues:

+

Understand how to install telnet and install Apache.

+
    +
  1. Become the root user:
    sudo -i
  2. +
+

Telnet Installation:

+
    +
  1. Install the telnet package:
    +yum install -y telnet
  2. +
  3. Verify the integrity of the RPM database:
  4. +
+
cd /var/lib/rpm/
+/usr/lib/rpm/rpmdb_verify Packages
+
    +
  1. Move Packages to Packages.bad and create a new RPM database from +Packages.bad:
  2. +
+
mv Packages Packages.bad
+/usr/lib/rpm/rpmdb_dump Packages.bad | /usr/lib/rpm/rpmdb_load Packages
+
    +
  1. Verify the integrity of the new RPM database:
    +/usr/lib/rpm/rpmdb_verify Packages
  2. +
  3. Query installed packages for errors:
    +rpm -qa > /dev/null
  4. +
  5. Rebuild the RPM database:
    +rpm -vv --rebuilddb
  6. +
  7. Install telnet:
    yum install -y telnet
  8. +
+

Update Apache:

+
    +
  1. Attempt to install Apache:
    +yum install -y httpd
  2. +
  3. Edit /etc/yum.conf:
    vim /etc/yum.conf
  4. +
  5. Remove the exclusion for httpd:
    +exclude=httpd
  6. +
  7. Save and close the file:
    :wq
  8. +
  9. Install Apache:
    yum install -y httpd
  10. +
+

References

+

https://learn.acloud.guru/course/cad92c58-0fd2-4657-98f7-79268b4ff2db/dashboard

+ + diff --git a/_site/Cloud/0-linux/lab4/README.html b/_site/Cloud/0-linux/lab4/README.html new file mode 100644 index 0000000..6b16bb0 --- /dev/null +++ b/_site/Cloud/0-linux/lab4/README.html @@ -0,0 +1,612 @@ + + + + + + + README + + + + +

File Management, +Permissions and Backup

+ +++ + + + + + + + + + + + + + + + + + +
Costa Rica
Belinda Brown, +belindabrownr04@gmail.com
brown9804
March, 2022
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Creating a Directory +Structure in Linux:

+

Understanding how to create a certain directory and subdirectory +structure, plus a couple of empty text files.

+

Create the Parent +Directories:

+
+

By Hand:

+
+
[cloud_user@host]$ mkdir -p Projects/ancient
+[cloud_user@host]$ mkdir Projects/classical
+[cloud_user@host]$ mkdir Projects/medieval
+
+

With Bash Expansion:

+
+

[cloud_user@host]$ mkdir -p Projects/{ancient,classical,medieval}

+

Create the +Subdirectories:

+
+

By Hand:

+
+
[cloud_user@host]$ mkdir Projects/ancient/egyptian
+[cloud_user@host]$ mkdir Projects/ancient/nubian
+[cloud_user@host]$ mkdir Projects/classical/greek
+[cloud_user@host]$ mkdir Projects/medieval/britain
+[cloud_user@host]$ mkdir Projects/medieval/japan
+
+

With Bash Expansion:

+
+
[cloud_user@host]$ mkdir Projects/ancient/{egyptian,nubian}
+[cloud_user@host]$ mkdir Projects/classical/greek
+[cloud_user@host]$ mkdir Projects/medieval/{britain,japan}
+

Create Some Empty Files:

+
[cloud_user@host]$ touch Projects/ancient/nubian/further_research.txt
+[cloud_user@host]$ touch Projects/classical/greek/further_research.txt
+

Rename a Subdirectory:

+

[cloud_user@host]$ mv Projects/classical Projects/greco-roman

+

Working with Compressed +Files in Linux:

+

Get the Original File Size: +

+

[cloud_user@host]$ ls -lh junk.txt

+

Creating zip Files:

+
+

Gzip 1. Compressing data:
+[cloud_user@host]$ gzip junk.txt 2. Run ls to view the size +of the file:
[cloud_user@host]$ ls -lh 3. Unzip: +
[cloud_user@host]$ gunzip junk.txt.gz

+
+
+

bzip 1. Compression method instead:
+[cloud_user@host]$ bzip2 junk.txt 2. Run ls to view the +size of the file:
+[cloud_user@host]$ ls -lh junk.txt.bz2 3. Unzip:
+[cloud_user@host]$ bunzip2 junk.txt.bz2

+
+
+

XZ 1. Compression method instead:
+[cloud_user@host]$ xz junk.txt 2. Run ls to view the size +of the file:
[cloud_user@host]$ ls -lh 3. Unzip: +
[cloud_user@host]$ unxz junk.txt.xz

+
+

Creating tar Files:

+
    +
  1. Compression method instead:
    +[cloud_user@host]$ tar -cvzf gztar.tar.gz junk.txt
  2. +
  3. Then, let’s make one using bzip2:
    +[cloud_user@host]$ tar -cvjf bztar.tar.bz2 junk.txt
  4. +
  5. Finally, we’ll use XZ to make one:
    +[cloud_user@host]$ tar -cvJf xztar.tar.xz junk.txt
  6. +
  7. Run the ls command again to compare the file sizes:
    +[cloud_user@host]$ ls -lh
  8. +
+

Practice Reading +Compressed Text Files:

+

How to read the contents of compressed files without having to +actually decompress them? There is a way! Let’s do that now. First, +let’s copy over the /etc/passwd file to your home directory: +[cloud_user@host]$ cp /etc/passwd /home/cloud_user/

+
+

Gzip 1. Compression method instead:
+[cloud_user@host]$ tar -cvzf passwd.tar.gz passwd 2. And we +can use the zcat command to read this compressed file:
+[cloud_user@host]$ zcat passwd.tar.gz

+
+
+

bzip2 1. Now let’s compress the file, using bzip2, into a tarball: +
[cloud_user@host]$ tar -cvjf passwd.tar.bz2 passwd 2. +We can use the bzcat command to read the compressed file:
+[cloud_user@host]$ bzcat passwd.tar.bz2

+
+
+

XZ 1. Finally, let’s create an xz tar file:
+[cloud_user@host]$ tar -cvJf passwd.tar.xz passwd 2. And we +can use the xzcat command to read its contents:
+[cloud_user@host]$ xzcat passwd.tar.xz

+
+

Managing File +Attributes and Permissions:

+

Objectives:

+
    +
  1. Reset a directory’s permissions to the following:
  2. +
+ +
    +
  1. Apply all of these permissions to all subdirectories +recursively
  2. +
+

Grant Access to the +Directory:

+
    +
  1. Change to the opt directory:
    cd /opt
  2. +
  3. Next, open all of the directory’s files and permissions with the +following command:
    ls -la
  4. +
  5. Let’s try to access the myapp directory. Run the following command: +
    cd myapp/
  6. +
  7. How to fix “Permission denied”: +sudo chmod 777 myapp
  8. +
  9. Reopen the directory files and permissions using the ls -la command. +Now let’s try to open the directory again: cd myapp
  10. +
+

Change the Directory +Permissions:

+
    +
  1. Give all users read and write permissions for this directory:
    +sudo chmod -f -x -R * > Just everyone read and write +permissions sudo chmod 666 -f -R *
  2. +
  3. List the directory files and permissions again:
    +ls -la
  4. +
  5. How to set the directories as executable:
    +sudo find /opt/myapp -type d -exec chmod o+x {} \;
  6. +
+ +
    +
  1. Creating a symbolic (or soft) link from the /etc/redhat-release file +to a new link file named release in the home directory:
    +ln -s /etc/redhat-release release
  2. +
  3. Verify if the link is valid:
    ls -l
  4. +
  5. Check if you can read the file’s contents:
    +cat release
  6. +
  7. Check the link’s contents:
    +cat /etc/redhat-release > Should be the same
  8. +
+ +
    +
  1. Look at the inode number of /home/cloud_user/release:
    +ls -i release
  2. +
  3. Check the inode number for /etc/redhat-release:
    +ls -i /etc/redhat-release > They should be different, as +the symbolic link is just a new filesystem entry that references the +original file.
  4. +
+ +
    +
  1. Create docs directory:
    mkdir docs
  2. +
  3. Copy /etc/services into the docs directory:
    +cp /etc/services docs/
  4. +
  5. Create a hard link from the /home/cloud_user/docs/services file to a +new link location named /home/cloud_user/services:
    +ln docs/services services
  6. +
  7. Check the link’s inode number as well as the inode number for the +original /etc/services:
    ls -l
  8. +
  9. View the contents of the inodes:
    > This should verify for +us that this is a hard link, not a soft link. It won’t have an arrow +pointing to the actual file it’s linked to, like a soft link does. Just +to verify, check these two with cat and make sure they’re the same:
  10. +
+
ls -i services
+ls -i docs/services
+
+

You should see they have the same inode number, meaning they’re essentially the same file.

+
+ +
    +
  1. View the individual block devices:
    lsblk > Because each partition has its own set of inodes, hard links across partitions don’t work. Soft links should.
  2. +
  3. Trying to make a hard link from /home/cloud_user/docs/services to +/opt/services:
    +ln /home/cloud_user/docs/services /opt/services > Should +get a failed to create hard link error
  4. +
+ +
    +
  1. Trying to make the same sort of cross-partition link, using the -s +flag to make it a soft link:
    +sudo ln -s /etc/redhat-release /opt/release > There +shouldn’t be any output, meaning it was successful.
  2. +
  3. View the contents of the inodes again:
  4. +
+
ls -i /etc/redhat-release
+ls -i /opt/release
+
+

You should see they have different inodes, but the linking will +work.

+
+

Encrypt a File Using GPG:

+

Understand how to create a new public GPG key, encrypt a file and sign it, and send that file to another user to decrypt with the “A Cloud Guru” public key.

+

Create a GPG Key for cloud +user:

+
    +
  1. Generate a new GPG key:
    +[cloud_user@host]$ gpg --gen-key
  2. +
  3. Credentials: > Accept the defaults for each prompt. For the user +ID, enter cloud_user, and use cloud_user@localhost for the email +address. We can leave the comment field blank by just pressing Enter, +and press o at the end for OK. > We’ll use password321 when we’re +prompted for a passphrase, and when we’re prompted to confirm it.
  4. +
  5. Now that the key has been created, we need to export it so that +Gordon Freeman can decrypt files he gets from us. We’ll do that like +this:
    +[cloud_user@host]$ gpg -a -o gfreeman.key --export &lt;KEY_ID>
  6. +
  7. In that command, use the public key reference ID from the output of +the key generation. It will be a random string, and the line it’s +sitting on (in the key generation output) looks like this:
    +gpg: key XXXXXXXX marked as ultimately trusted
  8. +
  9. Now, we’ll use the mail command to send an email to Gordon Freeman +containing the cloud_user public key as an attachment:
  10. +
+
[cloud_user@host]$ mail -s "here is your key" -a gfreeman.key gfreeman@localhost
+Don't lose this!  I'll call you with the passphrase.
+.
+
+

Include that final period (on the line by itself) and then press +Enter to send the message.

+
+

Configure GPG for +Gordon:

+
    +
  1. Just as we did with the cloud_user account, we’ll generate a GPG key +for Mr. Freeman, accepting the defaults for each prompt. The only +difference will be having a user ID of gfreeman and an email address of +gfreeman@localhost:
    +[gfreeman@host]$ gpg --gen-key
  2. +
  3. Once we’ve created the key for Mr. Freeman, we can open up the mutt +email client, and save the public key sent over by the cloud_user +account:
    [gfreeman@host]$ mutt > Arrow up and down +to highlight the cloud_user message, then press Enter. Press v to view +the attachment, and press s to save it to Mr. Freeman’s home directory. +Finally, press q to quit Mutt.
  4. +
  5. Now, to import the public key from cloud_user into Mr. Freeman’s +keyring, run the following command:
    +[gfreeman@host]$ gpg --import gfreeman.key
  6. +
  7. We can run this to view the contents of Mr. Freeman’s keyring:
    +[gfreeman@host]$ gpg --list-keys
  8. +
  9. Let’s log out of gfreeman’s account:
    +[gfreeman@host]$ exit
  10. +
+

Generate a +Signed Document and Send It to Gordon:

+
    +
  1. When we digitally sign a file, we are using our private GPG key to +guarantee that this file came from us. The user that receives the file +will use their copy of the public key from us to verify that we signed +the file. Let’s generate a test document:
    +[cloud_user@host]$ echo "Just need you to verify this file." > note.txt
  2. +
  3. Now we’ll use cloud_user’s private key to sign the file:
    +[cloud_user@host]$ gpg --clearsign note.txt > Remember +that we need to use the passphrase we created earlier (password321). +> Now there should now be a note.txt.asc file in cloud_user’s home +directory. We can run a quick ls to make sure.
  4. +
  5. Now that we’ve made the file, let’s email it to gfreeman@localhost: +
  6. +
+
[cloud_user@host]$ mail -s "check this out" -a note.txt.asc gfreeman@localhost
+Could you verify this file for me?
+.
+

Verify the +Signature of the Emailed Document:

+
+

Use the mutt email client, and just as before, view and save the new +email message’s attachment. 1. Now, verify the note.txt.asc file that +was emailed:
+[gfreeman@host]$ gpg --verify note.txt.asc 2. We’ll get a +warning about the signature not being verified by a third party, and +that’s ok. What is important is the following line from the output: +
+gpg: Good signature from "cloud_user <cloud_user@localhost>" +This is what a verified file displays. 3. Next, encrypt a copy of the +/etc/fstab file like this:

+
+
[gfreeman@host]$ cp /etc/fstab ~
+[gfreeman@host]$ gpg -a -r cloud_user -e ~/fstab
+
+

You will see a general warning displayed about the key possibly not +belonging to the named person. We already know that this key is from +cloud_user, so just press y at the prompt. 4. Verify that there is a +file called fstab.asc in the gfreeman home directory (by running ls). +Create a new email to cloud_user and attach this file:

+
+
[gfreeman@host]$ mail -s "looks good" -a fstab.asc cloud_user@localhost
+Can you decrypt this?
+.
+
    +
  1. Log out:
    [gfreeman@host]$ exit
  2. +
+

Decrypt the Attached +File:

+
+

Now, as cloud_user, open up the mutt email client and save the +fstab.asc attachment from the new email. 1. Decrypt the saved fstab.asc +file with the gpg command, and enter the passphrase for cloud_user’s key +when prompted:
[cloud_user@host]$ gpg fstab.asc 2. +Now let’s verify that we can read the contents of the decrypted file: +
[cloud_user@host]$ cat fstab

+
+

References

+

https://learn.acloud.guru/course/cad92c58-0fd2-4657-98f7-79268b4ff2db/dashboard

+ + diff --git a/_site/Cloud/0-linux/lab5/README.html b/_site/Cloud/0-linux/lab5/README.html new file mode 100644 index 0000000..d2ecb4b --- /dev/null +++ b/_site/Cloud/0-linux/lab5/README.html @@ -0,0 +1,378 @@ + + + + + + + README + + + + +

Working with Text Files and +Streams

+ +++ + + + + + + + + + + + + + + + + + +
Costa Rica
Belinda Brown, +belindabrownr04@gmail.com
brown9804
March, 2022
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Modify a Text File Using sed:

+
+

Someone got confused and wrote about cows instead of ants in a text file. We’ve got to replace all instances of cows with Ants, regardless of whether or not cows contains any capital letters. ### See File Content: 1. Let’s look at the file we’re dealing with:
cat fable.txt

+
+

The Fix:

+
    +
  1. Replace information > We’re going to run a sed command. The -i +means “do this in place,” as in don’t create another file. The capital I +near the end stands for “case-insensitive” and means that whether cows +has any capital letters in it or not, change it to Ants. The g means do +it globally, throughout the whole file.
  2. +
+

sed -i 's/cows/Ants/Ig' fable.txt

+
+

Now if we run our cat command again, we’ll see that all the cows are +gone.

+
+

Working with Basic +Regular Expressions:

+

Understand how to read some text files and redirecting some output +(output that we’ll decide on using regular expressions) to other text +files.

+

Locate HTTP Services:

+
    +
  1. Read information: > We want to read all of the lines in +/etc/services that start with http (but not any that start with httpx) +and send them to ~/http-services.txt
    +grep ^http[^x] /etc/services > ~/http-services.txt
  2. +
  3. To check if we have what we want in the new file, run:
    +cat ~/http-services.txt
  4. +
+

Locate a Specific +Services:

+
    +
  1. Search lines:
    > This one is a little trickier. We want to +find all of the lines in /etc/services that start with ldap. The fifth +character can be any alphanumeric character, but the sixth character can +not be an a. We’ll dump the output into ~/lpic1-ldap.txt
    +grep ^ldap.[^a] /etc/services > ~/lpic1-ldap.txt
  2. +
  3. To check if we have what we want in the new file, run:
    +cat ~/lpic1-ldap.txt
  4. +
+

Refine the HTTP Results:

+
    +
  1. Search lines:
    > We want to read the ~/http-services.txt +file that we created earlier, and just look at lines that don’t end with +the word service. This grep command will do it:
    +grep -v service$ ~/http-services.txt > ~/http-updated.txt
  2. +
  3. To check if we have what we want in the new file, run:
    +cat ~/http-updated.txt
  4. +
+

Creating and Modifying a +File with Vim:

+

Understand how to create a text file with Vim, and edit it.

+

Create a New File:

+

1. We’re going to create a new file called notes.txt in /home/cloud_user:

+
cd
+vim notes.txt
+
+

Now, to add the text Beginning of Notes File, we need to get into +insert mode, by pressing i. We can start typing now once we’re in insert +mode. 2. Leave two blank lines after Beginning of Notes File. Now, to +save the file and quit Vim, we have to first hit Esc (to get out of +insert mode), type :wq! (write and quit).

+
+

Send Data to notes.txt:

+
    +
  1. Appends content:
    > Using the cat command and output +redirection, send the contents of the /etc/redhat-release file to the +end of the notes.txt file, taking care to append the contents so as to +not overwrite the file (using >>, not >) > Run this to +append notes.txt with the contents of /etc/redhat-release:
    +cat /etc/redhat-release >> notes.txt
  2. +
+

Modify notes.txt:

+
+

Let’s open notes.txt again for editing. We’ll place the cursor before +the opening parenthesis around the word Core and use a keyboard shortcut +to delete the text from there to the end of the line. We’ll leave two +more blank lines at the end of the file and then save and quit again. +Here are all of the steps to do that: 1. Open the file:
+vim notes.txt 2. Use the arrow keys to move to the +beginning parentheses before Core
3. Remove text from the cursor’s +position to end of line:
SHIFT D (or d$) 4. Create a +blank line under where the cursor is:
o 5. Hit Enter to create the second blank line. 6. Hit Esc to leave insert mode. 7. Hitting o added a blank line, but also put us in insert mode. 8. Write and quit: :wq!

+
+

Send +More Data to the File, and Modify Its Contents:

+
+

Now we’re going to send free -m output to the end of notes.txt, edit +notes.txt again, delete the last line of the file, and add two more +blank lines to the end of the file. Then, we’re going to jump to the +third line of the file, enter some text, and make another blank line +afterward. Here are all of the steps to do that: 1. Append the +notes.txt:
free -m >> notes.txt 2. Edit +notes.txt:
vim notes.txt 3. Navigate to the Swap line +with arrow keys. 4. Delete the line:
dd 5. Create a +blank line under where the cursor is (and put us in insert mode):
+o 6. Hit Enter to create the second blank line. 7. Hit Esc +to get out of insert mode. 8. Get to the 3rd line of file:
+:3 9. Get back into insert mode:
i 10. +Type This is a practice system. 11. Hit Enter to make another blank +line. 12. Hit Esc to leave insert mode. 13. Write and quit:
+:wq!

+
+

Finalize the Notes File:

+
+

We’re going to dump one last bit of text into the file, then edit it again. We’ll take the output from dbus-uuidgen --get, append it to notes.txt, then edit notes.txt so that the text Dbus ID = is in the beginning of the new appended line. We’ll do it like this: 1. Append the notes.txt:
dbus-uuidgen --get >> notes.txt 2. +Edit notes.txt:
vim notes.txt 3. Get right to the end +of the file:
G (Capital G) 4. Get into insert mode: +
i 5. Type “Dbus ID =” (with a space between the equals sign and the dbus-uuidgen --get command’s output). Only type the text within the quotation marks. 6. Write and quit: :wq!

+
+

References

+

https://learn.acloud.guru/course/cad92c58-0fd2-4657-98f7-79268b4ff2db/dashboard

+ + diff --git a/_site/Cloud/0-linux/lab6/README.html b/_site/Cloud/0-linux/lab6/README.html new file mode 100644 index 0000000..5efc9c0 --- /dev/null +++ b/_site/Cloud/0-linux/lab6/README.html @@ -0,0 +1,541 @@ + + + + + + + README + + + + +

Linux Device Management

+ +++ + + + + + + + + + + + + + + + + + +
Costa Rica
Belinda Brown, +belindabrownr04@gmail.com
brown9804
March, 2022
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Adding a New Hard Disk +to a Linux System:

+

Understand how to create a new filesystem, mounting the filesystem to +a directory, and then configuring the system so the mount persists +across reboots.

+

Create a New Partition:

+
+

Before we go mounting any new partition up, we’ve got to create that +partition. 1. Open up a terminal window and log in using the credentials +provided on the lab page, replacing x.x.x.x with the public IP address +listed:
ssh cloud_user@x.x.x.x Enter the provided password when prompted. 2. Next, let’s run the lsblk command to verify we have a /dev/nvme1n1 device available. Once we’ve confirmed that, we’ll create a partition on the /dev/nvme1n1 disk using fdisk. Note that we’ll need to preface these commands with sudo. The partition we create will span the entire disk:

+
+
[cloud_user@host]$ lsblk
+[cloud_user@host]$ sudo fdisk /dev/nvme1n1
+
    +
  1. After running fdisk, we’ll have to perform a few tasks. At the +Command (m for help): command, type n to make a new partition, then hit +Enter. Our Partition type will be p, primary.
  2. +
  3. Press Enter for Partition number, the First sector, and the Last +sector options. This will make fdisk ready to create the partition. Type +p at the Command (m for help): to print out what the disk will look like +after we commit our changes.
  4. +
  5. If that all looks good, type w and press Enter to write our changes +to disk.
  6. +
+

Create the Filesystem:

+
    +
  1. Next, we’ve got to create a filesystem, so we can read and write +data. We’ll format the partition to the XFS file system with the +mkfs.xfs command. Once that is complete, we’ll run blkid on the newly +created partition to obtain the UUID. We’ll have to make a note of this +UUID, since we’re going to need it later:
  2. +
+
[cloud_user@host]$ sudo mkfs.xfs /dev/nvme1n1p1
+[cloud_user@host]$ sudo blkid /dev/nvme1n1p1
+

Mount the +New Filesystem and Make It Permanent:

+
    +
  1. We can mount this partition up manually with the mount command, but it won’t be a persistent mount; it won’t get mounted after something like a reboot. We’re going to edit /etc/fstab and create a new entry for the new disk at the bottom:
    sudo vi /etc/fstab
  2. +
  3. When you want to add text: hit the esc key and then i to go into +insert mode type as normal.
  4. +
  5. When you want to save: hit the esc key and then :wq!
  6. +
  7. You may find the following vim cheat sheet helpful as well: +https://linuxacademy.com/site-content/uploads/2019/05/vim-1.png
  8. +
  9. The format should follow the following (be sure to use your disk’s +UUID from the previous step):
    +UUID=YOURUUID /opt xfs defaults 0 0
  10. +
  11. We can save the file (:wq!), Then run:
    +[cloud_user@host]$ sudo mount -a
  12. +
  13. This will mount everything that’s listed in fstab, including our new +partition.
  14. +
  15. And running a quick df -h /opt should show us roughly 5GB available +for the /opt directory.
  16. +
+

Working with the CUPS Print +Server:

+

Understand how to work with print server that will send jobs to PDF +files. We will use the lpd (line print daemon) toolset provided by a +CUPS installation.

+

Install a PDF Printer:

+
    +
  1. Open your terminal application.
  2. +
  3. Check to see which printers are installed:
    +lpstat -s
  4. +
  5. Check to see what types of printer connections are available:
    +sudo lpinfo -v
  6. +
  7. Install a PDF printer to use with CUPS:
    +sudo lpadmin -p CUPS-PDF -v cups-pdf:/
  8. +
  9. Determine which driver files we can use with our printer by querying +the CUPS database for files that contain “PDF”:
    +lpinfo --make-and-model "PDF" -m
  10. +
  11. Use CUPS-PDF.ppd as the driver file:
    +sudo lpadmin -p CUPS-PDF -m "CUPS-PDF.ppd"
  12. +
  13. Run the lpstat command again:
    lpstat -s
  14. +
  15. Check the status of the printer we just installed:
    +lpc status
  16. +
  17. Enable the printer to accept jobs, and set it up as the default +printer:
  18. +
+
sudo lpadmin -d CUPS-PDF -E
+sudo cupsenable CUPS-PDF
+sudo cupsaccept CUPS-PDF
+
    +
  1. Run the lpc status command again: lpc status > The +printer should now be ready.
  2. +
+ +
    +
  1. Print a copy of the /etc/passwd file to a PDF file in our home +directory:
    lpr /etc/passwd
  2. +
  3. Verify that there is a copy of the /etc/passwd file in the home +directory:
    ls
  4. +
+

Modify the +Printer and Work with the Print Queue:

+
    +
  1. Configure the printer so that it will not accept any new jobs:
    +sudo cupsreject CUPS-PDF
  2. +
  3. Verify the status of the printer:
    lpc status
  4. +
  5. Attempt to print the /etc/group file to the printer:
    +lpr /etc/group
  6. +
  7. You should receive a message that says the printer is not currently +accepting jobs.
  8. +
  9. Reconfigure the printer to once again accept incoming jobs: +sudo cupsaccept CUPS-PDF
  10. +
  11. Check the status of the printer: lpc status
  12. +
  13. Configure the printer so that it accepts jobs to its queue but will +not print them: sudo cupsdisable CUPS-PDF
  14. +
  15. Check the status of the printer: lpc status
  16. +
  17. Attempt to print the /etc/group file again: +lpr /etc/group
  18. +
  19. List the contents of the /home directory: ls
  20. +
  21. Check the printer’s queue: lpq
  22. +
  23. Remove the job from the printer’s queue (remember to substitute the +job ID from your command’s output): +lprm <JOB_ID>
  24. +
  25. Verify that the job was successfully removed from the printer’s +queue: lpq
  26. +
  27. Re-enable the printer’s ability to print new jobs: +sudo cupsenable CUPS-PDF
  28. +
  29. Verify that the CUPS-PDF printer is once again ready to accept new +jobs: lpq
  30. +
+

Storage Management:

+

Understanding of how to use these tools is a fundamental component of +a Linux sysadmin career.

+
+

Then, become root:
sudo -i

+
+

Create a 2 GB GPT +Partition on /dev/nvme1n1:

+
    +
  1. Create the partition:
    gdisk /dev/nvme1n1
  2. +
  3. Enter n to create a new partition.
  4. +
  5. Accept the default for the partition number.
  6. +
  7. Accept the default for the starting sector.
  8. +
  9. For the ending sector, enter +2G to create a 2 GB partition.
  10. +
  11. Accept the default partition type.
  12. +
  13. Enter w to write the partition information.
  14. +
  15. Enter y to proceed.
  16. +
  17. Finalize the settings:
    partprobe
  18. +
+

Create a 2 GB MBR +Partition on /dev/nvme2n1:

+
    +
  1. Create the partition:
    fdisk /dev/nvme2n1
  2. +
  3. Enter n to create a new partition.
  4. +
  5. Accept the default partition type.
  6. +
  7. Accept the default for the partition number.
  8. +
  9. Accept the default for the starting sector.
  10. +
  11. For the ending sector, type +2G to create a 2 GB partition.
  12. +
  13. Enter w to write the partition information.
  14. +
  15. Finalize the settings:
    partprobe
  16. +
+

Format +the GPT Partition with XFS and Mount the Device on /mnt/gptxfs +Persistently:

+
    +
  1. Format the partition:
    mkfs.xfs /dev/nvme1n1p1 +> Getting It Ready for Mounting
  2. +
  3. Run the following:
    blkid
  4. +
  5. Copy the UUID of the partition at /dev/nvme1n1p1.
  6. +
  7. Open the /etc/fstab file:
    vim /etc/fstab
  8. +
  9. Add the following, replacing with the UUID you just copied: +
    UUID="<UUID>" /mnt/gptxfs xfs defaults 0 0
  10. +
  11. Save and exit the file by pressing Escape followed by :wq.
  12. +
+
+

Create a Mount Point 1. Create the mount point we specified in fstab: +
mkdir /mnt/gptxfs 2. Mount everything that’s +described in fstab:
mount -a 3. Check that it’s +mounted:
mount The partition should be listed in the +output.

+
+

Format +the MBR Partition with ext4 and Mount the Device on +/mnt/mbrext4:

+
    +
  1. Format the partition:
    +mkfs.ext4 /dev/nvme2n1p1
  2. +
  3. Create the mount point:
    mkdir /mnt/mbrext4
  4. +
  5. Mount it:
    mount /dev/nvme2n1p1 /mnt/mbrext4
  6. +
  7. Check that it’s mounted:
    mount > The partition +should be listed in the output.
  8. +
+

Working with LVM +Storage:

+

Understand how to use the LVM management tools.

+
+
+

Scenario

+
+
+
+

We’ve been tasked with creating a large logical volume out of the two +disks attached to this server. The volume group name should be RHCSA. +The Logical Volume name should be pinehead and should be 3 GB in size. +Make sure that the resulting logical volume is formatted as XFS, and +persistently mounted at /mnt/lvol. After that is complete, we should +grow the logical volume and the filesystem by 200 MB.

+
+

Create a Physical +Device:

+
    +
  1. To see the names of our disks, we need to run fdisk -l.
  2. +
  3. Then we run pvcreate /dev/xvdg /dev/xvdf to create the physical +devices.
  4. +
  5. To check how it went, we can do a quick pvs or pvdisplay, and we’ll +see that they’ve been created.
  6. +
+

Create a Volume Group:

+
    +
  1. All we need to do is run vgcreate RHCSA /dev/xvdg /dev/xvdf. > +RHCSA is going to be the name of our volume group, and those physical +devices we created in the last step is where this volume group will +go.
  2. +
+

Create a Logical Volume:

+
    +
  1. Now we can create our logical volume using the lvcreate command:
    [root@host]# lvcreate -n pinehead -L 3G RHCSA > -n +denotes the name of the LV
    > -L denotes the size of the LV +
    > RHCSA is the name of the Volume Group we’re creating this LV +in
  2. +
+

Format +the LV as XFS and Mount It Persistently at /mnt/lvol:

+
    +
  1. Now we can format the disk like any other device. To format it as +XFS, we’ll run:
    +[root@host]# mkfs.xfs /dev/mapper/RHCSA-pinehead
  2. +
  3. We’ve got to create a mount point:
    +mkdir /mnt/lvol
  4. +
  5. Before we can get it mounting persistently (after reboots), we need +the UUID. Run blkid to get it, then copy it. We’ll need it in a +second.
  6. +
  7. Edit /etc/fstab (with whichever text editor you prefer) and create a new line that looks like this: UUID="&lt;THE_UUID_WE_COPIED>" /mnt/lvol xfs defaults 0 0
  8. +
  9. Now, to mount everything listed in fstab (including this new mount +we just created), let’s run mount -a.
  10. +
+

Grow the Mount Point by 200 +MB:

+
    +
  1. To grow an LV, we can run:
    +[root@host]# lvextend -L+200M /dev/RHCSA/pinehead
  2. +
  3. We can let the LVM tools automatically resize the filesystem as well by passing the -r or --resizefs flags.
  4. +
  5. Optionally, we could have run a growfs command to resize the +filesystem:
    [root@host]# xfs_growfs /mnt/lvol
  6. +
+

References

+

https://learn.acloud.guru/course/cad92c58-0fd2-4657-98f7-79268b4ff2db/dashboard

+ + diff --git a/_site/Cloud/0-linux/lab7/README.html b/_site/Cloud/0-linux/lab7/README.html new file mode 100644 index 0000000..a8c6c65 --- /dev/null +++ b/_site/Cloud/0-linux/lab7/README.html @@ -0,0 +1,303 @@ + + + + + + + README + + + + +

The Linux Shell

+ +++ + + + + + + + + + + + + + + + + + +
Costa Rica
Belinda Brown, +belindabrownr04@gmail.com
brown9804
March, 2022
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Modifying the Bash Shell

+

Understand how to create our own alias for a command and then create +a new command that will take a positional argument.

+
+
+

Scenario

+
+
+
+

You have just started a new job managing a web server for a small +company. To make your job a bit easier, you have decided to create some +shorthand commands that will help you in your daily tasks.
- First, you will create an alias to view the status of the HTTP service that is running on the company’s small web server.
- Next, you will create a function that allows you to monitor the disk usage of a couple of web content directories.

+
+

Create the alias:

+
    +
  1. The first step is to create an alias for the Bash shell that will +allow you to view the service status of the web server itself. You will +name this alias webstat. When you type the command webstat at the +prompt, you will see the output of the command systemctl status +httpd.service. User-created aliases and functions should go in your +local ~/.bashrc file. Using the commands listed, append the following +alias to your ~/.bashrc file:
    +echo 'alias webstat="systemctl status httpd.service"' >> /home/cloud_user/.bashrc
  2. +
+

Load and test the alias:

+
    +
  1. Now that we have created an alias that displays the status of the +web server, we need to tell Bash that we want to use it in our current +session. First, we need to source our .bashrc file using the “dot” (.) +command:
    . ~/.bashrc
  2. +
  3. Now that the Bash environment has been refreshed with the new alias +from our ~/.bashrc file, we can use our new alias:
    +webstat > We should be able to see the output of our +service’s status command.
  4. +
+

Create your function:

+
    +
  1. The next step is to create a function that will take the name of a +directory as a parameter and print out how much disk space that +directory is using. Using the vi text editor, open up the ~/.bashrc file +and add the following function to the bottom, beneath the alias that you +created earlier:
    vim ~/.bashrc
  2. +
+
function webspace()
+{
+    du -h /var/www/html/$1;
+}
+
    +
  1. Save and close your file.
  2. +
  3. Then source the .bashrc file again:
    +. .bashrc
  4. +
+

Use the webspace +function:

+
    +
  1. Since the /var/www/html directory is the root location for all of +the individual site locations for this web server, all you need to do is +provide the name of the folder that contains a particular part of the +site to the webspace function. To view the size and contents of the main +public web page, enter this command:
    webspace main +> This will print out the contents of the /var/www/html/main +directory and how much disk space this directory uses. The $1 used in +your function is a positional argument. When you type webspace main at +the prompt, the word main is replaced by the $1 argument, thus providing +the output of the command for the /var/www/html/main directory.
  2. +
  3. Try the same command again, this time for the customer directory on +the web server:
    webspace customer > You should see +more directories in the output, plus a 5 MB client binary file.
  4. +
+

References

+

https://learn.acloud.guru/course/cad92c58-0fd2-4657-98f7-79268b4ff2db/dashboard

+ + diff --git a/_site/Cloud/0-linux/lab8/README.html b/_site/Cloud/0-linux/lab8/README.html new file mode 100644 index 0000000..a30276a --- /dev/null +++ b/_site/Cloud/0-linux/lab8/README.html @@ -0,0 +1,513 @@ + + + + + + + README + + + + +

Networking

+ +++ + + + + + + + + + + + + + + + + + +
Costa Rica
Belinda Brown, +belindabrownr04@gmail.com
brown9804
March, 2022
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Testing DNS Resolution

+

Understand how to utilize the nmcli utility to configure our DNS +resolution.

+

Review Current DNS +Configuration:

+
    +
  1. See if the system can resolve hostnames to IP addresses:
    +host www.google.com > Note that the command times +out.
  2. +
  3. Check to see what DNS server entries we have in the /etc/resolv.conf +file:
    cat /etc/resolv.conf > Note that we do not +have any DNS entries listed.
  4. +
  5. Review our network connections:
    nmcli con show +> Our default connection name should be System ens5.
  6. +
  7. Review our DNS IP settings: +nmcli -f ipv4.dns con show "System ens5" > This system +obviously does not have any DNS servers configured for use.
  8. +
+

Configure +Your System to Use Your Network’s DNS:

+
    +
  1. Modify the system’s default connection to use the network’s DNS +server
    +sudo nmcli con mod "System ens5" ipv4.dns "10.0.0.2"
  2. +
  3. Verify the settings using the nmcli command and then checking the +/etc/resolv.conf file:
  4. +
+
nmcli -f ipv4.dns con show "System ens5"
+cat /etc/resolv.conf
+
    +
  1. We need to reactivate the system’s network connection for the change +to take effect:
    sudo nmcli con up "System ens5"
  2. +
  3. Verify our settings once more:
    +cat /etc/resolv.conf
  4. +
  5. Now, attempt to resolve a hostname to an IP address:
    +host www.google.com > Our system should be able to +resolve an IP address for the domain name.
  6. +
+

Monitoring Network Access:

+

Understand how use the netcat (nc) utility to generate network +traffic between two servers and view that traffic’s appearance in a tool +called iptraf-ng.

+
+
+

The Scenario

+
+
+
+

During the development of a new Web-based API our team is working on, +they have discovered that they are receiving intermittent network +disconnects from clients, even when they are local to the network of the +server itself. We have been provided credentials and access information +for two CentOS 7 systems in their environment. They have asked for us to +install tools that they can use to monitor network traffic between the +two systems. We’ll have to install the tools we need and create traffic +on port 2525 from server2 to server1. We want to get all network traffic +sent to /home/cloud_user/traffic_log.txt.

+
+

Install Client +Utilities:

+
    +
  1. We’ve got to install the two packages that the team will use to +generate and monitor traffic. Let’s use YUM to get it done:
    +[root@server1]# yum install iptraf-ng nc
  2. +
  3. Repeat this on the other server:
    +[root@server2]# yum install iptraf-ng nc
  4. +
+

Create the Traffic Log +File:

+
    +
  1. On the first server, let’s run iptraf-ng.
  2. +
  3. Go under Configure… > In the menu, don’t forget this isn’t a menu +we control with a mouse – it’s all keyboard.
  4. +
  5. Make sure Logging is toggled to On. Set the log file path to: +/home/cloud_user/traffic_log.txt.
  6. +
  7. Then go into IP traffic monitor.
  8. +
  9. In the next menu, select eth0.
  10. +
  11. Once we press Enter the logging will start.
  12. +
+

Listen for Traffic:

+
    +
  1. Let’s open a second terminal into server1 and run sudo su right +off.
  2. +
  3. Once we’re there, we’re going to start netcat listening on post 2525 +with this:
    [root@server1]# nc -l 2525
  4. +
+

Send Some Traffic:

+
    +
  1. Now, let’s start talking. Back in the server2 window we’ve got open, +send netcat traffic to server1 with this (where x.x.x.x is the internal +IP of server1 that we’ll see on the hands-on lab overview page):
    +[root@server2]# nc x.x.x.x 2525
  2. +
  3. We’ll just land at a blinking cursor below the prompt, but we can +type a message there and press Enter. Once we do, it will show up back +in the window we’re listening in on server1. A bunch of messages sent +from server2 would look like this:
    +[root@server2]# nc x.x.x.x 2525
  4. +
+
test
+test
+testing
+This is a test
+
    +
  1. On server1, they would look like this when they arrive:
    +[root@server1]# nc -l 2525
  2. +
+
test
+test
+testing
+This is a test
+
    +
  1. That should be enough traffic for what we’re doing.
  2. +
  3. On server2, press Ctrl + C to kill the nc command we’ve got running +and flip back over to the terminal we were running iptraf-ng in.
  4. +
  5. Press x to stop the monitoring and get out, then choose Exit from +the main menu.
  6. +
+

Examine the Log:

+
    +
  1. On server1, if we run ls /home/cloud_user we should see +traffic_log.txt listed in the output.
  2. +
  3. Read that to see if it was capturing what we need: +[root@server1]# less /home/cloud_user/traffic_log.txt > +We should see some log entries showing traffic going from server2 to +server1 on port 2525.
  4. +
+

Network Filesystems:

+

Understand how to configure network filesystems, how to set up both a +Linux Samba fileshare and an NFS fileshare that can then be used by a +remote client to store files.

+

Set Up the Samba Server:

+
    +
  1. Become root:
    +[cloud_user@samba-server]$ sudo -i
  2. +
  3. Create the /smb path:
    +[root@samba-server]# mkdir /smb
  4. +
  5. Make sure the client can write to the path:
    +[root@samba-server]# chmod 777 /smb
  6. +
  7. Install the Samba packages:
    +[root@samba-server]# yum install samba -y
  8. +
  9. Open /etc/samba/smb.conf:
    +[root@samba-server]# vim /etc/samba/smb.conf
  10. +
  11. Add the following section at the bottom:
  12. +
+
[share]
+        browsable = yes
+        path = /smb
+        writable = yes
+
    +
  1. Save and exit the file by pressing Escape followed by :wq.
  2. +
  3. Check that our changes saved correctly: +[root@samba-server]# testparm
  4. +
+

Samba Share User:

+
    +
  1. Create the user on the server:
    +[root@samba-server]# useradd shareuser
  2. +
  3. Give it a password:
    +[root@samba-server]# smbpasswd -a shareuser > Enter and +confirm a password you’ll easily remember (e.g., 123456), as we’ll need +to reenter it later.
  4. +
+

Start It Up:

+
    +
  1. Start the Samba daemon:
    +[root@samba-server]# systemctl start smb
  2. +
+

Set Up the Samba Client:

+
    +
  1. Open up a new terminal.
  2. +
  3. Log in to the NFS server
  4. +
  5. Become root:
    +[cloud_user@nfs-server]$ sudo -i
  6. +
  7. Install software:
    +[root@nfs-server]# yum install cifs-utils -y
  8. +
+

Make a Mount Point:

+

Create a place for mounting the share: +[root@nfs-server]# mkdir /mnt/smb

+

The Mount:

+
    +
  1. In the Samba server terminal, get its IP address:
    +[root@samba-server]# ip a s
  2. +
  3. Copy the private inet address on eth0, and paste it into a text +file, as we’ll need it next.
  4. +
  5. In the NFS terminal, run the following command, replacing + with the IP you just copied and with the +password you created earlier: +[root@nfs-server]# mount -t cifs //<SERVER_IP>/share /mnt/smb -o username=shareuser,password=<SMBPASSWD_PASS>
  6. +
  7. Make sure you see it listed when you run:
    +[root@nfs-server]# mount
  8. +
  9. Change directory:
    +[root@nfs-server]# cd /mnt/smb
  10. +
  11. Create a file:
    +[root@nfs-server smb]# touch file
  12. +
  13. List the contents:
    [root@nfs-server smb]# ls > +We should see the new file called file.
  14. +
+

Set Up the NFS Share:

+
    +
  1. Install software:
    +[root@nfs-server smb]# yum install nfs-utils -y
  2. +
  3. Create the directory that will be shared out:
    +[root@nfs-server smb]# mkdir /nfs
  4. +
  5. Open /etc/exports:
    +[root@nfs-server smb]# vim /etc/exports
  6. +
  7. Add the following line:
    /nfs *(rw)
  8. +
  9. Save and exit the file by pressing Escape followed by :wq. +
  10. +
  11. Edit permissions, to make sure it’s going to be writable, on the +shared directory:
    +[root@nfs-server smb]# chmod 777 /nfs
  12. +
  13. Implement what we’ve configured in /etc/exports:
    +[root@nfs-server smb]# exportfs -a
  14. +
  15. Start the required services:
    +[root@nfs-server smb]# systemctl start {rpcbind,nfs-server,rpc-statd,nfs-idmapd}
  16. +
  17. Verify it:
    +[root@nfs-server smb]# showmount -e localhost
  18. +
  19. Run the following to get the NFS server’s IP:
    +[root@nfs-server smb]# ip a s
  20. +
  21. Copy the inet address on eth0 and paste it into a text file, as +we’ll need it shortly.
  22. +
+

Set Up the NFS Client:

+
    +
  1. In the Samba server terminal, install software:
    +[root@samba-server]# yum install nfs-utils -y
  2. +
  3. Create a mount point:
    +[root@samba-server]# mkdir /mnt/nfs
  4. +
  5. Check to see what’s being shared out on the NFS server, replacing + with the IP you copied earlier:
    +[root@samba-server]# showmount -e <NFS_SERVER_IP>
  6. +
  7. To be able to mount NFS shares, we need start a daemon:
    +[root@samba-server]# systemctl start rpcbind
  8. +
+

Mount the NFS Share:

+
    +
  1. Mount it, replacing with the IP you copied earlier: +
    +[root@samba-server]# mount -t nfs <NFS_SERVER_IP>:/nfs /mnt/nfs
  2. +
  3. Make sure you see it listed after running:
    +[root@samba-server]# mount
  4. +
  5. Change directory:
    +[root@samba-server]# cd /mnt/nfs
  6. +
  7. Create a file:
    +[root@samba-server nfs]# touch file
  8. +
  9. List the contents:
    [root@samba-server nfs]# ls +> We should see the new file, called file.
  10. +
+

References

+

https://learn.acloud.guru/course/cad92c58-0fd2-4657-98f7-79268b4ff2db/dashboard

+ + diff --git a/_site/Cloud/0-linux/lab9/README.html b/_site/Cloud/0-linux/lab9/README.html new file mode 100644 index 0000000..1b66c10 --- /dev/null +++ b/_site/Cloud/0-linux/lab9/README.html @@ -0,0 +1,357 @@ + + + + + + + README + + + + +

Processes Management

+ +++ + + + + + + + + + + + + + + + + + +
Costa Rica
Belinda Brown, +belindabrownr04@gmail.com
brown9804
March, 2022
+

This is a summary based on References

+
+

pid: process ID

+
+

Starting a Process:

+
+

When you start a process (run a command), two ways of do it: 1. +Foreground Processes 2. Background Processes

+
+

Foreground Processes:

+
+

By default, every process that you start runs in the foreground. It +gets its input from the keyboard and sends its output to the screen. +
While a program is running in the foreground and is +time-consuming, no other commands can be run (start any other processes) +because the prompt would not be available until the program finishes +processing and comes out.

+
+

Background Processes:

+
+

A background process runs without being connected to your keyboard. +If the background process requires any keyboard input, it waits.
+The advantage of running a process in the background is that you can run +other commands; you do not have to wait until it completes to start +another!
The simplest way to start a background process is to add +an ampersand (&) at the end of the command.

+
+

Listing Running Processes:

+
+

It is easy to see your own processes by running the ps +(process status) command
One of the most commonly used flags for +ps is the -f ( f for full) option, which provides more information as +shown:

+
+

$ps -f

+
UID PID PPID C STIME TTY TIME CMD
+
    +
  1. UID: User ID that this process belongs to (the person +running it)
  2. +
  3. PID: Process ID
  4. +
  5. PPID: Parent process ID (the ID of the process that +started it)
  6. +
  7. C: CPU utilization of process
  8. +
  9. STIME: Process start time
  10. +
  11. TTY: Terminal type associated with the process
  12. +
  13. TIME: CPU time taken by the process
  14. +
  15. CMD: The command that started this process
  16. +
+
+

There are other options which can be used along with ps command: 1. +-a: Shows information about all users 2. -x: +Shows information about processes without terminals 3. -u: +Shows additional information like -f option 4. -e: Displays +extended information

+
+

Stopping Processes:

+
+

Sending a CTRL + C keystroke (the default interrupt character) will +exit the command.

+
+
+

If a process is running in the background, you should get its Job ID +using the ps command. After that, you can use the kill command to kill +the process, example:

+
+
$ps -f
+UID PID PPID C STIME TTY TIME CMD
+amrood 6738 3662 0 10:23:03 pts/6 0:00 first_one
+amrood 6739 3662 0 10:22:54 pts/6 0:00 second_one
+$kill 6738
+

Or forced:
$kill -9 6738

+

Parent and Child Processes:

+

Each unix process has two ID numbers assigned to it: - Process ID +(pid) - Parent process ID (ppid).

+
+

Each user process in the system has a parent process. Most of the +commands that you run have the shell as their parent.

+
+

Zombie and Orphan Processes:

+
+

Normally, when a child process is killed, the parent process is +updated via a SIGCHLD signal. Then the parent can do some other task or +restart a new child as needed. However, sometimes the parent process is +killed before its child is killed. In this case, the “parent of all +processes,” the init process, becomes the new PPID (parent process ID). +In some cases, these processes are called orphan processes.

+
+
+

When a process is killed, a ps listing may still show the process +with a Z state. This is a zombie or defunct process. The process is dead +and not being used. These processes are different from the orphan +processes. They have completed execution but still find an entry in the +process table.

+
+

Daemon Processes:

+
+

Daemons are system-related background processes that often run with +the permissions of root and services requests from other processes.

+
+
+

A daemon has no controlling terminal. It cannot open /dev/tty. If you +do a “ps -ef” and look at the tty field, all daemons will have a ? for +the tty.

+
+
+

To be precise, a daemon is a process that runs in the background, +usually waiting for something to happen that it is capable of working +with. For example, a printer daemon waiting for print commands.

+
+
+

If you have a program that calls for lengthy processing, then it’s +worth to make it a daemon and run it in the background.

+
+

Job ID Versus Process ID:

+
+

Background and suspended processes are usually manipulated via job +number (job ID). This number is different from the process ID and is +used because it is shorter.

+
+
+

A job can consist of multiple processes running in a series or at the +same time, in parallel. Using the job ID is easier than tracking +individual processes.

+
+

References:

+

[1] From https://www.tutorialspoint.com/unix/unix-processes.htm +

+ + diff --git a/_site/Cloud/1-terraform/README.html b/_site/Cloud/1-terraform/README.html new file mode 100644 index 0000000..fa08456 --- /dev/null +++ b/_site/Cloud/1-terraform/README.html @@ -0,0 +1,368 @@ + + + + + + + README + + + + +

Terraform Overview

+ +++ + + + + + + + + + + + + + + + + + +
Costa Rica
Belinda Brown, +belindabrownr04@gmail.com
brown9804
March, 2022
+

Content:

+ + + +

Wiki

+ +
.
+├── README.md
+├── main.tf
+├── variables.tf
+├── provider.tf
+├── terraform.tfvars
+├── remote-storage.tf
+├── outputs.tf
+ +
graph LR
+A[Clone the repo] -->B(Config the structure required - files/content)
+    B --> C(Provisioning Process)
+    C -->|Dev/Test| D[Provisioning via local]
+    C -->|Prod| E[Commit to repo on new branch]
+    D --> F(Delete resources after test it)
+    E --> G(PR/Merge branch) 
+ +
graph TD;
+
+A[az login] -->B(terraform init)
+B --> C{Terraform provisioning stage}
+C -->|Review| D[terraform plan -var-file terraform.tfvars]
+C -->|Order Now| E[terraform apply -var-file terraform.tfvars]
+C -->|Remove| F[terraform destroy -var-file terraform.tfvars]
+
+ + ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TopicReference
Multi-Cloud Infrastructure Provisioningimage
Terraform Open Sourceimage
Terraform Enterpriseimage
Workspace Structureimage
Using Modules for Self-Service Infrastructureimage
+ + diff --git a/_site/Cloud/1-terraform/lab0/README.html b/_site/Cloud/1-terraform/lab0/README.html new file mode 100644 index 0000000..cd6541e --- /dev/null +++ b/_site/Cloud/1-terraform/lab0/README.html @@ -0,0 +1,257 @@ + + + + + + + README + + + + +

# +Installing Terraform and Working with Terraform Providers

+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

Jan, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Download zip:

+

$ wget -c <location>

+

For Windows:

+
+

See Install +Terraform On Windows 10 – Learn IT And DevOps Daily

+
+

Binary List link:https://releases.hashicorp.com/terraform

+

Recommended: https://releases.hashicorp.com/terraform/0.13.4/

+

Unzip:

+

$ unzip terraform_0.13.4_linux_amd64.zip

+

$ ls

+

Move dir to be accessed:

+

$ sudo mv terraform /usr/sbin

+

$ ls

+

$ terraform version

+

Access providers:

+

$ mkdir providers

+

$ cd providers/

+

$ ls

+

$ vim main.tf

+

$ cat main.tf

+

$ export TF_LOG=TRACE

+

export TF_LOG_PATH=./terraform.log

+

Initializes:

+

$ terraform init

+

$ ls -a

+

$ terraform fmt

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+

$ terraform destroy

+
+

Enter a value: yes

+
+

References

+

[1] From +https://help.acloud.guru/hc/en-us/articles/360001382275-Hands-On-Labs-Getting-Started?_ga=2.173162026.84959279.1650153500-266741931.1648820099 +
[2] From +https://github.com/linuxacademy/content-hashicorp-certified-terraform-associate-foundations +
[3] From https://releases.hashicorp.com/terraform/0.13.4/
+[4] From +https://learn.acloud.guru/course/using-terraform-to-manage-applications-and-infrastructure/overview

+ + diff --git a/_site/Cloud/1-terraform/lab1/README.html b/_site/Cloud/1-terraform/lab1/README.html new file mode 100644 index 0000000..9bbf19d --- /dev/null +++ b/_site/Cloud/1-terraform/lab1/README.html @@ -0,0 +1,251 @@ + + + + + + + README + + + + +

Using +Terraform CLI Commands (workspace and state) to Manipulate a Terraform +Deployment

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

Jan, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Clone files:

+

$ git clone https://github.com/linuxacademy/content-hashicorp-certified-terraform-associate-foundations.git

+

$ ls

+

$ cd content-hashicorp-certified-terraform-associate-foundations/section4-lesson3

+

$ ls

+

Initialize:

+

$ terraform workspace list

+

$ terraform workspace new test

+

$ terraform init

+

$ cat main.tf

+

$ cat network.tf

+

$ terraform workspace list

+

$ terraform apply --auto-approve

+

$ terraform state list

+
+

Inside other workspace

+
+

$ terraform workspace select default

+

$ terraform state list

+

$ cat main.tf

+

$ terraform workspace list

+

$ terraform apply --auto-approve

+

$ terraform state list

+

$ ls

+
+

Deleting resources

+
+

$ terraform workspace select test

+

$ terraform destroy --auto-approve

+

$ terraform workspace select default

+

$ terraform destroy --auto-approve

+

$ terraform workspace delete test

+

$ terraform workspace list

+

References

+

[1] From +https://help.acloud.guru/hc/en-us/articles/360001382275-Hands-On-Labs-Getting-Started?_ga=2.173162026.84959279.1650153500-266741931.1648820099 +
[2] From +https://github.com/linuxacademy/content-hashicorp-certified-terraform-associate-foundations +
[3] From https://releases.hashicorp.com/terraform/0.13.4/
+[4] From +https://learn.acloud.guru/course/using-terraform-to-manage-applications-and-infrastructure/overview

+ + diff --git a/_site/Cloud/1-terraform/lab2/README.html b/_site/Cloud/1-terraform/lab2/README.html new file mode 100644 index 0000000..ab54296 --- /dev/null +++ b/_site/Cloud/1-terraform/lab2/README.html @@ -0,0 +1,247 @@ + + + + + + + README + + + + +

# Building and +Testing a Basic Terraform Module

+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

Jan, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Initialize:

+

$ terraform version

+

$ mkdir terraform_project

+

$ cd terraform_project/

+
+

Module

+
+

$ mkdir -p modules/vpc

+

$ cd ~/terraform_project/modules/vpc/

+

$ vim main.tf

+

$ cat main.tf

+

$ vim variables.tf

+

$ cat variables.tf

+

$ vim output.tf

+

$ cat output.tf

+

$ ls

+
+

Main Code

+
+

$ cd ~/terraform_project/

+

$ ls

+

$ vim main.tf

+

$ cat main.tf

+

$ vim output.tf

+

$ cat output.tf

+

$ ls

+
+

Execution

+
+

$ terraform fmt -recursive

+

$ terraform init

+

$ terraform validate

+

$ terraform plan

+

$ terraform apply --auto-approve

+

$ terraform state list

+

$ terraform destroy --auto-approve

+

References

+

[1] From +https://help.acloud.guru/hc/en-us/articles/360001382275-Hands-On-Labs-Getting-Started?_ga=2.173162026.84959279.1650153500-266741931.1648820099 +
[2] From +https://github.com/linuxacademy/content-hashicorp-certified-terraform-associate-foundations +
[3] From https://releases.hashicorp.com/terraform/0.13.4/
+[4] From +https://learn.acloud.guru/course/using-terraform-to-manage-applications-and-infrastructure/overview

+ + diff --git a/_site/Cloud/1-terraform/lab3/README.html b/_site/Cloud/1-terraform/lab3/README.html new file mode 100644 index 0000000..22155da --- /dev/null +++ b/_site/Cloud/1-terraform/lab3/README.html @@ -0,0 +1,241 @@ + + + + + + + README + + + + +

Exploring Terraform +State Functionality

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

Jan, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Initialize:

+

$ terraform version

+

$ minikube status

+

$ ls

+

$ cd lab_code/

+

$ ls

+

$ cd section2-hol1/

+

$ ls

+

$ cat main.tf

+

$ terraform init

+

$ terraform plan

+

$ ls

+

$ terraform apply --auto-approve

+

$ ls

+

$ kubectl get pods

+

$ terraform state list

+

$ terraform state show kubernetes_deployment.tf-k8s-deployment | egrep replicas

+

$ vim main.tf

+

Change replicas from 2 to 4

+

$ terraform plan

+

$ terraform apply --auto-approve

+

$ kubectl get pods

+

$ terraform state show kubernetes_deployment.tf-k8s-deployment | egrep replicas

+

$ terraform destroy --auto-approve

+

$ kubectl get pods

+

$ ls

+

$ cat terraform.tfstate

+

$ less terraform.tfstate.backup

+

You can re-deploy using backup file

+

References

+

[1] From +https://help.acloud.guru/hc/en-us/articles/360001382275-Hands-On-Labs-Getting-Started?_ga=2.173162026.84959279.1650153500-266741931.1648820099 +
[2] From +https://github.com/linuxacademy/content-hashicorp-certified-terraform-associate-foundations +
[3] From https://releases.hashicorp.com/terraform/0.13.4/
+[4] From +https://learn.acloud.guru/course/using-terraform-to-manage-applications-and-infrastructure/overview

+ + diff --git a/_site/Cloud/1-terraform/lab4/README.html b/_site/Cloud/1-terraform/lab4/README.html new file mode 100644 index 0000000..6d1e589 --- /dev/null +++ b/_site/Cloud/1-terraform/lab4/README.html @@ -0,0 +1,211 @@ + + + + + + + README + + + + +

# Deploy an +Azure Storage Account with Terraform

+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

Jan, 2022

+
+

This is a summary based on References

+

Initialize:

+

$ terraform init

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+

$ terraform destroy

+

References

+

[1] +https://learn.acloud.guru/course/bd8060c6-e408-4801-a4a3-8317c45319bf/dashboard +

+ + diff --git a/_site/Cloud/1-terraform/lab5/README.html b/_site/Cloud/1-terraform/lab5/README.html new file mode 100644 index 0000000..fbea03e --- /dev/null +++ b/_site/Cloud/1-terraform/lab5/README.html @@ -0,0 +1,220 @@ + + + + + + + README + + + + +

Deploy +an Azure File Share and Blob Storage with Terraform

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

Jan, 2022

+
+

This is a summary based on References

+
+

Upload the file using the CLI upload/download button

+
+

Initialize:

+

$ terraform init

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+

$ terraform destroy

+
+

Enter a value: yes

+
+

References

+

[1] +https://learn.acloud.guru/course/bd8060c6-e408-4801-a4a3-8317c45319bf/dashboard +

+ + diff --git a/_site/Cloud/1-terraform/lab6/README.html b/_site/Cloud/1-terraform/lab6/README.html new file mode 100644 index 0000000..7e768ea --- /dev/null +++ b/_site/Cloud/1-terraform/lab6/README.html @@ -0,0 +1,222 @@ + + + + + + + README + + + + +

# Deploy Azure +VNETs and Subnets with Terraform

+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

Jan, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Initializes:

+

export TF_LOG=TRACE

+

export TF_LOG_PATH=./terraform.log

+

$ terraform init

+

$ ls -a

+

$ terraform fmt

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+

$ terraform destroy

+
+

Enter a value: yes

+
+

Reference:

+

[1] From +https://learn.acloud.guru/course/bd8060c6-e408-4801-a4a3-8317c45319bf/dashboard +

+ + diff --git a/_site/Cloud/1-terraform/lab7/README.html b/_site/Cloud/1-terraform/lab7/README.html new file mode 100644 index 0000000..55342d8 --- /dev/null +++ b/_site/Cloud/1-terraform/lab7/README.html @@ -0,0 +1,223 @@ + + + + + + + README + + + + +

Create Azure NSGs with +Terraform

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

Jan, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Initializes:

+

export TF_LOG=TRACE

+

export TF_LOG_PATH=./terraform.log

+

$ terraform init

+

$ ls -a

+

$ terraform fmt

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+

$ terraform destroy

+
+

Enter a value: yes

+
+

Reference:

+

[1] From +https://learn.acloud.guru/course/bd8060c6-e408-4801-a4a3-8317c45319bf/dashboard +

+ + diff --git a/_site/Cloud/1-terraform/lab8/README.html b/_site/Cloud/1-terraform/lab8/README.html new file mode 100644 index 0000000..837c33e --- /dev/null +++ b/_site/Cloud/1-terraform/lab8/README.html @@ -0,0 +1,223 @@ + + + + + + + README + + + + +

Deploying an Azure VM with +Terraform

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

Jan, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Initializes:

+

export TF_LOG=TRACE

+

export TF_LOG_PATH=./terraform.log

+

$ terraform init

+

$ ls -a

+

$ terraform fmt

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+

$ terraform destroy

+
+

Enter a value: yes

+
+

Reference:

+

[1] From +https://learn.acloud.guru/course/bd8060c6-e408-4801-a4a3-8317c45319bf/dashboard +

+ + diff --git a/_site/Cloud/1-terraform/lab9/README.html b/_site/Cloud/1-terraform/lab9/README.html new file mode 100644 index 0000000..e8d6f56 --- /dev/null +++ b/_site/Cloud/1-terraform/lab9/README.html @@ -0,0 +1,221 @@ + + + + + + + README + + + + +

Deploying Web Applications

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

Jan, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Initializes:

+

export TF_LOG=TRACE

+

export TF_LOG_PATH=./terraform.log

+

$ terraform init

+

$ ls -a

+

$ terraform fmt

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+

$ terraform destroy

+
+

Enter a value: yes

+
+

Reference:

+

[1] From +https://learn.acloud.guru/course/bd8060c6-e408-4801-a4a3-8317c45319bf/dashboard +

+ + diff --git a/_site/Cloud/1-terraform/lab_10/README.html b/_site/Cloud/1-terraform/lab_10/README.html new file mode 100644 index 0000000..fbfda69 --- /dev/null +++ b/_site/Cloud/1-terraform/lab_10/README.html @@ -0,0 +1,223 @@ + + + + + + + README + + + + +

Deploy a MySQL Database +with Terraform

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

Jan, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Initializes:

+

export TF_LOG=TRACE

+

export TF_LOG_PATH=./terraform.log

+

$ terraform init

+

$ ls -a

+

$ terraform fmt

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+

$ terraform destroy

+
+

Enter a value: yes

+
+

Reference:

+

[1] From +https://learn.acloud.guru/course/bd8060c6-e408-4801-a4a3-8317c45319bf/dashboard +

+ + diff --git a/_site/Cloud/1-terraform/lab_11/README.html b/_site/Cloud/1-terraform/lab_11/README.html new file mode 100644 index 0000000..69bed5e --- /dev/null +++ b/_site/Cloud/1-terraform/lab_11/README.html @@ -0,0 +1,386 @@ + + + + + + + README + + + + +

Migrating +Terraform State to Terraform Cloud

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

Jan, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Set Up the Environment:

+

Set Up and +Apply Your Terraform Configuration:

+
    +
  1. In the ami = line, delete the “DUMMY_VALUE_AMI_ID” value and paste +in the AMI you copied from the resource_ids.txt file.
  2. +
  3. Save and exit the file by pressing the Escape key and entering +:wq!.
  4. +
  5. Open the resource_ids.txt file again:
    +vim ../resource_ids.txt
  6. +
  7. Copy the subnet _id value.
  8. +
  9. Exit the file by pressing Escape and entering :q!.
  10. +
  11. Open the main.tf file for editing:
    +vim main.tf
  12. +
  13. In the subnet_id = line, delete the “DUMMY_VALUE_SUBNET_ID” and +paste in the subnet ID you copied from the resource_ids.txt file.
  14. +
  15. Save and exit the file by pressing the Escape key and entering +:wq!.
  16. +
+

Initializes:

+

export TF_LOG=TRACE

+

export TF_LOG_PATH=./terraform.log

+

$ terraform init

+

$ ls -a

+

$ terraform fmt

+

$ terraform validate

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+

Generate +Your Access Key in the AWS Management Console:

+
    +
  1. In a browser window, navigate to the AWS Management Console and log +in with the credentials provided.
  2. +
  3. Under AWS services, click IAM.
  4. +
  5. On the IAM dashboard, under IAM resources, click Users.
  6. +
  7. In the list of users, click cloud_user.
  8. +
  9. Click the Security credentials tab.
  10. +
  11. Click the Create access key button.
  12. +
  13. In the Create access key pop-up, click the Download .csv file button +to download the access key in a file. > Note: You could also choose +to copy and paste the Access key ID and Secret access key values +directly from the Create access key pop-up in the next objective.
  14. +
+

Set Up Your Terraform +Cloud Workspace:

+

Create +the Workspace and Configure Your Environment Variables:

+
    +
  1. In a new browser tab, navigate to +https://app.terraform.io/session.
  2. +
  3. Click Create account and follow the prompts to create a new free +account or click Sign in to log in with an existing account.
  4. +
  5. Once logged in, select the Start from scratch setup workflow +option.
  6. +
  7. In the Organization name field, enter “ACG-Terraform-Labs”. > +NOTE: If this name is already taken you can make your own name up for +your organisation.
  8. +
  9. In the Email address field, enter your email address.
  10. +
  11. Click Create organization.
  12. +
  13. Select the CLI-driven workflow option.
  14. +
  15. In the Workspace Name field, enter “lab-migrate-state”.
  16. +
  17. Click Create workspace.
  18. +
  19. In the workspace, click on the Variables tab.
  20. +
  21. Scroll down to the Environment Variables section, and click the + +Add variable button.
  22. +
  23. In the Key field, type “AWS_ACCESS_KEY_ID”.
  24. +
  25. In the Value field, copy and paste the Access key ID value from the +Create access key pop-up in the AWS Management Console or from the CSV +file you downloaded.
  26. +
  27. Select the Sensitive checkbox, and click Save variable.
  28. +
  29. Click the + Add variable button.
  30. +
  31. In the Key field, type “AWS_SECRET_ACCESS_KEY”.
  32. +
  33. In the Value field, copy and paste the Secret access key value from +the Create access key pop-up in the AWS Management Console or from the +CSV file you downloaded.
  34. +
  35. Select the Sensitive checkbox, and click Save variable.
  36. +
+

Create Your +API Token for Terraform CLI Login:

+
    +
  1. At the top-right of the Terraform Cloud window, click your user +avatar and select User settings.
  2. +
  3. In the menu on the left, click Tokens.
  4. +
  5. Click Create an API token.
  6. +
  7. In the Description field, type “terraform_login”.
  8. +
  9. Click Create API token.
  10. +
  11. Copy the API token that is displayed in the Create API token pop-up +and click Done. > Note: Be sure that you have copied the API token, +as it will not be displayed again. You may want to paste it in an +accessible location, just in case.
  12. +
  13. At the top-left of the Terraform Cloud window, click the Choose an +organization drop-down and select ACG-Terraform-Labs.
  14. +
  15. In the list of workspaces, click lab-migrate-state.
  16. +
+

Adding the Backend +Configuration:

+
    +
  1. Back in the terminal, log in to Terraform Cloud from the CLI:
    +terraform login
  2. +
  3. Uncomment the backend config within the main.tf
  4. +
  5. Save it.
  6. +
  7. Check that the configuration file has been formatted properly:
    +terraform fmt
  8. +
  9. Initialize the working directory:
    +terraform init
  10. +
  11. When prompted to copy the existing Terraform state to the new +backend, type “yes” and hit Enter. Terraform will notify you when this +has completed successfully.
  12. +
  13. Verify that the terraform.tfstate.backup file has been added to the +directory:
    `ls
  14. +
  15. Delete the terraform.tfstate file:
    +rm -rf terraform.tfstate
  16. +
+

Apply +the Updated Configuration and Confirm the State Was Saved to Terraform +Cloud:

+
    +
  1. Apply the updated configuration:
    +terraform apply
  2. +
  3. Once the terraform apply has finished, navigate back to Terraform +Cloud in the browser.
  4. +
  5. On the Overview tab for the workspace, verify that the last run +appears as a new event in the Latest Run section and that 1 resource was +applied under Resources.
  6. +
  7. Click on the States tab and verify that the state file appears. You +can click on the file to view it.
  8. +
  9. Click on the Runs tab and view the latest runs that have +completed.
  10. +
  11. To view more information about the run, click on the Overview tab +and click the See details button for the run.
  12. +
+

Reminder:

+
+
+

Later on, remember to do the destroy:

+
+
+

$ terraform destroy

+
+

Enter a value: yes

+
+

Reference:

+

[1] From +https://learn.acloud.guru/course/bd8060c6-e408-4801-a4a3-8317c45319bf/dashboard +

+ + diff --git a/_site/Cloud/1-terraform/lab_12/README.html b/_site/Cloud/1-terraform/lab_12/README.html new file mode 100644 index 0000000..b53a6d6 --- /dev/null +++ b/_site/Cloud/1-terraform/lab_12/README.html @@ -0,0 +1,287 @@ + + + + + + + README + + + + +

Using +Terraform Provisioners to Set Up an Apache Web Server on AWS

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

Jan, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Examine the Code in the +main.tf File:

+
+

View the contents of the main.tf file using the cat command:

+
+

cat main.tf

+
+

Examine the code in the resource block and note the following:
+1. We are creating an AWS EC2 instance (virtual machine) named +webserver. 2. We are passing a number of parameters for the resource, +such as the AMI that the VM will be spun up as, the instance type, the +private key that the instance will be using, the public IP attached to +the instance, the security group applied to the instance, and the subnet +ID where the VM will be spun up.

+
+
+

Note: All of these resources are actually being created via the +setup.tf file, which you can view if desired.

+
+
    +
  1. Examine the code in the provisioner block and note the +following:
  2. +
+ +

Deploy +the Code and Access the Bootstrapped Webserver:

+

export TF_LOG=TRACE

+

export TF_LOG_PATH=./terraform.log

+

$ terraform init

+

$ ls -a

+

$ terraform fmt

+

$ terraform validate

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+
+

As the code is being deployed, you will notice that the Terraform +provisioner tries to connect to the EC2 instance, and once that +connection is established, it will run the bootstrapping that was +configured in the provisioner block against the instance.

+
+
+

When complete, it will output the public IP for the Apache webserver +as the Webserver-Public-IP value.

+
+
+

Copy the IP address, paste it in a new browser window or tab, and +press Enter.

+
+
+

Verify that the web page displays as My Test Website With Help From +Terraform Provisioner, validating that the provisioner within your code +worked as intended. The commands configured in the provisioner code were +issued and executed successfully on the EC2 instance that was created. +

+
+

$ terraform destroy

+
+

Enter a value: yes

+
+

Reference:

+

[1] From +https://learn.acloud.guru/course/bd8060c6-e408-4801-a4a3-8317c45319bf/dashboard +

+ + diff --git a/_site/Cloud/1-terraform/lab_13/README.html b/_site/Cloud/1-terraform/lab_13/README.html new file mode 100644 index 0000000..143ad53 --- /dev/null +++ b/_site/Cloud/1-terraform/lab_13/README.html @@ -0,0 +1,289 @@ + + + + + + + README + + + + +

Make Changes +to AWS Infrastructure Using Terraform

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

April, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Set Up the Environment:

+

Set Up and +Apply Your Terraform Configuration:

+
    +
  1. In the ami = line, delete the “DUMMY_VALUE_AMI_ID” value and paste +in the AMI you copied from the resource_ids.txt file.
  2. +
  3. Save and exit the file by pressing the Escape key and entering +:wq!.
  4. +
  5. Open the resource_ids.txt file again:
    +vim ../resource_ids.txt
  6. +
  7. Copy the subnet _id value.
  8. +
  9. Exit the file by pressing Escape and entering :q!.
  10. +
  11. Open the main.tf file for editing:
    +vim main.tf
  12. +
  13. In the subnet_id = line, delete the “DUMMY_VALUE_SUBNET_ID” and +paste in the subnet ID you copied from the resource_ids.txt file.
  14. +
  15. Save and exit the file by pressing the Escape key and entering +:wq!.
  16. +
+

Initializes:

+

export TF_LOG=TRACE

+

export TF_LOG_PATH=./terraform.log

+

$ terraform init

+

$ ls -a

+

$ terraform fmt

+

$ terraform validate

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+
+

To confirm that our configuration was successful and view all the +details of our infrastructure:

+
+

terraform show

+

Deploy Configuration Changes:

+
    +
  1. Open the main.tf file in the current directory for editing:
    +vim main.tf

  2. +
  3. Under the resource line, in the instance_type line, delete +“t2.micro”.

  4. +
  5. In the empty instance_type = line, enter “t3.micro”.

  6. +
  7. Under the tags line, in the Name = line, delete +“Batman”.

  8. +
  9. In the empty Name = line, enter in “Robin”.

  10. +
  11. Save and exit the file by pressing the Escape key and entering: +:wq

  12. +
  13. Check that our format is correct:
    +terraform fmt

  14. +
  15. Test the changes that we’re going to make:
    +terraform plan

  16. +
  17. Apply our configuration changes:
    +terraform apply > Enter a value: yes

  18. +
  19. Confirm that our changes were made successfully:
    +terraform show > Check that instance_type is now set to +t3.micro and the tags_all has the name set to Robin.

  20. +
+

Reminder:

+
+

Later on, remember to do the destroy:

+
+

$ terraform destroy

+
+

Enter a value: yes

+
+
+

Confirm that Terraform is no longer managing any resources:

+
+

terraform show

+

Reference:

+

[1] From +https://learn.acloud.guru/course/bd8060c6-e408-4801-a4a3-8317c45319bf/dashboard +

+ + diff --git a/_site/Cloud/1-terraform/lab_14/README.html b/_site/Cloud/1-terraform/lab_14/README.html new file mode 100644 index 0000000..f19085e --- /dev/null +++ b/_site/Cloud/1-terraform/lab_14/README.html @@ -0,0 +1,285 @@ + + + + + + + README + + + + +

Use +Output Variables to Query Data in AWS Using Terraform

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

April, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Set Up the Environment:

+

Set Up and +Apply Your Terraform Configuration:

+
    +
  1. In the ami = line, delete the “DUMMY_VALUE_AMI_ID” value and paste +in the AMI you copied from the resource_ids.txt file.
  2. +
  3. Save and exit the file by pressing the Escape key and entering +:wq!.
  4. +
  5. Open the resource_ids.txt file again:
    +vim ../resource_ids.txt
  6. +
  7. Copy the subnet _id value.
  8. +
  9. Exit the file by pressing Escape and entering :q!.
  10. +
  11. Open the main.tf file for editing:
    +vim main.tf
  12. +
  13. In the subnet_id = line, delete the “DUMMY_VALUE_SUBNET_ID” and +paste in the subnet ID you copied from the resource_ids.txt file.
  14. +
  15. Save and exit the file by pressing the Escape key and entering +:wq!.
  16. +
+

Initializes:

+

export TF_LOG=TRACE

+

export TF_LOG_PATH=./terraform.log

+

$ terraform init

+

$ ls -a

+

$ terraform fmt

+

$ terraform validate

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+
+

To confirm that our configuration was successful and view all the +details of our infrastructure:

+
+

terraform show

+
+

Add Output Variables and Deploy Changes:

+
+

$ terraform fmt

+

Confirm the Changes:

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+
+

View a simplified version of the output:

+
+

terraform output

+
+

To confirm that our configuration was successful and view all the +details of our infrastructure:

+
+

terraform show

+

Reminder:

+
+

Later on, remember to do the destroy:

+
+

$ terraform destroy

+
+

Enter a value: yes

+
+
+

Confirm that Terraform is no longer managing any resources:

+
+

terraform show

+

Reference:

+

[1] From +https://learn.acloud.guru/course/bd8060c6-e408-4801-a4a3-8317c45319bf/dashboard +

+ + diff --git a/_site/Cloud/1-terraform/lab_15/README.html b/_site/Cloud/1-terraform/lab_15/README.html new file mode 100644 index 0000000..b5938c8 --- /dev/null +++ b/_site/Cloud/1-terraform/lab_15/README.html @@ -0,0 +1,318 @@ + + + + + + + README + + + + +

Make +Changes to Azure Infrastructure Using Terraform

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

April, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Update the +virtual network’s resource_group_name:

+
    +
  1. Above Cloud Shell, copy your Azure resource group name. You may need +to pull down your Cloud Shell terminal to see it.
  2. +
  3. To the right of the resource_group_name variable, replace + with your copied resource group name.
  4. +
  5. Write and quit to save your changes: :wq!
  6. +
+

Initializes:

+

export TF_LOG=TRACE

+

export TF_LOG_PATH=./terraform.log

+

$ terraform init

+

$ ls -a

+

$ terraform fmt

+

$ terraform validate

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+
+

To confirm that our configuration was successful and view all the +details of our infrastructure:

+
+

terraform show

+
+

View a list of the resources Terraform is managing:

+
+

terraform state list

+

Add a Subnet to the +Configuration:

+
    +
  1. Edit the file:
    vim azure_resource_block.tf
  2. +
  3. Update the subnet’s resource_group_name:
  4. +
+ +

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+
+

To confirm that our configuration was successful and view all the +details of our infrastructure:

+
+

terraform show

+
+

View a list of the resources Terraform is managing:

+
+

terraform state list

+

Add a Tag to the +Configuration:

+
    +
  1. Uncomment the tag part within the main.tf.
  2. +
  3. Save it.
  4. +
+

$ terraform fmt

+

$ terraform validate

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+
+

To confirm that our configuration was successful and view all the +details of our infrastructure:

+
+

terraform show

+
+

View a list of the resources Terraform is managing:

+
+

terraform state list

+

Reminder:

+
+
+

Later on, remember to do the destroy:

+
+
+

$ terraform destroy

+
+

Enter a value: yes

+
+
+
+

Confirm that Terraform is no longer managing any resources:
+terraform show

+
+
+
+

View a list of the resources Terraform is managing:

+
+

terraform state list

+

Reference:

+

[1] From +https://learn.acloud.guru/course/bd8060c6-e408-4801-a4a3-8317c45319bf/dashboard +

+ + diff --git a/_site/Cloud/1-terraform/lab_16/README.html b/_site/Cloud/1-terraform/lab_16/README.html new file mode 100644 index 0000000..abfe18f --- /dev/null +++ b/_site/Cloud/1-terraform/lab_16/README.html @@ -0,0 +1,282 @@ + + + + + + + README + + + + +

Use +Output Variables to Query Data in Azure Using Terraform

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

April, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Deploy the Infrastructure:

+
    +
  1. Under # Create a virtual network, next to resource_group_name, +remove the filler name.
  2. +
  3. At the top of the screen, above the Cloud Shell terminal, copy the +name of the resource group automatically created by this lab, above +Resource group.
  4. +
  5. Paste it into the empty field next to resource_group_name.
  6. +
  7. Under #Create subnet, next to resource_group_name, delete the filler +and paste our lab’s resource group name.
  8. +
  9. Save and quit by pressing the Escape button and entering: +:wq
  10. +
+

Initializes:

+

export TF_LOG=TRACE

+

export TF_LOG_PATH=./terraform.log

+

$ terraform init

+

$ ls -a

+

$ terraform fmt

+

$ terraform validate

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+
+

To confirm that our configuration was successful and view all the +details of our infrastructure:

+
+

terraform show

+
+

View a list of the resources Terraform is managing:

+
+

terraform state list

+

Add the Outputs Variable +File:

+
+

output.tf file added

+
+

$ terraform fmt

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+
+

Display our outputs:

+
+

terraform output

+

Reminder:

+
+

Later on, remember to do the destroy:

+
+

$ terraform destroy

+
+

Enter a value: yes

+
+
+

Confirm that Terraform is no longer managing any resources:

+
+

terraform show

+
+

View a list of the resources Terraform is managing:

+
+

terraform state list

+

Reference:

+

[1] From +https://learn.acloud.guru/course/bd8060c6-e408-4801-a4a3-8317c45319bf/dashboard +

+ + diff --git a/_site/Cloud/1-terraform/lab_17/README.html b/_site/Cloud/1-terraform/lab_17/README.html new file mode 100644 index 0000000..a01f5f8 --- /dev/null +++ b/_site/Cloud/1-terraform/lab_17/README.html @@ -0,0 +1,332 @@ + + + + + + + README + + + + +

Use Terraform +to Create a Kubernetes Deployment

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

April, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Create Your Kubernetes +Cluster:

+
    +
  1. Create your Kubernetes cluster:
    +kind create cluster --name lab-terraform-kubernetes --config kind-config.yaml
  2. +
  3. When the cluster is successfully created, you should see all the +creation steps have green checkmarks, and you receive a ‘Have a nice +day!’ message.
  4. +
  5. Copy the provided command and paste it in the terminal:
    +kubectl cluster-info --context kind-lab-terraform-kubernetes
  6. +
  7. Verify your cluster was created:
    +kind get cluster
  8. +
+

Configure +Terraform for Use with the Kubernetes Cluster:

+
    +
  1. Run kubectl to get information about your cluster:
    +kubectl config view --minify --flatten --context=kind-lab-terraform-kubernetes
  2. +
  3. Add the server address to your terraform.tfvars file:
  4. +
+ +
    +
  1. Add the client certificate data to your terraform.tfvars file: +
  2. +
+ +
    +
  1. Add the client key data to your terraform.tfvars file:
  2. +
+ +
    +
  1. Add the certificate authority data to your terraform.tfvars file: +
  2. +
+ +
    +
  1. View your kubernetes.tf file:
    vim kubernetes.tf +> You can see this configuration file pulls from the terraform.tfvars +file to declare the variables and then pass them to the Kubernetes +provider.
  2. +
  3. Quit out of the file when you’re finished reviewing it: +:q
  4. +
+

Deploy Resources to +the Kubernetes Cluster:

+

export TF_LOG=TRACE

+

export TF_LOG_PATH=./terraform.log

+

$ terraform init

+

$ ls -a

+

$ terraform fmt

+

$ terraform validate

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+
+

View your deployment details:

+
+

kubectl get deployments

+
+

You should see your deployment, “long-live-the-bat” has two nodes up +and running.

+
+

Reminder:

+
+

Later on, remember to do the destroy:

+
+

$ terraform destroy

+
+

Enter a value: yes

+
+
+

Confirm that Terraform is no longer managing any resources:

+
+

terraform show

+
+

View a list of the resources Terraform is managing:

+
+

terraform state list

+

Reference:

+

[1] From +https://learn.acloud.guru/course/bd8060c6-e408-4801-a4a3-8317c45319bf/dashboard +

+ + diff --git a/_site/Cloud/1-terraform/lab_18/README.html b/_site/Cloud/1-terraform/lab_18/README.html new file mode 100644 index 0000000..824ac79 --- /dev/null +++ b/_site/Cloud/1-terraform/lab_18/README.html @@ -0,0 +1,337 @@ + + + + + + + README + + + + +

Manage Kubernetes +Resources with Terraform

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

April, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Set Up the Lab Environment:

+
    +
  1. Create your Kubernetes cluster:
    +kind create cluster --name lab-terraform-kubernetes --config kind-config.yaml
  2. +
  3. When the cluster is successfully created, you should see all the +creation steps have green checkmarks, and you receive a ‘Have a nice +day!’ message.
  4. +
  5. Copy the provided command and paste it in the terminal:
    +kubectl cluster-info --context kind-lab-terraform-kubernetes
  6. +
  7. Verify your cluster was created:
    +kind get cluster
  8. +
  9. Edit the cluster’s host address:
  10. +
+ +
    +
  1. Edit the cluster’s SSL certificate:
  2. +
+ +
    +
  1. Edit the cluster’s client key data:
  2. +
+ +
    +
  1. Edit the cluster’s certificate authority data:
  2. +
+ +

Deploy Resources to +the Kubernetes Cluster:

+

export TF_LOG=TRACE

+

export TF_LOG_PATH=./terraform.log

+

$ terraform init

+

$ ls -a

+

$ terraform fmt

+

$ terraform validate

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+

Add a Service:

+

$ terraform apply

+
+

Enter a value: yes

+
+
    +
  1. Verify the NodePort service was applied successfully:
    +kubectl get services > You should see the NodePort +service named robin listed in your services.
  2. +
+

Scale the Nodes:

+
    +
  1. Edit the lab_kubernetes_resources.tf file:
    +vim lab_kubernetes_resources.tf
  2. +
  3. Modify the replicas from 2 to 4:
  4. +
+
spec {
+replicas = 4
+selector {
+  match_labels = {
+    App = "longlivethebat"
+  }
+}
+
    +
  1. Write and quit to save your change to the file: +:wq!
  2. +
+

$ terraform apply

+
+

Enter a value: yes

+
+
+

Confirm the replicas were changed from 2 to 4, then type “yes” on the +Enter a value line to confirm the apply. 4. Verify your deployment is +now using 4 pods: kubectl get deployments

+
+

Reminder:

+
+

Later on, remember to do the destroy:

+
+

$ terraform destroy

+
+

Enter a value: yes

+
+
+

Delete your cluster:

+
+

kind delete cluster --name lab-terraform-kubernetes

+
+

Verify the cluster was deleted:

+
+

kind get clusters

+

Reference:

+

[1] From +https://learn.acloud.guru/course/bd8060c6-e408-4801-a4a3-8317c45319bf/dashboard +

+ + diff --git a/_site/Cloud/1-terraform/lab_19/README.html b/_site/Cloud/1-terraform/lab_19/README.html new file mode 100644 index 0000000..afb41d0 --- /dev/null +++ b/_site/Cloud/1-terraform/lab_19/README.html @@ -0,0 +1,264 @@ + + + + + + + README + + + + +

Use Terraform to +Create an EKS Deployment

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

April, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Configure the AWS CLI:

+

aws configure

+
    +
  1. When prompted for your AWS Access Key ID, copy and paste the Access +Key.
  2. +
  3. When prompted for your AWS Secret Access Key, copy and paste in the +Secret Access Key.
  4. +
  5. Press Enter to accept the default region.
  6. +
  7. Press Enter to accept the default output.
  8. +
+

Deploy the EKS Cluster:

+

export TF_LOG=TRACE

+

export TF_LOG_PATH=./terraform.log

+

$ terraform init

+

$ ls -a

+

$ terraform fmt

+

$ terraform validate

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+
+

Configure kubectl to interact with the cluster:

+
+

aws eks --region $(terraform output -raw region) update-kubeconfig --name $(terraform output -raw cluster_name)

+
+

Confirm that kubectl was configured properly and that the cluster was +successfully deployed:

+
+

kubectl get cs

+
+

The three components should be up and running with a status of +Healthy.

+
+

Deploy the NGINX Pods:

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+

kubectl get deployments

+

Reminder:

+
+

Later on, remember to do the destroy:

+
+

$ terraform destroy

+
+

Enter a value: yes

+
+

terraform show

+

Reference:

+

[1] From +https://learn.acloud.guru/course/bd8060c6-e408-4801-a4a3-8317c45319bf/dashboard +

+ + diff --git a/_site/Cloud/1-terraform/lab_20/README.html b/_site/Cloud/1-terraform/lab_20/README.html new file mode 100644 index 0000000..8d5170a --- /dev/null +++ b/_site/Cloud/1-terraform/lab_20/README.html @@ -0,0 +1,307 @@ + + + + + + + README + + + + +

Troubleshooting a +Terraform Deployment

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

April, 2022

+
+

This is a summary based on References

+

Connect to the server:

+

ssh <user_name>@<IPadress>

+

Correct the Variable +Interpolation Error:

+
    +
  1. Edit the ami in the main.tf file.
  2. +
+ +
    +
  1. Edit the subnet_id in the main.tf file:
  2. +
+ +

export TF_LOG=TRACE

+

export TF_LOG_PATH=./terraform.log

+

$ ls -a

+

$ terraform fmt

+
    +
  1. Apply line numbering to the file so you can identify the error more +easily:
    :set number
  2. +
  3. Update line 25 as follows to correct the variable interpolation +error:
    Name = "${var.name}-learn"
  4. +
+

$ terraform fmt

+

$ terraform init

+

Correct the Region +Declaration Error:

+

$ terraform validate

+
    +
  1. Check the variables.tf file:
    vim variables.tf +> You should see the variable regions, which is causing the error. +This should instead be region.

  2. +
  3. Update regions to region in the variables.tf file to correct the +region declaration error:

  4. +
+
variable "region" {
+  description = "The AWS region your resources will be deployed"
+}
+

Correct the Syntax +Error for the Resource:

+

$ terraform validate

+
    +
  1. Edit the main.tf file.
  2. +
  3. Insert double quotes ("") around the ami and subnet_id values as +follows to correct the syntax error:
  4. +
+
resource "aws_instance" "web_app" {
+  ami         = "ami-<YOUR_AMI_ID>"
+  subnet_id   = "subnet-<YOUR_SUBNET_ID>"
+

Correct the Outputs Error:

+

$ terraform validate

+
    +
  1. Edit the outputs.tf file.
  2. +
  3. Correct the first output error by changing the instance_public_ip +value from .public.ip to .public_ip as follows:
  4. +
+
output "instance_public_ip" {
+  description = "Public IP address of the EC2 instance"
+  value       = aws_instance.web_app.public_ip
+}
+
    +
  1. Correct the second output error by changing the instance_name value +from tag to tags as follows:
  2. +
+
output "instance_name" {
+  description = "Tags of the EC2 instance"
+  value       = aws_instance.web_app.tags.Name
+}
+

Deploy the Infrastructure:

+

$ terraform validate

+

$ terraform plan

+

$ terraform apply

+
+

Enter a value: yes

+
+

Reminder:

+
+

Later on, remember to do the destroy:

+
+

$ terraform destroy

+
+

Enter a value: yes

+
+

terraform show

+

Reference:

+

[1] From +https://learn.acloud.guru/course/bd8060c6-e408-4801-a4a3-8317c45319bf/dashboard +

+ + diff --git a/_site/Cloud/2-automation_principles/0_golang/README.html b/_site/Cloud/2-automation_principles/0_golang/README.html new file mode 100644 index 0000000..f8dac9c --- /dev/null +++ b/_site/Cloud/2-automation_principles/0_golang/README.html @@ -0,0 +1,202 @@ + + + + + + + README + + + + +

GoLang Learning Path

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

brown9804

+

Aug, 2022

+
+
+

If you want to have a career in cloud-based programming, you should +consider learning Go, because platforms such as Amazon Web Services, +Kubernetes, and Google Cloud Platform (GCP) all support Go.

+
+

image

+

Wiki

+ + + diff --git a/_site/Cloud/2-automation_principles/0_golang/src/README.html b/_site/Cloud/2-automation_principles/0_golang/src/README.html new file mode 100644 index 0000000..8521800 --- /dev/null +++ b/_site/Cloud/2-automation_principles/0_golang/src/README.html @@ -0,0 +1,201 @@ + + + + + + + README + + + + +

GoLang - How it works!

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

brown9804

+

Aug, 2022

+
+
+

If you want to have a career in cloud-based programming, you should +consider learning Go, because platforms such as Amazon Web Services, +Kubernetes, and Google Cloud Platform (GCP) all support Go.

+
+

image

+

Wiki

+ + + diff --git a/_site/Cloud/2-automation_principles/1_api_automations/3_countryinfo_travelers/README.html b/_site/Cloud/2-automation_principles/1_api_automations/3_countryinfo_travelers/README.html new file mode 100644 index 0000000..033b8fa --- /dev/null +++ b/_site/Cloud/2-automation_principles/1_api_automations/3_countryinfo_travelers/README.html @@ -0,0 +1,682 @@ + + + + + + + README + + + + +

Country +Information for Travelers (API integration)

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

Dec, 2022

+
+
+

This is designed to provide users with a comprehensive overview of +the country, including general information, COVID-19 data, weather +details based on latitude and longitude, and an overview of the +country’s currency exchange rates.

+
+

Wiki:

+ +

Setup

+

Please follow the makefile instructions at this location. And make +sure countryinfo module is cloned before started.

+
make env
+make activate
+make setupcountryinfo
+make run
+

Example, using Spain data:

+ +++ + + + + + + + + + + + + + +
country inf
Currency  and Weather
+

With OpenTelemetry +Instrumentation

+
Which country would you like information about?
+Spain
+
+************ Country - General Information ***********************
+
+                    Information                                                                                                                                                                                                                                                                                                                                                                                                                                                                                            Values
+                   Country Name                                                                                                                                                                                                                                                                                                                                                                                                                                                                                             Spain
+Other spelling for country name                                                                                                                                                                                                                                                                                                                                                                                                                                                           [ES, Kingdom of Spain, Reino de España]
+                        Capital                                                                                                                                                                                                                                                                                                                                                                                                                                                                                            Madrid
+                      Provinces [A Coruña, Álava, Albacete, Alicante, Almería, Asturias, Ávila, Badajoz, Balearic Islands, Barcelona, Biscay, Burgos, Cáceres, Cádiz, Cantabria, Castellón, Ciudad Real, Córdoba, Cuenca, Gipuzkoa, Girona, Granada, Guadalajara, Huelva, Huesca, Jaén, La Rioja, Las Palmas, León, Lleida, Lugo, Madrid, Málaga, Murcia, Navarre, Ourense, Palencia, Pontevedra, Salamanca, Santa Cruz de Tenerife, Segovia, Seville, Soria, Tarragona, Teruel, Toledo,  Valencia, Valladolid, Zamora, Zaragoza]
+                         Region                                                                                                                                                                                                                                                                                                                                                                                                                                                                                            Europe
+                      Subregion                                                                                                                                                                                                                                                                                                                                                                                                                                                                                   Southern europe
+                       Currency                                                                                                                                                                                                                                                                                                                                                                                                                                                                                             [EUR]
+                      Languages                                                                                                                                                                                                                                                                                                                                                                                                                                                                                              [es]
+                        Borders                                                                                                                                                                                                                                                                                                                                                                                                                                                                         [AND, FRA, GIB, PRT, MAR]
+                   Calling Code                                                                                                                                                                                                                                                                                                                                                                                                                                                                                              [34]
+                           Area                                                                                                                                                                                                                                                                                                                                                                                                                                                                                            505992
+                     Population                                                                                                                                                                                                                                                                                                                                                                                                                                                                                          46507760
+                       Lat/Long                                                                                                                                                                                                                                                                                                                                                                                                                                                                            [40.416705, -3.703582]
+                           Code                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                ES
+
+************ Country - Covid Data ***********************
+
+{'Total Cases': '13,914,811       ', 'Recovered Cases': '13,762,417', 'Total Deaths': '121,760'}
+
+************ Country - Weather Information ***********************
+
+Coordinates 40.41999816894531°N -3.6999998092651367°E
+Elevation 666.0 m asl
+Timezone None None
+Timezone difference to GMT+0 0 s
+                       date  temperature_2m
+0 2024-03-19 00:00:00+00:00       17.115000
+1 2024-03-19 01:00:00+00:00       16.565001
+2 2024-03-19 02:00:00+00:00       15.764999
+3 2024-03-19 03:00:00+00:00       16.014999
+4 2024-03-19 04:00:00+00:00       15.264999
+
+************ Country - Currency Convert ***********************
+
+Into which currency would you like to convert?
+{
+    "name": "span_country_info",
+    "context": {
+        "trace_id": "0x6908e6615ea20ccb8349158a18b7a428",
+        "span_id": "0x29f451e7453417d0",
+        "trace_state": "[]"
+    },
+    "kind": "SpanKind.INTERNAL",
+    "parent_id": "0x83505264fb5e3fc1",
+    "start_time": "2024-03-19T21:00:00.984521Z",
+    "end_time": "2024-03-19T21:00:01.038606Z",
+    "status": {
+        "status_code": "UNSET"
+    },
+    "attributes": {
+        "operation0": "Ok - Get information from CountryInfo",
+        "operation1": "Ok - Append country information to list",
+        "operation2": "Ok - Store information into dataframe"
+    },
+    "events": [],
+    "links": [],
+    "resource": {
+        "attributes": {
+            "telemetry.sdk.language": "python",
+            "telemetry.sdk.name": "opentelemetry",
+            "telemetry.sdk.version": "1.23.0",
+            "service.name": "unknown_service"
+        },
+        "schema_url": ""
+    }
+}
+{
+    "name": "Country_General_Inf",
+    "context": {
+        "trace_id": "0x6908e6615ea20ccb8349158a18b7a428",
+        "span_id": "0x83505264fb5e3fc1",
+        "trace_state": "[]"
+    },
+    "kind": "SpanKind.INTERNAL",
+    "parent_id": "0xa9fe995e56a5ea60",
+    "start_time": "2024-03-19T21:00:00.984195Z",
+    "end_time": "2024-03-19T21:00:01.041551Z",
+    "status": {
+        "status_code": "UNSET"
+    },
+    "attributes": {},
+    "events": [],
+    "links": [],
+    "resource": {
+        "attributes": {
+            "telemetry.sdk.language": "python",
+            "telemetry.sdk.name": "opentelemetry",
+            "telemetry.sdk.version": "1.23.0",
+            "service.name": "unknown_service"
+        },
+        "schema_url": ""
+    }
+}
+{
+    "name": "span_covid_data",
+    "context": {
+        "trace_id": "0x6908e6615ea20ccb8349158a18b7a428",
+        "span_id": "0x7302896895fcd04c",
+        "trace_state": "[]"
+    },
+    "kind": "SpanKind.INTERNAL",
+    "parent_id": "0x4144afc784ce1630",
+    "start_time": "2024-03-19T21:00:01.041681Z",
+    "end_time": "2024-03-19T21:00:02.088010Z",
+    "status": {
+        "status_code": "UNSET"
+    },
+    "attributes": {
+        "operation0": "Ok - Cast country name to lowerCase",
+        "operation1": "Ok - No need to replace empty space for - ",
+        "operation2": "Ok - get worldometers country's covid data",
+        "operation3": "Ok - converting the text",
+        "operation4": "Ok - finding meta info for cases",
+        "operation5": "Ok - getting total cases number",
+        "operation6": "Ok - getting recovered cases number",
+        "operation7": "Ok - getting deaths cases number",
+        "operation8": "Ok - whole data in place"
+    },
+    "events": [],
+    "links": [],
+    "resource": {
+        "attributes": {
+            "telemetry.sdk.language": "python",
+            "telemetry.sdk.name": "opentelemetry",
+            "telemetry.sdk.version": "1.23.0",
+            "service.name": "unknown_service"
+        },
+        "schema_url": ""
+    }
+}
+{
+    "name": "Country_CovidData",
+    "context": {
+        "trace_id": "0x6908e6615ea20ccb8349158a18b7a428",
+        "span_id": "0x4144afc784ce1630",
+        "trace_state": "[]"
+    },
+    "kind": "SpanKind.INTERNAL",
+    "parent_id": "0xa9fe995e56a5ea60",
+    "start_time": "2024-03-19T21:00:01.041627Z",
+    "end_time": "2024-03-19T21:00:02.088952Z",
+    "status": {
+        "status_code": "UNSET"
+    },
+    "attributes": {},
+    "events": [],
+    "links": [],
+    "resource": {
+        "attributes": {
+            "telemetry.sdk.language": "python",
+            "telemetry.sdk.name": "opentelemetry",
+            "telemetry.sdk.version": "1.23.0",
+            "service.name": "unknown_service"
+        },
+        "schema_url": ""
+    }
+}
+{
+    "name": "span_capital_weather",
+    "context": {
+        "trace_id": "0x6908e6615ea20ccb8349158a18b7a428",
+        "span_id": "0x422fae027683a48e",
+        "trace_state": "[]"
+    },
+    "kind": "SpanKind.INTERNAL",
+    "parent_id": "0xccd32033def63dab",
+    "start_time": "2024-03-19T21:00:02.089397Z",
+    "end_time": "2024-03-19T21:00:02.958786Z",
+    "status": {
+        "status_code": "UNSET"
+    },
+    "attributes": {
+        "operation0": "Ok - Setup the Open-Meteo API client with cache and retry on error",
+        "operation1": "Ok - Call Open-Meteo API",
+        "operation2": "Ok - Process first location. Add a for-loop for multiple locations or weather models",
+        "operation3": "Ok - Process hourly data",
+        "operation4": "Ok - Store data into Dataframe"
+    },
+    "events": [],
+    "links": [],
+    "resource": {
+        "attributes": {
+            "telemetry.sdk.language": "python",
+            "telemetry.sdk.name": "opentelemetry",
+            "telemetry.sdk.version": "1.23.0",
+            "service.name": "unknown_service"
+        },
+        "schema_url": ""
+    }
+}
+{
+    "name": "Country_WeatherInformation",
+    "context": {
+        "trace_id": "0x6908e6615ea20ccb8349158a18b7a428",
+        "span_id": "0xccd32033def63dab",
+        "trace_state": "[]"
+    },
+    "kind": "SpanKind.INTERNAL",
+    "parent_id": "0xa9fe995e56a5ea60",
+    "start_time": "2024-03-19T21:00:02.089031Z",
+    "end_time": "2024-03-19T21:00:02.961826Z",
+    "status": {
+        "status_code": "UNSET"
+    },
+    "attributes": {},
+    "events": [],
+    "links": [],
+    "resource": {
+        "attributes": {
+            "telemetry.sdk.language": "python",
+            "telemetry.sdk.name": "opentelemetry",
+            "telemetry.sdk.version": "1.23.0",
+            "service.name": "unknown_service"
+        },
+        "schema_url": ""
+    }
+}
+USD
+How much money would you like to convert?
+100
+Considering which YEAR for the exchange?
+2024
+Considering which MONTH for the exchange?
+2
+Considering which DAY for the exchange?
+12
+107.72999999999999 EUR to USD is: 107.72999999999999
+At 2024/2/12
+{
+    "resource_metrics": [
+        {
+            "resource": {
+                "attributes": {
+                    "telemetry.sdk.language": "python",
+                    "telemetry.sdk.name": "opentelemetry",
+                    "telemetry.sdk.version": "1.23.0",
+                    "service.name": "unknown_service"
+                },
+                "schema_url": ""
+            },
+            "scope_metrics": [
+                {
+                    "scope": {
+                        "name": "country_info_app_global_meterprovider",
+                        "version": "",
+                        "schema_url": ""
+                    },
+                    "metrics": [
+                        {
+                            "name": "work.counter",
+                            "description": "Counts the amount of work done",
+                            "unit": "1",
+                            "data": {
+                                "data_points": [
+                                    {
+                                        "attributes": {
+                                            "work.type": "country_info"
+                                        },
+                                        "start_time_unix_nano": 1710882000984390000,
+                                        "time_unix_nano": 1710882010506357000,
+                                        "value": 1
+                                    },
+                                    {
+                                        "attributes": {
+                                            "work.type": "covid_data"
+                                        },
+                                        "start_time_unix_nano": 1710882000984390000,
+                                        "time_unix_nano": 1710882010506357000,
+                                        "value": 1
+                                    },
+                                    {
+                                        "attributes": {
+                                            "work.type": "capital_weather"
+                                        },
+                                        "start_time_unix_nano": 1710882000984390000,
+                                        "time_unix_nano": 1710882010506357000,
+                                        "value": 1
+                                    },
+                                    {
+                                        "attributes": {
+                                            "work.type": "convert_currency"
+                                        },
+                                        "start_time_unix_nano": 1710882000984390000,
+                                        "time_unix_nano": 1710882010506357000,
+                                        "value": 1
+                                    }
+                                ],
+                                "aggregation_temporality": 2,
+                                "is_monotonic": true
+                            }
+                        }
+                    ],
+                    "schema_url": ""
+                }
+            ],
+            "schema_url": ""
+        }
+    ]
+}
+{
+    "name": "span_convert_currency",
+    "context": {
+        "trace_id": "0x6908e6615ea20ccb8349158a18b7a428",
+        "span_id": "0x0101e8a0eff5e1be",
+        "trace_state": "[]"
+    },
+    "kind": "SpanKind.INTERNAL",
+    "parent_id": "0xdd82d47940f359ab",
+    "start_time": "2024-03-19T21:00:10.362301Z",
+    "end_time": "2024-03-19T21:00:10.504280Z",
+    "status": {
+        "status_code": "UNSET"
+    },
+    "attributes": {
+        "operation0": "Ok - Call CurrencyConverter",
+        "operation1": "Ok - Currency Exchanged"
+    },
+    "events": [],
+    "links": [],
+    "resource": {
+        "attributes": {
+            "telemetry.sdk.language": "python",
+            "telemetry.sdk.name": "opentelemetry",
+            "telemetry.sdk.version": "1.23.0",
+            "service.name": "unknown_service"
+        },
+        "schema_url": ""
+    }
+}
+{
+    "name": "Country_CurrencyConvert",
+    "context": {
+        "trace_id": "0x6908e6615ea20ccb8349158a18b7a428",
+        "span_id": "0xdd82d47940f359ab",
+        "trace_state": "[]"
+    },
+    "kind": "SpanKind.INTERNAL",
+    "parent_id": "0xa9fe995e56a5ea60",
+    "start_time": "2024-03-19T21:00:02.961950Z",
+    "end_time": "2024-03-19T21:00:10.506232Z",
+    "status": {
+        "status_code": "UNSET"
+    },
+    "attributes": {},
+    "events": [
+        {
+            "name": "Input1 - Asking for ToCurrency exchange ...",
+            "timestamp": "2024-03-19T21:00:02.962060Z",
+            "attributes": {}
+        },
+        {
+            "name": "Input2 - Asking for Amount of Money for exchange ...",
+            "timestamp": "2024-03-19T21:00:05.465606Z",
+            "attributes": {}
+        },
+        {
+            "name": "Input3 - Asking for YEAR of exchange ...",
+            "timestamp": "2024-03-19T21:00:06.620951Z",
+            "attributes": {}
+        },
+        {
+            "name": "Input4 - Asking for MONTH of exchange ...",
+            "timestamp": "2024-03-19T21:00:08.236853Z",
+            "attributes": {}
+        },
+        {
+            "name": "Input5 - Asking for DAY of exchange ...",
+            "timestamp": "2024-03-19T21:00:09.467426Z",
+            "attributes": {}
+        }
+    ],
+    "links": [],
+    "resource": {
+        "attributes": {
+            "telemetry.sdk.language": "python",
+            "telemetry.sdk.name": "opentelemetry",
+            "telemetry.sdk.version": "1.23.0",
+            "service.name": "unknown_service"
+        },
+        "schema_url": ""
+    }
+}
+{
+    "name": "Country_Information_For_Travellers",
+    "context": {
+        "trace_id": "0x6908e6615ea20ccb8349158a18b7a428",
+        "span_id": "0xa9fe995e56a5ea60",
+        "trace_state": "[]"
+    },
+    "kind": "SpanKind.INTERNAL",
+    "parent_id": null,
+    "start_time": "2024-03-19T20:59:59.611590Z",
+    "end_time": "2024-03-19T21:00:10.506248Z",
+    "status": {
+        "status_code": "UNSET"
+    },
+    "attributes": {},
+    "events": [
+        {
+            "name": "Input0 - Asking for Country Name ...",
+            "timestamp": "2024-03-19T20:59:59.611606Z",
+            "attributes": {}
+        }
+    ],
+    "links": [],
+    "resource": {
+        "attributes": {
+            "telemetry.sdk.language": "python",
+            "telemetry.sdk.name": "opentelemetry",
+            "telemetry.sdk.version": "1.23.0",
+            "service.name": "unknown_service"
+        },
+        "schema_url": ""
+    }
+}
+ + diff --git a/_site/Cloud/2-automation_principles/1_api_automations/README.html b/_site/Cloud/2-automation_principles/1_api_automations/README.html new file mode 100644 index 0000000..f139f08 --- /dev/null +++ b/_site/Cloud/2-automation_principles/1_api_automations/README.html @@ -0,0 +1,320 @@ + + + + + + + README + + + + +

API Principles overview

+ +++ + + + + + + + + + + + + + + + + + +
Costa Rica
Belinda Brown, +belindabrownr04@gmail.com
brown9804
March, 2022
+

Content

+ +

Overview

+
+ + +
+
+ + +
+

REST (Representational State Transfer) is an +architecture that is based on web standards. It defines a set of +constraints and protocols which need to be followed +while creating web services.

+
+ + +
+ +
+
+

Qa Tech Hub From [2]

+
+
+
+ + +
+

REST is not a specification but a set of guidelines on how to +architect a network-connected software system.
A REST web service +is any web service that adheres to REST architecture constraints.
+These web services expose their data to the outside world through an +API. REST APIs provide access to web service data through public web +URLs.

+
+
+

Real Python From [1]

+
+
+
+ + +
+
+ + +
+

References

+

[1] From +https://realpython.com/api-integration-in-python/#rest-architecture +
[2] From https://qatechhub.com/rest-api-introduction/
[3] +From https://stevenpcurtis.medium.com/endpoint-vs-api-ee96a91e88ca
+[4] From +https://towardsdatascience.com/using-github-pages-for-creating-global-api-76b296c4b3b5 +
[5] From https://blog.hubspot.com/website/api-endpoint
[6] +From +https://github.com/brown9804/SAP_initial_path/tree/main/Pre-Requisites/REST%20APIs +
[7] From +https://www.guru99.com/api-vs-web-service-difference.html#:~:text=Web%20service%20is%20used%20for,APIs%20are%20not%20web%20services +
[8] From +https://site.financialmodelingprep.com/education/api-endpoint

+ + diff --git a/_site/Cloud/2-automation_principles/README.html b/_site/Cloud/2-automation_principles/README.html new file mode 100644 index 0000000..b89b91a --- /dev/null +++ b/_site/Cloud/2-automation_principles/README.html @@ -0,0 +1,259 @@ + + + + + + + README + + + + +

Automation Principles +Overview

+ +++ + + + + + + + + + + + + + + + + + +
Costa Rica
Belinda Brown, +belindabrownr04@gmail.com
brown9804
March, 2022
+

Content

+ +

image

+

Wiki

+ +
+ + +
+ + diff --git a/_site/Cloud/3-kubernetes_principles/README.html b/_site/Cloud/3-kubernetes_principles/README.html new file mode 100644 index 0000000..586cf03 --- /dev/null +++ b/_site/Cloud/3-kubernetes_principles/README.html @@ -0,0 +1,345 @@ + + + + + + + README + + + + +

Kubernetes Overview

+
+

Costa Rica

+

Belinda Brown, belindabrownr04@gmail.com

+

+

brown9804

+

April, 2021

+
+

Wiki

+ +

image

+

image

+ +

image

+ ++++ + + + + + + + + + + + + +
Single/Multi TenantsFederated
imageimage
+ + +++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ComponentDefinitionURL
KubernetesIs an open source system to deploy, scale, and manage containerized +applications anywhere.What is +k8s
ContainerA container is a standalone object that holds everything an +application needs to execute, including libraries, resource files, and +configuration files. Containers are lightweight compared to virtual +machines and can run regardless of infrastructure or environment. This +makes them ideal for ease-of-management across devices and operating +systems.What +is k8s container?
PodA container or group of containers running on a machine.What +is k8s pods?
NodeA node is a physical or virtual machine that operates at least a +single container, though they can (and often) run multiple +containers.What is +ks8 node?
ClusterA cluster is a set of virtual or physical machines, each operating +as a nod to run containerized applications. Clusters are managed by +Kubernetes.What +is k8s cluster?
DeploymentDeployment refers to the process of installing, executing, or +updating containers on a node. Efficient deployment takes advantage of +tools such as automation to simplify management and resource usage.Reference +here
ReplicasetA tool used to ensure availability, Replicaset defines the quantity +of stable replica Pods that must be running at any one time, then +manages resources to fulfill this. Replicaset creates or deletes Pods to +meet and maintain the target.Reference +here
OrchestrationContainer orchestration operates one level above deployment and +refers to the overall management and operational logistics that oversee +containers. Orchestration is necessary to balance resources in an +extremely dynamic environment.Reference +here
+

image

+

image

+ +

image

+

image

+

image

+ + diff --git a/_site/Cloud/README.html b/_site/Cloud/README.html new file mode 100644 index 0000000..64e715b --- /dev/null +++ b/_site/Cloud/README.html @@ -0,0 +1,578 @@ + + + + + + + README + + + + +

Cloud Principles Overview

+

Costa Rica

+

brown9804

+

Last updated: 2024-12-13

+
+

Wiki

+
+ +Table of Wiki (Click to expand) + + +
+

Content

+
+ +Table of Contents (click to open) + + +
+

Overview

+ +
+

+Total Visitors +

+

Visitor Count

+
+ + diff --git a/_site/DevOps/README.html b/_site/DevOps/README.html new file mode 100644 index 0000000..4b3fadf --- /dev/null +++ b/_site/DevOps/README.html @@ -0,0 +1,545 @@ + + + + + + + README + + + + +

DevOps Overview

+

Costa Rica

+

brown9804

+

Last updated: 2024-12-13

+
+

Has his roots in agile and iterative

+

Code + systems

+

Wiki

+
+ +Table of Wiki (Click to expand) + + +
+

Content

+ +

CAMS

+ +

*More feedback loops

+

Considering: 1. People, process, tools 2. Continuous delivery coding, +testing small parts 3. Lean management feedback loops, visualization 4. +Change control 5. infrastructure code - checked into source control

+

Practices

+
    +
  1. Incident command system
  2. +
  3. Developers on call
  4. +
  5. Public status pages
  6. +
  7. Blameless postmortems
  8. +
  9. Embedded teams
  10. +
  11. Cloud - control infrastructure
  12. +
  13. Andon Cords - someone stops the production because catch +something
  14. +
  15. Dependency injection
  16. +
  17. Blue/ Green Deployment load balancer one is live, set - system
  18. +
  19. Chaos Monkey - high reliable - making caos for testing
  20. +
+

Example

+

The car or the horse?

+

Series of tools to address out needs like pipeline Reviewing logistic +tail which is related to a cost

+

A tool criteria is: 1. programmable 2. Verifiable -> exposes what +is doing 3. Well behaved operation point of view and deploy view

+

Communication on DevOps

+
    +
  1. Blameless postmortems 48 hours everything in time +line
  2. +
  3. Transparent uptime: +admit failure, sound like a human, communication channel, authentic.
  4. +
+

### The westrum model 1. Pathological (power oriented) 2. +Bereaucratic (rule-oriented) 2. Generative (performance oriented)

+

### Kaizen Change for the better.

+

gemba (locus the real place) Going to the code to see +gemba

+

Focus on symptoms: causes - effects. People don’t fail, +processes do. Don’t blame.

+

Agile, Lean and Itope

+

### Agile infrastructure: - Requirements - Design - Implementation - +Verification - Maintenance

+

--> Sprint 1, 2, 3 (plan, desing, buil, test, review, launch)

+

A sample value stream map:

+
+Alt text + +
+

And the Scrum life cycle:

+
+Alt text + +
+

Collaborations - Increase productivity and more ideas

+

### Lean

+

Systematic software: - Eliminate waste - muda Work that +absorb resources add no value - muri Unreasonable work +imposed on worker and machines - mura Work coming in +unevenly instead of the constant or regular flow - +Value stream Value information flows with the costumers

+

Important to consider lean principles:

+
+Alt text + +
+

Itil, Itsm, Sdlc

+ +
    +
  1. Service strategy
  2. +
  3. Service design
  4. +
  5. Service transition
  6. +
  7. Service operation
  8. +
+

2000 pages or more :)

+

CALMS

+

And know … calms with L of leans - Lean management - Amplify learning +- Decide as late as possible - Decide as fast as possible - Empower the +team - Build-in integrity - See the whole

+

Prod & Stage

+ ++++++ + + + + + + + + + + + + + + + + +
Important for Prod and Stage
Continuous delivery pipelineVersion controlApplication codeInfrastructure code
+

Amazon has cloud formation and azure has azure resource manager +templates and so on one model for my systems, another for os system and +other applications

+

Containers

+

Efficiency reasons: - nodes 1000 - OS dependecies - Docker - Maven +deb file and Docker containers

+

CMDB - Configuration Management Data Base

+

Zookeeper service as a central coordinated. Combining actions like +Kubernetes and Mesos. The container is basically the app configuration +management: - Chef - Puppet - Ansible - Salt - Cfengine - Services +directory tools - Etcd - Zookeeper - Consul

+

Docker - kubernetes - mesos

+

Private container services - Rancher - Google Cloud Platform - Amazon +web services ecs * Blue live * Green IDLE

+

CD/CI

+

Continuos Deploy Continous Delivery +Continuos Integration

+
    +
  1. Time to market goes down
  2. +
  3. Quality increases
  4. +
  5. Continuous Delivery limits your work in progress
  6. +
  7. Shortens lead times for changes
  8. +
  9. Improves mean time to recover
  10. +
+

Annotations: - Builds should pass the coffee test <5 minutes - +Commit small bits - Don’t leave the build broken - Use a trunk - bases +development flow - No flaky tests - The build should return a status, a +log, and an artifact

+

Important: 1. Only build artifacts once 2. Should be immutable 3. +Deployment should go to a copy of the production 4. Stop deploys if a +previous step fails 5. Deployments should be idempotent

+

Cycle and Overall Cycle Time

+

Types of testing 1. Unit testing 2. Code hygiene * Liting * Code +formatting * Banned function checks 3. Integration testing 4. Security +testing * Given I have a website * When I try to attack it with XSS * +Then it should not be vulnerable
+5. TDD Test Driven Development * State desired outcome as a test * Write +code to pass the test * Repeat 6. BDD Behavior Driven Development * Work +with stakeholders * Describe business functionality * Test is based on +natural language 7. ATDD Acceptance Test Driven Development * End user +perpective * Use case automated testing * Testing is continuous during +development 8. Infrastructure testing 9. Performance testing - types of +performance

+ ++++++ + + + + + + + + + + + + + + + + +
Annotations
Version control GitHubCI systems jenkins bambooBuild make/rake, maven, gulp, +packerTest j unit, golint / gofmt / rubocop
+

Integration testing

+ +

Performace testing apachebench, meter +Security testing brakeman, veracode

+

Where? - Artifactory - Nexus - Docker hub - AWS s3

+

Deployment: - Rundeck - Urbancode - Thoughtworks - Deployinator

+

Desing for operation theory

+ +

Metrics and monitoring

+

How complex systems fail? * Change introduces new forms of failure * +Complex system contain changing mixtures of failures latent within them +* All complex system is always running in degraded mode

+

Lean approach: 1. Build 2. Measure 3. Learn 4. Repeat

+

So: * Service performance and uptime * Software component metrics * +System metrics
+* App metrics * Performance: Linting, code formatting, banned function +checks * Security systems

+

5 ws of logging

+
    +
  1. What happend?
  2. +
  3. When
  4. +
  5. Where
  6. +
  7. Who
  8. +
  9. Where did that ebtuty come from?
  10. +
+

Remainders: - Do not collect log data if you never plan to use it - +Retain log data for as long as it is conceivable that it can be used - +Log all you can but alert only what you must respond to - Don’t try to +make your logging more available or more secure than your production +stack - Logs change

+

SRE tool chain

+

Software as a service monitoring: - Pingdom - +Datadog - Netuitive - Ruxit - +Librato - New relic - +App dynamics

+

Open source monitoring

+ +
+

+Total Visitors +

+

Visitor Count

+
+ + diff --git a/_site/GitHub/README.html b/_site/GitHub/README.html new file mode 100644 index 0000000..b9a46d1 --- /dev/null +++ b/_site/GitHub/README.html @@ -0,0 +1,255 @@ + + + + + + + README + + + + +

GitHub Overview

+

Costa Rica

+

brown9804

+

Last updated: 2024-12-13

+
+

Wiki

+
+ +Table of Wiki (Click to expand) + + +
+

How to Commit/Push to Github

+
git clone <repo>     or        git -c http.sslVerify=false clone <repository-name - ssh>
+
+cd <repo path>
+
+code . 
+
+git pull 
+
+git checkout -b <new branch_name>
+
+git status 
+
+git add -A
+
+git commit -m "" 
+
+git push origin [branch]
+
+git pull origin [branch]
+

Good Content List Format

+
<details><summary> <a href=""> </a></summary><ul>
+        <li> <a href=""> </a> </li>
+        <li> <a href=""> </a> </li>
+        <li> <a href=""> </a> </li>
+        <li> <a href=""> </a> </li>
+        <li> <a href="" > </a> </li>
+        <li> <a href="" > </a> </li>
+        <li> <a href="" > </a> </li>
+</details></li> <!-- End  -->
+
+

+Total Visitors +

+

Visitor Count

+
+ + diff --git a/_site/GitHub/demos/0_GithubAImodels.html b/_site/GitHub/demos/0_GithubAImodels.html new file mode 100644 index 0000000..5330c31 --- /dev/null +++ b/_site/GitHub/demos/0_GithubAImodels.html @@ -0,0 +1,609 @@ + + + + + + + 0_GithubAImodels + + + + +

Prototyping with AI Models +on GitHub

+

Costa Rica

+

brown9804

+

Last updated: 2025-01-13

+
+
+

GitHub provides a platform for developers to +prototype, experiment with, and integrate AI models into their projects. +This process involves several key steps and tools that facilitate the +development and deployment of AI-powered applications.

+
+

Wiki

+
+ +Table of Wiki (Click to expand) + + +
+

Content

+
+ +Table of Content (Click to expand) + + +
+

Overview

+ ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ComponentDescription
Finding AI ModelsYou can find various AI models on the GitHub Marketplace. Navigate +to the Models section to explore available models and view +their details.
Experimenting with AI ModelsGitHub offers a playground where you can test different models by +adjusting parameters and submitting prompts. You can experiment with AI +models using the API provided by GitHub.
Saving and Sharing ExperimentsYou can save your playground experiments and share them with others. +This is useful for collaboration and getting feedback on your +prototypes.
Integration with Visual Studio CodeGitHub models can be integrated into Visual Studio Code, allowing +you to experiment with AI models directly within your development +environment.
Going to ProductionOnce you are ready to move from prototyping to production, you can +switch to using a token from a paid Azure account. This provides more +robust and scalable options for deploying your AI models.
Rate LimitsThere are rate limits in place to ensure fair usage of the AI +models. These limits vary depending on the model and usage scenario. +Monitoring your usage and optimizing requests can help you stay within +these limits. The rate limits for the playground and free API usage are +intended to help you experiment with models and prototype your AI +application. For use beyond those limits, and to bring your application +to scale, you must provision resources from an Azure account, and +authenticate from there instead of your GitHub personal access +token.
+

Demo

+

Set Up Your Environment

+
    +
  1. Create a GitHub Repository: +
      +
    • Go to GitHub and sign in to your account.

    • +
    • Click on the + icon in the top right corner and +select New repository.

      +

      image

    • +
    • Name your repository and choose its visibility (public or +private).

    • +
    • Click Create repository.

      +

      image

    • +
  2. +
  3. Generate a Personal Access Token (PAT): +
      +
    • Click on your profile picture in the top right corner and select +Settings.

      +

      image

    • +
    • In the left sidebar, click Developer +settings.

      +

      image

    • +
    • Click Personal access tokens and then +Generate new token.

      +

      image

    • +
    • Select the scopes you need (e.g., repo, +workflow, copilot) and click Generate +token.

    • +
    • Copy the token and store it securely.

      + + + + + +
      +

      image

      +
      +

      image

      +
    • +
  4. +
+

Find and Select an AI Model

+
    +
  1. Navigate to GitHub Marketplace: +
      +
    • Go to the GitHub Marketplace.

    • +
    • Click on the Models section to explore available +AI models.

      + + + + + +
      +

      image

      +
      +

      image

      +
    • +
  2. +
  3. Select a Model: +
      +
    • Browse through the models and select one that fits your needs, +such as OpenAI GPT-4.

    • +
    • Review the model’s details, capabilities, and usage +instructions.

      + + + + + +
      +

      image

      +
      +

      image

      +
    • +
  4. +
+

Experiment in the Playground

+
    +
  1. Access the Playground: +
      +
    • Go to the playground section for the selected model.

    • +
    • You can find this in the model’s details page on GitHub +Marketplace.

      +

      image

    • +
  2. +
  3. Test the Model: +
      +
    • Adjust parameters such as prompt, temperature, and max +tokens.

    • +
    • Submit prompts to see how the model responds.

    • +
    • Example prompt: +Explain the basics of machine learning

      + + + + + +
      +

      image

      +
      +

      image

      +
    • +
    • You can compare the performance between models:

      + + + + + +
      +

      image

      +
      +

      image

      +
    • +
  4. +
  5. Save Experiments: +
      +
    • Save your experiments by clicking on the Save +button.

    • +
    • Share the saved experiments with collaborators for feedback.

      +

      image

    • +
  6. +
+

Integrate with Visual Studio +Code

+
    +
  1. Install Extensions: +
      +
    • Open Visual Studio Code.

    • +
    • Go to the Extensions view by clicking on the Extensions icon in +the Activity Bar.

    • +
    • Search for and install the GitHub Copilot +extension.

      +

      image

    • +
  2. +
  3. Set Up API Key: +
      +
    • Open a terminal in Visual Studio Code.

    • +
    • Set your API key by running:

      +
      export OPENAI_API_KEY="your-api-key"
    • +
  4. +
+

Make API Requests

+
    +
  1. Write a Script: +
      +
    • Create a new file in your repository, e.g., +ai_model_test.py.

    • +
    • Write a script to make API requests to the AI model. For +example:

      +
      import openai
      +
      +openai.api_key = "your-api-key"
      +
      +response = openai.Completion.create(
      +    model="gpt-4",
      +    prompt="Explain the basics of machine learning.",
      +    max_tokens=100
      +)
      +
      +print(response.choices[0].text)
    • +
  2. +
  3. Run the Script: Run your script in the terminal to +see the model’s response.
  4. +
+

Process and Use Responses

+ ++++ + + + + + + + + + + + + + + + + +
StepDescription
Integrate Responses- Use the responses from the AI model in your application.
- +For example, display the generated text in a web app or use it to +automate a task.
Optimize and Iterate- Continuously optimize your prompts and code based on the +responses.
- Iterate to improve the performance and relevance of +the AI model.
+

Transition to Production

+ ++++ + + + + + + + + + + + + + + + + + + + + +
StepDescription
Provision Azure Resources- Sign in to your Azure account.
- Provision the necessary +resources, such as Azure Cognitive Services.
Update Authentication- Switch from using your GitHub PAT to an Azure production key.
+- Update your environment variable:
+export OPENAI_API_KEY="your-azure-api-key"
Monitor and ScaleMonitor your usage and scale your application as needed using +Azure’s infrastructure
+
+

+Total Visitors +

+

Visitor Count

+
+ + diff --git a/_site/GitHub/demos/1_GitHubPagesOverview.html b/_site/GitHub/demos/1_GitHubPagesOverview.html new file mode 100644 index 0000000..acd5621 --- /dev/null +++ b/_site/GitHub/demos/1_GitHubPagesOverview.html @@ -0,0 +1,425 @@ + + + + + + + 1_GitHubPagesOverview + + + + +

GitHub Pages -Overview

+

Costa Rica

+

brown9804

+

Last updated: 2025-01-13

+
+
+

GitHub Pages is a feature provided by GitHub that +allows you to +host static websites directly from a GitHub repository. +It’s a great way to showcase your projects, create personal websites, or +host documentation for your repositories.

+
+

Wiki

+
+ +Table of Wiki (Click to expand) + + +
+

Content

+
+ +Table of Content (Click to expand) + + +
+
+

What is GitHub Pages?
GitHub Pages is a +free service that turns your GitHub repositories into websites. +You can host HTML, CSS, and JavaScript files, and it’s perfect for +static websites that don’t require server-side processing. +GitHub Pages supports custom domains, making it easy to create a +professional-looking website.

+
+

How is GitHub Pages Used?

+ +

Automate +the process of converting Markdown to static HTML and deploying it using +GitHub Pages and GitHub Actions

+
    +
  1. Create a GitHub Repository +
      +
    • Go to GitHub and create a new repository. Name it +username.github.io, where username is your +GitHub username.
    • +
    • Make sure the repository is public.
    • +
  2. +
  3. Add Your Markdown Files +
      +
    • Clone the repository to your local machine.
    • +
    • Add your Markdown files to the repository.
    • +
    • Commit and push the changes to GitHub.
    • +
  4. +
  5. Create a GitHub Actions Workflow +
      +
    • In your repository, create a .github/workflows +directory.
    • +
    • Inside this directory, create a file named +md-html-deploy.yml.
    • +
  6. +
  7. Define the Workflow: Add the following content to the +md-html-deploy.yml file to set up a workflow that converts +Markdown to HTML and deploys it to the main branch: +
      +
    1. Checkout Repository: This step checks out your +repository so that the workflow can access the files.
    2. +
    3. Set up Node.js: This step sets up Node.js, which is +required for some Markdown converters.
    4. +
    5. Install Dependencies: This step installs the +necessary dependencies for your project.
    6. +
    7. Convert Markdown to HTML: This step uses +pandoc to convert Markdown files to HTML and places them in +the _site directory.
    8. +
    9. Deploy to GitHub Pages: This step commits the +generated HTML files back to the main branch and pushes the +changes. This ensures that your GitHub Pages site is updated with the +latest HTML files.
    10. +
    +
    name: Convert Markdown to HTML and Deploy
    +
    +on:
    +  push:
    +    branches:
    +      - main  # Trigger the workflow on push to the main branch
    +
    +jobs:
    +  build-and-deploy:
    +    runs-on: ubuntu-latest
    +
    +    steps:
    +      - name: Checkout repository
    +        uses: actions/checkout@v2
    +
    +      - name: Set up Node.js
    +        uses: actions/setup-node@v2
    +        with:
    +          node-version: '14'
    +
    +      - name: Install dependencies
    +        run: npm install
    +
    +      - name: Convert Markdown to HTML
    +        run: |
    +          mkdir -p _site
    +          for file in *.md; do
    +            pandoc "$file" --standalone --toc -o "_site/${file%.md}.html"
    +          done
    +
    +      - name: Deploy to GitHub Pages
    +        run: |
    +          git config --global user.name 'github-actions[bot]'
    +          git config --global user.email 'github-actions[bot]@users.noreply.github.com'
    +          git add _site
    +          git commit -m 'Deploy static HTML files'
    +          git push origin main
  8. +
+

Setting Up GitHub Pages

+
    +
  1. Create a Repository: Create a new repository on +GitHub or use an existing one.
  2. +
  3. Enable GitHub Pages: +
      +
    • Go to the repository settings on GitHub.

    • +
    • Under the GitHub Pages section, select the +main branch as the source.

      +

      image

    • +
    +
    +

    Static HTML refers to web pages that are delivered to the user’s +browser exactly as stored, without any server-side processing. Static +sites are fast, secure, and easy to deploy, making them ideal for simple +websites, portfolios, blogs, and documentation.

    +
  4. +
  5. Push Your Code: Commit and push your code to the main branch. The +GitHub Actions workflow will automatically run and deploy your site to +GitHub Pages.
  6. +
+
+

+Total Visitors +

+

Visitor Count

+
+ + diff --git a/_site/Network/README.html b/_site/Network/README.html new file mode 100644 index 0000000..0d7d9ca --- /dev/null +++ b/_site/Network/README.html @@ -0,0 +1,264 @@ + + + + + + + README + + + + +

Cloud Networking Overview

+

Costa Rica

+

brown9804

+

Last updated: 2024-12-13

+
+

Overview

+

image

+

image

+

image

+

Wiki

+ +
+

+Total Visitors +

+

Visitor Count

+
+ + diff --git a/_site/README.html b/_site/README.html index 586cf03..bb9125c 100644 --- a/_site/README.html +++ b/_site/README.html @@ -167,179 +167,196 @@ -

Kubernetes Overview

-
+

Cloud DevOps - Learning Path

Costa Rica

-

Belinda Brown, belindabrownr04@gmail.com

-

GitHub brown9804

-

April, 2021

+

Last updated: 2024-12-13


-

Wiki

+
+

Provides the essential knowledge required to work effectively within +Azure and embrace DevOps/Agile methodologies. Additionally, it offers +insights into fundamental cloud concepts.

+
+
+

+Total Visitors +

+

Visitor Count

+
+

Content

+
+ +Table of Contents (Click to expand) + -

image

-

image

+
  • Agile
  • +
  • DevOps
  • +
  • Network
  • +
  • GitHub
  • +
  • Cloud +Principles
  • -

    image

    - ---- - - - - - - - - - - - - -
    Single/Multi TenantsFederated
    imageimage
    +
    +

    Wiki

    - ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ComponentDefinitionURL
    KubernetesIs an open source system to deploy, scale, and manage containerized -applications anywhere.What is -k8s
    ContainerA container is a standalone object that holds everything an -application needs to execute, including libraries, resource files, and -configuration files. Containers are lightweight compared to virtual -machines and can run regardless of infrastructure or environment. This -makes them ideal for ease-of-management across devices and operating -systems.What -is k8s container?
    PodA container or group of containers running on a machine.What -is k8s pods?
    NodeA node is a physical or virtual machine that operates at least a -single container, though they can (and often) run multiple -containers.What is -ks8 node?
    ClusterA cluster is a set of virtual or physical machines, each operating -as a nod to run containerized applications. Clusters are managed by -Kubernetes.What -is k8s cluster?
    DeploymentDeployment refers to the process of installing, executing, or -updating containers on a node. Efficient deployment takes advantage of -tools such as automation to simplify management and resource usage.Reference -here
    ReplicasetA tool used to ensure availability, Replicaset defines the quantity -of stable replica Pods that must be running at any one time, then -manages resources to fulfill this. Replicaset creates or deletes Pods to -meet and maintain the target.Reference -here
    OrchestrationContainer orchestration operates one level above deployment and -refers to the overall management and operational logistics that oversee -containers. Orchestration is necessary to balance resources in an -extremely dynamic environment.Reference -here
    -

    image

    -

    image

    +
    + + +
    +
    + + +
    +
    + + +
    -

    image

    -

    image

    -

    image

    +
    + + +