diff --git a/Dockerfile.generator b/Dockerfile.generator index 4f25d9d..48ddbe1 100644 --- a/Dockerfile.generator +++ b/Dockerfile.generator @@ -1,10 +1,11 @@ -FROM registry.fedoraproject.org/fedora:36 +FROM quay.io/fedora/fedora:36 ENV CWT_DIR=/tmp/container-workflow-tool RUN dnf install -y go-md2man \ make git python3-PyYAML \ python3-GitPython \ python3-requests-kerberos \ python3-pip \ + python3-gitlab \ distgen COPY ./ ${CWT_DIR}/ diff --git a/Dockerfile.tests b/Dockerfile.tests index a17fce4..f601cbd 100644 --- a/Dockerfile.tests +++ b/Dockerfile.tests @@ -1,4 +1,4 @@ -FROM registry.fedoraproject.org/fedora:36 +FROM quay.io/fedora/fedora:36 RUN dnf install -y ansible diff --git a/container_workflow_tool/cli_common.py b/container_workflow_tool/cli_common.py index b5f21fa..1652e6a 100644 --- a/container_workflow_tool/cli_common.py +++ b/container_workflow_tool/cli_common.py @@ -59,6 +59,10 @@ def get_parser(self): parsers['git'].add_argument('--rebuild-reason', help='Use a custom reason for rebuilding') parsers['git'].add_argument('--commit-msg', help='Use a custom message instead of the default one') parsers['git'].add_argument('--check-script', help='Script/command to be run when checking repositories') + parsers['git'].add_argument( + '--gitlab', action='store_true', default=False, + help='File a merge request to the corresponding repository instead of pushing directly to dist-git' + ) parsers['build'].add_argument( '--repo-url', help='Set the url of a .repo file to be used when building the image' ) @@ -106,6 +110,7 @@ def git_usage(self): --commit-msg - Use a custom message instead of the default one --rebuild-reason - Use a custom reason for rebuilding --check-script - Script/command to be run when checking repositories + --gitlab - Use GitLab for filing merge requests instead of pushing directly to dist-git """ return action_help diff --git a/container_workflow_tool/distgit.py b/container_workflow_tool/distgit.py index 946e032..209954e 100644 --- a/container_workflow_tool/distgit.py +++ b/container_workflow_tool/distgit.py @@ -5,7 +5,7 @@ from git import Repo from git.exc import GitCommandError -import container_workflow_tool.utility as u +import container_workflow_tool.utility as utility from container_workflow_tool.utility import RebuilderError from container_workflow_tool.dockerfile import DockerfileHandler from container_workflow_tool.sync import SyncHandler @@ -49,7 +49,7 @@ def check_script(self, component, script_path, component_path): self.logger.info(template.format(name=component, status="Affected")) err = ret.stderr.decode('utf-8').strip() if err: - self.logger.error(u._2sp(err)) + self.logger.error(utility._2sp(err)) else: self.logger.info(template.format(name=component, status="OK")) @@ -63,7 +63,7 @@ def dist_git_changes(self, images, rebase=False): rebase (bool, optional): Specify if a rebase should be done instead """ try: - for image in (images): + for image in images: name = image["name"] component = image["component"] branch = image["git_branch"] @@ -92,18 +92,17 @@ def dist_git_changes(self, images, rebase=False): ups_name = name.split('-')[0] # Clone upstream repository ups_path = os.path.join('upstreams/', ups_name) - self._clone_upstream(url, ups_path, commands=commands) + self.clone_upstream(url, ups_path, commands=commands) # Save the upstream commit hash ups_hash = Repo(ups_path).commit().hexsha - self._pull_upstream(component, path, url, repo, ups_name, commands) + self.pull_upstream(component, path, url, repo, ups_name, commands) self.df_handler.update_dockerfile(
df_path, from_tag, downstream_from=downstream_from ) repo.git.add("Dockerfile") # It is possible for the git repository to have no changes if repo.is_dirty(): - commit = self.get_commit_msg(rebase, image, ups_hash - ) + commit = self.get_commit_msg(rebase, image, ups_hash) if commit: repo.git.commit("-m", commit) else: @@ -122,8 +121,8 @@ def _clone_downstream(self, component, branch): self.logger.info("Using existing downstream repo: " + component) repo = Repo(component) else: - hostname_url = u._get_hostname_url(self.conf) - packager = u._get_packager(self.conf) + hostname_url = utility._get_hostname_url(self.conf) + packager = utility._get_packager(self.conf) # if packager is fedpkg then namespace is `container` else `containers` namespace = "container" if packager == "fedpkg" else "containers" component_path = f"{namespace}/{component}" @@ -162,7 +161,7 @@ def push_changes(self, tmp, images): # commit_msg is set so it is always returned commit = self.get_commit_msg(None, image) repo.git.commit("-am", commit) - if self._get_unpushed_commits(repo): + if self.are_unpushed_commits_available(repo): self.logger.info("Pushing: " + component) repo.git.push() @@ -176,7 +175,7 @@ def push_changes(self, tmp, images): if failed: self.logger.error("Failed pushing images:") for image in failed: - self.logger.error(u._2sp(image["component"])) + self.logger.error(utility._2sp(image["component"])) self.logger.error("Please check the failures and push the changes manually.") # TODO: Multiple future branches? @@ -204,5 +203,5 @@ def merge_future_branches(self, images): if failed: self.logger.error("Failed merging images:") for image in failed: - self.logger.error(u._2sp(image["component"])) + self.logger.error(utility._2sp(image["component"])) self.logger.error("Please check the failures and push the changes manually.") diff --git a/container_workflow_tool/git_operations.py b/container_workflow_tool/git_operations.py index 7ef94bf..a0b3383 100644 --- a/container_workflow_tool/git_operations.py +++ b/container_workflow_tool/git_operations.py @@ -55,7 +55,7 @@ def set_commit_msg(self, msg): """ self.commit_msg = msg - def _do_git_reset(self, repo): + def do_git_reset(self, repo): file_list = ['--', '.gitignore'] + self.conf.ignore_files repo.git.reset(file_list) # One file at a time to make sure all files get reset even on error @@ -68,7 +68,7 @@ def _do_git_reset(self, repo): repo.git.clean('-xfd', f) self.logger.debug("Removing untracked ignored file: " + f) - def _clone_upstream(self, url, ups_path, commands=None): + def clone_upstream(self, url, ups_path, commands=None): """ :params: url is URL to repofile from upstream. https://github.com/sclorg :param: ups_path is path where URL is cloned locally @@ -229,7 +229,7 @@ def get_commit_msg(self, rebase, image=None, ups_hash=None): commit += "\n created from upstream commit: " + ups_hash return commit - def _pull_upstream(self, component, path, url, repo, ups_name, commands): + def pull_upstream(self, component, path, url, repo, ups_name, commands): """Pulls an upstream repo and copies it into downstream""" ups_path = os.path.join('upstreams/', ups_name) cp_path = os.path.join(ups_path, path) @@ -268,7 +268,7 @@ def _pull_upstream(self, component, path, url, repo, ups_name, commands): self.update_test_openshift_yaml(test_openshift_yaml_file, path, short_name=ups_name) repo.git.add("*") - self._do_git_reset(repo) + self.do_git_reset(repo) # TODO: Configurable? 
df_ext = self.df_ext df_path = os.path.join(component, "Dockerfile") diff --git a/container_workflow_tool/gitlab.py b/container_workflow_tool/gitlab.py new file mode 100644 index 0000000..29dd43f --- /dev/null +++ b/container_workflow_tool/gitlab.py @@ -0,0 +1,447 @@ +# MIT License +# +# Copyright (c) 2020 SCL team at Red Hat +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import os +import time +import gitlab +import shutil +import subprocess +from pathlib import Path +from typing import List, Any + +from gitlab import Gitlab +from git import Repo +from git.exc import GitCommandError + +import container_workflow_tool.utility as utility +from container_workflow_tool.utility import RebuilderError, cwd +from container_workflow_tool.dockerfile import DockerfileHandler +from container_workflow_tool.git_operations import GitOperations +from container_workflow_tool.sync import SyncHandler +from container_workflow_tool.named_tuples import ( + ProjectMRs, + ProjectBranches, + ProjectForks, + CurrentUser, + ProjectMR, + ProjectInfo, + ProjectCreateFork, + ProtectedBranches, +) + + +class GitLabAPI(GitOperations): + """Class for working with GitLab.""" + + def __init__(self, *args, **kwargs): + super(GitLabAPI, self).__init__(*args, **kwargs) + self.df_ext = self.conf.df_ext + self._gitlab_api = None + self.commit_msg = None + self.project_id = 0 + self.fork_id = 0 + self.target_project = None + self.source_project = None + self.df_handler = DockerfileHandler(self.base_image) + self.sync_handler = SyncHandler(logger=self.logger) + self.sync_branch_name = self.base_image.replace(":", "-") + + def __str__(self) -> str: + return f"conf:{self.conf}\n" f"base_image:{self.base_image}" + + @property + def gitlab_api(self): + if not self._gitlab_api: + self._gitlab_api = Gitlab( + "https://gitlab.com", + private_token=os.environ["GITLAB_API_TOKEN"].strip(), + ) + assert self._gitlab_api + return self._gitlab_api + + def check_authentication(self) -> CurrentUser: + """ + Checks if authentication to GitLab is properly set. 
+ Otherwise it fails with the GitLab exception GitlabAuthenticationError. + :return: CurrentUser - named tuple + """ + try: + self.gitlab_api.auth() + current_user = self.gitlab_api.user + return CurrentUser(current_user.id, current_user.username) + except gitlab.exceptions.GitlabAuthenticationError as gae: + self.logger.error(f"Authentication failed with reason {gae}.") + return None + + def check_username(self) -> bool: + """ + Checks if the current user is logged in to GitLab and the GitLab username matches the local username + :return: True - authentication was successful + False - authentication failed + """ + user_name = self.check_authentication() + if not user_name: + raise RebuilderError("Authentication to GitLab failed. " + "Check if the GITLAB_API_TOKEN environment variable is properly set.") + if user_name.username != os.environ["USER"]: + raise RebuilderError("Authentication to GitLab failed. " + "The user behind GITLAB_API_TOKEN is different from your current login name.") + return True + + def get_project_info(self) -> ProjectInfo: + self.logger.debug(f"Get information for project {self.target_project.name} with id {self.target_project.id}") + return ProjectInfo(self.target_project.id, self.target_project.name, self.target_project.ssh_url_to_repo, + self.target_project.web_url) + + def load_project(self, fork=False): + if fork: + self.source_project = self.gitlab_api.projects.get(self.fork_id) + else: + self.target_project = self.gitlab_api.projects.get(self.project_id) + + def load_forked_project(self): + for cnt in range(0, 20): + try: + self.load_project(fork=True) + break + except gitlab.exceptions.GitlabGetError as gge: + self.logger.debug(gge.response_code, gge.error_message) + if gge.response_code == 404: + self.logger.debug("Let's wait a couple of seconds until the fork is created.") + else: + raise RebuilderError("Fork was created but does not exist after 20 seconds") + # Let's wait 2 more seconds + time.sleep(2) + + def get_project_forks(self) -> List[ProjectForks]: + """ + Returns a list of forks as ProjectForks named tuples + """ + self.logger.debug(f"Get forks for project {self.base_image}") + return [ + ProjectForks( + x.id, + x.name, + x.ssh_url_to_repo, + x.owner["username"], + x.forked_from_project["id"], + x.forked_from_project["ssh_url_to_repo"], + ) + for x in self.target_project.forks.list() + ] + + def get_project_branches(self, fork=False) -> List[ProjectBranches]: + self.logger.debug(f"Get branches for project {self.project_id}") + if fork: + return [ + ProjectBranches(x.name, x.web_url, x.protected) + for x in self.source_project.branches.list() + ] + return [ + ProjectBranches(x.name, x.web_url, x.protected) + for x in self.target_project.branches.list() + ] + + def get_project_mergerequests(self) -> List[ProjectMRs]: + self.logger.debug(f"Get merge requests for project {self.project_id}") + project_mr = self.target_project.mergerequests.list(state="opened") + return [ + ProjectMRs( + x.iid, x.project_id, x.target_branch, x.title, x.author["username"] + ) + for x in project_mr + ] + + def create_project_fork(self) -> ProjectCreateFork: + self.logger.debug(f"Create fork for project {self.project_id} with base_image {self.base_image}") + project_mr = self.target_project.forks.create() + return ProjectCreateFork( + project_mr.id, project_mr.name, + project_mr.ssh_url_to_repo, project_mr.web_url, + project_mr.forked_from_project["id"] + ) + + def get_protected_branches(self) -> List[ProtectedBranches]: + self.logger.debug(f"Get protected branches for fork {self.fork_id}") +
protected_branches = self.source_project.protectedbranches.list() + return [ + ProtectedBranches(x.name) for x in protected_branches + ] + + def create_project_mergerequest(self, data) -> ProjectMR: + self.logger.debug( + f"Create merge request for project {self.project_id} with base-image {self.base_image} with data {data}" + ) + try: + mr = self.source_project.mergerequests.create(data) + return ProjectMR( + mr.iid, + mr.title, + mr.description, + mr.target_branch, + mr.author["username"], + mr.source_project_id, + mr.target_project_id, + mr.web_url, + ) + except gitlab.exceptions.GitlabCreateError as gce: + self.logger.error(f"{gce.error_message} and {gce.response_code}") + if gce.response_code == 409: + self.logger.error("Another MR already exists") + return None + + def _check_project_forks(self) -> bool: + fork_exist = False + self.logger.debug("Check if a project fork exists") + project_forks = self.get_project_forks() + for fork in project_forks: + if fork.username != os.environ["USER"]: + continue + self.fork_id = fork.id + fork_exist = True + break + self.logger.debug(f"Fork status: {fork_exist}") + return fork_exist + + def fork_project(self) -> bool: + self.logger.debug(f"Create fork for project {self.project_id}") + fork: ProjectCreateFork + try: + fork = self.create_project_fork() + self.logger.debug(f"Fork result {fork}") + except gitlab.exceptions.GitlabCreateError as gce: + if gce.response_code == 409: + self.logger.debug(f"Fork for project {self.project_id} already exists with id {self.fork_id}") + return True + self.logger.error(f"{gce.error_message} and {gce.response_code}") + raise RebuilderError + + if fork.forked_from_project_id != self.project_id: + raise RebuilderError(f"Fork for project {self.base_image} was created from the wrong project {self.project_id}. " + f"Check the CWT configuration file {self.conf}.") + self.fork_id = fork.id + try: + self.load_forked_project() + except RebuilderError: + raise RebuilderError(f"CWT tool detected a problem with the fork for project {self.project_id}.") + protected_branches = self.get_protected_branches() + self.logger.debug(f"Protected branches are {protected_branches}") + for brn in protected_branches: + self.source_project.protectedbranches.delete(brn.name) + return True + + def gitlab_git_changes(self, images, rebase): + """Method to merge changes from upstream into downstream + + Pulls both downstream and upstream repositories into a temporary dir. + Merge is done by copying tracked files from upstream into downstream. + + Args: + images (list): Image configuration dictionaries to process + rebase (bool, optional): Specify if a rebase should be done instead + """ + try: + for image in images: + name = image["name"] + component = image["component"] + branch = image["git_branch"] + path = image["git_path"] + url = image["git_url"] + try: + self.project_id = image["project_id"] + except KeyError: + self.logger.info(f"Image {name} for component {component} does not have a GitLab project id set. " + f"The value is mandatory. " + f"Update the corresponding configuration file. Skipping for now.") + continue + commands = image["commands"] + self.load_project() + if not self._check_project_forks(): + if not self.fork_project(): + raise RebuilderError("CWT tool was not able to create a fork.
Check logs.") + else: + self.load_project(fork=True) + pull_upstr = image.get("pull_upstream", True) + repo = self._clone_downstream(component, branch) + df_path = os.path.join(component, "Dockerfile") + downstream_from = self.df_handler.get_from_df(df_path) + self.logger.debug(f"Downstream_from: {downstream_from}\n") + from_tag = self.conf.get("from_tag", "latest") + if rebase or not pull_upstr: + self.df_handler.update_dockerfile(df_path, from_tag, downstream_from=downstream_from) + # It is possible for the git repository to have no changes + if repo.is_dirty(): + commit = self.get_commit_msg(rebase, image) + if commit: + repo.git.commit("-am", commit) + else: + msg = "Not creating new commit in: " + self.logger.info(msg + component) + else: + ups_name = name.split('-')[0] + # Clone upstream repository + ups_path = os.path.join('upstreams/', ups_name) + self.clone_upstream(url, ups_path, commands=commands) + # Save the upstream commit hash + ups_hash = Repo(ups_path).commit().hexsha + self.pull_upstream(component, path, url, repo, ups_name, commands) + self.df_handler.update_dockerfile(df_path, from_tag, downstream_from=downstream_from) + repo.git.add("Dockerfile") + # It is possible for the git repository to have no changes + if repo.is_dirty(): + commit = self.get_commit_msg(rebase, image, ups_hash) + if commit: + repo.git.commit("-m", commit) + else: + msg = "Not creating new commit in: " + self.logger.info(msg + component) + + self._check_labels(df_path) + finally: + # Cleanup upstream repos + shutil.rmtree("upstreams", ignore_errors=True) + + # FIXME: This should be provided by some external Dockerfile linters + def _check_labels(self, dockerfile_path): + old_labels = ['Release=', 'Name=', 'Version='] + with open(dockerfile_path) as f: + fdata = f.read() + for label in old_labels: + if label in fdata: + self.logger.warn("Wrong label '{}' found in {}".format(label, dockerfile_path)) + + def add_user_name_fork(self) -> bool: + """ + Add the user's fork as a git remote to the repository in the current working directory + """ + username = os.environ["USER"] + self.logger.debug(f"Adding {username} fork as git remote {self.source_project.ssh_url_to_repo}.") + ret = subprocess.run(f"git remote add {username} {self.source_project.ssh_url_to_repo}", + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, shell=True, + executable='/bin/bash') + + if ret.returncode != 0: + self.logger.error(ret.stderr.strip()) + return False + return True + + def _clone_downstream(self, component, branch) -> Any: + """Clones the downstream GitLab repo and adds the user's fork as a remote""" + # Do not set up downstream repo if it already exists + if os.path.isdir(component): + self.logger.info("Using existing downstream repo: " + component) + repo = Repo(component) + else: + project_info = self.get_project_info() + ccomponent = project_info.ssh_url_to_repo + self.logger.info(f"Cloning into: {ccomponent} to directory {os.getcwd()}") + ret = subprocess.run(f"git clone {ccomponent}", stdout=subprocess.PIPE, + stderr=subprocess.PIPE, shell=True, + executable='/bin/bash') + if ret.returncode != 0: + template = "{} failed to clone {} with return value {}."
+ raise RebuilderError(template.format("git", ccomponent, + ret.returncode)) + self.logger.info(f"Path with clone is {os.getcwd()}/{project_info.name}") + with cwd(Path(os.getcwd()) / project_info.name) as _: + if not self.add_user_name_fork(): + raise RebuilderError("Problem with adding the fork as a remote repository") + + for prt_brn in self.get_protected_branches(): + self.logger.debug(f"Remove protected branch {prt_brn.name}.") + self.source_project.protectedbranches.delete(prt_brn.name) + branch_exist = False + for project_brn in self.get_project_branches(fork=True): + if project_brn.name == f"{branch}-{self.sync_branch_name}": + self.logger.info(f"Branch {branch}-{self.sync_branch_name} already exists {project_brn.web_url}") + branch_exist = True + + # Let's create a branch for the sync if it does not exist yet + if not branch_exist: + ret = subprocess.run(f"git checkout -b {branch}-{self.sync_branch_name}", stdout=subprocess.PIPE, + stderr=subprocess.PIPE, shell=True, + executable='/bin/bash') + if ret.returncode != 0: + raise RebuilderError( + f"git failed to checkout branch {branch}-{self.sync_branch_name} with return value {ret.returncode} and {ret.stderr}." + ) + ret = subprocess.run("git branch -avv", stdout=subprocess.PIPE, + stderr=subprocess.PIPE, shell=True, + executable='/bin/bash') + self.logger.debug(ret.stdout.strip()) + repo = Repo(component) + return repo + + def push_changes(self, tmp, images): + """Pushes changes for components to the user's fork and files a merge request against the downstream repository""" + # Check for kerberos ticket + failed = [] + for image in images: + component = image["component"] + branch = image["git_branch"] + self.project_id = image["project_id"] + self.target_project = self.gitlab_api.projects.get(self.project_id) + if not self._check_project_forks(): + raise RebuilderError("CWT tool detected that there are no forks. Run 'cwt git pullupstream' first.") + else: + self.source_project = self.gitlab_api.projects.get(self.fork_id) + try: + self.logger.debug(f"Component to push {component}") + repo = Repo(component) + with cwd(Path(os.getcwd()) / component) as _: + # If a commit message is provided do a commit first + commit = "" + if self.commit_msg and repo.is_dirty(): + # commit_msg is set so it is always returned + commit = self.get_commit_msg(None, image) + repo.git.commit("-am", commit) + if self.are_unpushed_commits_available(repo=repo): + self.logger.info("Pushing: " + component) + ret = subprocess.run(f"git push -u {os.environ['USER']} {branch}-{self.sync_branch_name}", + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, shell=True, + executable='/bin/bash') + if ret.returncode != 0: + raise RebuilderError( + f"git failed to push {component} with return value {ret.returncode} " + f"and {ret.stderr}." + ) + # repo.git.push(os.environ["USER"], f"{branch}-{self.sync_branch_name}") + mr = self.create_project_mergerequest({ + "source_branch": f"{branch}-{self.sync_branch_name}", + "target_branch": branch, + "title": f"Rebuild for latest {self.base_image}", + "target_project_id": self.project_id + }) + self.logger.info(f"Created MR for project {self.project_id}: {mr}.") + else: + self.logger.info(f"There are no unpushed commits."
+ f" Push skipped for {component}.") + except GitCommandError as e: + failed.append(image) + self.logger.error(e) + + if failed: + self.logger.error("Failed pushing images:") + for image in failed: + self.logger.error(utility._2sp(image["component"])) + self.logger.error("Please check the failures and push the changes manually.") diff --git a/container_workflow_tool/main.py b/container_workflow_tool/main.py index 07b4564..f3a5c63 100755 --- a/container_workflow_tool/main.py +++ b/container_workflow_tool/main.py @@ -9,7 +9,6 @@ import re import tempfile import pprint -import getpass import logging from git import Repo, GitError @@ -22,6 +21,7 @@ from container_workflow_tool.git_operations import GitOperations from container_workflow_tool.utility import RebuilderError from container_workflow_tool.config import Config +from container_workflow_tool.gitlab import GitLabAPI class ImageRebuilder: @@ -30,6 +30,7 @@ class ImageRebuilder: def __init__(self, base_image: str, rebuild_reason: str = None, + gitlab_usage: bool = False, config: str = "default.yaml", release: str = "current"): """ Init method of ImageRebuilder class @@ -45,6 +46,7 @@ def __init__(self, self._brewapi: KojiAPI = None self._distgit: DistgitAPI = None self._git_ops: GitOperations = None + self._gitlab_api: GitLabAPI = None self.commit_msg = None self.args = None self.tmp_workdir: str = None @@ -53,6 +55,7 @@ def __init__(self, self.conf_name = config self.rebuild_reason = rebuild_reason + self.gitlab_usage = gitlab_usage self.do_image = None self.exclude_image = None self.do_set = None @@ -117,6 +120,8 @@ def _setup_args(self, args): self.set_commit_msg(args.commit_msg) if getattr(args, 'rebuild_reason', None) is not None and args.rebuild_reason: self.rebuild_reason = args.rebuild_reason + if getattr(args, 'gitlab', None) is not None and args.gitlab: + self.gitlab_usage = args.gitlab if getattr(args, 'check_script', None) is not None and args.check_script: self.check_script = args.check_script if getattr(args, 'disable_klist', None) is not None and args.disable_klist: @@ -137,6 +142,14 @@ def _get_set_from_config(self, layer: str) -> str: raise RebuilderError(err_msg) return i + @property + def gitlab_api(self): + if not self._gitlab_api: + self._gitlab_api = GitLabAPI(self.base_image, self.conf, + self.rebuild_reason, + self.logger.getChild("gitlab")) + return self._gitlab_api + @property def distgit(self): if not self._distgit: @@ -150,7 +163,7 @@ def git_ops(self): if not self._git_ops: self._git_ops = GitOperations(self.base_image, self.conf, self.rebuild_reason, - self.logger.getChild("-git-ops")) + self.logger.getChild("git-ops")) return self._git_ops @property @@ -527,9 +540,7 @@ def pull_upstream(self): for i in images: # Use unversioned name as a path for the repository ups_name = i["name"].split('-')[0] - self.distgit._clone_upstream(i["git_url"], - ups_name, - commands=i["commands"]) + self.git_ops.clone_upstream(i["git_url"], ups_name, commands=i["commands"]) # If check script is set, run the script provided for each config entry if self.check_script: for i in images: @@ -548,7 +559,10 @@ def push_changes(self): self._change_workdir(tmp) images = self._get_images() - self.distgit.push_changes(tmp, images) + if self.gitlab_usage: + self.gitlab_api.push_changes(tmp, images) + else: + self.distgit.push_changes(tmp, images) def dist_git_rebase(self): """ @@ -571,7 +585,10 @@ def dist_git_changes(self, rebase: bool = False): tmp = self._get_tmp_workdir() self._change_workdir(tmp) images = self._get_images() - 
self.distgit.dist_git_changes(images, rebase) + if self.gitlab_usage: + self.gitlab_api.gitlab_git_changes(images, rebase) + else: + self.distgit.dist_git_changes(images, rebase) self.logger.info("\nGit location: " + tmp) if self.args: tmp_str = ' --tmp ' + self.tmp_workdir if self.tmp_workdir else '"' diff --git a/container_workflow_tool/named_tuples.py b/container_workflow_tool/named_tuples.py new file mode 100644 index 0000000..8c91905 --- /dev/null +++ b/container_workflow_tool/named_tuples.py @@ -0,0 +1,60 @@ +# MIT License +# +# Copyright (c) 2020 SCL team at Red Hat +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from collections import namedtuple + + +ProjectMRs = namedtuple( + "ProjectMRs", ["iid", "project_id", "target_branch", "title", "username"] +) +ProjectBranches = namedtuple("ProjectBranches", ["name", "web_url", "protected"]) +ProjectForks = namedtuple( + "ProjectForks", + [ + "id", + "name", + "ssh_url_to_repo", + "username", + "forked_id", + "forked_ssh_url_to_repo", + ], +) +CurrentUser = namedtuple("CurrentUser", ["id", "username"]) +ProjectMR = namedtuple( + "ProjectMR", + [ + "iid", + "title", + "description", + "target_branch", + "author", + "source_project_id", + "target_project_id", + "web_url", + ], +) + +ProjectInfo = namedtuple("ProjectInfo", ["id", "name", "ssh_url_to_repo", "web_url"]) +ProjectCreateFork = namedtuple( + "ProjectCreateFork", ["id", "name", "ssh_url_to_repo", "web_url", "forked_from_project_id"] +) +ProtectedBranches = namedtuple("ProtectedBranches", ["name"]) diff --git a/container_workflow_tool/utility.py b/container_workflow_tool/utility.py index 1512f30..a542306 100644 --- a/container_workflow_tool/utility.py +++ b/container_workflow_tool/utility.py @@ -2,8 +2,11 @@ import argparse import os import logging - +import shutil +import re +from pathlib import Path import textwrap +import contextlib class RebuilderError(Exception): @@ -73,6 +76,23 @@ def _split_config_path(config: str): return config_path, image_set +@contextlib.contextmanager +def cwd(path): + """ + Context manager that changes the current working directory to the given path. + Yields control back to the caller. + On exit, the previous working directory is restored.
+ """ + prev_cwd = Path.cwd() + print(f"Current WD: {prev_cwd}") + os.chdir(path) + try: + yield + finally: + os.chdir(prev_cwd) + + def setup_logger(logger_id, level=logging.INFO): logger = logging.getLogger(logger_id) logger.setLevel(level) diff --git a/image-requirements/install-requirements.yaml b/image-requirements/install-requirements.yaml index 66b8a9a..bd2946c 100644 --- a/image-requirements/install-requirements.yaml +++ b/image-requirements/install-requirements.yaml @@ -13,4 +13,5 @@ - krb5-devel - krb5-workstation - golang-github-cpuguy83-md2man + - python3-gitlab become: true diff --git a/tests/data/nodejs-16/Dockerfile b/tests/data/nodejs-16/Dockerfile new file mode 100644 index 0000000..727fac2 --- /dev/null +++ b/tests/data/nodejs-16/Dockerfile @@ -0,0 +1,103 @@ +FROM rhscl/s2i-core-rhel7:1 + +# RHSCL rh-nginx116 image. +# +# Volumes: +# * /var/opt/rh/rh-nginx116/log/nginx/ - Storage for logs + +EXPOSE 8080 +EXPOSE 8443 + +ENV NAME=nginx \ + NGINX_VERSION=1.16 \ + NGINX_SHORT_VER=116 \ + PERL_SCL_SHORT_VER=526 \ + VERSION=0 + +# Set SCL related variables in Dockerfile so that the collection is enabled by default +ENV SUMMARY="Platform for running nginx $NGINX_VERSION or building nginx-based application" \ + DESCRIPTION="Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP \ +protocols, with a strong focus on high concurrency, performance and low memory usage. The container \ +image provides a containerized packaging of the nginx $NGINX_VERSION daemon. The image can be used \ +as a base image for other applications based on nginx $NGINX_VERSION web server. \ +Nginx server image can be extended using source-to-image tool." \ + X_SCLS="rh-perl$PERL_SCL_SHORT_VER rh-nginx$NGINX_SHORT_VER" \ + PATH=/opt/rh/rh-perl$PERL_SCL_SHORT_VER/root/usr/local/bin:/opt/rh/rh-perl$PERL_SCL_SHORT_VER/root/usr/bin:/opt/rh/rh-nginx$NGINX_SHORT_VER/root/usr/bin:/opt/rh/rh-nginx$NGINX_SHORT_VER/root/usr/sbin${PATH:+:${PATH}} \ + MANPATH=/opt/rh/rh-perl$PERL_SCL_SHORT_VER/root/usr/share/man:/opt/rh/rh-nginx$NGINX_SHORT_VER/root/usr/share/man:${MANPATH} \ + PKG_CONFIG_PATH=/opt/rh/rh-nginx$NGINX_SHORT_VER/root/usr/lib64/pkgconfig${PKG_CONFIG_PATH:+:${PKG_CONFIG_PATH}} \ + LD_LIBRARY_PATH=/opt/rh/rh-perl$PERL_SCL_SHORT_VER/root/usr/lib64 \ + PERL5LIB="/opt/rh/rh-nginx$NGINX_SHORT_VER/root/usr/lib64/perl5/vendor_perl${PERL5LIB:+:${PERL5LIB}}" + +LABEL summary="${SUMMARY}" \ + description="${DESCRIPTION}" \ + io.k8s.description="${DESCRIPTION}" \ + io.k8s.display-name="Nginx ${NGINX_VERSION}" \ + io.openshift.expose-services="8080:http" \ + io.openshift.expose-services="8443:https" \ + io.openshift.tags="builder,${NAME},rh-${NAME}${NGINX_SHORT_VER}" \ + com.redhat.component="rh-${NAME}${NGINX_SHORT_VER}-container" \ + name="rhscl/${NAME}-${NGINX_SHORT_VER}-rhel7" \ + version="1" \ + com.redhat.license_terms="https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel" \ + maintainer="SoftwareCollections.org " \ + help="For more information visit https://github.com/sclorg/${NAME}-container" \ + usage="s2i build rhscl/${NAME}-${NGINX_SHORT_VER}-rhel7:latest " + +ENV NGINX_CONFIGURATION_PATH=${APP_ROOT}/etc/nginx.d \ + NGINX_CONF_PATH=/etc/opt/rh/rh-nginx${NGINX_SHORT_VER}/nginx/nginx.conf \ + NGINX_DEFAULT_CONF_PATH=${APP_ROOT}/etc/nginx.default.d \ + NGINX_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/nginx \ + NGINX_APP_ROOT=${APP_ROOT} \ + NGINX_LOG_PATH=/var/opt/rh/rh-nginx${NGINX_SHORT_VER}/log/nginx \ + NGINX_PERL_MODULE_PATH=${APP_ROOT}/etc/perl + +RUN 
yum install -y yum-utils && \ + prepare-yum-repositories rhel-server-rhscl-7-rpms && \ + INSTALL_PKGS="nss_wrapper bind-utils gettext hostname rh-nginx${NGINX_SHORT_VER} rh-nginx${NGINX_SHORT_VER}-nginx \ + rh-nginx${NGINX_SHORT_VER}-nginx-mod-stream rh-nginx${NGINX_SHORT_VER}-nginx-mod-http-perl" && \ + yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ + rpm -V $INSTALL_PKGS && \ + yum -y clean all --enablerepo='*' + +# Copy the S2I scripts from the specific language image to $STI_SCRIPTS_PATH +COPY ./s2i/bin/ $STI_SCRIPTS_PATH + +# Copy extra files to the image. +COPY ./root/ / + +# In order to drop the root user, we have to make some directories world +# writeable as OpenShift default security model is to run the container under +# random UID. +RUN sed -i -f ${NGINX_APP_ROOT}/nginxconf.sed ${NGINX_CONF_PATH} && \ + chmod a+rwx ${NGINX_CONF_PATH} && \ + mkdir -p ${NGINX_APP_ROOT}/etc/nginx.d/ && \ + mkdir -p ${NGINX_APP_ROOT}/etc/nginx.default.d/ && \ + mkdir -p ${NGINX_APP_ROOT}/src/nginx-start/ && \ + mkdir -p ${NGINX_CONTAINER_SCRIPTS_PATH}/nginx-start && \ + mkdir -p ${NGINX_LOG_PATH} && \ + mkdir -p ${NGINX_PERL_MODULE_PATH} && \ + ln -s ${NGINX_LOG_PATH} /var/log/nginx && \ + ln -s /etc/opt/rh/rh-nginx${NGINX_SHORT_VER}/nginx /etc/nginx && \ + ln -s /opt/rh/rh-nginx${NGINX_SHORT_VER}/root/usr/share/nginx /usr/share/nginx && \ + chmod -R a+rwx ${NGINX_APP_ROOT}/etc && \ + chmod -R a+rwx /var/opt/rh/rh-nginx${NGINX_SHORT_VER} && \ + chmod -R a+rwx ${NGINX_CONTAINER_SCRIPTS_PATH}/nginx-start && \ + chown -R 1001:0 ${NGINX_APP_ROOT} && \ + chown -R 1001:0 /var/opt/rh/rh-nginx${NGINX_SHORT_VER} && \ + chown -R 1001:0 ${NGINX_CONTAINER_SCRIPTS_PATH}/nginx-start && \ + chmod -R a+rwx /var/run && \ + chown -R 1001:0 /var/run && \ + rpm-file-permissions + +USER 1001 + +# Not using VOLUME statement since it's not working in OpenShift Online: +# https://github.com/sclorg/httpd-container/issues/30 +# VOLUME ["/opt/rh/rh-nginx116/root/usr/share/nginx/html"] +# VOLUME ["/var/opt/rh/rh-nginx116/log/nginx/"] + +ENV BASH_ENV=${NGINX_APP_ROOT}/etc/scl_enable \ + ENV=${NGINX_APP_ROOT}/etc/scl_enable \ + PROMPT_COMMAND=". ${NGINX_APP_ROOT}/etc/scl_enable" + +CMD $STI_SCRIPTS_PATH/usage diff --git a/tests/data/nodejs-16/Dockerfile.rhel7 b/tests/data/nodejs-16/Dockerfile.rhel7 new file mode 100644 index 0000000..727fac2 --- /dev/null +++ b/tests/data/nodejs-16/Dockerfile.rhel7 @@ -0,0 +1,103 @@ +FROM rhscl/s2i-core-rhel7:1 + +# RHSCL rh-nginx116 image. +# +# Volumes: +# * /var/opt/rh/rh-nginx116/log/nginx/ - Storage for logs + +EXPOSE 8080 +EXPOSE 8443 + +ENV NAME=nginx \ + NGINX_VERSION=1.16 \ + NGINX_SHORT_VER=116 \ + PERL_SCL_SHORT_VER=526 \ + VERSION=0 + +# Set SCL related variables in Dockerfile so that the collection is enabled by default +ENV SUMMARY="Platform for running nginx $NGINX_VERSION or building nginx-based application" \ + DESCRIPTION="Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP \ +protocols, with a strong focus on high concurrency, performance and low memory usage. The container \ +image provides a containerized packaging of the nginx $NGINX_VERSION daemon. The image can be used \ +as a base image for other applications based on nginx $NGINX_VERSION web server. \ +Nginx server image can be extended using source-to-image tool." 
\ + X_SCLS="rh-perl$PERL_SCL_SHORT_VER rh-nginx$NGINX_SHORT_VER" \ + PATH=/opt/rh/rh-perl$PERL_SCL_SHORT_VER/root/usr/local/bin:/opt/rh/rh-perl$PERL_SCL_SHORT_VER/root/usr/bin:/opt/rh/rh-nginx$NGINX_SHORT_VER/root/usr/bin:/opt/rh/rh-nginx$NGINX_SHORT_VER/root/usr/sbin${PATH:+:${PATH}} \ + MANPATH=/opt/rh/rh-perl$PERL_SCL_SHORT_VER/root/usr/share/man:/opt/rh/rh-nginx$NGINX_SHORT_VER/root/usr/share/man:${MANPATH} \ + PKG_CONFIG_PATH=/opt/rh/rh-nginx$NGINX_SHORT_VER/root/usr/lib64/pkgconfig${PKG_CONFIG_PATH:+:${PKG_CONFIG_PATH}} \ + LD_LIBRARY_PATH=/opt/rh/rh-perl$PERL_SCL_SHORT_VER/root/usr/lib64 \ + PERL5LIB="/opt/rh/rh-nginx$NGINX_SHORT_VER/root/usr/lib64/perl5/vendor_perl${PERL5LIB:+:${PERL5LIB}}" + +LABEL summary="${SUMMARY}" \ + description="${DESCRIPTION}" \ + io.k8s.description="${DESCRIPTION}" \ + io.k8s.display-name="Nginx ${NGINX_VERSION}" \ + io.openshift.expose-services="8080:http" \ + io.openshift.expose-services="8443:https" \ + io.openshift.tags="builder,${NAME},rh-${NAME}${NGINX_SHORT_VER}" \ + com.redhat.component="rh-${NAME}${NGINX_SHORT_VER}-container" \ + name="rhscl/${NAME}-${NGINX_SHORT_VER}-rhel7" \ + version="1" \ + com.redhat.license_terms="https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel" \ + maintainer="SoftwareCollections.org " \ + help="For more information visit https://github.com/sclorg/${NAME}-container" \ + usage="s2i build rhscl/${NAME}-${NGINX_SHORT_VER}-rhel7:latest " + +ENV NGINX_CONFIGURATION_PATH=${APP_ROOT}/etc/nginx.d \ + NGINX_CONF_PATH=/etc/opt/rh/rh-nginx${NGINX_SHORT_VER}/nginx/nginx.conf \ + NGINX_DEFAULT_CONF_PATH=${APP_ROOT}/etc/nginx.default.d \ + NGINX_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/nginx \ + NGINX_APP_ROOT=${APP_ROOT} \ + NGINX_LOG_PATH=/var/opt/rh/rh-nginx${NGINX_SHORT_VER}/log/nginx \ + NGINX_PERL_MODULE_PATH=${APP_ROOT}/etc/perl + +RUN yum install -y yum-utils && \ + prepare-yum-repositories rhel-server-rhscl-7-rpms && \ + INSTALL_PKGS="nss_wrapper bind-utils gettext hostname rh-nginx${NGINX_SHORT_VER} rh-nginx${NGINX_SHORT_VER}-nginx \ + rh-nginx${NGINX_SHORT_VER}-nginx-mod-stream rh-nginx${NGINX_SHORT_VER}-nginx-mod-http-perl" && \ + yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ + rpm -V $INSTALL_PKGS && \ + yum -y clean all --enablerepo='*' + +# Copy the S2I scripts from the specific language image to $STI_SCRIPTS_PATH +COPY ./s2i/bin/ $STI_SCRIPTS_PATH + +# Copy extra files to the image. +COPY ./root/ / + +# In order to drop the root user, we have to make some directories world +# writeable as OpenShift default security model is to run the container under +# random UID. 
+RUN sed -i -f ${NGINX_APP_ROOT}/nginxconf.sed ${NGINX_CONF_PATH} && \ + chmod a+rwx ${NGINX_CONF_PATH} && \ + mkdir -p ${NGINX_APP_ROOT}/etc/nginx.d/ && \ + mkdir -p ${NGINX_APP_ROOT}/etc/nginx.default.d/ && \ + mkdir -p ${NGINX_APP_ROOT}/src/nginx-start/ && \ + mkdir -p ${NGINX_CONTAINER_SCRIPTS_PATH}/nginx-start && \ + mkdir -p ${NGINX_LOG_PATH} && \ + mkdir -p ${NGINX_PERL_MODULE_PATH} && \ + ln -s ${NGINX_LOG_PATH} /var/log/nginx && \ + ln -s /etc/opt/rh/rh-nginx${NGINX_SHORT_VER}/nginx /etc/nginx && \ + ln -s /opt/rh/rh-nginx${NGINX_SHORT_VER}/root/usr/share/nginx /usr/share/nginx && \ + chmod -R a+rwx ${NGINX_APP_ROOT}/etc && \ + chmod -R a+rwx /var/opt/rh/rh-nginx${NGINX_SHORT_VER} && \ + chmod -R a+rwx ${NGINX_CONTAINER_SCRIPTS_PATH}/nginx-start && \ + chown -R 1001:0 ${NGINX_APP_ROOT} && \ + chown -R 1001:0 /var/opt/rh/rh-nginx${NGINX_SHORT_VER} && \ + chown -R 1001:0 ${NGINX_CONTAINER_SCRIPTS_PATH}/nginx-start && \ + chmod -R a+rwx /var/run && \ + chown -R 1001:0 /var/run && \ + rpm-file-permissions + +USER 1001 + +# Not using VOLUME statement since it's not working in OpenShift Online: +# https://github.com/sclorg/httpd-container/issues/30 +# VOLUME ["/opt/rh/rh-nginx116/root/usr/share/nginx/html"] +# VOLUME ["/var/opt/rh/rh-nginx116/log/nginx/"] + +ENV BASH_ENV=${NGINX_APP_ROOT}/etc/scl_enable \ + ENV=${NGINX_APP_ROOT}/etc/scl_enable \ + PROMPT_COMMAND=". ${NGINX_APP_ROOT}/etc/scl_enable" + +CMD $STI_SCRIPTS_PATH/usage diff --git a/tests/data/nodejs-16/README.md b/tests/data/nodejs-16/README.md new file mode 100644 index 0000000..a787130 --- /dev/null +++ b/tests/data/nodejs-16/README.md @@ -0,0 +1,207 @@ +Nginx 1.16 server and a reverse proxy server container image +============================================================ +This container image includes Nginx 1.16 server and a reverse server for OpenShift and general usage. +Users can choose between RHEL, CentOS and Fedora based images. +The RHEL images are available in the [Red Hat Container Catalog](https://access.redhat.com/containers/), +the CentOS images are available on [Quay.io](https://quay.io/organization/centos7), +and the Fedora images are available in [Fedora Registry](https://registry.fedoraproject.org/). +The resulting image can be run using [podman](https://github.com/containers/libpod). + +Note: while the examples in this README are calling `podman`, you can replace any such calls by `docker` with the same arguments. + + +Description +----------- + +Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP +protocols, with a strong focus on high concurrency, performance and low memory usage. The container +image provides a containerized packaging of the nginx 1.16 daemon. The image can be used +as a base image for other applications based on nginx 1.16 web server. +Nginx server image can be extended using Openshift's `Source` build feature. + + +Usage in OpenShift +------------------ +In this example, we assume that you are using the `rhel8/nginx-116` image, available through the `nginx:1.16` imagestream tag in Openshift. 
+To build a simple [test-app](https://github.com/sclorg/nginx-container/tree/master/examples/1.16/test-app) application in Openshift: + +``` +oc new-app nginx:1.16~https://github.com/sclorg/nginx-container.git --context-dir=1.16/test/test-app/ +``` + +To access the application: +``` +$ oc get pods +$ oc exec -- curl 127.0.0.1:8080 +``` + + +Source-to-Image framework and scripts +------------------------------------- +This image supports the [Source-to-Image](https://docs.openshift.com/container-platform/4.4/builds/build-strategies.html#images-create-s2i_build-strategies) +(S2I) strategy in OpenShift. The Source-to-Image is an OpenShift framework +which makes it easy to write images that take application source code as +an input, use a builder image like this Nginx container image, and produce +a new image that runs the assembled application as an output. + +In case of Nginx container image, the application source code is typically +either static HTML pages or configuration files. + +To support the Source-to-Image framework, important scripts are included in the builder image: + +* The `/usr/libexec/s2i/run` script is set as the default command in the resulting container image (the new image with the application artifacts). + +* The `/usr/libexec/s2i/assemble` script inside the image is run to produce a new image with the application artifacts. The script takes sources of a given application (HTML pages), Nginx configuration files, and places them into appropriate directories inside the image. The structure of nginx-app can look like this: + +**`./nginx.conf`**-- + The main nginx configuration file + +**`./nginx-cfg/*.conf`** + Should contain all nginx configuration we want to include into image + +**`./nginx-default-cfg/*.conf`** + Contains any nginx config snippets to include in the default server block + +**`./nginx-start/*.sh`** + Contains shell scripts that are sourced right before nginx is launched + +**`./nginx-perl/*.pm`** + Contains perl modules to be use by `perl_modules` and `perl_require` directives + +**`./`** + Should contain nginx application source code + + +Build an application using a Dockerfile +--------------------------------------- +Compared to the Source-to-Image strategy, using a Dockerfile is a more +flexible way to build an Nginx container image with an application. +Use a Dockerfile when Source-to-Image is not sufficiently flexible for you or +when you build the image outside of the OpenShift environment. + +To use the Nginx image in a Dockerfile, follow these steps: + +#### 1. Pull a base builder image to build on + +podman pull rhel8/nginx-116 + +#### 2. Pull an application code + +An example application available at https://github.com/sclorg/nginx-container.git is used here. To adjust the example application, clone the repository. + +``` +git clone https://github.com/sclorg/nginx-container.git nginx-container +cd nginx-container/examples/1.16/ +``` + +#### 3. 
Prepare an application inside a container + +This step usually consists of at least these parts: + +* putting the application source into the container +* moving configuration files to the correct place (if available in the application source code) +* setting the default command in the resulting image + +For all these three parts, you can either set up all manually and use the `nginx` command explicitly in the Dockerfile ([3.1.](#31-to-use-own-setup-create-a-dockerfile-with-this-content)), or you can use the Source-to-Image scripts inside the image ([3.2.](#32-to-use-the-source-to-image-scripts-and-build-an-image-using-a-dockerfile-create-a-dockerfile-with-this-content). For more information about these scripts, which enable you to set-up and run the nginx daemon, see the "Source-to-Image framework and scripts" section above. + +##### 3.1. To use your own setup, create a Dockerfile with this content: + +``` +FROM registry.access.redhat.com/ubi8/nginx-116 + +# Add application sources +ADD test-app/nginx.conf "${NGINX_CONF_PATH}" +ADD test-app/nginx-default-cfg/*.conf "${NGINX_DEFAULT_CONF_PATH}" +ADD test-app/nginx-cfg/*.conf "${NGINX_CONFIGURATION_PATH}" +ADD test-app/*.html . + +# Run script uses standard ways to run the application +CMD nginx -g "daemon off;" +``` + +##### 3.2. To use the Source-to-Image scripts and build an image using a Dockerfile, create a Dockerfile with this content: + +``` +FROM registry.access.redhat.com/ubi8/nginx-116 + +# Add application sources to a directory that the assemble script expects them +# and set permissions so that the container runs without root access +# With older docker that does not support --chown option for ADD statement, +# use these statements instead: +# USER 0 +# ADD app-src /tmp/src +# RUN chown -R 1001:0 /tmp/src +# USER 1001 +ADD --chown 1001:0 app-src /tmp/src + +# Let the assemble script to install the dependencies +RUN /usr/libexec/s2i/assemble + +# Run script uses standard ways to run the application +CMD /usr/libexec/s2i/run +``` + +#### 4. Build a new image from a Dockerfile prepared in the previous step +``` +podman build -t nginx-app . +``` + +#### 5. Run the resulting image with the final application +``` +podman run -d nginx-app +``` + + +Direct usage with a mounted directory +------------------------------------- +An example of the data on the host for the following example: +``` +$ ls -lZ /wwwdata/html +-rw-r--r--. 1 1001 1001 54321 Jan 01 12:34 index.html +-rw-r--r--. 1 1001 1001 5678 Jan 01 12:34 page.html +``` + +If you want to run the image directly and mount the static pages available in the `/wwwdata/` directory on the host +as a container volume, execute the following command: + +``` +$ podman run -d --name nginx -p 8080:8080 -v /wwwdata:/opt/app-root/src:Z rhel8/nginx-116 nginx -g "daemon off;" +``` + +This creates a container named `nginx` running the Nginx server, serving data from +the `/wwwdata/` directory. Port 8080 is exposed and mapped to the host. +You can pull the data from the nginx container using this command: + +``` +$ curl -Lk 127.0.0.1:8080 +``` + +You can replace `/wwwdata/` with location of your web root. Please note that this has to be an **absolute** path, due to podman requirements. 
+ + +Environment variables and volumes +--------------------------------- +The nginx container image supports the following configuration variable, which can be set by using the `-e` option with the podman run command: + + +**`NGINX_LOG_TO_VOLUME`** + When `NGINX_LOG_TO_VOLUME` is set, nginx logs into `/var/log/nginx/`. In case of RHEL-7 and CentOS-7 images, this is a symlink to `/var/opt/rh/rh-nginx116/log/nginx/`. + + +Troubleshooting +--------------- +By default, nginx access logs are written to standard output and error logs are written to standard error, so both are available in the container log. The log can be examined by running: + + podman logs + +**If `NGINX_LOG_TO_VOLUME` variable is set, nginx logs into `/var/log/nginx/`. In case of RHEL-7 and CentOS-7 images, this is a symlink to `/var/opt/rh/rh-nginx116/log/nginx/`, which can be mounted to host system using the container volumes.** + + +See also +-------- +Dockerfile and other sources for this container image are available on +https://github.com/sclorg/nginx-container. +In that repository you also can find another versions of Python environment Dockerfiles. +Dockerfile for CentOS is called `Dockerfile`, Dockerfile for RHEL7 is called `Dockerfile.rhel7`, +for RHEL8 it's `Dockerfile.rhel8` and the Fedora Dockerfile is called Dockerfile.fedora. + diff --git a/tests/data/nodejs-16/help.md b/tests/data/nodejs-16/help.md new file mode 100644 index 0000000..a787130 --- /dev/null +++ b/tests/data/nodejs-16/help.md @@ -0,0 +1,207 @@ +Nginx 1.16 server and a reverse proxy server container image +============================================================ +This container image includes Nginx 1.16 server and a reverse server for OpenShift and general usage. +Users can choose between RHEL, CentOS and Fedora based images. +The RHEL images are available in the [Red Hat Container Catalog](https://access.redhat.com/containers/), +the CentOS images are available on [Quay.io](https://quay.io/organization/centos7), +and the Fedora images are available in [Fedora Registry](https://registry.fedoraproject.org/). +The resulting image can be run using [podman](https://github.com/containers/libpod). + +Note: while the examples in this README are calling `podman`, you can replace any such calls by `docker` with the same arguments. + + +Description +----------- + +Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP +protocols, with a strong focus on high concurrency, performance and low memory usage. The container +image provides a containerized packaging of the nginx 1.16 daemon. The image can be used +as a base image for other applications based on nginx 1.16 web server. +Nginx server image can be extended using Openshift's `Source` build feature. + + +Usage in OpenShift +------------------ +In this example, we assume that you are using the `rhel8/nginx-116` image, available through the `nginx:1.16` imagestream tag in Openshift. 
+To build a simple [test-app](https://github.com/sclorg/nginx-container/tree/master/examples/1.16/test-app) application in Openshift: + +``` +oc new-app nginx:1.16~https://github.com/sclorg/nginx-container.git --context-dir=1.16/test/test-app/ +``` + +To access the application: +``` +$ oc get pods +$ oc exec -- curl 127.0.0.1:8080 +``` + + +Source-to-Image framework and scripts +------------------------------------- +This image supports the [Source-to-Image](https://docs.openshift.com/container-platform/4.4/builds/build-strategies.html#images-create-s2i_build-strategies) +(S2I) strategy in OpenShift. The Source-to-Image is an OpenShift framework +which makes it easy to write images that take application source code as +an input, use a builder image like this Nginx container image, and produce +a new image that runs the assembled application as an output. + +In case of Nginx container image, the application source code is typically +either static HTML pages or configuration files. + +To support the Source-to-Image framework, important scripts are included in the builder image: + +* The `/usr/libexec/s2i/run` script is set as the default command in the resulting container image (the new image with the application artifacts). + +* The `/usr/libexec/s2i/assemble` script inside the image is run to produce a new image with the application artifacts. The script takes sources of a given application (HTML pages), Nginx configuration files, and places them into appropriate directories inside the image. The structure of nginx-app can look like this: + +**`./nginx.conf`**-- + The main nginx configuration file + +**`./nginx-cfg/*.conf`** + Should contain all nginx configuration we want to include into image + +**`./nginx-default-cfg/*.conf`** + Contains any nginx config snippets to include in the default server block + +**`./nginx-start/*.sh`** + Contains shell scripts that are sourced right before nginx is launched + +**`./nginx-perl/*.pm`** + Contains perl modules to be use by `perl_modules` and `perl_require` directives + +**`./`** + Should contain nginx application source code + + +Build an application using a Dockerfile +--------------------------------------- +Compared to the Source-to-Image strategy, using a Dockerfile is a more +flexible way to build an Nginx container image with an application. +Use a Dockerfile when Source-to-Image is not sufficiently flexible for you or +when you build the image outside of the OpenShift environment. + +To use the Nginx image in a Dockerfile, follow these steps: + +#### 1. Pull a base builder image to build on + +podman pull rhel8/nginx-116 + +#### 2. Pull an application code + +An example application available at https://github.com/sclorg/nginx-container.git is used here. To adjust the example application, clone the repository. + +``` +git clone https://github.com/sclorg/nginx-container.git nginx-container +cd nginx-container/examples/1.16/ +``` + +#### 3. 
Prepare an application inside a container + +This step usually consists of at least these parts: + +* putting the application source into the container +* moving configuration files to the correct place (if available in the application source code) +* setting the default command in the resulting image + +For all these three parts, you can either set up all manually and use the `nginx` command explicitly in the Dockerfile ([3.1.](#31-to-use-own-setup-create-a-dockerfile-with-this-content)), or you can use the Source-to-Image scripts inside the image ([3.2.](#32-to-use-the-source-to-image-scripts-and-build-an-image-using-a-dockerfile-create-a-dockerfile-with-this-content). For more information about these scripts, which enable you to set-up and run the nginx daemon, see the "Source-to-Image framework and scripts" section above. + +##### 3.1. To use your own setup, create a Dockerfile with this content: + +``` +FROM registry.access.redhat.com/ubi8/nginx-116 + +# Add application sources +ADD test-app/nginx.conf "${NGINX_CONF_PATH}" +ADD test-app/nginx-default-cfg/*.conf "${NGINX_DEFAULT_CONF_PATH}" +ADD test-app/nginx-cfg/*.conf "${NGINX_CONFIGURATION_PATH}" +ADD test-app/*.html . + +# Run script uses standard ways to run the application +CMD nginx -g "daemon off;" +``` + +##### 3.2. To use the Source-to-Image scripts and build an image using a Dockerfile, create a Dockerfile with this content: + +``` +FROM registry.access.redhat.com/ubi8/nginx-116 + +# Add application sources to a directory that the assemble script expects them +# and set permissions so that the container runs without root access +# With older docker that does not support --chown option for ADD statement, +# use these statements instead: +# USER 0 +# ADD app-src /tmp/src +# RUN chown -R 1001:0 /tmp/src +# USER 1001 +ADD --chown 1001:0 app-src /tmp/src + +# Let the assemble script to install the dependencies +RUN /usr/libexec/s2i/assemble + +# Run script uses standard ways to run the application +CMD /usr/libexec/s2i/run +``` + +#### 4. Build a new image from a Dockerfile prepared in the previous step +``` +podman build -t nginx-app . +``` + +#### 5. Run the resulting image with the final application +``` +podman run -d nginx-app +``` + + +Direct usage with a mounted directory +------------------------------------- +An example of the data on the host for the following example: +``` +$ ls -lZ /wwwdata/html +-rw-r--r--. 1 1001 1001 54321 Jan 01 12:34 index.html +-rw-r--r--. 1 1001 1001 5678 Jan 01 12:34 page.html +``` + +If you want to run the image directly and mount the static pages available in the `/wwwdata/` directory on the host +as a container volume, execute the following command: + +``` +$ podman run -d --name nginx -p 8080:8080 -v /wwwdata:/opt/app-root/src:Z rhel8/nginx-116 nginx -g "daemon off;" +``` + +This creates a container named `nginx` running the Nginx server, serving data from +the `/wwwdata/` directory. Port 8080 is exposed and mapped to the host. +You can pull the data from the nginx container using this command: + +``` +$ curl -Lk 127.0.0.1:8080 +``` + +You can replace `/wwwdata/` with location of your web root. Please note that this has to be an **absolute** path, due to podman requirements. 
+
+
+Environment variables and volumes
+---------------------------------
+The nginx container image supports the following configuration variable, which can be set by using the `-e` option with the `podman run` command:
+
+
+**`NGINX_LOG_TO_VOLUME`**
+    When `NGINX_LOG_TO_VOLUME` is set, nginx logs into `/var/log/nginx/`. In the case of RHEL-7 and CentOS-7 images, this is a symlink to `/var/opt/rh/rh-nginx116/log/nginx/`.
+
+
+Troubleshooting
+---------------
+By default, nginx access logs are written to standard output, and error logs are written to standard error, so both are available in the container log. The log can be examined by running:
+
+    podman logs <container>
+
+**If the `NGINX_LOG_TO_VOLUME` variable is set, nginx logs into `/var/log/nginx/`. In the case of RHEL-7 and CentOS-7 images, this is a symlink to `/var/opt/rh/rh-nginx116/log/nginx/`, which can be mounted to the host system using container volumes.**
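+
+For example, to keep the logs on the host, you can combine `NGINX_LOG_TO_VOLUME`
+with a volume mount (a sketch; `/wwwlogs` is only an example location and must be
+writable by the container user):
+
+```
+$ podman run -d --name nginx -p 8080:8080 -e NGINX_LOG_TO_VOLUME=1 \
+    -v /wwwlogs:/var/log/nginx:Z -v /wwwdata:/opt/app-root/src:Z \
+    rhel8/nginx-116 nginx -g "daemon off;"
+```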
+
+
+See also
+--------
+Dockerfile and other sources for this container image are available at
+https://github.com/sclorg/nginx-container.
+In that repository you can also find other versions of the Nginx environment Dockerfiles.
+The Dockerfile for CentOS is called `Dockerfile`, the Dockerfile for RHEL7 is called `Dockerfile.rhel7`,
+for RHEL8 it is `Dockerfile.rhel8`, and the Fedora Dockerfile is called `Dockerfile.fedora`.
+
diff --git a/tests/test_gitlab.py b/tests/test_gitlab.py
new file mode 100644
index 0000000..265434e
--- /dev/null
+++ b/tests/test_gitlab.py
@@ -0,0 +1,188 @@
+# MIT License
+#
+# Copyright (c) 2020 SCL team at Red Hat
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import os
+import pytest
+import shutil
+import gitlab
+
+from flexmock import flexmock
+from pathlib import Path
+
+from container_workflow_tool.named_tuples import (
+    ProjectBranches,
+    CurrentUser,
+    ProjectMR,
+    ProjectForks,
+    ProjectInfo,
+    ProjectCreateFork
+)
+from container_workflow_tool.cli import ImageRebuilder
+from container_workflow_tool.gitlab import GitLabAPI
+from container_workflow_tool.utility import RebuilderError
+
+from tests.spellbook import DATA_DIR
+
+
+class TestGitLab(object):
+    def setup_method(self):
+        self.component = 's2i-base'
+        self.ir = ImageRebuilder('Testing')
+        os.environ["GITLAB_API_TOKEN"] = "somethingInteresting"
+        self.ir.set_config('default.yaml', release="rawhide")
+        # Partner BZ testing
+        self.ir.rebuild_reason = "Unit testing"
+        self.ir.disable_klist = True
+        self.ir.set_do_images([self.component])
+        self.ga = GitLabAPI(self.ir.base_image, self.ir.conf, rebuild_reason=self.ir.rebuild_reason, logger=self.ir.logger)
+
+    def test_valid_user(self):
+        flexmock(self.ga).should_receive("check_authentication").and_return(
+            CurrentUser(id=1234123, username="fooUser")
+        )
+        os.environ["USER"] = "fooUser"
+        assert self.ga.check_username()
+
+    def test_missing_user(self):
+        flexmock(self.ga).should_receive("check_authentication").and_return(None)
+        with pytest.raises(RebuilderError):
+            self.ga.check_username()
+
+    def test_wrong_resp_user(self):
+        flexmock(self.ga).should_receive("check_authentication").and_return(
+            CurrentUser(id=1234123, username="foobot")
+        )
+        with pytest.raises(RebuilderError):
+            self.ga.check_username()
+
+    def test_get_branches(self):
+        flexmock(self.ga).should_receive("get_project_branches").and_return(
+            ProjectBranches("rhel-8.6.0", "something", True),
+            ProjectBranches("rhel-8.8.0", "something", True),
+        )
+        assert len(self.ga.get_project_branches()) == 2
+        assert self.ga.get_project_branches()[0].name == "rhel-8.6.0"
+        assert self.ga.get_project_branches()[1].name == "rhel-8.8.0"
+
+    def test_gitlab_fork(self):
+        flexmock(self.ga).should_receive("get_project_forks").and_return(
+            [
+                ProjectForks(
+                    123456, "foobar", "git@gitlab.com/namespace/cont/foo", "fooUser", 654321, "git@gitlab.com/phracek/foo"
+                )
+            ]
+        )
+        os.environ["USER"] = "fooUser"
+        fork_exist = self.ga._check_project_forks()
+        assert fork_exist
+
+    def test_gitlab_fork_is_missing(self):
+        flexmock(self.ga).should_receive("get_project_forks").and_return([])
+        fork_exist = self.ga._check_project_forks()
+        assert not fork_exist
+
+    def test_gitlab_project_info(self):
+        flexmock(self.ga).should_receive("get_project_info").and_return(
+            ProjectInfo(
+                123456,
+                "nodejs-16",
+                "git@gitlab.com/namespace/nodejs-16.git",
+                "https://gitlab.com/namespace/nodejs-16"
+            )
+        )
+        assert self.ga.get_project_info().id == 123456
+        assert self.ga.get_project_info().name == "nodejs-16"
+        assert self.ga.get_project_info().ssh_url_to_repo == "git@gitlab.com/namespace/nodejs-16.git"
+        assert self.ga.get_project_info().web_url == "https://gitlab.com/namespace/nodejs-16"
+
+    def test_fork_project_already_exists(self):
+        flexmock(self.ga).should_receive("create_project_fork").and_raise(gitlab.exceptions.GitlabCreateError)
+        with pytest.raises(RebuilderError):
+            self.ga.fork_project()
+
+    def test_fork_project_different_fork_id(self):
+        flexmock(self.ga).should_receive("create_project_fork").and_return(
+            ProjectCreateFork(987654, "nodejs-16", "git@gitlab.com/fooUser/nodejs-16.git", "https://gitlab.com/fooUser/nodejs-16", 123456)
+        )
+        self.ga.project_id = 123457
+        with pytest.raises(RebuilderError):
+            self.ga.fork_project()
+
+    def test_fork_project_load_problem(self):
+        flexmock(self.ga).should_receive("create_project_fork").and_return(
+            ProjectCreateFork(987654, "nodejs-16", "git@gitlab.com/fooUser/nodejs-16.git", "https://gitlab.com/fooUser/nodejs-16", 123456)
+        )
+        flexmock(self.ga).should_receive("load_forked_project").and_raise(RebuilderError)
+        self.ga.project_id = 123457
+        with pytest.raises(RebuilderError):
+            self.ga.fork_project()
+
+    def test_fork_project(self):
+        flexmock(self.ga).should_receive("create_project_fork").and_return(
+            ProjectCreateFork(
+                987654, "nodejs-16", "git@gitlab.com/fooUser/nodejs-16.git",
+                "https://gitlab.com/fooUser/nodejs-16", 123456
+            )
+        )
+        flexmock(self.ga).should_receive("load_forked_project").and_return(True)
+        flexmock(self.ga).should_receive("get_protected_branches").and_return([])
+        self.ga.project_id = 123456
+        self.ga.fork_project()
+
+    def test_gitlab_git_changes(self):
+        self.ga.conf["from_tag"] = "test"
+        self.component = "nodejs-16"
+        flexmock(self.ga).should_receive("check_authentication").and_return(
+            CurrentUser(id=1234123, username="fooUser")
+        )
+        flexmock(self.ga).should_receive("create_project_fork").and_return(
+            ProjectCreateFork(
+                987654, self.component, "git@gitlab.com/fooUser/nodejs-16.git",
+                "https://gitlab.com/fooUser/nodejs-16", 123456
+            )
+        )
+        flexmock(self.ga).should_receive("get_project_forks").and_return(
+            [
+                ProjectForks(
+                    123456, "foobar", "git@gitlab.com/namespace/cont/foo", "fooUser", 654321, "git@gitlab.com/phracek/foo"
+                )
+            ]
+        )
+        flexmock(self.ga).should_receive("get_project_info").and_return(
+            ProjectInfo(
+                123456,
+                self.component,
+                "git@gitlab.com/namespace/nodejs-16.git",
+                "https://gitlab.com/namespace/nodejs-16"
+            )
+        )
+        flexmock(self.ga).should_receive("load_forked_project").and_return(True)
+        flexmock(self.ga).should_receive("get_protected_branches").and_return([])
+        os.environ["USER"] = "fooUser"
+        tmp = Path(self.ir._get_tmp_workdir())
+        shutil.copytree(Path(DATA_DIR) / self.component, Path(tmp) / self.component)
+        images = self.ga.conf["raw"]["images"]["s2i-base"]
+        self.ga.gitlab_git_changes(images=[images], rebase=False)
+        dpath = tmp / self.component / 'Dockerfile'
+        assert os.path.isfile(dpath)
+        assert not (tmp / self.component / "test" / "test-openshift.yaml").exists()
+        shutil.rmtree(tmp / self.component)