diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index c0f74b5bc55..a2ca0d5e484 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -135,6 +135,11 @@ body: - 3.14.6-nightly.1 - 3.14.5 - 3.14.5-nightly.3 + - 3.14.5-nightly.2 + - 3.14.5-nightly.1 + - 3.14.4 + - 3.14.4-nightly.4 + validations: required: true - type: dropdown diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index 2b31e61d6d7..c37b54ea9a9 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -295,6 +295,13 @@ def process(self, instance): "colorspaceView": colorspace_data["view"], } + rr_settings = ( + context.data["system_settings"]["modules"]["royalrender"] + ) + if rr_settings["enabled"]: + data["rrPathName"] = instance.data.get("rrPathName") + self.log.info(data["rrPathName"]) + if self.sync_workfile_version: data["version"] = context.data["version"] for instance in context: diff --git a/openpype/modules/deadline/abstract_submit_deadline.py b/openpype/modules/deadline/abstract_submit_deadline.py index 551a2f73737..3fa427204b3 100644 --- a/openpype/modules/deadline/abstract_submit_deadline.py +++ b/openpype/modules/deadline/abstract_submit_deadline.py @@ -22,6 +22,9 @@ KnownPublishError, OpenPypePyblishPluginMixin ) +from openpype.pipeline.publish.lib import ( + replace_with_published_scene_path +) JSONDecodeError = getattr(json.decoder, "JSONDecodeError", ValueError) @@ -525,72 +528,8 @@ def from_published_scene(self, replace_in_path=True): published. """ - instance = self._instance - workfile_instance = self._get_workfile_instance(instance.context) - if workfile_instance is None: - return - - # determine published path from Anatomy. 
- template_data = workfile_instance.data.get("anatomyData") - rep = workfile_instance.data["representations"][0] - template_data["representation"] = rep.get("name") - template_data["ext"] = rep.get("ext") - template_data["comment"] = None - - anatomy = instance.context.data['anatomy'] - template_obj = anatomy.templates_obj["publish"]["path"] - template_filled = template_obj.format_strict(template_data) - file_path = os.path.normpath(template_filled) - - self.log.info("Using published scene for render {}".format(file_path)) - - if not os.path.exists(file_path): - self.log.error("published scene does not exist!") - raise - - if not replace_in_path: - return file_path - - # now we need to switch scene in expected files - # because token will now point to published - # scene file and that might differ from current one - def _clean_name(path): - return os.path.splitext(os.path.basename(path))[0] - - new_scene = _clean_name(file_path) - orig_scene = _clean_name(instance.context.data["currentFile"]) - expected_files = instance.data.get("expectedFiles") - - if isinstance(expected_files[0], dict): - # we have aovs and we need to iterate over them - new_exp = {} - for aov, files in expected_files[0].items(): - replaced_files = [] - for f in files: - replaced_files.append( - str(f).replace(orig_scene, new_scene) - ) - new_exp[aov] = replaced_files - # [] might be too much here, TODO - instance.data["expectedFiles"] = [new_exp] - else: - new_exp = [] - for f in expected_files: - new_exp.append( - str(f).replace(orig_scene, new_scene) - ) - instance.data["expectedFiles"] = new_exp - - metadata_folder = instance.data.get("publishRenderMetadataFolder") - if metadata_folder: - metadata_folder = metadata_folder.replace(orig_scene, - new_scene) - instance.data["publishRenderMetadataFolder"] = metadata_folder - self.log.info("Scene name was switched {} -> {}".format( - orig_scene, new_scene - )) - - return file_path + return replace_with_published_scene_path( + self._instance, replace_in_path=replace_in_path) def assemble_payload( self, job_info=None, plugin_info=None, aux_files=None): @@ -651,22 +590,3 @@ def submit(self, payload): self._instance.data["deadlineSubmissionJob"] = result return result["_id"] - - @staticmethod - def _get_workfile_instance(context): - """Find workfile instance in context""" - for instance in context: - - is_workfile = ( - "workfile" in instance.data.get("families", []) or - instance.data["family"] == "workfile" - ) - if not is_workfile: - continue - - # test if there is instance of workfile waiting - # to be published. 
- assert instance.data.get("publish", True) is True, ( - "Workfile (scene) must be published along") - - return instance diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index 159ac43289a..1dfb6e0e5c0 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -47,6 +47,7 @@ from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo from openpype.tests.lib import is_in_tests from openpype.lib import is_running_from_build +from openpype.pipeline.farm.tools import iter_expected_files def _validate_deadline_bool_value(instance, attribute, value): @@ -238,7 +239,7 @@ def get_job_info(self): # Add list of expected files to job # --------------------------------- exp = instance.data.get("expectedFiles") - for filepath in self._iter_expected_files(exp): + for filepath in iter_expected_files(exp): job_info.OutputDirectory += os.path.dirname(filepath) job_info.OutputFilename += os.path.basename(filepath) @@ -296,7 +297,7 @@ def process_submission(self): # TODO: Avoid the need for this logic here, needed for submit publish # Store output dir for unified publisher (filesequence) expected_files = instance.data["expectedFiles"] - first_file = next(self._iter_expected_files(expected_files)) + first_file = next(iter_expected_files(expected_files)) output_dir = os.path.dirname(first_file) instance.data["outputDir"] = output_dir instance.data["toBeRenderedOn"] = "deadline" @@ -815,16 +816,6 @@ def _job_info_label(self, label): end=int(self._instance.data["frameEndHandle"]), ) - @staticmethod - def _iter_expected_files(exp): - if isinstance(exp[0], dict): - for _aov, files in exp[0].items(): - for file in files: - yield file - else: - for file in exp: - yield file - @classmethod def get_attribute_defs(cls): defs = super(MayaSubmitDeadline, cls).get_attribute_defs() @@ -863,7 +854,6 @@ def get_attribute_defs(cls): return defs - def _format_tiles( filename, index, diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py index 01a5c552863..0529fb8a70d 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- """Submit publishing job to farm.""" - import os import json import re @@ -12,47 +11,22 @@ from openpype.client import ( get_last_version_by_subset_name, - get_representations, ) from openpype.pipeline import ( - get_representation_path, legacy_io, ) -from openpype.pipeline.publish import OpenPypePyblishPluginMixin +from openpype.pipeline import publish from openpype.lib import EnumDef from openpype.tests.lib import is_in_tests -from openpype.pipeline.farm.patterning import match_aov_pattern from openpype.lib import is_running_from_build -from openpype.pipeline import publish - -def get_resources(project_name, version, extension=None): - """Get the files from the specific version.""" - - # TODO this functions seems to be weird - # - it's looking for representation with one extension or first (any) - # representation from a version? 
- # - not sure how this should work, maybe it does for specific use cases - # but probably can't be used for all resources from 2D workflows - extensions = None - if extension: - extensions = [extension] - repre_docs = list(get_representations( - project_name, version_ids=[version["_id"]], extensions=extensions - )) - assert repre_docs, "This is a bug" - - representation = repre_docs[0] - directory = get_representation_path(representation) - print("Source: ", directory) - resources = sorted( - [ - os.path.normpath(os.path.join(directory, fname)) - for fname in os.listdir(directory) - ] - ) - - return resources +from openpype.pipeline.farm.pyblish_functions import ( + create_skeleton_instance, + create_instances_for_aov, + attach_instances_to_subset, + prepare_representations, + create_metadata_path +) def get_resource_files(resources, frame_range=None): @@ -83,7 +57,8 @@ def get_resource_files(resources, frame_range=None): class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin, - OpenPypePyblishPluginMixin): + publish.OpenPypePyblishPluginMixin, + publish.ColormanagedPyblishPluginMixin): """Process Job submitted on farm. These jobs are dependent on a deadline or muster job @@ -185,36 +160,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin, # poor man exclusion skip_integration_repre_list = [] - def _create_metadata_path(self, instance): - ins_data = instance.data - # Ensure output dir exists - output_dir = ins_data.get( - "publishRenderMetadataFolder", ins_data["outputDir"]) - - try: - if not os.path.isdir(output_dir): - os.makedirs(output_dir) - except OSError: - # directory is not available - self.log.warning("Path is unreachable: `{}`".format(output_dir)) - - metadata_filename = "{}_metadata.json".format(ins_data["subset"]) - - metadata_path = os.path.join(output_dir, metadata_filename) - - # Convert output dir to `{root}/rest/of/path/...` with Anatomy - success, rootless_mtdt_p = self.anatomy.find_root_template_from_path( - metadata_path) - if not success: - # `rootless_path` is not set to `output_dir` if none of roots match - self.log.warning(( - "Could not find root path for remapping \"{}\"." - " This may cause issues on farm." - ).format(output_dir)) - rootless_mtdt_p = metadata_path - - return metadata_path, rootless_mtdt_p - def _submit_deadline_post_job(self, instance, job, instances): """Submit publish job to Deadline. 
@@ -229,6 +174,8 @@ def _submit_deadline_post_job(self, instance, job, instances): subset = data["subset"] job_name = "Publish - {subset}".format(subset=subset) + anatomy = instance.context.data['anatomy'] + # instance.data.get("subset") != instances[0]["subset"] # 'Main' vs 'renderMain' override_version = None @@ -237,7 +184,7 @@ def _submit_deadline_post_job(self, instance, job, instances): override_version = instance_version output_dir = self._get_publish_folder( - instance.context.data['anatomy'], + anatomy, deepcopy(instance.data["anatomyData"]), instance.data.get("asset"), instances[0]["subset"], @@ -248,7 +195,7 @@ def _submit_deadline_post_job(self, instance, job, instances): # Transfer the environment from the original job to this dependent # job so they use the same environment metadata_path, rootless_metadata_path = \ - self._create_metadata_path(instance) + create_metadata_path(instance, anatomy) environment = { "AVALON_PROJECT": instance.context.data["projectName"], @@ -335,13 +282,15 @@ def _submit_deadline_post_job(self, instance, job, instances): self.log.info("Adding tile assembly jobs as dependencies...") job_index = 0 for assembly_id in instance.data.get("assemblySubmissionJobs"): - payload["JobInfo"]["JobDependency{}".format(job_index)] = assembly_id # noqa: E501 + payload["JobInfo"]["JobDependency{}".format( + job_index)] = assembly_id # noqa: E501 job_index += 1 elif instance.data.get("bakingSubmissionJobs"): self.log.info("Adding baking submission jobs as dependencies...") job_index = 0 for assembly_id in instance.data["bakingSubmissionJobs"]: - payload["JobInfo"]["JobDependency{}".format(job_index)] = assembly_id # noqa: E501 + payload["JobInfo"]["JobDependency{}".format( + job_index)] = assembly_id # noqa: E501 job_index += 1 else: payload["JobInfo"]["JobDependency0"] = job["_id"] @@ -369,413 +318,13 @@ def _submit_deadline_post_job(self, instance, job, instances): return deadline_publish_job_id - def _copy_extend_frames(self, instance, representation): - """Copy existing frames from latest version. - - This will copy all existing frames from subset's latest version back - to render directory and rename them to what renderer is expecting. - - Arguments: - instance (pyblish.plugin.Instance): instance to get required - data from - representation (dict): presentation to operate on - - """ - import speedcopy - - self.log.info("Preparing to copy ...") - start = instance.data.get("frameStart") - end = instance.data.get("frameEnd") - - # get latest version of subset - # this will stop if subset wasn't published yet - project_name = self.context.data["projectName"] - version = get_last_version_by_subset_name( - project_name, - instance.data.get("subset"), - asset_name=instance.data.get("asset") - ) - - # get its files based on extension - subset_resources = get_resources( - project_name, version, representation.get("ext") - ) - r_col, _ = clique.assemble(subset_resources) - - # if override remove all frames we are expecting to be rendered - # so we'll copy only those missing from current render - if instance.data.get("overrideExistingFrame"): - for frame in range(start, end + 1): - if frame not in r_col.indexes: - continue - r_col.indexes.remove(frame) - - # now we need to translate published names from representation - # back. 
This is tricky, right now we'll just use same naming - # and only switch frame numbers - resource_files = [] - r_filename = os.path.basename( - representation.get("files")[0]) # first file - op = re.search(self.R_FRAME_NUMBER, r_filename) - pre = r_filename[:op.start("frame")] - post = r_filename[op.end("frame"):] - assert op is not None, "padding string wasn't found" - for frame in list(r_col): - fn = re.search(self.R_FRAME_NUMBER, frame) - # silencing linter as we need to compare to True, not to - # type - assert fn is not None, "padding string wasn't found" - # list of tuples (source, destination) - staging = representation.get("stagingDir") - staging = self.anatomy.fill_root(staging) - resource_files.append( - (frame, - os.path.join(staging, - "{}{}{}".format(pre, - fn.group("frame"), - post))) - ) - - # test if destination dir exists and create it if not - output_dir = os.path.dirname(representation.get("files")[0]) - if not os.path.isdir(output_dir): - os.makedirs(output_dir) - - # copy files - for source in resource_files: - speedcopy.copy(source[0], source[1]) - self.log.info(" > {}".format(source[1])) - - self.log.info( - "Finished copying %i files" % len(resource_files)) - - def _create_instances_for_aov( - self, instance_data, exp_files, additional_data, do_not_add_review - ): - """Create instance for each AOV found. - - This will create new instance for every aov it can detect in expected - files list. - - Arguments: - instance_data (pyblish.plugin.Instance): skeleton data for instance - (those needed) later by collector - exp_files (list): list of expected files divided by aovs - additional_data (dict): - do_not_add_review (bool): explicitly skip review - - Returns: - list of instances - - """ - task = self.context.data["task"] - host_name = self.context.data["hostName"] - subset = instance_data["subset"] - cameras = instance_data.get("cameras", []) - instances = [] - # go through aovs in expected files - for aov, files in exp_files[0].items(): - cols, rem = clique.assemble(files) - # we shouldn't have any reminders. And if we do, it should - # be just one item for single frame renders. - if not cols and rem: - assert len(rem) == 1, ("Found multiple non related files " - "to render, don't know what to do " - "with them.") - col = rem[0] - ext = os.path.splitext(col)[1].lstrip(".") - else: - # but we really expect only one collection. - # Nothing else make sense. - assert len(cols) == 1, "only one image sequence type is expected" # noqa: E501 - ext = cols[0].tail.lstrip(".") - col = list(cols[0]) - - self.log.debug(col) - # create subset name `familyTaskSubset_AOV` - group_name = 'render{}{}{}{}'.format( - task[0].upper(), task[1:], - subset[0].upper(), subset[1:]) - - cam = [c for c in cameras if c in col.head] - if cam: - if aov: - subset_name = '{}_{}_{}'.format(group_name, cam, aov) - else: - subset_name = '{}_{}'.format(group_name, cam) - else: - if aov: - subset_name = '{}_{}'.format(group_name, aov) - else: - subset_name = '{}'.format(group_name) - - if isinstance(col, (list, tuple)): - staging = os.path.dirname(col[0]) - else: - staging = os.path.dirname(col) - - success, rootless_staging_dir = ( - self.anatomy.find_root_template_from_path(staging) - ) - if success: - staging = rootless_staging_dir - else: - self.log.warning(( - "Could not find root path for remapping \"{}\"." - " This may cause issues on farm." 
- ).format(staging)) - - self.log.info("Creating data for: {}".format(subset_name)) - - if isinstance(col, list): - render_file_name = os.path.basename(col[0]) - else: - render_file_name = os.path.basename(col) - aov_patterns = self.aov_filter - - preview = match_aov_pattern( - host_name, aov_patterns, render_file_name - ) - # toggle preview on if multipart is on - - if instance_data.get("multipartExr"): - self.log.debug("Adding preview tag because its multipartExr") - preview = True - self.log.debug("preview:{}".format(preview)) - new_instance = deepcopy(instance_data) - new_instance["subset"] = subset_name - new_instance["subsetGroup"] = group_name - - preview = preview and not do_not_add_review - if preview: - new_instance["review"] = True - - # create representation - if isinstance(col, (list, tuple)): - files = [os.path.basename(f) for f in col] - else: - files = os.path.basename(col) - - # Copy render product "colorspace" data to representation. - colorspace = "" - products = additional_data["renderProducts"].layer_data.products - for product in products: - if product.productName == aov: - colorspace = product.colorspace - break - - rep = { - "name": ext, - "ext": ext, - "files": files, - "frameStart": int(instance_data.get("frameStartHandle")), - "frameEnd": int(instance_data.get("frameEndHandle")), - # If expectedFile are absolute, we need only filenames - "stagingDir": staging, - "fps": new_instance.get("fps"), - "tags": ["review"] if preview else [], - "colorspaceData": { - "colorspace": colorspace, - "config": { - "path": additional_data["colorspaceConfig"], - "template": additional_data["colorspaceTemplate"] - }, - "display": additional_data["display"], - "view": additional_data["view"] - } - } - - # support conversion from tiled to scanline - if instance_data.get("convertToScanline"): - self.log.info("Adding scanline conversion.") - rep["tags"].append("toScanline") - - # poor man exclusion - if ext in self.skip_integration_repre_list: - rep["tags"].append("delete") - - self._solve_families(new_instance, preview) - - new_instance["representations"] = [rep] - - # if extending frames from existing version, copy files from there - # into our destination directory - if new_instance.get("extendFrames", False): - self._copy_extend_frames(new_instance, rep) - instances.append(new_instance) - self.log.debug("instances:{}".format(instances)) - return instances - - def _get_representations(self, instance_data, exp_files, - do_not_add_review): - """Create representations for file sequences. - - This will return representations of expected files if they are not - in hierarchy of aovs. There should be only one sequence of files for - most cases, but if not - we create representation from each of them. - - Arguments: - instance_data (dict): instance.data for which we are - setting representations - exp_files (list): list of expected files - do_not_add_review (bool): explicitly skip review - - Returns: - list of representations - - """ - representations = [] - host_name = self.context.data["hostName"] - collections, remainders = clique.assemble(exp_files) - - # create representation for every collected sequence - for collection in collections: - ext = collection.tail.lstrip(".") - preview = False - # TODO 'useSequenceForReview' is temporary solution which does - # not work for 100% of cases. We must be able to tell what - # expected files contains more explicitly and from what - # should be review made. 
- # - "review" tag is never added when is set to 'False' - if instance_data["useSequenceForReview"]: - # toggle preview on if multipart is on - if instance_data.get("multipartExr", False): - self.log.debug( - "Adding preview tag because its multipartExr" - ) - preview = True - else: - render_file_name = list(collection)[0] - # if filtered aov name is found in filename, toggle it for - # preview video rendering - preview = match_aov_pattern( - host_name, self.aov_filter, render_file_name - ) - - staging = os.path.dirname(list(collection)[0]) - success, rootless_staging_dir = ( - self.anatomy.find_root_template_from_path(staging) - ) - if success: - staging = rootless_staging_dir - else: - self.log.warning(( - "Could not find root path for remapping \"{}\"." - " This may cause issues on farm." - ).format(staging)) - - frame_start = int(instance_data.get("frameStartHandle")) - if instance_data.get("slate"): - frame_start -= 1 - - preview = preview and not do_not_add_review - rep = { - "name": ext, - "ext": ext, - "files": [os.path.basename(f) for f in list(collection)], - "frameStart": frame_start, - "frameEnd": int(instance_data.get("frameEndHandle")), - # If expectedFile are absolute, we need only filenames - "stagingDir": staging, - "fps": instance_data.get("fps"), - "tags": ["review"] if preview else [], - } - - # poor man exclusion - if ext in self.skip_integration_repre_list: - rep["tags"].append("delete") - - if instance_data.get("multipartExr", False): - rep["tags"].append("multipartExr") - - # support conversion from tiled to scanline - if instance_data.get("convertToScanline"): - self.log.info("Adding scanline conversion.") - rep["tags"].append("toScanline") - - representations.append(rep) - - self._solve_families(instance_data, preview) - - # add remainders as representations - for remainder in remainders: - ext = remainder.split(".")[-1] - - staging = os.path.dirname(remainder) - success, rootless_staging_dir = ( - self.anatomy.find_root_template_from_path(staging) - ) - if success: - staging = rootless_staging_dir - else: - self.log.warning(( - "Could not find root path for remapping \"{}\"." - " This may cause issues on farm." - ).format(staging)) - - rep = { - "name": ext, - "ext": ext, - "files": os.path.basename(remainder), - "stagingDir": staging, - } - - preview = match_aov_pattern( - host_name, self.aov_filter, remainder - ) - preview = preview and not do_not_add_review - if preview: - rep.update({ - "fps": instance_data.get("fps"), - "tags": ["review"] - }) - self._solve_families(instance_data, preview) - - already_there = False - for repre in instance_data.get("representations", []): - # might be added explicitly before by publish_on_farm - already_there = repre.get("files") == rep["files"] - if already_there: - self.log.debug("repre {} already_there".format(repre)) - break - - if not already_there: - representations.append(rep) - - for rep in representations: - # inject colorspace data - self.set_representation_colorspace( - rep, self.context, - colorspace=instance_data["colorspace"] - ) - - return representations - - def _solve_families(self, instance, preview=False): - families = instance.get("families") - - # if we have one representation with preview tag - # flag whole instance for review and for ftrack - if preview: - if "ftrack" not in families: - if os.environ.get("FTRACK_SERVER"): - self.log.debug( - "Adding \"ftrack\" to families because of preview tag." 
- ) - families.append("ftrack") - if "review" not in families: - self.log.debug( - "Adding \"review\" to families because of preview tag." - ) - families.append("review") - instance["families"] = families def process(self, instance): # type: (pyblish.api.Instance) -> None """Process plugin. - Detect type of renderfarm submission and create and post dependent job - in case of Deadline. It creates json file with metadata needed for + Detect type of render farm submission and create and post dependent + job in case of Deadline. It creates json file with metadata needed for publishing in directory of render. Args: @@ -786,151 +335,14 @@ def process(self, instance): self.log.debug("Skipping local instance.") return - data = instance.data.copy() - context = instance.context - self.context = context - self.anatomy = instance.context.data["anatomy"] - - asset = data.get("asset") or context.data["asset"] - subset = data.get("subset") - - start = instance.data.get("frameStart") - if start is None: - start = context.data["frameStart"] - - end = instance.data.get("frameEnd") - if end is None: - end = context.data["frameEnd"] - - handle_start = instance.data.get("handleStart") - if handle_start is None: - handle_start = context.data["handleStart"] - - handle_end = instance.data.get("handleEnd") - if handle_end is None: - handle_end = context.data["handleEnd"] - - fps = instance.data.get("fps") - if fps is None: - fps = context.data["fps"] - - if data.get("extendFrames", False): - start, end = self._extend_frames( - asset, - subset, - start, - end, - data["overrideExistingFrame"]) - - try: - source = data["source"] - except KeyError: - source = context.data["currentFile"] - - success, rootless_path = ( - self.anatomy.find_root_template_from_path(source) - ) - if success: - source = rootless_path - - else: - # `rootless_path` is not set to `source` if none of roots match - self.log.warning(( - "Could not find root path for remapping \"{}\"." - " This may cause issues." 
- ).format(source)) - - family = "render" - if ("prerender" in instance.data["families"] or - "prerender.farm" in instance.data["families"]): - family = "prerender" - families = [family] - - # pass review to families if marked as review - do_not_add_review = False - if data.get("review"): - families.append("review") - elif data.get("review") == False: - self.log.debug("Instance has review explicitly disabled.") - do_not_add_review = True - - instance_skeleton_data = { - "family": family, - "subset": subset, - "families": families, - "asset": asset, - "frameStart": start, - "frameEnd": end, - "handleStart": handle_start, - "handleEnd": handle_end, - "frameStartHandle": start - handle_start, - "frameEndHandle": end + handle_end, - "comment": instance.data["comment"], - "fps": fps, - "source": source, - "extendFrames": data.get("extendFrames"), - "overrideExistingFrame": data.get("overrideExistingFrame"), - "pixelAspect": data.get("pixelAspect", 1), - "resolutionWidth": data.get("resolutionWidth", 1920), - "resolutionHeight": data.get("resolutionHeight", 1080), - "multipartExr": data.get("multipartExr", False), - "jobBatchName": data.get("jobBatchName", ""), - "useSequenceForReview": data.get("useSequenceForReview", True), - # map inputVersions `ObjectId` -> `str` so json supports it - "inputVersions": list(map(str, data.get("inputVersions", []))), - "colorspace": instance.data.get("colorspace") - } - - # skip locking version if we are creating v01 - instance_version = instance.data.get("version") # take this if exists - if instance_version != 1: - instance_skeleton_data["version"] = instance_version - - # transfer specific families from original instance to new render - for item in self.families_transfer: - if item in instance.data.get("families", []): - instance_skeleton_data["families"] += [item] - - # transfer specific properties from original instance based on - # mapping dictionary `instance_transfer` - for key, values in self.instance_transfer.items(): - if key in instance.data.get("families", []): - for v in values: - instance_skeleton_data[v] = instance.data.get(v) - - # look into instance data if representations are not having any - # which are having tag `publish_on_farm` and include them - for repre in instance.data.get("representations", []): - staging_dir = repre.get("stagingDir") - if staging_dir: - success, rootless_staging_dir = ( - self.anatomy.find_root_template_from_path( - staging_dir - ) - ) - if success: - repre["stagingDir"] = rootless_staging_dir - else: - self.log.warning(( - "Could not find root path for remapping \"{}\"." - " This may cause issues on farm." - ).format(staging_dir)) - repre["stagingDir"] = staging_dir - - if "publish_on_farm" in repre.get("tags"): - # create representations attribute of not there - if "representations" not in instance_skeleton_data.keys(): - instance_skeleton_data["representations"] = [] - - instance_skeleton_data["representations"].append(repre) - - instances = None - assert data.get("expectedFiles"), ("Submission from old Pype version" - " - missing expectedFiles") + anatomy = instance.context.data["anatomy"] + instance_skeleton_data = create_skeleton_instance( + instance, families_transfer=self.families_transfer, + instance_transfer=self.instance_transfer) """ - if content of `expectedFiles` are dictionaries, we will handle - it as list of AOVs, creating instance from every one of them. + if content of `expectedFiles` list are dictionaries, we will handle + it as list of AOVs, creating instance for every one of them. 
Example: -------- @@ -952,7 +364,7 @@ def process(self, instance): This will create instances for `beauty` and `Z` subset adding those files to their respective representations. - If we've got only list of files, we collect all filesequences. + If we have only list of files, we collect all file sequences. More then one doesn't probably make sense, but we'll handle it like creating one instance with multiple representations. @@ -969,58 +381,26 @@ def process(self, instance): This will result in one instance with two representations: `foo` and `xxx` """ + do_not_add_review = False + if instance.data.get("review") is False: + self.log.debug("Instance has review explicitly disabled.") + do_not_add_review = True - self.log.info(data.get("expectedFiles")) - - if isinstance(data.get("expectedFiles")[0], dict): - # we cannot attach AOVs to other subsets as we consider every - # AOV subset of its own. - - additional_data = { - "renderProducts": instance.data["renderProducts"], - "colorspaceConfig": instance.data["colorspaceConfig"], - "display": instance.data["colorspaceDisplay"], - "view": instance.data["colorspaceView"] - } - - # Get templated path from absolute config path. - anatomy = instance.context.data["anatomy"] - colorspaceTemplate = instance.data["colorspaceConfig"] - success, rootless_staging_dir = ( - anatomy.find_root_template_from_path(colorspaceTemplate) - ) - if success: - colorspaceTemplate = rootless_staging_dir - else: - self.log.warning(( - "Could not find root path for remapping \"{}\"." - " This may cause issues on farm." - ).format(colorspaceTemplate)) - additional_data["colorspaceTemplate"] = colorspaceTemplate - - if len(data.get("attachTo")) > 0: - assert len(data.get("expectedFiles")[0].keys()) == 1, ( - "attaching multiple AOVs or renderable cameras to " - "subset is not supported") - - # create instances for every AOV we found in expected files. 
- # note: this is done for every AOV and every render camere (if - # there are multiple renderable cameras in scene) - instances = self._create_instances_for_aov( - instance_skeleton_data, - data.get("expectedFiles"), - additional_data, - do_not_add_review - ) - self.log.info("got {} instance{}".format( - len(instances), - "s" if len(instances) > 1 else "")) - + if isinstance(instance.data.get("expectedFiles")[0], dict): + instances = create_instances_for_aov( + instance, instance_skeleton_data, + self.aov_filter, self.skip_integration_repre_list, + do_not_add_review) else: - representations = self._get_representations( + representations = prepare_representations( instance_skeleton_data, - data.get("expectedFiles"), - do_not_add_review + instance.data.get("expectedFiles"), + anatomy, + self.aov_filter, + self.skip_integration_repre_list, + do_not_add_review, + instance.context, + self ) if "representations" not in instance_skeleton_data.keys(): @@ -1030,25 +410,11 @@ def process(self, instance): instance_skeleton_data["representations"] += representations instances = [instance_skeleton_data] - # if we are attaching to other subsets, create copy of existing - # instances, change data to match its subset and replace - # existing instances with modified data + # attach instances to subset if instance.data.get("attachTo"): - self.log.info("Attaching render to subset:") - new_instances = [] - for at in instance.data.get("attachTo"): - for i in instances: - new_i = copy(i) - new_i["version"] = at.get("version") - new_i["subset"] = at.get("subset") - new_i["family"] = at.get("family") - new_i["append"] = True - # don't set subsetGroup if we are attaching - new_i.pop("subsetGroup") - new_instances.append(new_i) - self.log.info(" - {} / v{}".format( - at.get("subset"), at.get("version"))) - instances = new_instances + instances = attach_instances_to_subset( + instance.data.get("attachTo"), instances + ) r''' SUBMiT PUBLiSH JOB 2 D34DLiN3 ____ @@ -1063,11 +429,11 @@ def process(self, instance): render_job = None submission_type = "" if instance.data.get("toBeRenderedOn") == "deadline": - render_job = data.pop("deadlineSubmissionJob", None) + render_job = instance.data.pop("deadlineSubmissionJob", None) submission_type = "deadline" if instance.data.get("toBeRenderedOn") == "muster": - render_job = data.pop("musterSubmissionJob", None) + render_job = instance.data.pop("musterSubmissionJob", None) submission_type = "muster" if not render_job and instance.data.get("tileRendering") is False: @@ -1089,10 +455,11 @@ def process(self, instance): render_job["Props"]["Batch"] = instance.data.get( "jobBatchName") else: - render_job["Props"]["Batch"] = os.path.splitext( - os.path.basename(context.data.get("currentFile")))[0] + batch = os.path.splitext(os.path.basename( + instance.context.data.get("currentFile")))[0] + render_job["Props"]["Batch"] = batch # User is deadline user - render_job["Props"]["User"] = context.data.get( + render_job["Props"]["User"] = instance.context.data.get( "deadlineUser", getpass.getuser()) render_job["Props"]["Env"] = { @@ -1118,15 +485,15 @@ def process(self, instance): # publish job file publish_job = { - "asset": asset, - "frameStart": start, - "frameEnd": end, - "fps": context.data.get("fps", None), - "source": source, - "user": context.data["user"], - "version": context.data["version"], # this is workfile version - "intent": context.data.get("intent"), - "comment": context.data.get("comment"), + "asset": instance_skeleton_data["asset"], + "frameStart": 
instance_skeleton_data["frameStart"], + "frameEnd": instance_skeleton_data["frameEnd"], + "fps": instance_skeleton_data["fps"], + "source": instance_skeleton_data["source"], + "user": instance.context.data["user"], + "version": instance.context.data["version"], # workfile version + "intent": instance.context.data.get("intent"), + "comment": instance.context.data.get("comment"), "job": render_job or None, "session": legacy_io.Session.copy(), "instances": instances @@ -1136,7 +503,7 @@ def process(self, instance): publish_job["deadline_publish_job_id"] = deadline_publish_job_id # add audio to metadata file if available - audio_file = context.data.get("audioFile") + audio_file = instance.context.data.get("audioFile") if audio_file and os.path.isfile(audio_file): publish_job.update({"audio": audio_file}) @@ -1149,54 +516,12 @@ def process(self, instance): } publish_job.update({"ftrack": ftrack}) - metadata_path, rootless_metadata_path = self._create_metadata_path( - instance) + metadata_path, rootless_metadata_path = \ + create_metadata_path(instance, anatomy) - self.log.info("Writing json file: {}".format(metadata_path)) with open(metadata_path, "w") as f: json.dump(publish_job, f, indent=4, sort_keys=True) - def _extend_frames(self, asset, subset, start, end): - """Get latest version of asset nad update frame range. - - Based on minimum and maximuma values. - - Arguments: - asset (str): asset name - subset (str): subset name - start (int): start frame - end (int): end frame - - Returns: - (int, int): upddate frame start/end - - """ - # Frame comparison - prev_start = None - prev_end = None - - project_name = self.context.data["projectName"] - version = get_last_version_by_subset_name( - project_name, - subset, - asset_name=asset - ) - - # Set prev start / end frames for comparison - if not prev_start and not prev_end: - prev_start = version["data"]["frameStart"] - prev_end = version["data"]["frameEnd"] - - updated_start = min(start, prev_start) - updated_end = max(end, prev_end) - - self.log.info( - "Updating start / end frame : " - "{} - {}".format(updated_start, updated_end) - ) - - return updated_start, updated_end - def _get_publish_folder(self, anatomy, template_data, asset, subset, family='render', version=None): diff --git a/openpype/modules/royalrender/api.py b/openpype/modules/royalrender/api.py index de1dba8724b..e610a0c8a8e 100644 --- a/openpype/modules/royalrender/api.py +++ b/openpype/modules/royalrender/api.py @@ -3,10 +3,10 @@ import sys import os -from openpype.settings import get_project_settings from openpype.lib.local_settings import OpenPypeSettingsRegistry from openpype.lib import Logger, run_subprocess from .rr_job import RRJob, SubmitFile, SubmitterParameter +from openpype.lib.vendor_bin_utils import find_tool_in_custom_paths class Api: @@ -15,69 +15,57 @@ class Api: RR_SUBMIT_CONSOLE = 1 RR_SUBMIT_API = 2 - def __init__(self, settings, project=None): + def __init__(self, rr_path=None): self.log = Logger.get_logger("RoyalRender") - self._settings = settings - self._initialize_rr(project) + self._rr_path = rr_path + os.environ["RR_ROOT"] = rr_path - def _initialize_rr(self, project=None): - # type: (str) -> None - """Initialize RR Path. + @staticmethod + def get_rr_bin_path(rr_root, tool_name=None): + # type: (str, str) -> str + """Get path to RR bin folder. Args: - project (str, Optional): Project name to set RR api in - context. + tool_name (str): Name of RR executable you want. + rr_root (str): Custom RR root if needed. 
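+
+        Example (illustrative; on Linux with 64bit Python, assuming
+        ``find_tool_in_custom_paths`` finds the tool on disk)::
+
+            get_rr_bin_path("/opt/rr", "rrSubmitterconsole")
+            # -> "/opt/rr/bin/lx64/rrSubmitterconsole"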
- """ - if project: - project_settings = get_project_settings(project) - rr_path = ( - project_settings - ["royalrender"] - ["rr_paths"] - ) - else: - rr_path = ( - self._settings - ["modules"] - ["royalrender"] - ["rr_path"] - ["default"] - ) - os.environ["RR_ROOT"] = rr_path - self._rr_path = rr_path + Returns: + str: Path to the tool based on current platform. - def _get_rr_bin_path(self, rr_root=None): - # type: (str) -> str - """Get path to RR bin folder.""" - rr_root = rr_root or self._rr_path + """ is_64bit_python = sys.maxsize > 2 ** 32 - rr_bin_path = "" + rr_bin_parts = [rr_root, "bin"] if sys.platform.lower() == "win32": - rr_bin_path = "/bin/win64" - if not is_64bit_python: - # we are using 64bit python - rr_bin_path = "/bin/win" - rr_bin_path = rr_bin_path.replace( - "/", os.path.sep - ) + rr_bin_parts.append("win") if sys.platform.lower() == "darwin": - rr_bin_path = "/bin/mac64" - if not is_64bit_python: - rr_bin_path = "/bin/mac" + rr_bin_parts.append("mac") + + if sys.platform.lower().startswith("linux"): + rr_bin_parts.append("lx") + + rr_bin_path = os.sep.join(rr_bin_parts) + + paths_to_check = [] + # if we use 64bit python, append 64bit specific path first + if is_64bit_python: + if not tool_name: + return rr_bin_path + "64" + paths_to_check.append(rr_bin_path + "64") - if sys.platform.lower() == "linux": - rr_bin_path = "/bin/lx64" + # otherwise use 32bit + if not tool_name: + return rr_bin_path + paths_to_check.append(rr_bin_path) - return os.path.join(rr_root, rr_bin_path) + return find_tool_in_custom_paths(paths_to_check, tool_name) def _initialize_module_path(self): # type: () -> None """Set RR modules for Python.""" # default for linux - rr_bin = self._get_rr_bin_path() + rr_bin = self.get_rr_bin_path(self._rr_path) rr_module_path = os.path.join(rr_bin, "lx64/lib") if sys.platform.lower() == "win32": @@ -91,51 +79,46 @@ def _initialize_module_path(self): sys.path.append(os.path.join(self._rr_path, rr_module_path)) - def create_submission(self, jobs, submitter_attributes, file_name=None): - # type: (list[RRJob], list[SubmitterParameter], str) -> SubmitFile + @staticmethod + def create_submission(jobs, submitter_attributes): + # type: (list[RRJob], list[SubmitterParameter]) -> SubmitFile """Create jobs submission file. Args: jobs (list): List of :class:`RRJob` submitter_attributes (list): List of submitter attributes :class:`SubmitterParameter` for whole submission batch. - file_name (str), optional): File path to write data to. Returns: str: XML data of job submission files. 
""" - raise NotImplementedError + return SubmitFile(SubmitterParameters=submitter_attributes, Jobs=jobs) def submit_file(self, file, mode=RR_SUBMIT_CONSOLE): # type: (SubmitFile, int) -> None if mode == self.RR_SUBMIT_CONSOLE: self._submit_using_console(file) + return - # RR v7 supports only Python 2.7 so we bail out in fear + # RR v7 supports only Python 2.7, so we bail out in fear # until there is support for Python 3 😰 raise NotImplementedError( "Submission via RoyalRender API is not supported yet") # self._submit_using_api(file) - def _submit_using_console(self, file): - # type: (SubmitFile) -> bool - rr_console = os.path.join( - self._get_rr_bin_path(), - "rrSubmitterconsole" - ) - - if sys.platform.lower() == "darwin": - if "/bin/mac64" in rr_console: - rr_console = rr_console.replace("/bin/mac64", "/bin/mac") + def _submit_using_console(self, job_file): + # type: (SubmitFile) -> None + rr_start_local = self.get_rr_bin_path( + self._rr_path, "rrStartLocal") - if sys.platform.lower() == "win32": - if "/bin/win64" in rr_console: - rr_console = rr_console.replace("/bin/win64", "/bin/win") - rr_console += ".exe" + self.log.info("rr_console: {}".format(rr_start_local)) - args = [rr_console, file] - run_subprocess(" ".join(args), logger=self.log) + args = [rr_start_local, "rrSubmitterconsole", job_file] + self.log.info("Executing: {}".format(" ".join(args))) + env = os.environ + env["RR_ROOT"] = self._rr_path + run_subprocess(args, logger=self.log, env=env) def _submit_using_api(self, file): # type: (SubmitFile) -> None diff --git a/openpype/modules/royalrender/lib.py b/openpype/modules/royalrender/lib.py new file mode 100644 index 00000000000..4708d25eed3 --- /dev/null +++ b/openpype/modules/royalrender/lib.py @@ -0,0 +1,304 @@ +# -*- coding: utf-8 -*- +"""Submitting render job to RoyalRender.""" +import os +import re +import platform +from datetime import datetime + +import pyblish.api +from openpype.tests.lib import is_in_tests +from openpype.pipeline.publish.lib import get_published_workfile_instance +from openpype.pipeline.publish import KnownPublishError +from openpype.modules.royalrender.api import Api as rrApi +from openpype.modules.royalrender.rr_job import ( + RRJob, CustomAttribute, get_rr_platform) +from openpype.lib import ( + is_running_from_build, + BoolDef, + NumberDef, +) +from openpype.pipeline import OpenPypePyblishPluginMixin + + +class BaseCreateRoyalRenderJob(pyblish.api.InstancePlugin, + OpenPypePyblishPluginMixin): + """Creates separate rendering job for Royal Render""" + label = "Create Nuke Render job in RR" + order = pyblish.api.IntegratorOrder + 0.1 + hosts = ["nuke"] + families = ["render", "prerender"] + targets = ["local"] + optional = True + + priority = 50 + chunk_size = 1 + concurrent_tasks = 1 + use_gpu = True + use_published = True + + @classmethod + def get_attribute_defs(cls): + return [ + NumberDef( + "priority", + label="Priority", + default=cls.priority, + decimals=0 + ), + NumberDef( + "chunk", + label="Frames Per Task", + default=cls.chunk_size, + decimals=0, + minimum=1, + maximum=1000 + ), + NumberDef( + "concurrency", + label="Concurrency", + default=cls.concurrent_tasks, + decimals=0, + minimum=1, + maximum=10 + ), + BoolDef( + "use_gpu", + default=cls.use_gpu, + label="Use GPU" + ), + BoolDef( + "suspend_publish", + default=False, + label="Suspend publish" + ), + BoolDef( + "use_published", + default=cls.use_published, + label="Use published workfile" + ) + ] + + def __init__(self, *args, **kwargs): + self._rr_root = None + self.scene_path = 
None + self.job = None + self.submission_parameters = None + self.rr_api = None + + def process(self, instance): + if not instance.data.get("farm"): + self.log.info("Skipping local instance.") + return + + instance.data["attributeValues"] = self.get_attr_values_from_data( + instance.data) + + # add suspend_publish attributeValue to instance data + instance.data["suspend_publish"] = instance.data["attributeValues"][ + "suspend_publish"] + + context = instance.context + + self._rr_root = self._resolve_rr_path(context, instance.data.get( + "rrPathName")) # noqa + self.log.debug(self._rr_root) + if not self._rr_root: + raise KnownPublishError( + ("Missing RoyalRender root. " + "You need to configure RoyalRender module.")) + + self.rr_api = rrApi(self._rr_root) + + self.scene_path = context.data["currentFile"] + if self.use_published: + published_workfile = get_published_workfile_instance(context) + + # fallback if nothing was set + if published_workfile is None: + self.log.warning("Falling back to workfile") + file_path = context.data["currentFile"] + else: + workfile_repre = published_workfile.data["representations"][0] + file_path = workfile_repre["published_path"] + + self.scene_path = file_path + self.log.info( + "Using published scene for render {}".format(self.scene_path) + ) + + if not instance.data.get("expectedFiles"): + instance.data["expectedFiles"] = [] + + if not instance.data.get("rrJobs"): + instance.data["rrJobs"] = [] + + def get_job(self, instance, script_path, render_path, node_name): + """Get RR job based on current instance. + + Args: + script_path (str): Path to Nuke script. + render_path (str): Output path. + node_name (str): Name of the render node. + + Returns: + RRJob: RoyalRender Job instance. + + """ + start_frame = int(instance.data["frameStartHandle"]) + end_frame = int(instance.data["frameEndHandle"]) + + batch_name = os.path.basename(script_path) + jobname = "%s - %s" % (batch_name, instance.name) + if is_in_tests(): + batch_name += datetime.now().strftime("%d%m%Y%H%M%S") + + render_dir = os.path.normpath(os.path.dirname(render_path)) + output_filename_0 = self.pad_file_name(render_path, str(start_frame)) + file_name, file_ext = os.path.splitext( + os.path.basename(output_filename_0)) + + custom_attributes = [] + if is_running_from_build(): + custom_attributes = [ + CustomAttribute( + name="OpenPypeVersion", + value=os.environ.get("OPENPYPE_VERSION")) + ] + + # this will append expected files to instance as needed. 
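+        # e.g. a "#"-padded render path "beauty.####.exr" rendered for
+        # frames 1001-1003 expands to "beauty.1001.exr" .. "beauty.1003.exr"
+        # (names and frame range here are only illustrative).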
+        expected_files = self.expected_files(
+            instance, render_path, start_frame, end_frame)
+        instance.data["expectedFiles"].extend(expected_files)
+
+        job = RRJob(
+            Software="",
+            Renderer="",
+            SeqStart=int(start_frame),
+            SeqEnd=int(end_frame),
+            SeqStep=int(instance.data.get("byFrameStep", 1)),
+            SeqFileOffset=0,
+            Version=0,
+            SceneName=script_path,
+            IsActive=True,
+            ImageDir=render_dir.replace("\\", "/"),
+            ImageFilename=file_name,
+            ImageExtension=file_ext,
+            ImagePreNumberLetter="",
+            ImageSingleOutputFile=False,
+            SceneOS=get_rr_platform(),
+            Layer=node_name,
+            SceneDatabaseDir=script_path,
+            CustomSHotName=jobname,
+            CompanyProjectName=instance.context.data["projectName"],
+            ImageWidth=instance.data["resolutionWidth"],
+            ImageHeight=instance.data["resolutionHeight"],
+            CustomAttributes=custom_attributes
+        )
+
+        return job
+
+    def update_job_with_host_specific(self, instance, job):
+        """Host specific mapping for RRJob"""
+        raise NotImplementedError
+
+    @staticmethod
+    def _resolve_rr_path(context, rr_path_name):
+        # type: (pyblish.api.Context, str) -> str
+        rr_settings = (
+            context.data
+            ["system_settings"]
+            ["modules"]
+            ["royalrender"]
+        )
+        try:
+            default_servers = rr_settings["rr_paths"]
+            project_servers = (
+                context.data
+                ["project_settings"]
+                ["royalrender"]
+                ["rr_paths"]
+            )
+            rr_servers = {
+                k: default_servers[k]
+                for k in project_servers
+                if k in default_servers
+            }
+
+        except (AttributeError, KeyError):
+            # Handle situation where we had only one URL for Royal Render.
+            return context.data["defaultRRPath"][platform.system().lower()]
+
+        return rr_servers[rr_path_name][platform.system().lower()]
+
+    def expected_files(self, instance, path, start_frame, end_frame):
+        """Get expected files.
+
+        This function generates expected files from the provided
+        path and start/end frames.
+
+        It was taken from the Deadline module, but this should probably
+        be handled better in a collector to support more
+        flexible scenarios.
+
+        Args:
+            instance (Instance): Instance to read slate info from.
+            path (str): Output path.
+            start_frame (int): Start frame.
+            end_frame (int): End frame.
+
+        Returns:
+            list: List of expected files.
+
+        """
+        dir_name = os.path.dirname(path)
+        file = os.path.basename(path)
+
+        expected_files = []
+
+        if "#" in file:
+            pparts = file.split("#")
+            padding = "%0{}d".format(len(pparts) - 1)
+            file = pparts[0] + padding + pparts[-1]
+
+        if "%" not in file:
+            expected_files.append(path)
+            return expected_files
+
+        if instance.data.get("slate"):
+            start_frame -= 1
+
+        expected_files.extend(
+            os.path.join(dir_name, (file % i)).replace("\\", "/")
+            for i in range(start_frame, (end_frame + 1))
+        )
+        return expected_files
+
+    def pad_file_name(self, path, first_frame):
+        """Return output file path with #### for padding.
+
+        RR requires the path to be formatted with # in place of numbers.
+        For example `/path/to/render.####.png`
+
+        Args:
+            path (str): path to rendered image
+            first_frame (str): first frame from representation, to cleanly
+                replace with # padding
+
+        Returns:
+            str
+
+        """
+        self.log.debug("pad_file_name path: `{}`".format(path))
+        if "%" in path:
+            match = re.search(r"%0(\d+)d", path)
+            self.log.debug("_ match: `{}`".format(match))
+            # turn printf-style padding into hashes,
+            # e.g. "render.%04d.png" -> "render.####.png"
+            if match:
+                padding = int(match.group(1))
+                return path.replace(match.group(0), "#" * padding)
+            return path
+        if "#" in path:
+            self.log.debug("already padded: `{}`".format(path))
+            return path
+
+        if first_frame:
+            padding = len(first_frame)
+            path = path.replace(first_frame, "#" * padding)
+
+        return path
diff --git a/openpype/modules/royalrender/plugins/publish/collect_default_rr_path.py b/openpype/modules/royalrender/plugins/publish/collect_default_rr_path.py
deleted file mode 100644
index 3ce95e0c50e..00000000000
--- a/openpype/modules/royalrender/plugins/publish/collect_default_rr_path.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Collect default Deadline server."""
-import pyblish.api
-
-
-class CollectDefaultRRPath(pyblish.api.ContextPlugin):
-    """Collect default Royal Render path."""
-
-    order = pyblish.api.CollectorOrder
-    label = "Default Royal Render Path"
-
-    def process(self, context):
-        try:
-            rr_module = context.data.get(
-                "openPypeModules")["royalrender"]
-        except AttributeError:
-            msg = "Cannot get OpenPype Royal Render module."
-            self.log.error(msg)
-            raise AssertionError(msg)
-
-        # get default deadline webservice url from deadline module
-        self.log.debug(rr_module.rr_paths)
-        context.data["defaultRRPath"] = rr_module.rr_paths["default"]  # noqa: E501
diff --git a/openpype/modules/royalrender/plugins/publish/collect_rr_path_from_instance.py b/openpype/modules/royalrender/plugins/publish/collect_rr_path_from_instance.py
index 6a3dc276f37..e978ce5bed1 100644
--- a/openpype/modules/royalrender/plugins/publish/collect_rr_path_from_instance.py
+++ b/openpype/modules/royalrender/plugins/publish/collect_rr_path_from_instance.py
@@ -5,29 +5,31 @@ class CollectRRPathFromInstance(pyblish.api.InstancePlugin):
     """Collect RR Path from instance."""
 
-    order = pyblish.api.CollectorOrder + 0.01
-    label = "Royal Render Path from the Instance"
-    families = ["rendering"]
+    order = pyblish.api.CollectorOrder
+    label = "Collect Royal Render path name from the Instance"
+    families = ["render", "prerender", "renderlayer"]
 
     def process(self, instance):
-        instance.data["rrPath"] = self._collect_rr_path(instance)
+        instance.data["rrPathName"] = self._collect_rr_path_name(instance)
         self.log.info(
-            "Using {} for submission.".format(instance.data["rrPath"]))
+            "Using '{}' for submission.".format(instance.data["rrPathName"]))
 
     @staticmethod
-    def _collect_rr_path(render_instance):
+    def _collect_rr_path_name(instance):
         # type: (pyblish.api.Instance) -> str
-        """Get Royal Render path from render instance."""
+        """Get Royal Render path name from render instance."""
         rr_settings = (
-            render_instance.context.data
+            instance.context.data
             ["system_settings"]
             ["modules"]
             ["royalrender"]
         )
+        if not instance.data.get("rrPaths"):
+            return "default"
         try:
             default_servers = rr_settings["rr_paths"]
             project_servers = (
-                render_instance.context.data
+                instance.context.data
                 ["project_settings"]
                 ["royalrender"]
                 ["rr_paths"]
             )
@@ -40,10 +42,6 @@ def _collect_rr_path(render_instance):
         except (AttributeError, KeyError):
             # Handle situation were we had only one url for royal render.
- return render_instance.context.data["defaultRRPath"] + return rr_settings["rr_paths"]["default"] - return rr_servers[ - list(rr_servers.keys())[ - int(render_instance.data.get("rrPaths")) - ] - ] + return list(rr_servers.keys())[int(instance.data.get("rrPaths"))] diff --git a/openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py b/openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py index 42351b781ba..1bfee19e3d7 100644 --- a/openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py +++ b/openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py @@ -71,7 +71,7 @@ class CollectSequencesFromJob(pyblish.api.ContextPlugin): """Gather file sequences from job directory. When "OPENPYPE_PUBLISH_DATA" environment variable is set these paths - (folders or .json files) are parsed for image sequences. Otherwise the + (folders or .json files) are parsed for image sequences. Otherwise, the current working directory is searched for file sequences. """ diff --git a/openpype/modules/royalrender/plugins/publish/create_maya_royalrender_job.py b/openpype/modules/royalrender/plugins/publish/create_maya_royalrender_job.py new file mode 100644 index 00000000000..22d910b7cd0 --- /dev/null +++ b/openpype/modules/royalrender/plugins/publish/create_maya_royalrender_job.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +"""Submitting render job to RoyalRender.""" +import os + +from maya.OpenMaya import MGlobal + +from openpype.modules.royalrender import lib +from openpype.pipeline.farm.tools import iter_expected_files + + +class CreateMayaRoyalRenderJob(lib.BaseCreateRoyalRenderJob): + label = "Create Maya Render job in RR" + hosts = ["maya"] + families = ["renderlayer"] + + def update_job_with_host_specific(self, instance, job): + job.Software = "Maya" + job.Version = "{0:.2f}".format(MGlobal.apiVersion() / 10000) + if instance.data.get("cameras"): + job.Camera = instance.data["cameras"][0].replace("'", '"') + workspace = instance.context.data["workspaceDir"] + job.SceneDatabaseDir = workspace + + return job + + def process(self, instance): + """Plugin entry point.""" + super(CreateMayaRoyalRenderJob, self).process(instance) + + expected_files = instance.data["expectedFiles"] + first_file_path = next(iter_expected_files(expected_files)) + output_dir = os.path.dirname(first_file_path) + instance.data["outputDir"] = output_dir + + layer = instance.data["setMembers"] # type: str + layer_name = layer.removeprefix("rs_") + + job = self.get_job(instance, self.scene_path, first_file_path, + layer_name) + job = self.update_job_with_host_specific(instance, job) + + instance.data["rrJobs"].append(job) diff --git a/openpype/modules/royalrender/plugins/publish/create_nuke_royalrender_job.py b/openpype/modules/royalrender/plugins/publish/create_nuke_royalrender_job.py new file mode 100644 index 00000000000..71daa6edf80 --- /dev/null +++ b/openpype/modules/royalrender/plugins/publish/create_nuke_royalrender_job.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +"""Submitting render job to RoyalRender.""" +import re + +from openpype.modules.royalrender import lib + + +class CreateNukeRoyalRenderJob(lib.BaseCreateRoyalRenderJob): + """Creates separate rendering job for Royal Render""" + label = "Create Nuke Render job in RR" + hosts = ["nuke"] + families = ["render", "prerender"] + + def process(self, instance): + super(CreateNukeRoyalRenderJob, self).process(instance) + + # redefinition of families + if "render" in instance.data["family"]: + 
instance.data["family"] = "write" + instance.data["families"].insert(0, "render2d") + elif "prerender" in instance.data["family"]: + instance.data["family"] = "write" + instance.data["families"].insert(0, "prerender") + + jobs = self.create_jobs(instance) + for job in jobs: + job = self.update_job_with_host_specific(instance, job) + + instance.data["rrJobs"].append(job) + + def update_job_with_host_specific(self, instance, job): + nuke_version = re.search( + r"\d+\.\d+", instance.context.data.get("hostVersion")) + + job.Software = "Nuke" + job.Version = nuke_version.group() + + return job + + def create_jobs(self, instance): + """Nuke creates multiple RR jobs - for baking etc.""" + # get output path + render_path = instance.data['path'] + script_path = self.scene_path + node = instance.data["transientData"]["node"] + + # main job + jobs = [ + self.get_job( + instance, + script_path, + render_path, + node.name() + ) + ] + + for baking_script in instance.data.get("bakingNukeScripts", []): + render_path = baking_script["bakeRenderPath"] + script_path = baking_script["bakeScriptPath"] + exe_node_name = baking_script["bakeWriteNodeName"] + + jobs.append(self.get_job( + instance, + script_path, + render_path, + exe_node_name + )) + + return jobs diff --git a/openpype/modules/royalrender/plugins/publish/create_publish_royalrender_job.py b/openpype/modules/royalrender/plugins/publish/create_publish_royalrender_job.py new file mode 100644 index 00000000000..3eb49a39ee4 --- /dev/null +++ b/openpype/modules/royalrender/plugins/publish/create_publish_royalrender_job.py @@ -0,0 +1,286 @@ +# -*- coding: utf-8 -*- +"""Create publishing job on RoyalRender.""" +import os +import attr +import json +import re + +import pyblish.api + +from openpype.modules.royalrender.rr_job import ( + RRJob, + RREnvList, + get_rr_platform +) +from openpype.pipeline.publish import KnownPublishError +from openpype.pipeline import ( + legacy_io, +) +from openpype.pipeline.farm.pyblish_functions import ( + create_skeleton_instance, + create_instances_for_aov, + attach_instances_to_subset, + prepare_representations, + create_metadata_path +) +from openpype.pipeline import publish + + +class CreatePublishRoyalRenderJob(pyblish.api.InstancePlugin, + publish.ColormanagedPyblishPluginMixin): + """Creates job which publishes rendered files to publish area. + + Job waits until all rendering jobs are finished, triggers `publish` command + where it reads from prepared .json file with metadata about what should + be published, renames prepared images and publishes them. + + When triggered it produces .log file next to .json file in work area. 
+ """ + label = "Create publish job in RR" + order = pyblish.api.IntegratorOrder + 0.2 + icon = "tractor" + targets = ["local"] + hosts = ["fusion", "maya", "nuke", "celaction", "aftereffects", "harmony"] + families = ["render.farm", "prerender.farm", + "renderlayer", "imagesequence", "vrayscene"] + aov_filter = {"maya": [r".*([Bb]eauty).*"], + "aftereffects": [r".*"], # for everything from AE + "harmony": [r".*"], # for everything from AE + "celaction": [r".*"]} + + skip_integration_repre_list = [] + + # mapping of instance properties to be transferred to new instance + # for every specified family + instance_transfer = { + "slate": ["slateFrames", "slate"], + "review": ["lutPath"], + "render2d": ["bakingNukeScripts", "version"], + "renderlayer": ["convertToScanline"] + } + + # list of family names to transfer to new family if present + families_transfer = ["render3d", "render2d", "ftrack", "slate"] + + environ_job_filter = [ + "OPENPYPE_METADATA_FILE" + ] + + environ_keys = [ + "FTRACK_API_USER", + "FTRACK_API_KEY", + "FTRACK_SERVER", + "AVALON_APP_NAME", + "OPENPYPE_USERNAME", + "OPENPYPE_SG_USER", + "OPENPYPE_MONGO" + ] + priority = 50 + + def process(self, instance): + context = instance.context + self.context = context + self.anatomy = instance.context.data["anatomy"] + + if not instance.data.get("farm"): + self.log.info("Skipping local instance.") + return + + instance_skeleton_data = create_skeleton_instance( + instance, + families_transfer=self.families_transfer, + instance_transfer=self.instance_transfer) + + do_not_add_review = False + if instance.data.get("review") is False: + self.log.debug("Instance has review explicitly disabled.") + do_not_add_review = True + + if isinstance(instance.data.get("expectedFiles")[0], dict): + instances = create_instances_for_aov( + instance, instance_skeleton_data, + self.aov_filter, self.skip_integration_repre_list, + do_not_add_review) + + else: + representations = prepare_representations( + instance_skeleton_data, + instance.data.get("expectedFiles"), + self.anatomy, + self.aov_filter, + self.skip_integration_repre_list, + do_not_add_review, + instance.context, + self + ) + + if "representations" not in instance_skeleton_data.keys(): + instance_skeleton_data["representations"] = [] + + # add representation + instance_skeleton_data["representations"] += representations + instances = [instance_skeleton_data] + + # attach instances to subset + if instance.data.get("attachTo"): + instances = attach_instances_to_subset( + instance.data.get("attachTo"), instances + ) + + self.log.info("Creating RoyalRender Publish job ...") + + if not instance.data.get("rrJobs"): + self.log.error(("There is no prior RoyalRender " + "job on the instance.")) + raise KnownPublishError( + "Can't create publish job without prior rendering jobs first") + + rr_job = self.get_job(instance, instances) + instance.data["rrJobs"].append(rr_job) + + # publish job file + publish_job = { + "asset": instance_skeleton_data["asset"], + "frameStart": instance_skeleton_data["frameStart"], + "frameEnd": instance_skeleton_data["frameEnd"], + "fps": instance_skeleton_data["fps"], + "source": instance_skeleton_data["source"], + "user": instance.context.data["user"], + "version": instance.context.data["version"], # workfile version + "intent": instance.context.data.get("intent"), + "comment": instance.context.data.get("comment"), + "job": attr.asdict(rr_job), + "session": legacy_io.Session.copy(), + "instances": instances + } + + metadata_path, rootless_metadata_path = \ + 
create_metadata_path(instance, self.anatomy)
+
+        self.log.info("Writing json file: {}".format(metadata_path))
+        with open(metadata_path, "w") as f:
+            json.dump(publish_job, f, indent=4, sort_keys=True)
+
+    def get_job(self, instance, instances):
+        """Create RR publishing job.
+
+        Based on provided original instance and additional instances,
+        create publishing job and return it to be submitted to farm.
+
+        Args:
+            instance (Instance): Original instance.
+            instances (list of Instance): List of instances to
+                be published on farm.
+
+        Returns:
+            RRJob: RoyalRender publish job.
+
+        """
+        data = instance.data.copy()
+        subset = data["subset"]
+        jobname = "Publish - {subset}".format(subset=subset)
+
+        # Transfer the environment from the original job to this dependent
+        # job, so they use the same environment
+        metadata_path, rootless_metadata_path = \
+            create_metadata_path(instance, self.anatomy)
+
+        anatomy_data = instance.context.data["anatomyData"]
+
+        environment = RREnvList({
+            "AVALON_PROJECT": anatomy_data["project"]["name"],
+            "AVALON_ASSET": anatomy_data["asset"],
+            "AVALON_TASK": anatomy_data["task"]["name"],
+            "OPENPYPE_USERNAME": anatomy_data["user"]
+        })
+
+        # add environments from self.environ_keys
+        for env_key in self.environ_keys:
+            if os.getenv(env_key):
+                environment[env_key] = os.environ[env_key]
+
+        # pass environment keys from self.environ_job_filter
+        # and collect all pre_ids to wait for
+        job_environ = {}
+        jobs_pre_ids = []
+        for job in instance.data["rrJobs"]:  # type: RRJob
+            if job.rrEnvList:
+                job_environ.update(
+                    dict(RREnvList.parse(job.rrEnvList))
+                )
+            jobs_pre_ids.append(job.PreID)
+
+        for env_j_key in self.environ_job_filter:
+            if job_environ.get(env_j_key):
+                environment[env_j_key] = job_environ[env_j_key]
+
+        priority = self.priority or instance.data.get("priority", 50)
+
+        # RR requires an absolute path or all jobs won't show up in rrControl
+        abs_metadata_path = self.anatomy.fill_root(rootless_metadata_path)
+
+        # command line set in E01__OpenPype__PublishJob.cfg, here only
+        # additional logging
+        args = [
+            ">", os.path.join(os.path.dirname(abs_metadata_path),
+                              "rr_out.log"),
+            "2>&1"
+        ]
+
+        job = RRJob(
+            Software="OpenPype",
+            Renderer="Once",
+            SeqStart=1,
+            SeqEnd=1,
+            SeqStep=1,
+            SeqFileOffset=0,
+            Version=self._sanitize_version(os.environ.get("OPENPYPE_VERSION")),
+            SceneName=abs_metadata_path,
+            # command line arguments
+            CustomAddCmdFlags=" ".join(args),
+            IsActive=True,
+            ImageFilename="execOnce.file",
+            ImageDir="",
+            ImageExtension="",
+            ImagePreNumberLetter="",
+            SceneOS=get_rr_platform(),
+            rrEnvList=environment.serialize(),
+            Priority=priority,
+            CustomSHotName=jobname,
+            CompanyProjectName=instance.context.data["projectName"]
+        )
+
+        # add assembly jobs as dependencies
+        if instance.data.get("tileRendering"):
+            self.log.info("Adding tile assembly jobs as dependencies...")
+            job.WaitForPreIDs += instance.data.get("assemblySubmissionJobs")
+        elif instance.data.get("bakingSubmissionJobs"):
+            self.log.info("Adding baking submission jobs as dependencies...")
+            job.WaitForPreIDs += instance.data["bakingSubmissionJobs"]
+        else:
+            job.WaitForPreIDs += jobs_pre_ids
+
+        return job
+
+    def _sanitize_version(self, version):
+        """Return version in format MAJOR.MINORPATCH.
+
+        3.15.7-nightly.2 >> 3.157
+        """
+        VERSION_REGEX = re.compile(
+            r"(?P<major>0|[1-9]\d*)"
+            r"\.(?P<minor>0|[1-9]\d*)"
+            r"\.(?P<patch>0|[1-9]\d*)"
+            r"(?:-(?P<prerelease>[a-zA-Z\d\-.]*))?"
+            r"(?:\+(?P<buildmetadata>[a-zA-Z\d\-.]*))?"
+        )
+
+        valid_parts = VERSION_REGEX.findall(version)
+        if len(valid_parts) != 1:
+            # Return the version unchanged if it cannot be parsed
+            return version
+
+        # Unpack found version
+        major, minor, patch, pre, post = valid_parts[0]
+
+        return "{}.{}{}".format(major, minor, patch)
diff --git a/openpype/modules/royalrender/plugins/publish/submit_jobs_to_royalrender.py b/openpype/modules/royalrender/plugins/publish/submit_jobs_to_royalrender.py
new file mode 100644
index 00000000000..8fc8604b839
--- /dev/null
+++ b/openpype/modules/royalrender/plugins/publish/submit_jobs_to_royalrender.py
@@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+"""Submit jobs to RoyalRender."""
+import tempfile
+import platform
+
+import pyblish.api
+from openpype.modules.royalrender.api import (
+    RRJob,
+    Api as rrApi,
+    SubmitterParameter
+)
+from openpype.pipeline.publish import KnownPublishError
+
+
+class SubmitJobsToRoyalRender(pyblish.api.ContextPlugin):
+    """Find all jobs, create submission XML and submit it to RoyalRender."""
+    label = "Submit jobs to RoyalRender"
+    order = pyblish.api.IntegratorOrder + 0.3
+    targets = ["local"]
+
+    def __init__(self):
+        super(SubmitJobsToRoyalRender, self).__init__()
+        self._rr_root = None
+        self._rr_api = None
+        self._submission_parameters = []
+
+    def process(self, context):
+        rr_settings = (
+            context.data
+            ["system_settings"]
+            ["modules"]
+            ["royalrender"]
+        )
+
+        if rr_settings["enabled"] is not True:
+            self.log.warning("RoyalRender module is disabled.")
+            return
+
+        # iterate over all instances and try to find RRJobs
+        jobs = []
+        instance_rr_path = None
+        for instance in context:
+            if isinstance(instance.data.get("rrJob"), RRJob):
+                jobs.append(instance.data.get("rrJob"))
+            if instance.data.get("rrJobs"):
+                if all(
+                        isinstance(job, RRJob)
+                        for job in instance.data.get("rrJobs")):
+                    jobs += instance.data.get("rrJobs")
+            if instance.data.get("rrPathName"):
+                instance_rr_path = instance.data["rrPathName"]
+
+        if jobs:
+            self._rr_root = self._resolve_rr_path(context, instance_rr_path)
+            if not self._rr_root:
+                raise KnownPublishError(
+                    ("Missing RoyalRender root. 
" + "You need to configure RoyalRender module.")) + self._rr_api = rrApi(self._rr_root) + self._submission_parameters = self.get_submission_parameters() + self.process_submission(jobs) + return + + self.log.info("No RoyalRender jobs found") + + def process_submission(self, jobs): + # type: ([RRJob]) -> None + + idx_pre_id = 0 + for job in jobs: + job.PreID = idx_pre_id + if idx_pre_id > 0: + job.WaitForPreIDs.append(idx_pre_id - 1) + idx_pre_id += 1 + + submission = rrApi.create_submission( + jobs, + self._submission_parameters) + + xml = tempfile.NamedTemporaryFile(suffix=".xml", delete=False) + with open(xml.name, "w") as f: + f.write(submission.serialize()) + + self.log.info("submitting job(s) file: {}".format(xml.name)) + self._rr_api.submit_file(file=xml.name) + + def create_file(self, name, ext, contents=None): + temp = tempfile.NamedTemporaryFile( + dir=self.tempdir, + suffix=ext, + prefix=name + '.', + delete=False, + ) + + if contents: + with open(temp.name, 'w') as f: + f.write(contents) + + return temp.name + + def get_submission_parameters(self): + return [SubmitterParameter("RequiredMemory", "0")] + + @staticmethod + def _resolve_rr_path(context, rr_path_name): + # type: (pyblish.api.Context, str) -> str + rr_settings = ( + context.data + ["system_settings"] + ["modules"] + ["royalrender"] + ) + try: + default_servers = rr_settings["rr_paths"] + project_servers = ( + context.data + ["project_settings"] + ["royalrender"] + ["rr_paths"] + ) + rr_servers = { + k: default_servers[k] + for k in project_servers + if k in default_servers + } + + except (AttributeError, KeyError): + # Handle situation were we had only one url for royal render. + return context.data["defaultRRPath"][platform.system().lower()] + + return rr_servers[rr_path_name][platform.system().lower()] diff --git a/openpype/modules/royalrender/rr_job.py b/openpype/modules/royalrender/rr_job.py index c660eceac76..b85ac592f88 100644 --- a/openpype/modules/royalrender/rr_job.py +++ b/openpype/modules/royalrender/rr_job.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- """Python wrapper for RoyalRender XML job file.""" +import sys from xml.dom import minidom as md import attr from collections import namedtuple, OrderedDict @@ -8,8 +9,36 @@ CustomAttribute = namedtuple("CustomAttribute", ["name", "value"]) +def get_rr_platform(): + # type: () -> str + """Returns name of platform used in rr jobs.""" + if sys.platform.lower() in ["win32", "win64"]: + return "windows" + elif sys.platform.lower() == "darwin": + return "mac" + else: + return "linux" + + +class RREnvList(dict): + def serialize(self): + # VariableA=ValueA~~~VariableB=ValueB + return "~~~".join( + ["{}={}".format(k, v) for k, v in sorted(self.items())]) + + @staticmethod + def parse(data): + # type: (str) -> RREnvList + """Parse rrEnvList string and return it as RREnvList object.""" + out = RREnvList() + for var in data.split("~~~"): + k, v = var.split("=") + out[k] = v + return out + + @attr.s -class RRJob: +class RRJob(object): """Mapping of Royal Render job file to a data class.""" # Required @@ -35,7 +64,7 @@ class RRJob: # Is the job enabled for submission? # enabled by default - IsActive = attr.ib() # type: str + IsActive = attr.ib() # type: bool # Sequence settings of this job SeqStart = attr.ib() # type: int @@ -60,7 +89,7 @@ class RRJob: # If you render a single file, e.g. Quicktime or Avi, then you have to # set this value. Videos have to be rendered at once on one client. 
- ImageSingleOutputFile = attr.ib(default="false") # type: str + ImageSingleOutputFile = attr.ib(default=False) # type: bool # Semi-Required (required for some render applications) # ----------------------------------------------------- @@ -87,7 +116,7 @@ class RRJob: # Frame Padding of the frame number in the rendered filename. # Some render config files are setting the padding at render time. - ImageFramePadding = attr.ib(default=None) # type: str + ImageFramePadding = attr.ib(default=None) # type: int # Some render applications support overriding the image format at # the render commandline. @@ -108,7 +137,7 @@ class RRJob: # jobs send from this machine. If a job with the PreID was found, then # this jobs waits for the other job. Note: This flag can be used multiple # times to wait for multiple jobs. - WaitForPreID = attr.ib(default=None) # type: int + WaitForPreIDs = attr.ib(factory=list) # type: list # List of submitter options per job # list item must be of `SubmitterParameter` type @@ -120,6 +149,9 @@ class RRJob: # list item must be of `CustomAttribute` named tuple CustomAttributes = attr.ib(factory=list) # type: list + # This is used to hold command line arguments for Execute job + CustomAddCmdFlags = attr.ib(default=None) # type: str + # Additional information for subsequent publish script and # for better display in rrControl UserName = attr.ib(default=None) # type: str @@ -129,6 +161,7 @@ class RRJob: CustomUserInfo = attr.ib(default=None) # type: str SubmitMachine = attr.ib(default=None) # type: str Color_ID = attr.ib(default=2) # type: int + CompanyProjectName = attr.ib(default=None) # type: str RequiredLicenses = attr.ib(default=None) # type: str @@ -137,6 +170,10 @@ class RRJob: TotalFrames = attr.ib(default=None) # type: int Tiled = attr.ib(default=None) # type: str + # Environment + # only used in RR 8.3 and newer + rrEnvList = attr.ib(default=None) # type: str + class SubmitterParameter: """Wrapper for Submitter Parameters.""" @@ -160,7 +197,7 @@ def serialize(self): @attr.s -class SubmitFile: +class SubmitFile(object): """Class wrapping Royal Render submission XML file.""" # Syntax version of the submission file. @@ -169,11 +206,11 @@ class SubmitFile: # Delete submission file after processing DeleteXML = attr.ib(default=1) # type: int - # List of submitter options per job + # List of the submitter options per job. # list item must be of `SubmitterParameter` type SubmitterParameters = attr.ib(factory=list) # type: list - # List of job is submission batch. + # List of the jobs in submission batch. # list item must be of type `RRJob` Jobs = attr.ib(factory=list) # type: list @@ -225,7 +262,7 @@ def filter_data(a, v): # foo=bar~baz~goo self._process_submitter_parameters( self.SubmitterParameters, root, job_file) - + root.appendChild(job_file) for job in self.Jobs: # type: RRJob if not isinstance(job, RRJob): raise AttributeError( @@ -241,16 +278,28 @@ def filter_data(a, v): job, dict_factory=OrderedDict, filter=filter_data) serialized_job.pop("CustomAttributes") serialized_job.pop("SubmitterParameters") + # we are handling `WaitForPreIDs` separately. 
+ wait_pre_ids = serialized_job.pop("WaitForPreIDs", []) for custom_attr in job_custom_attributes: # type: CustomAttribute serialized_job["Custom{}".format( custom_attr.name)] = custom_attr.value for item, value in serialized_job.items(): - xml_attr = root.create(item) + xml_attr = root.createElement(item) + xml_attr.appendChild( + root.createTextNode(str(value)) + ) + xml_job.appendChild(xml_attr) + + # WaitForPreID - can be used multiple times + for pre_id in wait_pre_ids: + xml_attr = root.createElement("WaitForPreID") xml_attr.appendChild( - root.createTextNode(value) + root.createTextNode(str(pre_id)) ) xml_job.appendChild(xml_attr) + job_file.appendChild(xml_job) + return root.toprettyxml(indent="\t") diff --git a/openpype/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype.png b/openpype/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype.png new file mode 100644 index 00000000000..68c5aec1174 Binary files /dev/null and b/openpype/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype.png differ diff --git a/openpype/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype__PublishJob.cfg b/openpype/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype__PublishJob.cfg new file mode 100644 index 00000000000..864eeaf15aa --- /dev/null +++ b/openpype/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype__PublishJob.cfg @@ -0,0 +1,71 @@ +IconApp= E01__OpenPype.png +Name= OpenPype +rendererName= Once +Version= 1 +Version_Minor= 0 +Type=Execute +TYPEv9=Execute +ExecuteJobType=Once + + +################################# [Windows] [Linux] [Osx] ################################## + + +CommandLine=> + +CommandLine= + + +::win CommandLine= set "CUDA_VISIBLE_DEVICES=" +::lx CommandLine= setenv CUDA_VISIBLE_DEVICES +::osx CommandLine= setenv CUDA_VISIBLE_DEVICES + + +CommandLine= + + +CommandLine= + + +CommandLine= + + +CommandLine= "" --headless publish + --targets royalrender + --targets farm + + + +CommandLine= + + + + +################################## Render Settings ################################## + + + +################################## Submitter Settings ################################## +StartMultipleInstances= 0~0 +SceneFileExtension= *.json +AllowImageNameChange= 0 +AllowImageDirChange= 0 +SequenceDivide= 0~1 +PPSequenceCheck=0~0 +PPCreateSmallVideo=0~0 +PPCreateFullVideo=0~0 +AllowLocalRenderOut= 0~0 + + +################################## Client Settings ################################## + +IconApp=E01__OpenPype.png + +licenseFailLine= + +errorSearchLine= + +permanentErrorSearchLine = + +Frozen_MinCoreUsage=0.3 +Frozen_Minutes=30 diff --git a/openpype/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype___global.inc b/openpype/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype___global.inc new file mode 100644 index 00000000000..ba38337340d --- /dev/null +++ b/openpype/modules/royalrender/rr_root/render_apps/_config/E01__OpenPype___global.inc @@ -0,0 +1,2 @@ +IconApp= E01__OpenPype.png +Name= OpenPype diff --git a/openpype/modules/royalrender/rr_root/render_apps/_install_paths/OpenPype.cfg b/openpype/modules/royalrender/rr_root/render_apps/_install_paths/OpenPype.cfg new file mode 100644 index 00000000000..07f7547d297 --- /dev/null +++ b/openpype/modules/royalrender/rr_root/render_apps/_install_paths/OpenPype.cfg @@ -0,0 +1,12 @@ +[Windows] +Executable= openpype_console.exe +Path= OS; \OpenPype\*\openpype_console.exe +Path= 32; \OpenPype\*\openpype_console.exe + +[Linux] +Executable= openpype_console +Path= 
OS; /opt/openpype/*/openpype_console
+
+[Mac]
+Executable= openpype_console
+Path= OS; /Applications/OpenPype*/Content/MacOS/openpype_console
diff --git a/openpype/modules/royalrender/rr_root/render_apps/_prepost_scripts/OpenPypeEnvironment.cfg b/openpype/modules/royalrender/rr_root/render_apps/_prepost_scripts/OpenPypeEnvironment.cfg
new file mode 100644
index 00000000000..70f0bc2e244
--- /dev/null
+++ b/openpype/modules/royalrender/rr_root/render_apps/_prepost_scripts/OpenPypeEnvironment.cfg
@@ -0,0 +1,11 @@
+PrePostType= pre
+CommandLine=
+
+CommandLine= rrPythonconsole" > "render_apps/_prepost_scripts/PreOpenPypeInjectEnvironments.py"
+
+CommandLine=
+
+
+CommandLine= ""
+CommandLine=
+
diff --git a/openpype/modules/royalrender/rr_root/render_apps/_prepost_scripts/PreOpenPypeInjectEnvironments.py b/openpype/modules/royalrender/rr_root/render_apps/_prepost_scripts/PreOpenPypeInjectEnvironments.py
new file mode 100644
index 00000000000..891de9594c5
--- /dev/null
+++ b/openpype/modules/royalrender/rr_root/render_apps/_prepost_scripts/PreOpenPypeInjectEnvironments.py
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 -*-
+import os
+
+os.environ["OPENPYPE_TESTVAR"] = "OpenPype was here"
diff --git a/openpype/pipeline/farm/pyblish_functions.py b/openpype/pipeline/farm/pyblish_functions.py
new file mode 100644
index 00000000000..2df8269d792
--- /dev/null
+++ b/openpype/pipeline/farm/pyblish_functions.py
@@ -0,0 +1,881 @@
+import copy
+import attr
+import pyblish.api
+import os
+import clique
+from copy import deepcopy
+import re
+import warnings
+
+from openpype.pipeline import (
+    get_current_project_name,
+    get_representation_path,
+    Anatomy,
+)
+from openpype.client import (
+    get_last_version_by_subset_name,
+    get_representations
+)
+from openpype.lib import Logger
+from openpype.pipeline.publish import KnownPublishError
+from openpype.pipeline.farm.patterning import match_aov_pattern
+
+
+@attr.s
+class TimeData(object):
+    """Structure used to handle time related data."""
+    start = attr.ib(type=int)
+    end = attr.ib(type=int)
+    fps = attr.ib()
+    step = attr.ib(default=1, type=int)
+    handle_start = attr.ib(default=0, type=int)
+    handle_end = attr.ib(default=0, type=int)
+
+
+def remap_source(path, anatomy):
+    """Try to remap path to rootless path.
+
+    Args:
+        path (str): Path to be remapped to rootless.
+        anatomy (Anatomy): Anatomy object to handle remapping
+            itself.
+
+    Returns:
+        str: Remapped path.
+
+    Throws:
+        ValueError: if the root cannot be found.
+
+    """
+    success, rootless_path = (
+        anatomy.find_root_template_from_path(path)
+    )
+    if success:
+        source = rootless_path
+    else:
+        raise ValueError(
+            "Root from template path cannot be found: {}".format(path))
+    return source
+
+
+def extend_frames(asset, subset, start, end):
+    """Get latest version of asset and update frame range.
+
+    Based on minimum and maximum values.
+
+    Arguments:
+        asset (str): asset name
+        subset (str): subset name
+        start (int): start frame
+        end (int): end frame
+
+    Returns:
+        (int, int): updated frame start/end
+
+    """
+    # Frame comparison
+    prev_start = None
+    prev_end = None
+
+    project_name = get_current_project_name()
+    version = get_last_version_by_subset_name(
+        project_name,
+        subset,
+        asset_name=asset
+    )
+
+    # Set prev start / end frames for comparison
+    if not prev_start and not prev_end:
+        prev_start = version["data"]["frameStart"]
+        prev_end = version["data"]["frameEnd"]
+
+    updated_start = min(start, prev_start)
+    updated_end = max(end, prev_end)
+
+    return updated_start, updated_end
+
+
+def get_time_data_from_instance_or_context(instance):
+    """Get time data from instance (or context).
+
+    If time data is not found on instance, data from context will be used.
+
+    Args:
+        instance (pyblish.api.Instance): Source instance.
+
+    Returns:
+        TimeData: dataclass holding time information.
+
+    """
+    return TimeData(
+        start=(instance.data.get("frameStart") or
+               instance.context.data.get("frameStart")),
+        end=(instance.data.get("frameEnd") or
+             instance.context.data.get("frameEnd")),
+        fps=(instance.data.get("fps") or
+             instance.context.data.get("fps")),
+        handle_start=(instance.data.get("handleStart") or
+                      instance.context.data.get("handleStart")),  # noqa: E501
+        handle_end=(instance.data.get("handleEnd") or
+                    instance.context.data.get("handleEnd"))
+    )
+
+
+def get_transferable_representations(instance):
+    """Transfer representations from original instance.
+
+    This will get all representations on the original instance that
+    are flagged with `publish_on_farm` and return them to be included
+    on skeleton instance if needed.
+
+    Args:
+        instance (pyblish.api.Instance): Original instance to be processed.
+
+    Return:
+        list of dicts: List of transferable representations.
+
+    """
+    anatomy = instance.context.data["anatomy"]  # type: Anatomy
+    to_transfer = []
+
+    for representation in instance.data.get("representations", []):
+        if "publish_on_farm" not in representation.get("tags", []):
+            continue
+
+        trans_rep = representation.copy()
+
+        staging_dir = trans_rep.get("stagingDir")
+
+        if staging_dir:
+            try:
+                trans_rep["stagingDir"] = remap_source(staging_dir, anatomy)
+            except ValueError:
+                log = Logger.get_logger("farm_publishing")
+                log.warning(
+                    ("Could not find root path for remapping \"{}\". "
+                     "This may cause issues on farm.").format(staging_dir))
+
+        to_transfer.append(trans_rep)
+    return to_transfer
+
+
+def create_skeleton_instance(
+        instance, families_transfer=None, instance_transfer=None):
+    # type: (pyblish.api.Instance, list, dict) -> dict
+    """Create skeleton instance from original instance data.
+
+    This will create dictionary containing skeleton
+    - common - data used for publishing rendered instances.
+    This skeleton instance is then extended with additional data
+    and serialized to be processed by farm job.
+
+    Args:
+        instance (pyblish.api.Instance): Original instance to
+            be used as a source of data.
+        families_transfer (list): List of family names to transfer
+            from the original instance to the skeleton.
+        instance_transfer (dict): Dict with keys as families and
+            values as a list of property names to transfer to the
+            new skeleton.
+
+    Returns:
+        dict: Dictionary with skeleton instance data.
+ + """ + # list of family names to transfer to new family if present + + context = instance.context + data = instance.data.copy() + anatomy = instance.context.data["anatomy"] # type: Anatomy + + # get time related data from instance (or context) + time_data = get_time_data_from_instance_or_context(instance) + + if data.get("extendFrames", False): + time_data.start, time_data.end = extend_frames( + data["asset"], + data["subset"], + time_data.start, + time_data.end, + ) + + source = data.get("source") or context.data.get("currentFile") + success, rootless_path = ( + anatomy.find_root_template_from_path(source) + ) + if success: + source = rootless_path + else: + # `rootless_path` is not set to `source` if none of roots match + log = Logger.get_logger("farm_publishing") + log.warning(("Could not find root path for remapping \"{}\". " + "This may cause issues.").format(source)) + + family = ("render" + if "prerender" not in instance.data["families"] + else "prerender") + families = [family] + + # pass review to families if marked as review + if data.get("review"): + families.append("review") + + instance_skeleton_data = { + "family": family, + "subset": data["subset"], + "families": families, + "asset": data["asset"], + "frameStart": time_data.start, + "frameEnd": time_data.end, + "handleStart": time_data.handle_start, + "handleEnd": time_data.handle_end, + "frameStartHandle": time_data.start - time_data.handle_start, + "frameEndHandle": time_data.end + time_data.handle_end, + "comment": data.get("comment"), + "fps": time_data.fps, + "source": source, + "extendFrames": data.get("extendFrames"), + "overrideExistingFrame": data.get("overrideExistingFrame"), + "pixelAspect": data.get("pixelAspect", 1), + "resolutionWidth": data.get("resolutionWidth", 1920), + "resolutionHeight": data.get("resolutionHeight", 1080), + "multipartExr": data.get("multipartExr", False), + "jobBatchName": data.get("jobBatchName", ""), + "useSequenceForReview": data.get("useSequenceForReview", True), + # map inputVersions `ObjectId` -> `str` so json supports it + "inputVersions": list(map(str, data.get("inputVersions", []))), + "colorspace": data.get("colorspace") + } + + # skip locking version if we are creating v01 + instance_version = data.get("version") # take this if exists + if instance_version != 1: + instance_skeleton_data["version"] = instance_version + + # transfer specific families from original instance to new render + for item in families_transfer: + if item in instance.data.get("families", []): + instance_skeleton_data["families"] += [item] + + # transfer specific properties from original instance based on + # mapping dictionary `instance_transfer` + for key, values in instance_transfer.items(): + if key in instance.data.get("families", []): + for v in values: + instance_skeleton_data[v] = instance.data.get(v) + + representations = get_transferable_representations(instance) + instance_skeleton_data["representations"] = [] + instance_skeleton_data["representations"] += representations + + return instance_skeleton_data + + +def _add_review_families(families): + """Adds review flag to families. + + Handles situation when new instances are created which should have review + in families. In that case they should have 'ftrack' too. + + TODO: This is ugly and needs to be refactored. Ftrack family should be + added in different way (based on if the module is enabled?) 
+ + """ + # if we have one representation with preview tag + # flag whole instance for review and for ftrack + if "ftrack" not in families and os.environ.get("FTRACK_SERVER"): + families.append("ftrack") + if "review" not in families: + families.append("review") + return families + + +def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter, + skip_integration_repre_list, + do_not_add_review, + context, + color_managed_plugin): + """Create representations for file sequences. + + This will return representations of expected files if they are not + in hierarchy of aovs. There should be only one sequence of files for + most cases, but if not - we create representation from each of them. + + Arguments: + skeleton_data (dict): instance data for which we are + setting representations + exp_files (list): list of expected files + anatomy (Anatomy): + aov_filter (dict): add review for specific aov names + skip_integration_repre_list (list): exclude specific extensions, + do_not_add_review (bool): explicitly skip review + color_managed_plugin (publish.ColormanagedPyblishPluginMixin) + Returns: + list of representations + + """ + representations = [] + host_name = os.environ.get("AVALON_APP", "") + collections, remainders = clique.assemble(exp_files) + + log = Logger.get_logger("farm_publishing") + + # create representation for every collected sequence + for collection in collections: + ext = collection.tail.lstrip(".") + preview = False + # TODO 'useSequenceForReview' is temporary solution which does + # not work for 100% of cases. We must be able to tell what + # expected files contains more explicitly and from what + # should be review made. + # - "review" tag is never added when is set to 'False' + if skeleton_data["useSequenceForReview"]: + # toggle preview on if multipart is on + if skeleton_data.get("multipartExr", False): + log.debug( + "Adding preview tag because its multipartExr" + ) + preview = True + else: + render_file_name = list(collection)[0] + # if filtered aov name is found in filename, toggle it for + # preview video rendering + preview = match_aov_pattern( + host_name, aov_filter, render_file_name + ) + + staging = os.path.dirname(list(collection)[0]) + success, rootless_staging_dir = ( + anatomy.find_root_template_from_path(staging) + ) + if success: + staging = rootless_staging_dir + else: + log.warning(( + "Could not find root path for remapping \"{}\"." + " This may cause issues on farm." 
+ ).format(staging)) + + frame_start = int(skeleton_data.get("frameStartHandle")) + if skeleton_data.get("slate"): + frame_start -= 1 + + # explicitly disable review by user + preview = preview and not do_not_add_review + rep = { + "name": ext, + "ext": ext, + "files": [os.path.basename(f) for f in list(collection)], + "frameStart": frame_start, + "frameEnd": int(skeleton_data.get("frameEndHandle")), + # If expectedFile are absolute, we need only filenames + "stagingDir": staging, + "fps": skeleton_data.get("fps"), + "tags": ["review"] if preview else [], + } + + # poor man exclusion + if ext in skip_integration_repre_list: + rep["tags"].append("delete") + + if skeleton_data.get("multipartExr", False): + rep["tags"].append("multipartExr") + + # support conversion from tiled to scanline + if skeleton_data.get("convertToScanline"): + log.info("Adding scanline conversion.") + rep["tags"].append("toScanline") + + representations.append(rep) + + if preview: + skeleton_data["families"] = _add_review_families( + skeleton_data["families"]) + + # add remainders as representations + for remainder in remainders: + ext = remainder.split(".")[-1] + + staging = os.path.dirname(remainder) + success, rootless_staging_dir = ( + anatomy.find_root_template_from_path(staging) + ) + if success: + staging = rootless_staging_dir + else: + log.warning(( + "Could not find root path for remapping \"{}\"." + " This may cause issues on farm." + ).format(staging)) + + rep = { + "name": ext, + "ext": ext, + "files": os.path.basename(remainder), + "stagingDir": staging, + } + + preview = match_aov_pattern( + host_name, aov_filter, remainder + ) + preview = preview and not do_not_add_review + if preview: + rep.update({ + "fps": skeleton_data.get("fps"), + "tags": ["review"] + }) + skeleton_data["families"] = \ + _add_review_families(skeleton_data["families"]) + + already_there = False + for repre in skeleton_data.get("representations", []): + # might be added explicitly before by publish_on_farm + already_there = repre.get("files") == rep["files"] + if already_there: + log.debug("repre {} already_there".format(repre)) + break + + if not already_there: + representations.append(rep) + + for rep in representations: + # inject colorspace data + color_managed_plugin.set_representation_colorspace( + rep, context, + colorspace=skeleton_data["colorspace"] + ) + + return representations + + +def create_instances_for_aov(instance, skeleton, aov_filter, + skip_integration_repre_list, + do_not_add_review): + """Create instances from AOVs. + + This will create new pyblish.api.Instances by going over expected + files defined on original instance. + + Args: + instance (pyblish.api.Instance): Original instance. + skeleton (dict): Skeleton instance data. + skip_integration_repre_list (list): skip + + Returns: + list of pyblish.api.Instance: Instances created from + expected files. + + """ + # we cannot attach AOVs to other subsets as we consider every + # AOV subset of its own. + + log = Logger.get_logger("farm_publishing") + additional_color_data = { + "renderProducts": instance.data["renderProducts"], + "colorspaceConfig": instance.data["colorspaceConfig"], + "display": instance.data["colorspaceDisplay"], + "view": instance.data["colorspaceView"] + } + + # Get templated path from absolute config path. 
+    anatomy = instance.context.data["anatomy"]
+    colorspace_template = instance.data["colorspaceConfig"]
+    try:
+        additional_color_data["colorspaceTemplate"] = remap_source(
+            colorspace_template, anatomy)
+    except ValueError as e:
+        log.warning(e)
+        additional_color_data["colorspaceTemplate"] = colorspace_template
+
+    # if there are subsets to attach to and more than one AOV,
+    # we cannot proceed.
+    if (
+        len(instance.data.get("attachTo", [])) > 0
+        and len(instance.data.get("expectedFiles")[0].keys()) != 1
+    ):
+        raise KnownPublishError(
+            "attaching multiple AOVs or renderable cameras to "
+            "subset is not supported yet.")
+
+    # create instances for every AOV we found in expected files.
+    # NOTE: this is done for every AOV and every render camera (if
+    #       there are multiple renderable cameras in scene)
+    return _create_instances_for_aov(
+        instance,
+        skeleton,
+        aov_filter,
+        additional_color_data,
+        skip_integration_repre_list,
+        do_not_add_review
+    )
+
+
+def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
+                              skip_integration_repre_list, do_not_add_review):
+    """Create instance for each AOV found.
+
+    This will create new instance for every AOV it can detect in expected
+    files list.
+
+    Args:
+        instance (pyblish.api.Instance): Original instance.
+        skeleton (dict): Skeleton data for instance (only what is
+            needed later by the collector).
+        additional_data (dict): Additional color management data.
+        skip_integration_repre_list (list): list of extensions that shouldn't
+            be published
+        do_not_add_review (bool): explicitly disable review
+
+
+    Returns:
+        list of instances
+
+    Throws:
+        ValueError:
+
+    """
+    # TODO: this needs to be taking the task from context or instance
+    task = os.environ["AVALON_TASK"]
+
+    anatomy = instance.context.data["anatomy"]
+    subset = skeleton["subset"]
+    cameras = instance.data.get("cameras", [])
+    exp_files = instance.data["expectedFiles"]
+    log = Logger.get_logger("farm_publishing")
+
+    instances = []
+    # go through AOVs in expected files
+    for aov, files in exp_files[0].items():
+        cols, rem = clique.assemble(files)
+        # we shouldn't have any remainders. And if we do, it should
+        # be just one item for single frame renders.
+        if not cols and rem:
+            if len(rem) != 1:
+                raise ValueError("Found multiple non related files "
+                                 "to render, don't know what to do "
+                                 "with them.")
+            col = rem[0]
+            ext = os.path.splitext(col)[1].lstrip(".")
+        else:
+            # but we really expect only one collection.
+            # Nothing else makes sense.
+            if len(cols) != 1:
+                raise ValueError("Only one image sequence type is expected.")  # noqa: E501
+            ext = cols[0].tail.lstrip(".")
+            col = list(cols[0])
+
+        # create subset name `familyTaskSubset_AOV`
+        group_name = 'render{}{}{}{}'.format(
+            task[0].upper(), task[1:],
+            subset[0].upper(), subset[1:])
+
+        # if there are multiple cameras, we need to add camera name
+        if isinstance(col, (list, tuple)):
+            cam = [c for c in cameras if c in col[0]]
+        else:
+            # in case of single frame
+            cam = [c for c in cameras if c in col]
+        # use the first matching camera name in the subset name
+        if cam:
+            if aov:
+                subset_name = '{}_{}_{}'.format(group_name, cam[0], aov)
+            else:
+                subset_name = '{}_{}'.format(group_name, cam[0])
+        else:
+            if aov:
+                subset_name = '{}_{}'.format(group_name, aov)
+            else:
+                subset_name = '{}'.format(group_name)
+
+        if isinstance(col, (list, tuple)):
+            staging = os.path.dirname(col[0])
+        else:
+            staging = os.path.dirname(col)
+
+        try:
+            staging = remap_source(staging, anatomy)
+        except ValueError as e:
+            log.warning(e)
+
+        log.info("Creating data for: {}".format(subset_name))
+
+        app = os.environ.get("AVALON_APP", "")
+
+        if isinstance(col, list):
+            render_file_name = os.path.basename(col[0])
+        else:
+            render_file_name = os.path.basename(col)
+        aov_patterns = aov_filter
+
+        preview = match_aov_pattern(app, aov_patterns, render_file_name)
+        # toggle preview on if multipart is on
+        if instance.data.get("multipartExr"):
+            log.debug("Adding preview tag because it's multipartExr")
+            preview = True
+
+        new_instance = deepcopy(skeleton)
+        new_instance["subset"] = subset_name
+        new_instance["subsetGroup"] = group_name
+
+        # explicitly disable review by user
+        preview = preview and not do_not_add_review
+        if preview:
+            new_instance["review"] = True
+
+        # create representation
+        if isinstance(col, (list, tuple)):
+            files = [os.path.basename(f) for f in col]
+        else:
+            files = os.path.basename(col)
+
+        # Copy render product "colorspace" data to representation.
+        colorspace = ""
+        products = additional_data["renderProducts"].layer_data.products
+        for product in products:
+            if product.productName == aov:
+                colorspace = product.colorspace
+                break
+
+        rep = {
+            "name": ext,
+            "ext": ext,
+            "files": files,
+            "frameStart": int(skeleton["frameStartHandle"]),
+            "frameEnd": int(skeleton["frameEndHandle"]),
+            # If expectedFile are absolute, we need only filenames
+            "stagingDir": staging,
+            "fps": new_instance.get("fps"),
+            "tags": ["review"] if preview else [],
+            "colorspaceData": {
+                "colorspace": colorspace,
+                "config": {
+                    "path": additional_data["colorspaceConfig"],
+                    "template": additional_data["colorspaceTemplate"]
+                },
+                "display": additional_data["display"],
+                "view": additional_data["view"]
+            }
+        }
+
+        # support conversion from tiled to scanline
+        if instance.data.get("convertToScanline"):
+            log.info("Adding scanline conversion.")
+            rep["tags"].append("toScanline")
+
+        # poor man exclusion
+        if ext in skip_integration_repre_list:
+            rep["tags"].append("delete")
+
+        if preview:
+            new_instance["families"] = _add_review_families(
+                new_instance["families"])
+
+        new_instance["representations"] = [rep]
+
+        # if extending frames from existing version, copy files from there
+        # into our destination directory
+        if new_instance.get("extendFrames", False):
+            copy_extend_frames(new_instance, rep)
+        instances.append(new_instance)
+    log.debug("instances:{}".format(instances))
+    return instances
+
+
+def get_resources(project_name, version, extension=None):
+    """Get the files from the specific version.
+
+    This will return all files from the representation.
+
+    Todo:
+        This is a really weird function and its use is
+        highly controversial. First, it will probably not work
+        at all in the final release of AYON; second, the logic isn't sound.
+        It should try to find representation matching the current one -
+        because it is used to pull out files from previous version to
+        be included in this one.
+
+    .. deprecated:: 3.15.5
+        This won't work in AYON and even the logic must be refactored.
+
+    Args:
+        project_name (str): Name of the project.
+        version (dict): Version document.
+        extension (str): extension used to filter
+            representations.
+
+    Returns:
+        list: List of files.
+
+    """
+    warnings.warn((
+        "This won't work in AYON and even "
+        "the logic must be refactored."), DeprecationWarning)
+    extensions = []
+    if extension:
+        extensions = [extension]
+
+    # there is a `context_filter` argument that won't probably work in
+    # final release of AYON. So we'll rather not use it.
+    repre_docs = list(get_representations(
+        project_name, version_ids=[version["_id"]]))
+
+    filtered = []
+    for doc in repre_docs:
+        if doc["context"]["ext"] in extensions:
+            filtered.append(doc)
+
+    representation = filtered[0]
+    directory = get_representation_path(representation)
+    print("Source: ", directory)
+    resources = sorted(
+        [
+            os.path.normpath(os.path.join(directory, file_name))
+            for file_name in os.listdir(directory)
+        ]
+    )
+
+    return resources
+
+
+def copy_extend_frames(instance, representation):
+    """Copy existing frames from latest version.
+
+    This will copy all existing frames from subset's latest version back
+    to render directory and rename them to what renderer is expecting.
+
+    Arguments:
+        instance (pyblish.plugin.Instance): instance to get required
+            data from
+        representation (dict): representation to operate on
+
+    """
+    import speedcopy
+
+    R_FRAME_NUMBER = re.compile(
+        r".+\.(?P<frame>[0-9]+)\..+")
+
+    log = Logger.get_logger("farm_publishing")
+    log.info("Preparing to copy ...")
+    start = instance.data.get("frameStart")
+    end = instance.data.get("frameEnd")
+    project_name = instance.context.data["project"]
+    anatomy = instance.context.data["anatomy"]  # type: Anatomy
+
+    # get latest version of subset
+    # this will stop if subset wasn't published yet
+
+    version = get_last_version_by_subset_name(
+        project_name,
+        instance.data.get("subset"),
+        asset_name=instance.data.get("asset")
+    )
+
+    # get its files based on extension
+    subset_resources = get_resources(
+        project_name, version, representation.get("ext")
+    )
+    r_col, _ = clique.assemble(subset_resources)
+
+    # if override remove all frames we are expecting to be rendered,
+    # so we'll copy only those missing from current render
+    if instance.data.get("overrideExistingFrame"):
+        for frame in range(start, end + 1):
+            if frame not in r_col.indexes:
+                continue
+            r_col.indexes.remove(frame)
+
+    # now we need to translate published names from representation
+    # back.
This is tricky, right now we'll just use the same naming
+    # and only switch frame numbers
+    resource_files = []
+    r_filename = os.path.basename(
+        representation.get("files")[0])  # first file
+    op = re.search(R_FRAME_NUMBER, r_filename)
+    assert op is not None, "padding string wasn't found"
+    pre = r_filename[:op.start("frame")]
+    post = r_filename[op.end("frame"):]
+    for frame in list(r_col):
+        fn = re.search(R_FRAME_NUMBER, frame)
+        # silencing linter as we need to compare to True, not to
+        # type
+        assert fn is not None, "padding string wasn't found"
+        # list of tuples (source, destination)
+        staging = representation.get("stagingDir")
+        staging = anatomy.fill_root(staging)
+        resource_files.append(
+            (frame, os.path.join(
+                staging, "{}{}{}".format(pre, fn["frame"], post)))
+        )
+
+    # test if destination dir exists and create it if not
+    output_dir = os.path.dirname(representation.get("files")[0])
+    if not os.path.isdir(output_dir):
+        os.makedirs(output_dir)
+
+    # copy files
+    for source in resource_files:
+        speedcopy.copy(source[0], source[1])
+        log.info("  > {}".format(source[1]))
+
+    log.info("Finished copying %i files" % len(resource_files))
+
+
+def attach_instances_to_subset(attach_to, instances):
+    """Attach instance to subset.
+
+    If we are attaching to other subsets, create copy of existing
+    instances, change data to match its subset and replace
+    existing instances with modified data.
+
+    Args:
+        attach_to (list): List of instances to attach to.
+        instances (list): List of instances to attach.
+
+    Returns:
+        list: List of attached instances.
+
+    """
+    new_instances = []
+    for attach_instance in attach_to:
+        for i in instances:
+            new_inst = copy.deepcopy(i)
+            new_inst["version"] = attach_instance.get("version")
+            new_inst["subset"] = attach_instance.get("subset")
+            new_inst["family"] = attach_instance.get("family")
+            new_inst["append"] = True
+            # don't set subsetGroup if we are attaching
+            new_inst.pop("subsetGroup")
+            new_instances.append(new_inst)
+    return new_instances
+
+
+def create_metadata_path(instance, anatomy):
+    ins_data = instance.data
+    # Ensure output dir exists
+    output_dir = ins_data.get(
+        "publishRenderMetadataFolder", ins_data["outputDir"])
+
+    log = Logger.get_logger("farm_publishing")
+
+    try:
+        if not os.path.isdir(output_dir):
+            os.makedirs(output_dir)
+    except OSError:
+        # directory is not available
+        log.warning("Path is unreachable: `{}`".format(output_dir))
+
+    metadata_filename = "{}_metadata.json".format(ins_data["subset"])
+
+    metadata_path = os.path.join(output_dir, metadata_filename)
+
+    # Convert output dir to `{root}/rest/of/path/...` with Anatomy
+    success, rootless_mtdt_p = anatomy.find_root_template_from_path(
+        metadata_path)
+    if not success:
+        # `rootless_path` is not set to `output_dir` if none of roots match
+        log.warning((
+            "Could not find root path for remapping \"{}\"."
+            " This may cause issues on farm.
+ ).format(output_dir)) + rootless_mtdt_p = metadata_path + + return metadata_path, rootless_mtdt_p diff --git a/openpype/pipeline/farm/pyblish_functions.pyi b/openpype/pipeline/farm/pyblish_functions.pyi new file mode 100644 index 00000000000..76f7c34dcd6 --- /dev/null +++ b/openpype/pipeline/farm/pyblish_functions.pyi @@ -0,0 +1,24 @@ +import pyblish.api +from openpype.pipeline import Anatomy +from typing import Tuple, Union, List + + +class TimeData: + start: int + end: int + fps: float | int + step: int + handle_start: int + handle_end: int + + def __init__(self, start: int, end: int, fps: float | int, step: int, handle_start: int, handle_end: int): + ... + ... + +def remap_source(source: str, anatomy: Anatomy): ... +def extend_frames(asset: str, subset: str, start: int, end: int) -> Tuple[int, int]: ... +def get_time_data_from_instance_or_context(instance: pyblish.api.Instance) -> TimeData: ... +def get_transferable_representations(instance: pyblish.api.Instance) -> list: ... +def create_skeleton_instance(instance: pyblish.api.Instance, families_transfer: list = ..., instance_transfer: dict = ...) -> dict: ... +def create_instances_for_aov(instance: pyblish.api.Instance, skeleton: dict, aov_filter: dict) -> List[pyblish.api.Instance]: ... +def attach_instances_to_subset(attach_to: list, instances: list) -> list: ... diff --git a/openpype/pipeline/farm/tools.py b/openpype/pipeline/farm/tools.py new file mode 100644 index 00000000000..f3acac7a323 --- /dev/null +++ b/openpype/pipeline/farm/tools.py @@ -0,0 +1,112 @@ +import os + + +def get_published_workfile_instance(context): + """Find workfile instance in context""" + for i in context: + is_workfile = ( + "workfile" in i.data.get("families", []) or + i.data["family"] == "workfile" + ) + if not is_workfile: + continue + + # test if there is instance of workfile waiting + # to be published. + if i.data["publish"] is not True: + continue + + return i + + +def from_published_scene(instance, replace_in_path=True): + """Switch work scene for published scene. + + If rendering/exporting from published scenes is enabled, this will + replace paths from working scene to published scene. + + Args: + instance (pyblish.api.Instance): Instance data to process. + replace_in_path (bool): if True, it will try to find + old scene name in path of expected files and replace it + with name of published scene. + + Returns: + str: Published scene path. + None: if no published scene is found. + + Note: + Published scene path is actually determined from project Anatomy + as at the time this plugin is running the scene can be still + un-published. + + """ + workfile_instance = get_published_workfile_instance(instance.context) + if workfile_instance is None: + return + + # determine published path from Anatomy. 
+    template_data = workfile_instance.data.get("anatomyData")
+    rep = workfile_instance.data["representations"][0]
+    template_data["representation"] = rep.get("name")
+    template_data["ext"] = rep.get("ext")
+    template_data["comment"] = None
+
+    anatomy = instance.context.data['anatomy']
+    template_obj = anatomy.templates_obj["publish"]["path"]
+    template_filled = template_obj.format_strict(template_data)
+    file_path = os.path.normpath(template_filled)
+
+    if not os.path.exists(file_path):
+        raise FileNotFoundError(
+            "Published scene does not exist: {}".format(file_path))
+
+    if not replace_in_path:
+        return file_path
+
+    # now we need to switch scene in expected files
+    # because token will now point to published
+    # scene file and that might differ from current one
+    def _clean_name(path):
+        return os.path.splitext(os.path.basename(path))[0]
+
+    new_scene = _clean_name(file_path)
+    orig_scene = _clean_name(instance.context.data["currentFile"])
+    expected_files = instance.data.get("expectedFiles")
+
+    if isinstance(expected_files[0], dict):
+        # we have aovs and we need to iterate over them
+        new_exp = {}
+        for aov, files in expected_files[0].items():
+            replaced_files = []
+            for f in files:
+                replaced_files.append(
+                    str(f).replace(orig_scene, new_scene)
+                )
+            new_exp[aov] = replaced_files
+        # [] might be too much here, TODO
+        instance.data["expectedFiles"] = [new_exp]
+    else:
+        new_exp = []
+        for f in expected_files:
+            new_exp.append(
+                str(f).replace(orig_scene, new_scene)
+            )
+        instance.data["expectedFiles"] = new_exp
+
+    metadata_folder = instance.data.get("publishRenderMetadataFolder")
+    if metadata_folder:
+        metadata_folder = metadata_folder.replace(orig_scene,
+                                                  new_scene)
+        instance.data["publishRenderMetadataFolder"] = metadata_folder
+
+    return file_path
+
+
+def iter_expected_files(exp):
+    if isinstance(exp[0], dict):
+        for _aov, files in exp[0].items():
+            for file in files:
+                yield file
+    else:
+        for file in exp:
+            yield file
diff --git a/openpype/pipeline/publish/lib.py b/openpype/pipeline/publish/lib.py
index 0961d792340..2768fe3fa1c 100644
--- a/openpype/pipeline/publish/lib.py
+++ b/openpype/pipeline/publish/lib.py
@@ -869,6 +869,110 @@ def _validate_transient_template(project_name, template_name, anatomy):
         ).format(template_name, project_name))
 
 
+def get_published_workfile_instance(context):
+    """Find workfile instance in context"""
+    for i in context:
+        is_workfile = (
+            "workfile" in i.data.get("families", []) or
+            i.data["family"] == "workfile"
+        )
+        if not is_workfile:
+            continue
+
+        # test if there is instance of workfile waiting
+        # to be published.
+        if not i.data.get("publish", True):
+            continue
+
+        return i
+
+
+def replace_with_published_scene_path(instance, replace_in_path=True):
+    """Switch work scene path for published scene.
+
+    If rendering/exporting from published scenes is enabled, this will
+    replace paths from working scene to published scene.
+    This only works if publish contains workfile instance!
+
+    Args:
+        instance (pyblish.api.Instance): Pyblish instance.
+        replace_in_path (bool): if True, it will try to find
+            old scene name in path of expected files and replace it
+            with name of published scene.
+
+    Returns:
+        str: Published scene path.
+        None: if no published scene is found.
+
+    Note:
+        Published scene path is actually determined from project Anatomy
+        as at the time this plugin runs, the scene may not be
+        published yet.
+    """
+    log = Logger.get_logger("published_workfile")
+    workfile_instance = get_published_workfile_instance(instance.context)
+    if workfile_instance is None:
+        return
+
+    # determine published path from Anatomy.
+    template_data = workfile_instance.data.get("anatomyData")
+    rep = workfile_instance.data["representations"][0]
+    template_data["representation"] = rep.get("name")
+    template_data["ext"] = rep.get("ext")
+    template_data["comment"] = None
+
+    anatomy = instance.context.data['anatomy']
+    anatomy_filled = anatomy.format(template_data)
+    template_filled = anatomy_filled["publish"]["path"]
+    file_path = os.path.normpath(template_filled)
+
+    log.info("Using published scene for render {}".format(file_path))
+
+    if not os.path.exists(file_path):
+        log.error("Published scene does not exist!")
+        raise FileNotFoundError(
+            "Published scene does not exist: {}".format(file_path))
+
+    if not replace_in_path:
+        return file_path
+
+    # now we need to switch scene in expected files
+    # because token will now point to published
+    # scene file and that might differ from current one
+    def _clean_name(path):
+        return os.path.splitext(os.path.basename(path))[0]
+
+    new_scene = _clean_name(file_path)
+    orig_scene = _clean_name(instance.context.data["currentFile"])
+    expected_files = instance.data.get("expectedFiles")
+
+    if isinstance(expected_files[0], dict):
+        # we have aovs and we need to iterate over them
+        new_exp = {}
+        for aov, files in expected_files[0].items():
+            replaced_files = []
+            for f in files:
+                replaced_files.append(
+                    str(f).replace(orig_scene, new_scene)
+                )
+            new_exp[aov] = replaced_files
+        # [] might be too much here, TODO
+        instance.data["expectedFiles"] = [new_exp]
+    else:
+        new_exp = []
+        for f in expected_files:
+            new_exp.append(
+                str(f).replace(orig_scene, new_scene)
+            )
+        instance.data["expectedFiles"] = new_exp
+
+    metadata_folder = instance.data.get("publishRenderMetadataFolder")
+    if metadata_folder:
+        metadata_folder = metadata_folder.replace(orig_scene,
+                                                  new_scene)
+        instance.data["publishRenderMetadataFolder"] = metadata_folder
+
+    log.info("Scene name was switched {} -> {}".format(
+        orig_scene, new_scene
+    ))
+
+    return file_path
+
+
 def add_repre_files_for_cleanup(instance, repre):
     """ Explicitly mark repre files to be deleted.
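
For context, a minimal sketch of how a submission plugin might consume the new `replace_with_published_scene_path` helper (the plugin class, label, and log message are illustrative only and not part of this changeset):

```python
import pyblish.api

from openpype.pipeline.publish.lib import replace_with_published_scene_path


class SubmitSceneExample(pyblish.api.InstancePlugin):
    """Hypothetical submitter that prefers the published scene."""

    order = pyblish.api.IntegratorOrder
    label = "Submit Scene (example)"

    def process(self, instance):
        # As a side effect, this rewrites instance.data["expectedFiles"]
        # and the render metadata folder to use the published scene name.
        scene_path = replace_with_published_scene_path(
            instance, replace_in_path=True)
        if scene_path is None:
            # No workfile instance is queued for publishing; fall back
            # to the current work scene.
            scene_path = instance.context.data["currentFile"]
        self.log.info("Submitting scene: {}".format(scene_path))
```
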
diff --git a/openpype/settings/defaults/project_settings/royalrender.json b/openpype/settings/defaults/project_settings/royalrender.json index b72fed84745..14e36058aaf 100644 --- a/openpype/settings/defaults/project_settings/royalrender.json +++ b/openpype/settings/defaults/project_settings/royalrender.json @@ -1,4 +1,7 @@ { + "rr_paths": [ + "default" + ], "publish": { "CollectSequencesFromJob": { "review": true diff --git a/openpype/settings/defaults/system_settings/modules.json b/openpype/settings/defaults/system_settings/modules.json index 1ddbfd2726c..f524f01d455 100644 --- a/openpype/settings/defaults/system_settings/modules.json +++ b/openpype/settings/defaults/system_settings/modules.json @@ -185,9 +185,9 @@ "enabled": false, "rr_paths": { "default": { - "windows": "", - "darwin": "", - "linux": "" + "windows": "C:\\RR8", + "darwin": "/Volumes/share/RR8", + "linux": "/mnt/studio/RR8" } } }, diff --git a/openpype/settings/entities/__init__.py b/openpype/settings/entities/__init__.py index 5e3a76094e4..00db2b33a7d 100644 --- a/openpype/settings/entities/__init__.py +++ b/openpype/settings/entities/__init__.py @@ -107,7 +107,8 @@ TaskTypeEnumEntity, DeadlineUrlEnumEntity, AnatomyTemplatesEnumEntity, - ShotgridUrlEnumEntity + ShotgridUrlEnumEntity, + RoyalRenderRootEnumEntity ) from .list_entity import ListEntity @@ -170,6 +171,7 @@ "TaskTypeEnumEntity", "DeadlineUrlEnumEntity", "ShotgridUrlEnumEntity", + "RoyalRenderRootEnumEntity", "AnatomyTemplatesEnumEntity", "ListEntity", diff --git a/openpype/settings/entities/enum_entity.py b/openpype/settings/entities/enum_entity.py index de3bd353eb9..36deb3176ef 100644 --- a/openpype/settings/entities/enum_entity.py +++ b/openpype/settings/entities/enum_entity.py @@ -1,3 +1,5 @@ +import abc +import six import copy from .input_entities import InputEntity from .exceptions import EntitySchemaError @@ -477,8 +479,9 @@ def set_override_state(self, *args, **kwargs): self.set(value_on_not_set) -class DeadlineUrlEnumEntity(BaseEnumEntity): - schema_types = ["deadline_url-enum"] +@six.add_metaclass(abc.ABCMeta) +class FarmRootEnumEntity(BaseEnumEntity): + schema_types = [] def _item_initialization(self): self.multiselection = self.schema_data.get("multiselection", True) @@ -496,22 +499,8 @@ def _item_initialization(self): # GUI attribute self.placeholder = self.schema_data.get("placeholder") - def _get_enum_values(self): - deadline_urls_entity = self.get_entity_from_path( - "system_settings/modules/deadline/deadline_urls" - ) - - valid_keys = set() - enum_items_list = [] - for server_name, url_entity in deadline_urls_entity.items(): - enum_items_list.append( - {server_name: "{}: {}".format(server_name, url_entity.value)} - ) - valid_keys.add(server_name) - return enum_items_list, valid_keys - def set_override_state(self, *args, **kwargs): - super(DeadlineUrlEnumEntity, self).set_override_state(*args, **kwargs) + super(FarmRootEnumEntity, self).set_override_state(*args, **kwargs) self.enum_items, self.valid_keys = self._get_enum_values() if self.multiselection: @@ -528,21 +517,49 @@ def set_override_state(self, *args, **kwargs): elif self._current_value not in self.valid_keys: self._current_value = tuple(self.valid_keys)[0] + @abc.abstractmethod + def _get_enum_values(self): + pass -class ShotgridUrlEnumEntity(BaseEnumEntity): - schema_types = ["shotgrid_url-enum"] - def _item_initialization(self): - self.multiselection = False +class DeadlineUrlEnumEntity(FarmRootEnumEntity): + schema_types = ["deadline_url-enum"] - self.enum_items = [] - 
diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_royalrender.json b/openpype/settings/entities/schemas/projects_schema/schema_project_royalrender.json
index cabb4747d5e..f4bf2f51ba6 100644
--- a/openpype/settings/entities/schemas/projects_schema/schema_project_royalrender.json
+++ b/openpype/settings/entities/schemas/projects_schema/schema_project_royalrender.json
@@ -5,6 +5,12 @@
     "collapsible": true,
     "is_file": true,
     "children": [
+        {
+            "type": "rr_root-enum",
+            "key": "rr_paths",
+            "label": "Royal Render Roots",
+            "multiselect": true
+        },
         {
             "type": "dict",
             "collapsible": true,
diff --git a/tests/unit/openpype/default_modules/royal_render/test_rr_job.py b/tests/unit/openpype/default_modules/royal_render/test_rr_job.py
deleted file mode 100644
index ab8b1bfd50b..00000000000
--- a/tests/unit/openpype/default_modules/royal_render/test_rr_job.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Test suite for User Settings."""
-# import pytest
-# from openpype.modules import ModulesManager
-
-
-def test_rr_job():
-    # manager = ModulesManager()
-    # rr_module = manager.modules_by_name["royalrender"]
-    ...
diff --git a/website/docs/admin_settings_system.md b/website/docs/admin_settings_system.md
index d61713ccd55..8abcefd24d6 100644
--- a/website/docs/admin_settings_system.md
+++ b/website/docs/admin_settings_system.md
@@ -102,6 +102,10 @@ workstation that should be submitting render jobs to muster via OpenPype.

 **`templates mapping`** - you can customize Muster templates to match your existing setup here.

+### Royal Render
+
+**`Royal Render Root Paths`** - multi-platform paths to the Royal Render installation.
+
 ### Clockify

 **`Workspace Name`** - name of the clockify workspace where you would like to be sending all the timelogs.
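The `rr_paths` defaults above give each named root one path per platform. A minimal sketch of how such a mapping resolves on the current machine, assuming the platform keys used in `modules.json` (`windows`, `darwin`, `linux`); the exact lookup OpenPype performs may differ:

```python
import platform

# Default roots as defined in modules.json above.
rr_paths = {
    "default": {
        "windows": "C:\\RR8",
        "darwin": "/Volumes/share/RR8",
        "linux": "/mnt/studio/RR8",
    }
}

# platform.system() returns "Windows", "Darwin" or "Linux"; lowercased it
# matches the keys used in the settings.
platform_key = platform.system().lower()
rr_root = rr_paths["default"][platform_key]
print("Royal Render root for this machine:", rr_root)
```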
diff --git a/website/docs/module_royalrender.md b/website/docs/module_royalrender.md
new file mode 100644
index 00000000000..2b75fbefef8
--- /dev/null
+++ b/website/docs/module_royalrender.md
@@ -0,0 +1,37 @@
+---
+id: module_royalrender
+title: Royal Render Administration
+sidebar_label: Royal Render
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+## Preparation
+
+For [Royal Render](https://www.royalrender.de/) support you need to set a few things up in both OpenPype and Royal Render itself:
+
+1. Deploy the OpenPype executable to all nodes of the Royal Render farm. See [Install & Run](admin_use.md).
+
+2. Enable the Royal Render module in the [OpenPype Admin Settings](admin_settings_system.md#royal-render).
+
+3. Point OpenPype to your Royal Render installation in the [OpenPype Admin Settings](admin_settings_system.md#royal-render).
+
+4. Install our custom plugin and scripts into your RR repository. It should be as simple as copying the content of `openpype/modules/royalrender/rr_root` to `path/to/your/royalrender/repository`, as illustrated in the sketch below.
+
+
+## Configuration
+
+The OpenPype integration for Royal Render consists of pointing RR to the location of the OpenPype executable. This is done by copying `_install_paths/OpenPype.cfg` to the RR root folder. The file ships with reasonable defaults, which can be changed in the file itself or adjusted per Render app in `rrControl`.
+
+
+## Debugging
+
+The current implementation builds an `.xml` job file dynamically and stores it in a temporary folder accessible to RR. In case of unforeseeable issues, it can help to take this OpenPype-built file and run it from the command line via the `*__rrServerConsole` executable.
+
+## Known issues
+
+Environment variables set in OpenPype are currently not propagated into render jobs on RR. For now it is the studio's responsibility to synchronize environment variables between OpenPype and all render nodes.
diff --git a/website/sidebars.js b/website/sidebars.js
index 267cc7f6d7e..b885181fb69 100644
--- a/website/sidebars.js
+++ b/website/sidebars.js
@@ -111,6 +111,7 @@ module.exports = {
                 "module_site_sync",
                 "module_deadline",
                 "module_muster",
+                "module_royalrender",
                 "module_clockify",
                 "module_slack"
             ],
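Finally, a hedged sketch of the repository copy described in step 4 of the new documentation above. The paths are placeholders to adjust for your checkout and RR repository, and `dirs_exist_ok` requires Python 3.8+; this is an illustration, not a supported OpenPype tool:

```python
import os
import shutil

# Placeholder paths; adjust to your checkout and your RR repository.
RR_ROOT_SRC = "openpype/modules/royalrender/rr_root"
RR_REPOSITORY = "/path/to/your/royalrender/repository"

for name in os.listdir(RR_ROOT_SRC):
    src = os.path.join(RR_ROOT_SRC, name)
    dst = os.path.join(RR_REPOSITORY, name)
    if os.path.isdir(src):
        # Merge directories into the existing RR repository (Python 3.8+).
        shutil.copytree(src, dst, dirs_exist_ok=True)
    else:
        shutil.copy2(src, dst)
print("Copied RR integration files to {}".format(RR_REPOSITORY))
```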