diff --git a/src/octopus/dispatcher/db/pulidb.py b/src/octopus/dispatcher/db/pulidb.py
index 3ee9df7e..475215a2 100644
--- a/src/octopus/dispatcher/db/pulidb.py
+++ b/src/octopus/dispatcher/db/pulidb.py
@@ -256,7 +256,7 @@ def createElements(self, elements):
                 conn = TaskNodes._connection
                 fields = {TaskNodes.q.id.fieldName: element.id,
                           TaskNodes.q.name.fieldName: element.name,
-                          TaskNodes.q.parentId.fieldName: element.parent.id if element.parent else None,
+                          TaskNodes.q.parentId.fieldName: element.parent.id,
                           TaskNodes.q.user.fieldName: element.user,
                           TaskNodes.q.priority.fieldName: element.priority,
                           TaskNodes.q.dispatchKey.fieldName: element.dispatchKey,
diff --git a/src/octopus/dispatcher/dispatcher.py b/src/octopus/dispatcher/dispatcher.py
index a4cd1f65..d386a2f7 100644
--- a/src/octopus/dispatcher/dispatcher.py
+++ b/src/octopus/dispatcher/dispatcher.py
@@ -459,13 +459,19 @@ def computeAssignments(self):
             # If we have dedicated render nodes for this poolShare
             if not any([poolShare.hasRenderNodesAvailable() for poolShare in entryPoint.poolShares.values()]):
                 continue
-
-            for (rn, com) in entryPoint.dispatchIterator(lambda: self.queue.qsize() > 0):
-                assignments.append((rn, com))
-                # increment the allocatedRN for the poolshare
-                entryPoint.mainPoolShare().allocatedRN += 1
-                # save the active poolshare of the rendernode
-                rn.currentpoolshare = entryPoint.mainPoolShare()
+
+            try:
+                for (rn, com) in entryPoint.dispatchIterator(lambda: self.queue.qsize() > 0):
+                    assignments.append((rn, com))
+                    # increment the allocatedRN for the poolshare
+                    entryPoint.mainPoolShare().allocatedRN += 1
+                    # save the active poolshare of the rendernode
+                    rn.currentpoolshare = entryPoint.mainPoolShare()
+            except NoRenderNodeAvailable:
+                pass
+            except NoLicenseAvailableForTask:
+                LOGGER.info("Missing license for node \"%s\" (other commands can start anyway)." % entryPoint.name)
+                pass

        assignmentDict = collections.defaultdict(list)
        for (rn, com) in assignments:
diff --git a/src/octopus/dispatcher/model/node.py b/src/octopus/dispatcher/model/node.py
index ecb4404b..593e7599 100644
--- a/src/octopus/dispatcher/model/node.py
+++ b/src/octopus/dispatcher/model/node.py
@@ -557,24 +557,21 @@ def reserve_rendernode(self, command, ep):
         if ep is None:
             ep = self
-        # PRA : can we have several poolShares for one job ?
-        # Exception when the pool of a job is changed and some command are still in computation
-        for poolShare in ep.poolShares.values():
-            # We need some RNs dedicated to this job available
-            if not poolShare.hasRenderNodesAvailable():
-                raise NoRenderNodeAvailable
-
-            # Sort the RNs according to their performance value
-            rnList = sorted(poolShare.pool.renderNodes, key=lambda rn: rn.performance, reverse=True)
+        for poolshare in [poolShare for poolShare in ep.poolShares.values() if poolShare.hasRenderNodesAvailable()]:
+            # First, sort the render nodes according to their performance value
+            rnList = sorted(poolshare.pool.renderNodes, key=lambda rn: rn.performance, reverse=True)

             for rendernode in rnList:
-                # Check for a suitable RN : ie available and respecting the constraints for this command
                 if rendernode.isAvailable() and rendernode.canRun(command):
-                    # Check licence
-                    if not rendernode.reserveLicense(command, self.dispatcher.licenseManager):
+                    if rendernode.reserveLicense(command, self.dispatcher.licenseManager):
+                        rendernode.addAssignment(command)
+                        return rendernode
+                    else:
                         raise NoLicenseAvailableForTask
-                    rendernode.addAssignment(command)
-                    return rendernode
-        # PRA : Can happen if there are available RNs but they don't match the command constraint
+
+        # Might not be necessary anymore because the first loop already filters on poolShare.hasRenderNodesAvailable(),
+        # but that filter does not account for the per-command checks done before assignment (RN.canRun()).
+        if not [poolShare for poolShare in ep.poolShares.values() if poolShare.hasRenderNodesAvailable()]:
+            raise NoRenderNodeAvailable
         return None

     def updateCompletionAndStatus(self):
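For context, here is a minimal, self-contained sketch of the control flow this patch introduces: reserve_rendernode now raises NoRenderNodeAvailable when no pool share of the job has render nodes available, and NoLicenseAvailableForTask when a suitable node is found but no license can be reserved, while computeAssignments catches both so a blocked entry point no longer aborts the whole assignment pass. The stub classes and the simplified pool-share dictionaries below are hypothetical stand-ins, not the real octopus.dispatcher model objects.

# Sketch only: stand-ins for the real dispatcher/model classes, kept just to
# show how the two exceptions travel between reservation and assignment.

class NoRenderNodeAvailable(Exception):
    """No pool share of the job currently has render nodes available."""

class NoLicenseAvailableForTask(Exception):
    """A suitable render node was found but the license could not be reserved."""


class StubRenderNode(object):
    def __init__(self, name, performance, free, has_license):
        self.name = name
        self.performance = performance
        self._free = free
        self._has_license = has_license

    def isAvailable(self):
        return self._free

    def canRun(self, command):
        return True

    def reserveLicense(self, command):
        return self._has_license


def reserve_rendernode(pool_shares, command):
    # Same shape as the patched reserve_rendernode: only look at pool shares that
    # still have render nodes available, best-performing nodes first.
    available_shares = [ps for ps in pool_shares if ps["hasRenderNodesAvailable"]]
    for share in available_shares:
        for rn in sorted(share["renderNodes"], key=lambda rn: rn.performance, reverse=True):
            if rn.isAvailable() and rn.canRun(command):
                if rn.reserveLicense(command):
                    return rn
                # A node matched but the license pool is exhausted: abort this job's scan.
                raise NoLicenseAvailableForTask
    if not available_shares:
        raise NoRenderNodeAvailable
    return None


def compute_assignments(entry_points, command="cmd"):
    # Same shape as the patched computeAssignments: a job that cannot be served
    # (no nodes, or no license) is skipped instead of stopping the whole pass.
    assignments = []
    for name, pool_shares in entry_points:
        try:
            rn = reserve_rendernode(pool_shares, command)
            if rn is not None:
                assignments.append((name, rn.name))
        except NoRenderNodeAvailable:
            pass
        except NoLicenseAvailableForTask:
            print('Missing license for node "%s" (other commands can start anyway).' % name)
    return assignments


if __name__ == "__main__":
    nodes_ok = [StubRenderNode("rn1", 1.0, True, True)]
    nodes_no_lic = [StubRenderNode("rn2", 2.0, True, False)]
    jobs = [
        ("jobA", [{"hasRenderNodesAvailable": True, "renderNodes": nodes_no_lic}]),
        ("jobB", [{"hasRenderNodesAvailable": True, "renderNodes": nodes_ok}]),
    ]
    # jobA is skipped for lack of a license; jobB is still assigned.
    print(compute_assignments(jobs))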