Dataset columns: id — string, 7 to 14 characters; text — string, 1 to 106k characters.
223551095_1062
protected Set<HostResponse> getHosts(Set<HostRequest> requests) throws AmbariException {
  Set<HostResponse> response = new HashSet<>();
  AmbariManagementController controller = getManagementController();
  for (HostRequest request : requests) {
    try {
      response.addAll(getHosts(controller, request, osFamily));
    } catch (HostNotFoundException e) {
      if (requests.size() == 1) {
        // only throw exception if 1 request.
        // there will be > 1 request in case of OR predicate
        throw e;
      }
    }
  }
  return response;
}
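The catch above only rethrows HostNotFoundException when a single HostRequest was supplied; with multiple requests (an OR predicate) a miss on one host is silently skipped. A minimal standalone sketch of that pattern, using hypothetical stand-ins rather than the real Ambari types:

import java.util.*;

// Illustrative sketch only: SimpleHostNotFoundException and the HOSTS map are
// hypothetical stand-ins, not Ambari classes.
public class OrPredicateLookupSketch {
  static class SimpleHostNotFoundException extends Exception {
    SimpleHostNotFoundException(String name) { super("Host not found: " + name); }
  }

  static final Map<String, String> HOSTS = Map.of("h1", "h1.example.com");

  static String lookup(String name) throws SimpleHostNotFoundException {
    String fqdn = HOSTS.get(name);
    if (fqdn == null) throw new SimpleHostNotFoundException(name);
    return fqdn;
  }

  // Mirrors getHosts(): rethrow only when exactly one request was made.
  static Set<String> getHosts(Set<String> requests) throws SimpleHostNotFoundException {
    Set<String> response = new HashSet<>();
    for (String name : requests) {
      try {
        response.add(lookup(name));
      } catch (SimpleHostNotFoundException e) {
        if (requests.size() == 1) {
          throw e; // single request: surface the miss
        }
        // multiple requests (OR predicate): skip the missing host
      }
    }
    return response;
  }

  public static void main(String[] args) throws Exception {
    System.out.println(getHosts(new HashSet<>(Arrays.asList("h1", "missing")))); // [h1.example.com]
    // getHosts(Set.of("missing")) would throw SimpleHostNotFoundException
  }
}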
223551095_1067
@Override
public RequestStatus createResources(Request request)
    throws SystemException, UnsupportedPropertyException, NoSuchParentResourceException, ResourceAlreadyExistsException {
  if (request.getProperties().size() > 1) {
    throw new UnsupportedOperationException("Multiple actions/commands cannot be executed at the same time.");
  }

  final ExecuteActionRequest actionRequest = getActionRequest(request);
  final Map<String, String> requestInfoProperties = request.getRequestInfoProperties();

  return getRequestStatus(createResources(new Command<RequestStatusResponse>() {
    @Override
    public RequestStatusResponse invoke() throws AmbariException, AuthorizationException {
      String clusterName = actionRequest.getClusterName();

      ResourceType resourceType;
      Long resourceId;

      if (StringUtils.isEmpty(clusterName)) {
        resourceType = ResourceType.AMBARI;
        resourceId = null;
      } else {
        resourceType = ResourceType.CLUSTER;
        resourceId = getClusterResourceId(clusterName);
      }

      if (actionRequest.isCommand()) {
        String commandName = actionRequest.getCommandName();
        if (StringUtils.isEmpty(commandName)) {
          commandName = "_unknown_command_";
        }

        if (commandName.endsWith("_SERVICE_CHECK")) {
          if (!AuthorizationHelper.isAuthorized(resourceType, resourceId, RoleAuthorization.SERVICE_RUN_SERVICE_CHECK)) {
            throw new AuthorizationException("The authenticated user is not authorized to execute service checks.");
          }
        } else if (commandName.equals("DECOMMISSION")) {
          if (!AuthorizationHelper.isAuthorized(resourceType, resourceId, RoleAuthorization.SERVICE_DECOMMISSION_RECOMMISSION)) {
            throw new AuthorizationException("The authenticated user is not authorized to decommission services.");
          }
        } else {
          if (!AuthorizationHelper.isAuthorized(resourceType, resourceId, RoleAuthorization.SERVICE_RUN_CUSTOM_COMMAND)) {
            throw new AuthorizationException(String.format(
                "The authenticated user is not authorized to execute the command, %s.", commandName));
          }
        }
      } else {
        String actionName = actionRequest.getActionName();
        if (StringUtils.isEmpty(actionName)) {
          actionName = "_unknown_action_";
        }

        if (actionName.contains("SERVICE_CHECK")) {
          if (!AuthorizationHelper.isAuthorized(resourceType, resourceId, RoleAuthorization.SERVICE_RUN_SERVICE_CHECK)) {
            throw new AuthorizationException("The authenticated user is not authorized to execute service checks.");
          }
        } else {
          // A custom action has been requested
          ActionDefinition actionDefinition = (actionName == null)
              ? null
              : getManagementController().getAmbariMetaInfo().getActionDefinition(actionName);
          Set<RoleAuthorization> permissions = (actionDefinition == null)
              ? null
              : actionDefinition.getPermissions();

          // here goes ResourceType handling for some specific custom actions
          ResourceType customActionResourceType = resourceType;
          if (actionName.contains("check_host")) { // check_host custom action
            customActionResourceType = ResourceType.CLUSTER;
          }

          if (!AuthorizationHelper.isAuthorized(customActionResourceType, resourceId, permissions)) {
            throw new AuthorizationException(String.format(
                "The authenticated user is not authorized to execute the action %s.", actionName));
          }
        }
      }

      return getManagementController().createAction(actionRequest, requestInfoProperties);
    }
  }));
}
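The command branch above routes a command name to a required authorization: service checks, decommission, and a custom-command catch-all. A hedged sketch of that routing; Permission is a hypothetical enum, not Ambari's RoleAuthorization:

// Sketch only: the name rules mirror the provider's checks, the enum is hypothetical.
public class CommandPermissionSketch {
  enum Permission { RUN_SERVICE_CHECK, DECOMMISSION_RECOMMISSION, RUN_CUSTOM_COMMAND }

  static Permission requiredPermission(String commandName) {
    if (commandName == null || commandName.isEmpty()) {
      commandName = "_unknown_command_"; // same fallback as the provider
    }
    if (commandName.endsWith("_SERVICE_CHECK")) {
      return Permission.RUN_SERVICE_CHECK;
    }
    if (commandName.equals("DECOMMISSION")) {
      return Permission.DECOMMISSION_RECOMMISSION;
    }
    return Permission.RUN_CUSTOM_COMMAND; // everything else counts as a custom command
  }

  public static void main(String[] args) {
    System.out.println(requiredPermission("HDFS_SERVICE_CHECK")); // RUN_SERVICE_CHECK
    System.out.println(requiredPermission("DECOMMISSION"));       // DECOMMISSION_RECOMMISSION
    System.out.println(requiredPermission("RESTART"));            // RUN_CUSTOM_COMMAND
  }
}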
223551095_1068
@Override
public Set<Resource> getResources(Request request, Predicate predicate)
    throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
  Set<String> requestedIds = getRequestPropertyIds(request, predicate);
  Set<Resource> resources = new HashSet<>();

  String maxResultsRaw = request.getRequestInfoProperties().get(BaseRequest.PAGE_SIZE_PROPERTY_KEY);
  String ascOrderRaw = request.getRequestInfoProperties().get(BaseRequest.ASC_ORDER_PROPERTY_KEY);
  Integer maxResults = (maxResultsRaw == null ? null : Integer.parseInt(maxResultsRaw));
  Boolean ascOrder = (ascOrderRaw == null ? null : Boolean.parseBoolean(ascOrderRaw));

  if (null == predicate) {
    // the no-arg call to /requests is here
    authorizeGetResources(null);
    resources.addAll(getRequestResources(null, null, null, maxResults, ascOrder, requestedIds));
  } else {
    // process /requests with a predicate
    // process /clusters/[cluster]/requests
    // process /clusters/[cluster]/requests with a predicate
    for (Map<String, Object> properties : getPropertyMaps(predicate)) {
      String clusterName = (String) properties.get(REQUEST_CLUSTER_NAME_PROPERTY_ID);

      Long requestId = null;
      if (properties.get(REQUEST_ID_PROPERTY_ID) != null) {
        requestId = Long.valueOf((String) properties.get(REQUEST_ID_PROPERTY_ID));
      }

      String requestStatus = null;
      if (properties.get(REQUEST_STATUS_PROPERTY_ID) != null) {
        requestStatus = (String) properties.get(REQUEST_STATUS_PROPERTY_ID);
      }

      authorizeGetResources(clusterName);
      resources.addAll(getRequestResources(clusterName, requestId, requestStatus, maxResults, ascOrder, requestedIds));
    }
  }

  return resources;
}
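The paging inputs above arrive as raw strings in the request-info map and are parsed into nullable wrappers so "absent" stays distinguishable from any parsed value. A standalone sketch of that null-safe parse; the key names here are hypothetical, not Ambari's BaseRequest constants:

import java.util.HashMap;
import java.util.Map;

// Sketch only: "page_size" and "asc_order" are illustrative key names.
public class OptionalParamSketch {
  static Integer parseMaxResults(Map<String, String> requestInfo) {
    String raw = requestInfo.get("page_size");
    return (raw == null) ? null : Integer.parseInt(raw); // null means "no limit requested"
  }

  static Boolean parseAscOrder(Map<String, String> requestInfo) {
    String raw = requestInfo.get("asc_order");
    return (raw == null) ? null : Boolean.parseBoolean(raw); // null means "no ordering requested"
  }

  public static void main(String[] args) {
    Map<String, String> requestInfo = new HashMap<>();
    requestInfo.put("page_size", "25");
    System.out.println(parseMaxResults(requestInfo)); // 25
    System.out.println(parseAscOrder(requestInfo));   // null: caller can apply a default
  }
}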
223551095_1078
@Override
public RequestStatus updateResources(Request requestInfo, Predicate predicate)
    throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
  AmbariManagementController amc = getManagementController();
  final Set<RequestRequest> requests = new HashSet<>();

  Iterator<Map<String, Object>> iterator = requestInfo.getProperties().iterator();
  if (iterator.hasNext()) {
    for (Map<String, Object> propertyMap : getPropertyMaps(iterator.next(), predicate)) {
      requests.add(getRequest(propertyMap));
    }
  }

  // Validate
  List<org.apache.ambari.server.actionmanager.Request> targets = new ArrayList<>();
  for (RequestRequest updateRequest : requests) {
    ActionManager actionManager = amc.getActionManager();
    List<org.apache.ambari.server.actionmanager.Request> internalRequests =
        actionManager.getRequests(Collections.singletonList(updateRequest.getRequestId()));
    if (internalRequests.size() == 0) {
      throw new IllegalArgumentException(
          String.format("Request %s does not exist", updateRequest.getRequestId()));
    }
    // There should be only one request with this id (or no request at all)
    org.apache.ambari.server.actionmanager.Request internalRequest = internalRequests.get(0);

    if (updateRequest.isRemovePendingHostRequests()) {
      if (internalRequest instanceof LogicalRequest) {
        targets.add(internalRequest);
      } else {
        throw new IllegalArgumentException("Request with id: " + internalRequest.getRequestId() + " is not a Logical Request.");
      }
    } else {
      // Validate update request (check constraints on state value and presence of abort reason)
      if (updateRequest.getAbortReason() == null || updateRequest.getAbortReason().isEmpty()) {
        throw new IllegalArgumentException("Abort reason cannot be empty.");
      }
      if (updateRequest.getStatus() != HostRoleStatus.ABORTED) {
        throw new IllegalArgumentException(
            String.format("%s is wrong value. The only allowed value "
                + "for updating request status is ABORTED", updateRequest.getStatus()));
      }

      HostRoleStatus internalRequestStatus =
          CalculatedStatus.statusFromStages(internalRequest.getStages()).getStatus();
      if (internalRequestStatus.isCompletedState()) {
        // Ignore updates to completed requests to avoid throwing exception on race condition
      } else {
        // Validation passed
        targets.add(internalRequest);
      }
    }
  }

  // Perform update
  Iterator<RequestRequest> reqIterator = requests.iterator();
  for (org.apache.ambari.server.actionmanager.Request target : targets) {
    if (target instanceof LogicalRequest) {
      topologyManager.removePendingHostRequests(target.getClusterName(), target.getRequestId());
    } else {
      String reason = reqIterator.next().getAbortReason();
      amc.getActionManager().cancelRequest(target.getRequestId(), reason);
    }
  }
  return getRequestStatus(null);
}
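The only mutation this provider accepts is an abort: the new status must be ABORTED with a non-empty reason, while updates to already-completed requests are silently ignored. A condensed sketch of those rules; Status and isCompleted() are hypothetical stand-ins for Ambari's HostRoleStatus and CalculatedStatus machinery:

// Sketch only: the enum and helper are illustrative, not Ambari types.
public class AbortValidationSketch {
  enum Status { PENDING, IN_PROGRESS, ABORTED, COMPLETED, FAILED }

  /** Returns true when the request should be cancelled, false when the update is a no-op. */
  static boolean validateAbort(Status requestedStatus, String abortReason, Status currentStatus) {
    if (abortReason == null || abortReason.isEmpty()) {
      throw new IllegalArgumentException("Abort reason cannot be empty.");
    }
    if (requestedStatus != Status.ABORTED) {
      throw new IllegalArgumentException(
          requestedStatus + " is wrong value. The only allowed value for updating request status is ABORTED");
    }
    // completed requests are left untouched instead of failing on a race condition
    return !isCompleted(currentStatus);
  }

  static boolean isCompleted(Status s) {
    return s == Status.COMPLETED || s == Status.FAILED || s == Status.ABORTED;
  }

  public static void main(String[] args) {
    System.out.println(validateAbort(Status.ABORTED, "user cancelled", Status.IN_PROGRESS)); // true
    System.out.println(validateAbort(Status.ABORTED, "user cancelled", Status.COMPLETED));   // false
  }
}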
223551095_1079
@Override
public RequestStatus deleteResources(Request request, Predicate predicate)
    throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
  throw new UnsupportedOperationException("Not currently supported.");
}
223551095_1082
@Override
public Map<String, RootServiceComponentConfiguration> getComponentConfigurations(String categoryName) {
  Map<String, RootServiceComponentConfiguration> configurations = null;

  List<AmbariConfigurationEntity> entities = (categoryName == null)
      ? ambariConfigurationDAO.findAll()
      : ambariConfigurationDAO.findByCategory(categoryName);

  if (entities != null) {
    configurations = new HashMap<>();
    for (AmbariConfigurationEntity entity : entities) {
      String category = entity.getCategoryName();
      RootServiceComponentConfiguration configuration = configurations.get(category);
      if (configuration == null) {
        configuration = new RootServiceComponentConfiguration();
        configurations.put(category, configuration);
      }
      configuration.addProperty(entity.getPropertyName(), entity.getPropertyValue());
      if (categoryName != null) {
        configuration.addPropertyType(entity.getPropertyName(),
            AmbariServerConfigurationUtils.getConfigurationPropertyTypeName(categoryName, entity.getPropertyName()));
      }
    }
  }

  return configurations;
}
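The get-or-create bucketing above is the classic group-by pattern over flat configuration rows. A hedged sketch of the same grouping with plain JDK streams; ConfigRow is a hypothetical stand-in for AmbariConfigurationEntity:

import java.util.*;
import java.util.stream.Collectors;

// Sketch only: the record and fixture values are illustrative.
public class GroupByCategorySketch {
  record ConfigRow(String category, String name, String value) {}

  public static void main(String[] args) {
    List<ConfigRow> rows = List.of(
        new ConfigRow("ldap-configuration", "ambari.ldap.authentication.enabled", "true"),
        new ConfigRow("sso-configuration", "ambari.sso.enabled", "false"));

    // Equivalent of the get-or-create loop: one map entry per category,
    // each holding that category's property name/value pairs.
    Map<String, Map<String, String>> byCategory = rows.stream()
        .collect(Collectors.groupingBy(
            ConfigRow::category,
            Collectors.toMap(ConfigRow::name, ConfigRow::value)));

    System.out.println(byCategory);
  }
}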
223551095_1083
@Override
public void removeComponentConfiguration(String categoryName) {
  if (null == categoryName) {
    LOGGER.debug("No resource id provided in the request");
  } else {
    LOGGER.debug("Deleting Ambari configuration with id: {}", categoryName);
    if (ambariConfigurationDAO.removeByCategory(categoryName) > 0) {
      publisher.publish(new AmbariConfigurationChangedEvent(categoryName));
    }
  }
}
223551095_1084
@Override
public void updateComponentCategory(String categoryName, Map<String, String> properties,
    boolean removePropertiesIfNotSpecified) throws AmbariException {
  boolean toBePublished = false;
  final Iterator<Map.Entry<String, String>> propertiesIterator = properties.entrySet().iterator();
  while (propertiesIterator.hasNext()) {
    Map.Entry<String, String> property = propertiesIterator.next();

    // Ensure the incoming property is valid
    AmbariServerConfigurationKey key = AmbariServerConfigurationUtils.getConfigurationKey(categoryName, property.getKey());
    if (key == null) {
      throw new IllegalArgumentException(
          String.format("Invalid Ambari server configuration key: %s:%s", categoryName, property.getKey()));
    }

    if (AmbariServerConfigurationUtils.isPassword(key)) {
      final String passwordFileOrCredentialStoreAlias =
          fetchPasswordFileNameOrCredentialStoreAlias(categoryName, property.getKey());
      // if blank -> this is the first time setup; we simply need to store the alias/file name
      if (StringUtils.isNotBlank(passwordFileOrCredentialStoreAlias)) {
        if (updatePasswordIfNeeded(categoryName, property.getKey(), property.getValue())) {
          toBePublished = true;
        }
        // we do not need to change any PASSWORD type configuration going forward
        propertiesIterator.remove();
      }
    }
  }

  if (!properties.isEmpty()) {
    toBePublished = ambariConfigurationDAO.reconcileCategory(categoryName, properties, removePropertiesIfNotSpecified)
        || toBePublished;
  }

  if (toBePublished) {
    // notify subscribers about the configuration changes
    publisher.publish(new AmbariConfigurationChangedEvent(categoryName));
  }
}
223551095_1085
public Map<String, Map<String, String>> getConfigurations() {
  Map<String, Map<String, String>> configurations = new HashMap<>();

  List<AmbariConfigurationEntity> entities = ambariConfigurationDAO.findAll();
  if (entities != null) {
    for (AmbariConfigurationEntity entity : entities) {
      String category = entity.getCategoryName();
      Map<String, String> configuration = configurations.computeIfAbsent(category, k -> new HashMap<>());
      configuration.put(entity.getPropertyName(), entity.getPropertyValue());
    }
  }

  return configurations;
}
223551095_1086
public Map<String, String> getConfigurationProperties(String categoryName) {
  Map<String, String> properties = null;

  List<AmbariConfigurationEntity> entities = ambariConfigurationDAO.findByCategory(categoryName);
  if (entities != null) {
    properties = new HashMap<>();
    for (AmbariConfigurationEntity entity : entities) {
      properties.put(entity.getPropertyName(), entity.getPropertyValue());
    }
  }

  return properties;
}
223551095_1087
@Override
public RequestStatus createResources(final Request request)
    throws SystemException, UnsupportedPropertyException, ResourceAlreadyExistsException, NoSuchParentResourceException {
  StackAdvisorRequest validationRequest = prepareStackAdvisorRequest(request);

  final ValidationResponse response;
  try {
    response = saHelper.validate(validationRequest);
  } catch (StackAdvisorRequestException e) {
    LOG.warn("Error occurred during validation", e);
    throw new IllegalArgumentException(e.getMessage(), e);
  } catch (StackAdvisorException e) {
    LOG.warn("Error occurred during validation", e);
    throw new SystemException(e.getMessage(), e);
  }

  Resource validation = createResources(new Command<Resource>() {
    @Override
    public Resource invoke() throws AmbariException {
      Resource resource = new ResourceImpl(Resource.Type.Validation);

      setResourceProperty(resource, VALIDATION_ID_PROPERTY_ID, response.getId(), getPropertyIds());
      setResourceProperty(resource, STACK_NAME_PROPERTY_ID, response.getVersion().getStackName(), getPropertyIds());
      setResourceProperty(resource, STACK_VERSION_PROPERTY_ID, response.getVersion().getStackVersion(), getPropertyIds());

      List<Map<String, Object>> listItemProps = new ArrayList<>();

      Set<ValidationItem> items = response.getItems();
      for (ValidationItem item : items) {
        Map<String, Object> mapItemProps = new HashMap<>();
        mapItemProps.put(TYPE_PROPERTY_ID, item.getType());
        mapItemProps.put(LEVE_PROPERTY_ID, item.getLevel());
        mapItemProps.put(MESSAGE_PROPERTY_ID, item.getMessage());

        if (item.getComponentName() != null) {
          mapItemProps.put(COMPONENT_NAME_PROPERTY_ID, item.getComponentName());
        }
        if (item.getHost() != null) {
          mapItemProps.put(HOST_PROPERTY_ID, item.getHost());
        }
        if (item.getConfigType() != null) {
          mapItemProps.put(CONFIG_TYPE_PROPERTY_ID, item.getConfigType());
          mapItemProps.put(CONFIG_NAME_PROPERTY_ID, item.getConfigName());
        }
        listItemProps.add(mapItemProps);
      }
      setResourceProperty(resource, ITEMS_PROPERTY_ID, listItemProps, getPropertyIds());

      return resource;
    }
  });

  notifyCreate(Resource.Type.Validation, request);

  Set<Resource> resources = new HashSet<>(Arrays.asList(validation));
  return new RequestStatusImpl(null, resources);
}
223551095_1088
public Collection<String> getAllConfigurationTypes(String service) {
  return serviceConfigurations.get(service).keySet();
}
223551095_1089
public String getServiceForConfigType(String config) {
  for (Map.Entry<String, Map<String, Map<String, ConfigProperty>>> entry : serviceConfigurations.entrySet()) {
    Map<String, Map<String, ConfigProperty>> typeMap = entry.getValue();
    String serviceName = entry.getKey();
    if (typeMap.containsKey(config) && !getExcludedConfigurationTypes(serviceName).contains(config)) {
      return serviceName;
    }
  }
  throw new IllegalArgumentException(
      "Specified configuration type is not associated with any service: " + config);
}
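getServiceForConfigType is a reverse lookup over the service-to-config-types map, with a per-service exclusion list consulted before a match counts. A minimal sketch of the same shape; the fixture maps are hypothetical, not loaded from a real stack:

import java.util.*;

// Sketch only: illustrative fixtures standing in for stack metadata.
public class ReverseLookupSketch {
  static final Map<String, Set<String>> SERVICE_CONFIG_TYPES = Map.of(
      "HDFS", Set.of("hdfs-site", "core-site"),
      "YARN", Set.of("yarn-site", "core-site"));
  static final Map<String, Set<String>> EXCLUDED_TYPES = Map.of(
      "HDFS", Set.of(),
      "YARN", Set.of("core-site")); // YARN references core-site but does not own it

  static String serviceForConfigType(String config) {
    for (Map.Entry<String, Set<String>> entry : SERVICE_CONFIG_TYPES.entrySet()) {
      String service = entry.getKey();
      if (entry.getValue().contains(config)
          && !EXCLUDED_TYPES.getOrDefault(service, Set.of()).contains(config)) {
        return service;
      }
    }
    throw new IllegalArgumentException("Specified configuration type is not associated with any service: " + config);
  }

  public static void main(String[] args) {
    System.out.println(serviceForConfigType("core-site")); // HDFS (YARN excludes it)
  }
}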
223551095_1090
@Override
public Set<Resource> getResources(Request event, Predicate predicate)
    throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
  Set<Resource> resources = new HashSet<>();
  Set<String> requestedIds = getRequestPropertyIds(event, predicate);

  for (LdapSyncEventEntity eventEntity : events.values()) {
    resources.add(toResource(eventEntity, requestedIds));
  }
  return resources;
}
223551095_1094
@Override
public Set<Resource> getResources(Request request, Predicate predicate)
    throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
  final Set<Resource> resources = new HashSet<>();
  final Set<String> requestedIds = getRequestPropertyIds(request, predicate);
  final Set<Map<String, Object>> propertyMaps = getPropertyMaps(predicate);

  String author = AuthorizationHelper.getAuthenticatedName();

  List<WidgetEntity> requestedEntities = new ArrayList<>();

  for (Map<String, Object> propertyMap : propertyMaps) {
    if (propertyMap.get(WIDGET_ID_PROPERTY_ID) != null) {
      final Long id;
      try {
        id = Long.parseLong(propertyMap.get(WIDGET_ID_PROPERTY_ID).toString());
      } catch (Exception ex) {
        throw new SystemException("WidgetLayout should have numerical id");
      }
      final WidgetEntity entity = widgetDAO.findById(id);
      if (entity == null) {
        throw new NoSuchResourceException("WidgetLayout with id " + id + " does not exist");
      }
      if (!(entity.getAuthor().equals(author) || entity.getScope().equals(SCOPE.CLUSTER.name()))) {
        throw new AccessDeniedException("User must be author of the widget or widget must have cluster scope");
      }
      requestedEntities.add(entity);
    } else {
      requestedEntities.addAll(widgetDAO.findByScopeOrAuthor(author, SCOPE.CLUSTER.name()));
    }
  }

  for (WidgetEntity entity : requestedEntities) {
    final Resource resource = new ResourceImpl(Type.Widget);
    resource.setProperty(WIDGET_ID_PROPERTY_ID, entity.getId());
    resource.setProperty(WIDGET_WIDGET_NAME_PROPERTY_ID, entity.getWidgetName());
    resource.setProperty(WIDGET_WIDGET_TYPE_PROPERTY_ID, entity.getWidgetType());
    setResourceProperty(resource, WIDGET_METRICS_PROPERTY_ID, entity.getMetrics(), requestedIds);
    setResourceProperty(resource, WIDGET_TIME_CREATED_PROPERTY_ID, entity.getTimeCreated(), requestedIds);
    resource.setProperty(WIDGET_AUTHOR_PROPERTY_ID, entity.getAuthor());
    setResourceProperty(resource, WIDGET_DESCRIPTION_PROPERTY_ID, entity.getDescription(), requestedIds);
    resource.setProperty(WIDGET_SCOPE_PROPERTY_ID, entity.getScope());
    setResourceProperty(resource, WIDGET_VALUES_PROPERTY_ID, entity.getWidgetValues(), requestedIds);
    setResourceProperty(resource, WIDGET_PROPERTIES_PROPERTY_ID, entity.getProperties(), requestedIds);
    setResourceProperty(resource, WIDGET_TAG_PROPERTY_ID, entity.getTag(), requestedIds);

    String clusterName = null;
    try {
      clusterName = getManagementController().getClusters().getClusterById(entity.getClusterId()).getClusterName();
    } catch (AmbariException e) {
      throw new SystemException(e.getMessage());
    }
    setResourceProperty(resource, WIDGET_CLUSTER_NAME_PROPERTY_ID, clusterName, requestedIds);

    resources.add(resource);
  }

  return resources;
}
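Single-widget reads above are gated by a two-part predicate: the caller must be the widget's author, or the widget must be CLUSTER-scoped and therefore shared. A sketch of that check with plain fields instead of WidgetEntity:

// Sketch only: mirrors the access rule, not Ambari's types.
public class WidgetAccessSketch {
  static boolean canRead(String requester, String widgetAuthor, String widgetScope) {
    // the author always sees their own widgets; CLUSTER scope is visible to everyone
    return requester.equals(widgetAuthor) || "CLUSTER".equals(widgetScope);
  }

  public static void main(String[] args) {
    System.out.println(canRead("alice", "alice", "USER"));   // true: own widget
    System.out.println(canRead("bob", "alice", "CLUSTER"));  // true: shared scope
    System.out.println(canRead("bob", "alice", "USER"));     // false: denied
  }
}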
223551095_1097
@Override
public RequestStatus createResources(final Request request)
    throws SystemException, UnsupportedPropertyException, ResourceAlreadyExistsException, NoSuchParentResourceException {
  Set<Resource> associatedResources = new HashSet<>();

  for (final Map<String, Object> properties : request.getProperties()) {
    WidgetEntity widgetEntity = createResources(new Command<WidgetEntity>() {
      @Override
      public WidgetEntity invoke() throws AmbariException {
        final String[] requiredProperties = {
            WIDGET_CLUSTER_NAME_PROPERTY_ID,
            WIDGET_WIDGET_NAME_PROPERTY_ID,
            WIDGET_WIDGET_TYPE_PROPERTY_ID,
            WIDGET_SCOPE_PROPERTY_ID
        };
        for (String propertyName : requiredProperties) {
          if (properties.get(propertyName) == null) {
            throw new AmbariException("Property " + propertyName + " should be provided");
          }
        }

        final WidgetEntity entity = new WidgetEntity();
        String clusterName = properties.get(WIDGET_CLUSTER_NAME_PROPERTY_ID).toString();
        String scope = properties.get(WIDGET_SCOPE_PROPERTY_ID).toString();

        if (!isScopeAllowedForUser(scope, clusterName)) {
          throw new AccessDeniedException("Only cluster operator can create widgets with cluster scope");
        }

        entity.setWidgetName(properties.get(WIDGET_WIDGET_NAME_PROPERTY_ID).toString());
        entity.setWidgetType(properties.get(WIDGET_WIDGET_TYPE_PROPERTY_ID).toString());
        entity.setClusterId(getManagementController().getClusters().getCluster(clusterName).getClusterId());
        entity.setScope(scope);

        String metrics = (properties.containsKey(WIDGET_METRICS_PROPERTY_ID))
            ? gson.toJson(properties.get(WIDGET_METRICS_PROPERTY_ID)) : null;
        entity.setMetrics(metrics);

        entity.setAuthor(getAuthorName(properties));

        String description = (properties.containsKey(WIDGET_DESCRIPTION_PROPERTY_ID))
            ? properties.get(WIDGET_DESCRIPTION_PROPERTY_ID).toString() : null;
        entity.setDescription(description);

        String values = (properties.containsKey(WIDGET_VALUES_PROPERTY_ID))
            ? gson.toJson(properties.get(WIDGET_VALUES_PROPERTY_ID)) : null;
        entity.setWidgetValues(values);

        Map<String, Object> widgetPropertiesMap = new HashMap<>();
        for (Map.Entry<String, Object> entry : properties.entrySet()) {
          if (PropertyHelper.getPropertyCategory(entry.getKey()).equals(WIDGET_PROPERTIES_PROPERTY_ID)) {
            widgetPropertiesMap.put(PropertyHelper.getPropertyName(entry.getKey()), entry.getValue());
          }
        }

        String widgetProperties = (widgetPropertiesMap.isEmpty()) ? null : gson.toJson(widgetPropertiesMap);
        entity.setProperties(widgetProperties);

        if (properties.containsKey(WIDGET_TAG_PROPERTY_ID)) {
          entity.setTag(properties.get(WIDGET_TAG_PROPERTY_ID).toString());
        }

        widgetDAO.create(entity);
        notifyCreate(Type.Widget, request);
        return entity;
      }
    });
    Resource resource = new ResourceImpl(Type.Widget);
    resource.setProperty(WIDGET_ID_PROPERTY_ID, widgetEntity.getId());
    associatedResources.add(resource);
  }

  return getRequestStatus(null, associatedResources);
}
223551095_1098
@Override
public RequestStatus updateResources(Request request, Predicate predicate)
    throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
  final Set<Map<String, Object>> propertyMaps = request.getProperties();

  modifyResources(new Command<Void>() {
    @Override
    public Void invoke() throws AmbariException {
      for (Map<String, Object> propertyMap : propertyMaps) {
        final Long id;
        try {
          id = Long.parseLong(propertyMap.get(WIDGET_ID_PROPERTY_ID).toString());
        } catch (Exception ex) {
          throw new AmbariException("Widget should have numerical id");
        }

        final WidgetEntity entity = widgetDAO.findById(id);
        if (entity == null) {
          throw new ObjectNotFoundException("There is no widget with id " + id);
        }

        if (StringUtils.isNotBlank(ObjectUtils.toString(propertyMap.get(WIDGET_WIDGET_NAME_PROPERTY_ID)))) {
          entity.setWidgetName(propertyMap.get(WIDGET_WIDGET_NAME_PROPERTY_ID).toString());
        }

        if (StringUtils.isNotBlank(ObjectUtils.toString(propertyMap.get(WIDGET_WIDGET_TYPE_PROPERTY_ID)))) {
          entity.setWidgetType(propertyMap.get(WIDGET_WIDGET_TYPE_PROPERTY_ID).toString());
        }

        if (StringUtils.isNotBlank(ObjectUtils.toString(propertyMap.get(WIDGET_METRICS_PROPERTY_ID)))) {
          entity.setMetrics(gson.toJson(propertyMap.get(WIDGET_METRICS_PROPERTY_ID)));
        }

        entity.setAuthor(getAuthorName(propertyMap));

        if (StringUtils.isNotBlank(ObjectUtils.toString(propertyMap.get(WIDGET_DESCRIPTION_PROPERTY_ID)))) {
          entity.setDescription(propertyMap.get(WIDGET_DESCRIPTION_PROPERTY_ID).toString());
        }

        if (StringUtils.isNotBlank(ObjectUtils.toString(propertyMap.get(WIDGET_SCOPE_PROPERTY_ID)))) {
          String scope = propertyMap.get(WIDGET_SCOPE_PROPERTY_ID).toString();
          String clusterName = propertyMap.get(WIDGET_CLUSTER_NAME_PROPERTY_ID).toString();
          if (!isScopeAllowedForUser(scope, clusterName)) {
            throw new AmbariException("Only cluster operator can create widgets with cluster scope");
          }
          entity.setScope(scope);
        }

        if (StringUtils.isNotBlank(ObjectUtils.toString(propertyMap.get(WIDGET_VALUES_PROPERTY_ID)))) {
          entity.setWidgetValues(gson.toJson(propertyMap.get(WIDGET_VALUES_PROPERTY_ID)));
        }

        Map<String, Object> widgetPropertiesMap = new HashMap<>();
        for (Map.Entry<String, Object> entry : propertyMap.entrySet()) {
          if (PropertyHelper.getPropertyCategory(entry.getKey()).equals(WIDGET_PROPERTIES_PROPERTY_ID)) {
            widgetPropertiesMap.put(PropertyHelper.getPropertyName(entry.getKey()), entry.getValue());
          }
        }

        if (!widgetPropertiesMap.isEmpty()) {
          entity.setProperties(gson.toJson(widgetPropertiesMap));
        }

        if (StringUtils.isNotBlank(ObjectUtils.toString(propertyMap.get(WIDGET_TAG_PROPERTY_ID)))) {
          entity.setTag(propertyMap.get(WIDGET_TAG_PROPERTY_ID).toString());
        }

        widgetDAO.merge(entity);
      }
      return null;
    }
  });

  return getRequestStatus(null);
}
223551095_1099
@Override
public RequestStatus deleteResources(Request request, Predicate predicate)
    throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
  final Set<Map<String, Object>> propertyMaps = getPropertyMaps(predicate);
  final List<WidgetEntity> entitiesToBeRemoved = new ArrayList<>();

  for (Map<String, Object> propertyMap : propertyMaps) {
    final Long id;
    try {
      id = Long.parseLong(propertyMap.get(WIDGET_ID_PROPERTY_ID).toString());
    } catch (Exception ex) {
      throw new SystemException("Widget should have numerical id");
    }
    final WidgetEntity entity = widgetDAO.findById(id);
    if (entity == null) {
      throw new NoSuchResourceException("There is no widget with id " + id);
    }
    entitiesToBeRemoved.add(entity);
  }

  for (WidgetEntity entity : entitiesToBeRemoved) {
    // detach the widget from any layouts that reference it before removing it
    if (entity.getListWidgetLayoutUserWidgetEntity() != null) {
      for (WidgetLayoutUserWidgetEntity layoutUserWidgetEntity : entity.getListWidgetLayoutUserWidgetEntity()) {
        if (layoutUserWidgetEntity.getWidgetLayout().getListWidgetLayoutUserWidgetEntity() != null) {
          layoutUserWidgetEntity.getWidgetLayout().getListWidgetLayoutUserWidgetEntity().remove(layoutUserWidgetEntity);
        }
      }
    }
    widgetDAO.remove(entity);
  }

  return getRequestStatus(null);
}
223551095_1101
@Override
public Set<Resource> getResources(Request request, Predicate predicate)
    throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
  Set<Resource> resources = new LinkedHashSet<>();
  resources.addAll(getQuickLinks(request, predicate));
  // add other artifact types here

  if (resources.isEmpty()) {
    throw new NoSuchResourceException(
        "The requested resource doesn't exist: QuickLink not found, " + predicate);
  }
  return resources;
}
223551095_1105
public void doUpdateForBlueprintExport() {
  // HA configs are only processed in cluster configuration, not HG configurations
  if (clusterTopology.isNameNodeHAEnabled()) {
    doNameNodeHAUpdate();
  }
  if (clusterTopology.isYarnResourceManagerHAEnabled()) {
    doYarnResourceManagerHAUpdate();
  }
  if (isOozieServerHAEnabled(clusterTopology.getConfiguration().getFullProperties())) {
    doOozieServerHAUpdate();
  }

  Collection<Configuration> allConfigs = new ArrayList<>();
  allConfigs.add(clusterTopology.getConfiguration());
  for (HostGroupInfo groupInfo : clusterTopology.getHostGroupInfo().values()) {
    Configuration hgConfiguration = groupInfo.getConfiguration();
    if (!hgConfiguration.getFullProperties(1).isEmpty()) {
      // create a new configuration which only contains properties specified in the host group and BP host group
      allConfigs.add(new Configuration(hgConfiguration.getProperties(), null,
          new Configuration(hgConfiguration.getParentConfiguration().getProperties(), null)));
    }
  }

  for (Configuration configuration : allConfigs) {
    doSingleHostExportUpdate(singleHostTopologyUpdaters, configuration);
    doSingleHostExportUpdate(dbHostTopologyUpdaters, configuration);
    doMultiHostExportUpdate(multiHostTopologyUpdaters, configuration);
    doNonTopologyUpdate(nonTopologyUpdaters, configuration);
    doRemovePropertyExport(removePropertyUpdaters, configuration);
    doFilterPriorToExport(configuration);
  }
}
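The export builds, per host group, a two-level Configuration whose parent holds the inherited properties; getFullProperties(1), asking one parent level deep, decides whether the group has anything of its own worth exporting. A toy sketch of such a parent-chained property lookup; LayeredConfig is a hypothetical stand-in, not Ambari's Configuration class:

import java.util.*;

// Sketch only: illustrates a parent-chained configuration lookup under
// assumed semantics (depth = number of parent layers to merge in).
public class LayeredConfigSketch {
  static class LayeredConfig {
    final Map<String, String> own;
    final LayeredConfig parent;

    LayeredConfig(Map<String, String> own, LayeredConfig parent) {
      this.own = own;
      this.parent = parent;
    }

    /** Merge parent layers up to the given depth, own properties winning. */
    Map<String, String> fullProperties(int depth) {
      Map<String, String> merged = (parent != null && depth > 0)
          ? new HashMap<>(parent.fullProperties(depth - 1))
          : new HashMap<>();
      merged.putAll(own);
      return merged;
    }
  }

  public static void main(String[] args) {
    LayeredConfig cluster = new LayeredConfig(Map.of("a", "1", "b", "2"), null);
    LayeredConfig hostGroup = new LayeredConfig(Map.of("b", "3"), cluster);
    System.out.println(hostGroup.fullProperties(1)); // {a=1, b=3}: host group overrides b
    System.out.println(hostGroup.fullProperties(0)); // {b=3}: own layer only
  }
}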
223551095_1132
public Map<String, Map<String, PropertyUpdater>> getRemovePropertyUpdaters() { return removePropertyUpdaters; }
223551095_1138
public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
  Set<String> configTypesUpdated = new HashSet<>();
  Configuration clusterConfig = clusterTopology.getConfiguration();

  doRecommendConfigurations(clusterConfig, configTypesUpdated);

  // filter out any properties that should not be included, based on the dependencies
  // specified in the stacks and the filters defined in this class
  doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);

  Set<String> propertiesMoved = clusterConfig.moveProperties(
      HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
  if (!propertiesMoved.isEmpty()) {
    configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
    configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
  }

  // This needs to be called after doFilterPriorToClusterUpdate() to ensure that the
  // returned set of properties (a copy) doesn't include the removed properties. If an
  // updater removes a property other than the one it is registered for, we will have
  // an issue, as it won't be removed from the clusterProps map, which is a copy.
  Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
  doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);

  // todo: lots of hard-coded HA rules included here
  if (clusterTopology.isNameNodeHAEnabled()) {
    doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
  }

  // Explicitly set any properties that are required but not currently provided in the stack definition.
  setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
  setRetryConfiguration(clusterConfig, configTypesUpdated);
  setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
  addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
  trimProperties(clusterConfig, clusterTopology);

  return configTypesUpdated;
}
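moveProperties() relocates the HDFS HA initial properties out of hadoop-env and reports which ones actually moved, which drives the configTypesUpdated bookkeeping above. A small sketch of those semantics over plain maps; the exact contract is inferred from the call site, not quoted from Configuration:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Sketch of moveProperties(source, target, names) as used above: each named
// property found in the source type is removed from it and added to the
// target type; the returned set reports what actually moved.
public class MovePropertiesSketch {
  static Set<String> moveProperties(Map<String, Map<String, String>> config,
                                    String sourceType, String targetType, Set<String> names) {
    Set<String> moved = new HashSet<>();
    Map<String, String> source = config.getOrDefault(sourceType, new HashMap<>());
    Map<String, String> target = config.computeIfAbsent(targetType, t -> new HashMap<>());
    for (String name : names) {
      String value = source.remove(name);
      if (value != null) {
        target.put(name, value);
        moved.add(name);
      }
    }
    return moved;
  }

  public static void main(String[] args) {
    Map<String, Map<String, String>> config = new HashMap<>();
    config.put("hadoop-env", new HashMap<>(Map.of("dfs_ha_initial_namenode_active", "nn1")));
    Set<String> moved = moveProperties(config, "hadoop-env", "hdfs-ha-init",
        Set.of("dfs_ha_initial_namenode_active", "dfs_ha_initial_namenode_standby"));
    System.out.println(moved);  // only the property that was present moves
    System.out.println(config); // hadoop-env emptied of the moved key
  }
}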
223551095_1140
public Set<String> getRequiredHostGroups() {
  Set<String> requiredHostGroups = new HashSet<>();
  Collection<Map<String, Map<String, PropertyUpdater>>> updaters = createCollectionOfUpdaters();

  // Iterate over all registered updaters and collect the host groups referenced by the
  // related properties, as extracted by the updaters.
  for (Map<String, Map<String, PropertyUpdater>> updaterMap : updaters) {
    for (Map.Entry<String, Map<String, PropertyUpdater>> entry : updaterMap.entrySet()) {
      String type = entry.getKey();
      for (Map.Entry<String, PropertyUpdater> updaterEntry : entry.getValue().entrySet()) {
        String propertyName = updaterEntry.getKey();
        PropertyUpdater updater = updaterEntry.getValue();

        // cluster-scoped configuration, which also includes all default and BP properties
        Map<String, Map<String, String>> clusterProps =
            clusterTopology.getConfiguration().getFullProperties();
        Map<String, String> typeMap = clusterProps.get(type);
        if (typeMap != null && typeMap.containsKey(propertyName) && typeMap.get(propertyName) != null) {
          requiredHostGroups.addAll(updater.getRequiredHostGroups(
              propertyName, typeMap.get(propertyName), clusterProps, clusterTopology));
        }

        // host group configs
        for (HostGroupInfo groupInfo : clusterTopology.getHostGroupInfo().values()) {
          Map<String, Map<String, String>> hgConfigProps = groupInfo.getConfiguration().getProperties();
          Map<String, String> hgTypeMap = hgConfigProps.get(type);
          if (hgTypeMap != null && hgTypeMap.containsKey(propertyName)) {
            requiredHostGroups.addAll(updater.getRequiredHostGroups(
                propertyName, hgTypeMap.get(propertyName), hgConfigProps, clusterTopology));
          }
        }
      }
    }
  }

  // Iterate over all user-defined properties (blueprint + cluster template only, no stack
  // defaults) that do not have a registered updater. These properties can reference host
  // groups too, which should be extracted by the default updater.
  Set<Pair<String, String>> propertiesWithUpdaters = getAllPropertiesWithUpdaters(updaters);

  // apply the default updater on the cluster config
  Map<String, Map<String, String>> userDefinedClusterProperties =
      clusterTopology.getConfiguration().getFullProperties(1);
  addRequiredHostgroupsByDefaultUpdater(userDefinedClusterProperties, propertiesWithUpdaters, requiredHostGroups);

  // apply the default updater on host group configs
  clusterTopology.getHostGroupInfo().values().forEach(hostGroup -> {
    Configuration hostGroupConfig = hostGroup.getConfiguration();
    Map<String, Map<String, String>> hostGroupConfigProps = hostGroupConfig.getFullProperties(1);
    addRequiredHostgroupsByDefaultUpdater(hostGroupConfigProps, propertiesWithUpdaters, requiredHostGroups);
  });

  return requiredHostGroups;
}
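Properties without a registered updater fall through to a default updater that extracts host groups referenced by %HOSTGROUP::name% tokens, the blueprint convention for deferred host resolution. A regex sketch of that extraction; the standalone helper is illustrative, not the provider's code:

import java.util.LinkedHashSet;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Pulls host-group names out of %HOSTGROUP::name% tokens embedded in a
// property value, the kind of reference the default updater above collects.
public class HostGroupTokenSketch {
  private static final Pattern HOSTGROUP = Pattern.compile("%HOSTGROUP::([^%]+)%");

  static Set<String> referencedHostGroups(String propertyValue) {
    Set<String> groups = new LinkedHashSet<>();
    Matcher m = HOSTGROUP.matcher(propertyValue);
    while (m.find()) {
      groups.add(m.group(1));
    }
    return groups;
  }

  public static void main(String[] args) {
    String value = "%HOSTGROUP::master_1%:2181,%HOSTGROUP::master_2%:2181";
    System.out.println(referencedHostGroups(value)); // [master_1, master_2]
  }
}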
223551095_1141
Collection<Map<String, Map<String, PropertyUpdater>>> createCollectionOfUpdaters() {
  Collection<Map<String, Map<String, PropertyUpdater>>> updaters = allUpdaters;
  if (clusterTopology.isNameNodeHAEnabled()) {
    updaters = addNameNodeHAUpdaters(updaters);
  }
  if (clusterTopology.isYarnResourceManagerHAEnabled()) {
    updaters = addYarnResourceManagerHAUpdaters(updaters);
  }
  if (isOozieServerHAEnabled(clusterTopology.getConfiguration().getFullProperties())) {
    updaters = addOozieServerHAUpdaters(updaters);
  }
  return updaters;
}
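The add*HAUpdaters() calls are assumed to return a new collection that layers HA-specific updater maps over the shared allUpdaters, leaving the base untouched. A sketch of that non-mutating decoration, with String standing in for PropertyUpdater:

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;

// Illustrates the non-mutating decoration assumed of addNameNodeHAUpdaters()
// and friends: the base collection stays untouched and a new collection is
// returned with the HA-specific updater maps appended.
public class HaUpdaterDecorationSketch {
  static <U> Collection<Map<String, Map<String, U>>> withExtraUpdaters(
      Collection<Map<String, Map<String, U>>> base,
      Map<String, Map<String, U>> haUpdaters) {
    Collection<Map<String, Map<String, U>>> decorated = new ArrayList<>(base);
    decorated.add(haUpdaters);
    return decorated;
  }

  public static void main(String[] args) {
    Collection<Map<String, Map<String, String>>> base =
        List.of(Map.of("core-site", Map.of("fs.defaultFS", "singleHostUpdater")));
    Collection<Map<String, Map<String, String>>> decorated =
        withExtraUpdaters(base, Map.of("hdfs-site", Map.of("dfs.nameservices", "haUpdater")));
    System.out.println(base.size() + " -> " + decorated.size()); // 1 -> 2
  }
}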
223551095_1142
public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException { Set<String> configTypesUpdated = new HashSet<>(); Configuration clusterConfig = clusterTopology.getConfiguration(); doRecommendConfigurations(clusterConfig, configTypesUpdated); // filter out any properties that should not be included, based on the dependencies // specified in the stacks, and the filters defined in this class doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated); Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES); if (!propertiesMoved.isEmpty()) { configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME); configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE); } // this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned // set of properties (copy) doesn't include the removed properties. If an updater // removes a property other than the property it is registered for then we will // have an issue as it won't be removed from the clusterProps map as it is a copy. Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties(); doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated); //todo: lots of hard coded HA rules included here if (clusterTopology.isNameNodeHAEnabled()) { doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated); } // Explicitly set any properties that are required but not currently provided in the stack definition. setStackToolsAndFeatures(clusterConfig, configTypesUpdated); setRetryConfiguration(clusterConfig, configTypesUpdated); setupHDFSProxyUsers(clusterConfig, configTypesUpdated); addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack()); trimProperties(clusterConfig, clusterTopology); return configTypesUpdated; }
223551095_1143
public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException { Set<String> configTypesUpdated = new HashSet<>(); Configuration clusterConfig = clusterTopology.getConfiguration(); doRecommendConfigurations(clusterConfig, configTypesUpdated); // filter out any properties that should not be included, based on the dependencies // specified in the stacks, and the filters defined in this class doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated); Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES); if (!propertiesMoved.isEmpty()) { configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME); configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE); } // this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned // set of properties (copy) doesn't include the removed properties. If an updater // removes a property other than the property it is registered for then we will // have an issue as it won't be removed from the clusterProps map as it is a copy. Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties(); doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated); //todo: lots of hard coded HA rules included here if (clusterTopology.isNameNodeHAEnabled()) { doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated); } // Explicitly set any properties that are required but not currently provided in the stack definition. setStackToolsAndFeatures(clusterConfig, configTypesUpdated); setRetryConfiguration(clusterConfig, configTypesUpdated); setupHDFSProxyUsers(clusterConfig, configTypesUpdated); addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack()); trimProperties(clusterConfig, clusterTopology); return configTypesUpdated; }
223551095_1144
public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException { Set<String> configTypesUpdated = new HashSet<>(); Configuration clusterConfig = clusterTopology.getConfiguration(); doRecommendConfigurations(clusterConfig, configTypesUpdated); // filter out any properties that should not be included, based on the dependencies // specified in the stacks, and the filters defined in this class doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated); Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES); if (!propertiesMoved.isEmpty()) { configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME); configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE); } // this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned // set of properties (copy) doesn't include the removed properties. If an updater // removes a property other than the property it is registered for then we will // have an issue as it won't be removed from the clusterProps map as it is a copy. Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties(); doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated); //todo: lots of hard coded HA rules included here if (clusterTopology.isNameNodeHAEnabled()) { doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated); } // Explicitly set any properties that are required but not currently provided in the stack definition. setStackToolsAndFeatures(clusterConfig, configTypesUpdated); setRetryConfiguration(clusterConfig, configTypesUpdated); setupHDFSProxyUsers(clusterConfig, configTypesUpdated); addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack()); trimProperties(clusterConfig, clusterTopology); return configTypesUpdated; }
223551095_1145
public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException { Set<String> configTypesUpdated = new HashSet<>(); Configuration clusterConfig = clusterTopology.getConfiguration(); doRecommendConfigurations(clusterConfig, configTypesUpdated); // filter out any properties that should not be included, based on the dependencies // specified in the stacks, and the filters defined in this class doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated); Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES); if (!propertiesMoved.isEmpty()) { configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME); configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE); } // this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned // set of properties (copy) doesn't include the removed properties. If an updater // removes a property other than the property it is registered for then we will // have an issue as it won't be removed from the clusterProps map as it is a copy. Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties(); doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated); //todo: lots of hard coded HA rules included here if (clusterTopology.isNameNodeHAEnabled()) { doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated); } // Explicitly set any properties that are required but not currently provided in the stack definition. setStackToolsAndFeatures(clusterConfig, configTypesUpdated); setRetryConfiguration(clusterConfig, configTypesUpdated); setupHDFSProxyUsers(clusterConfig, configTypesUpdated); addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack()); trimProperties(clusterConfig, clusterTopology); return configTypesUpdated; }
223551095_1158
public void doUpdateForBlueprintExport() {
  // HA configs are only processed in cluster configuration, not HG configurations
  if (clusterTopology.isNameNodeHAEnabled()) {
    doNameNodeHAUpdate();
  }
  if (clusterTopology.isYarnResourceManagerHAEnabled()) {
    doYarnResourceManagerHAUpdate();
  }
  if (isOozieServerHAEnabled(clusterTopology.getConfiguration().getFullProperties())) {
    doOozieServerHAUpdate();
  }

  Collection<Configuration> allConfigs = new ArrayList<>();
  allConfigs.add(clusterTopology.getConfiguration());
  for (HostGroupInfo groupInfo : clusterTopology.getHostGroupInfo().values()) {
    Configuration hgConfiguration = groupInfo.getConfiguration();
    if (!hgConfiguration.getFullProperties(1).isEmpty()) {
      // create new configuration which only contains properties specified in host group and BP host group
      allConfigs.add(new Configuration(hgConfiguration.getProperties(), null,
          new Configuration(hgConfiguration.getParentConfiguration().getProperties(), null)));
    }
  }

  for (Configuration configuration : allConfigs) {
    doSingleHostExportUpdate(singleHostTopologyUpdaters, configuration);
    doSingleHostExportUpdate(dbHostTopologyUpdaters, configuration);
    doMultiHostExportUpdate(multiHostTopologyUpdaters, configuration);
    doNonTopologyUpdate(nonTopologyUpdaters, configuration);
    doRemovePropertyExport(removePropertyUpdaters, configuration);
    doFilterPriorToExport(configuration);
  }
}
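The host-group loop above layers each host group's own properties over a copy of its parent (blueprint-level) properties before running the export updaters. A minimal sketch of that two-level resolution, with a hypothetical LayeredConfig standing in for Ambari's Configuration parent chaining (the class, its names, and its exact merge semantics are assumptions for illustration):

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for the Configuration(parent) chaining used above.
class LayeredConfig {
  private final Map<String, Map<String, String>> own;
  private final LayeredConfig parent;  // null at the top of the chain

  LayeredConfig(Map<String, Map<String, String>> own, LayeredConfig parent) {
    this.own = own;
    this.parent = parent;
  }

  // Full view: parent properties are resolved first, then the host group's
  // own values override them.
  Map<String, Map<String, String>> getFullProperties() {
    Map<String, Map<String, String>> merged =
        (parent == null) ? new HashMap<>() : parent.getFullProperties();
    own.forEach((type, props) ->
        merged.computeIfAbsent(type, t -> new HashMap<>()).putAll(props));
    return merged;
  }
}

public class ExportLayeringDemo {
  public static void main(String[] args) {
    Map<String, Map<String, String>> blueprintProps = new HashMap<>();
    blueprintProps.put("core-site", new HashMap<>(Map.of(
        "fs.defaultFS", "hdfs://%HOSTGROUP::master%:8020",
        "io.file.buffer.size", "131072")));

    Map<String, Map<String, String>> hostGroupProps = new HashMap<>();
    hostGroupProps.put("core-site", new HashMap<>(Map.of(
        "io.file.buffer.size", "262144")));  // host-group-level override

    LayeredConfig hgConfig =
        new LayeredConfig(hostGroupProps, new LayeredConfig(blueprintProps, null));
    // The merged view the export updaters would operate on (entry order may vary):
    // fs.defaultFS=hdfs://%HOSTGROUP::master%:8020, io.file.buffer.size=262144
    System.out.println(hgConfig.getFullProperties().get("core-site"));
  }
}

Building a fresh two-level Configuration per host group, rather than reusing the live chained one, keeps the export updates scoped to properties the host group actually declares, so blueprint-level values are not duplicated into every host group on export.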