diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index ca79aba529f..a16c4cc85a5 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -44,23 +44,22 @@ /security-command-center @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/dee-infra @GoogleCloudPlatform/gcp-security-command-center /servicedirectory @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/dee-infra /webrisk @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/dee-infra +/tpu @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/dee-infra # DEE Platform Ops (DEEPO) /errorreporting @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers /monitoring @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers -/opencensus @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers -/trace @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers # Cloud SDK Databases & Data Analytics teams # ---* Cloud Native DB -/bigtable @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/cloud-native-db-dpes +/bigtable @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/cloud-native-db-dpes @GoogleCloudPlatform/bigtable-eng /memorystore @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers /spanner @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/api-spanner-java # ---* Cloud Storage /storage @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/cloud-storage-dpes /storage-transfer @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/cloud-storage-dpes # ---* Infra DB -/cloud-sql @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/infra-db-sdk +/cloud-sql @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/cloud-sql-connectors # Data & AI /aiplatform @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/text-embedding diff --git a/.github/blunderbuss.yml b/.github/blunderbuss.yml index f9bbd1cfac6..d2733b96f92 100644 --- a/.github/blunderbuss.yml +++ b/.github/blunderbuss.yml @@ -38,7 +38,7 @@ assign_issues_by: - labels: - 'api: cloudsql' to: - - GoogleCloudPlatform/infra-db-sdk + - GoogleCloudPlatform/cloud-sql-connectors - labels: - 'api: spanner' to: @@ -115,7 +115,7 @@ assign_prs_by: - labels: - 'api: cloudsql' to: - - GoogleCloudPlatform/infra-db-sdk + - GoogleCloudPlatform/cloud-sql-connectors - labels: - 'api: spanner' to: diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 5a9bf7c5360..85e6da61771 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -1,142 +1,136 @@ -// find legacy configuration 
at https://github.com/GoogleCloudPlatform/java-docs-samples/blob/91792d4da53a12f96032f4556815f7d91f27257b/renovate.json { - "extends": [ - "config:recommended", - ":approveMajorUpdates", - "schedule:earlyMondays", - ":ignoreUnstable", + extends: [ + 'config:recommended', + ':approveMajorUpdates', + 'schedule:earlyMondays', + ':ignoreUnstable', ], - "labels": [ - "dependencies", - "automerge" + labels: [ + 'dependencies', + 'automerge', ], - "minimumReleaseAge": "7 days", - "dependencyDashboardLabels": [ - "type: process", + minimumReleaseAge: '7 days', + dependencyDashboardLabels: [ + 'type: process', ], - // discontinue upgrades for java8 code samples - "ignorePaths": ["**/*java8*/**", "**/*java-8*/**"], - "packageRules": [ + ignorePaths: [ + '**/*java8*/**', + '**/*java-8*/**', + ], + packageRules: [ { - "matchCategories": [ - "java" + matchCategories: [ + 'java', + ], + addLabels: [ + 'lang: java', ], - "addLabels": [ - "lang: java" - ] }, - // TODO: check if auto-merge rules will work at all { - "matchUpdateTypes": [ - "minor", - "patch", - "digest", - "lockFileMaintenance" + matchUpdateTypes: [ + 'minor', + 'patch', + 'digest', + 'lockFileMaintenance', ], - "automerge": true + automerge: true, }, { - "matchDepTypes": [ - "devDependencies" + matchDepTypes: [ + 'devDependencies', ], - "automerge": true + automerge: true, }, - // group all Dockerfile dependencies { - "matchCategories": [ - "docker" - ], - "matchUpdateTypes": [ - "minor", - "patch", - "digest", - "lockFileMaintenance" - ], - "groupName": "docker", - "pinDigests": true, - "automerge": true + matchCategories: [ + 'docker', + ], + matchUpdateTypes: [ + 'minor', + 'patch', + 'digest', + 'lockFileMaintenance', + ], + groupName: 'docker', + pinDigests: true, + automerge: true, }, - // group all terraform dependencies for google providers { - "matchCategories": [ - "terraform" + matchCategories: [ + 'terraform', + ], + matchDepTypes: [ + 'provider', + 'required_provider', ], - "matchDepTypes": [ - "provider", - "required_provider" + groupName: 'Terraform Google providers', + matchPackageNames: [ + '/^google/', ], - "matchPackagePatterns": "^google", - "groupName": "Terraform Google providers", }, - // *** Java dependency rules: - // group *ALL* Java dependencies { - "matchCategories": [ - "java" + matchCategories: [ + 'java', ], - "matchUpdateTypes": [ - "minor", - "patch", - "digest", - "lockFileMaintenance" + matchUpdateTypes: [ + 'minor', + 'patch', + 'digest', + 'lockFileMaintenance', ], - "groupName": "java", - "automerge": true + groupName: 'java', + automerge: true, }, - // do not allow Spring Boot 3 upgrades yet { - "matchCategories": [ - "java" + matchCategories: [ + 'java', ], - "matchPackagePatterns": [ - "org.springframework.boot" + matchCurrentVersion: '>=2.0.0, <3.0.0', + allowedVersions: '<3', + groupName: 'Spring Boot upgrades for v2', + description: '@akitsch: Spring Boot V3 requires Java 17', + matchPackageNames: [ + '/org.springframework.boot/', ], - "matchCurrentVersion": ">=2.0.0, <3.0.0", - "allowedVersions": "<3", - "groupName": "Spring Boot upgrades for v2", - "description": "@akitsch: Spring Boot V3 requires Java 17" }, - // limit micronaut upgrades for versions <= 4 { - "matchPackagePatterns": [ - "^io.micronaut" + groupName: 'Micronaut packages', + allowedVersions: '<4', + matchFileNames: [ + 'appengine-java11/**', + 'flexible/java-11/**', ], - "groupName": "Micronaut packages", - "allowedVersions": "<4", - "matchPaths": [ - "appengine-java11/**", - "flexible/java-11/**" + description: '@akitsch: 
Micronaut V4 requires Java 17', + matchPackageNames: [ + '/^io.micronaut/', ], - "description": "@akitsch: Micronaut V4 requires Java 17" }, - // disable Scala dependency upgrades { - "matchPackagePatterns": [ - "scala" + enabled: false, + matchPackageNames: [ + '/scala/', ], - "enabled": false }, { - "matchPackagePatterns": [ - "^jackson-module-scala" + enabled: false, + matchPackageNames: [ + '/^jackson-module-scala/', ], - "enabled": false }, - // disable SQL Spark dependency upgrades { - "matchPackagePatterns": [ - "^spark-sql" + enabled: false, + matchPackageNames: [ + '/^spark-sql/', ], - "enabled": false }, {}, ], - "rebaseWhen": "behind-base-branch", - "semanticCommits": "enabled", - "vulnerabilityAlerts": { - "labels": [ - "type:security" + rebaseWhen: 'behind-base-branch', + semanticCommits: 'enabled', + vulnerabilityAlerts: { + labels: [ + 'type:security', ], - "minimumReleaseAge": null + minimumReleaseAge: null, }, -} \ No newline at end of file +} diff --git a/appengine-java11/appengine-simple-jetty-main/pom.xml b/appengine-java11/appengine-simple-jetty-main/pom.xml index fbc38d30133..6ad52162b64 100644 --- a/appengine-java11/appengine-simple-jetty-main/pom.xml +++ b/appengine-java11/appengine-simple-jetty-main/pom.xml @@ -21,7 +21,7 @@ UTF-8 11 11 - 9.4.54.v20240208 + 9.4.56.v20240826 diff --git a/appengine-java8/datastore/src/test/java/com/example/appengine/QueriesTest.java b/appengine-java8/datastore/src/test/java/com/example/appengine/QueriesTest.java index c242a669d41..6c36df45fa8 100644 --- a/appengine-java8/datastore/src/test/java/com/example/appengine/QueriesTest.java +++ b/appengine-java8/datastore/src/test/java/com/example/appengine/QueriesTest.java @@ -430,32 +430,6 @@ public void queryInterface_multipleFilters_printsMatchedEntities() throws Except assertThat(buf.toString()).doesNotContain("Charlie"); } - @Test - public void queryInterface_singleFilter_returnsMatchedEntities() throws Exception { - // Arrange - Entity a = new Entity("Person", "a"); - a.setProperty("height", 100); - Entity b = new Entity("Person", "b"); - b.setProperty("height", 150); - Entity c = new Entity("Person", "c"); - c.setProperty("height", 300); - datastore.put(ImmutableList.of(a, b, c)); - - // Act - long minHeight = 150; - // [START gae_java8_datastore_interface_2] - Filter heightMinFilter = - new FilterPredicate("height", FilterOperator.GREATER_THAN_OR_EQUAL, minHeight); - - Query q = new Query("Person").setFilter(heightMinFilter); - // [END gae_java8_datastore_interface_2] - - // Assert - List results = - datastore.prepare(q.setKeysOnly()).asList(FetchOptions.Builder.withDefaults()); - assertWithMessage("query results").that(results).containsExactly(b, c); - } - @Test public void queryInterface_orFilter_printsMatchedEntities() throws Exception { // Arrange diff --git a/auth/src/main/java/UndeleteApiKey.java b/auth/src/main/java/UndeleteApiKey.java new file mode 100644 index 00000000000..cd509c705b3 --- /dev/null +++ b/auth/src/main/java/UndeleteApiKey.java @@ -0,0 +1,59 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// [START apikeys_undelete_api_key] +import com.google.api.apikeys.v2.ApiKeysClient; +import com.google.api.apikeys.v2.Key; +import com.google.api.apikeys.v2.UndeleteKeyRequest; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class UndeleteApiKey { + + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Google Cloud project. + String projectId = "YOUR_PROJECT_ID"; + // The API key id to undelete. + String keyId = "YOUR_KEY_ID"; + + undeleteApiKey(projectId, keyId); + } + + // Undeletes an API key. + public static void undeleteApiKey(String projectId, String keyId) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (ApiKeysClient apiKeysClient = ApiKeysClient.create()) { + + // Initialize the undelete request and set the argument. + UndeleteKeyRequest undeleteKeyRequest = UndeleteKeyRequest.newBuilder() + .setName(String.format("projects/%s/locations/global/keys/%s", projectId, keyId)) + .build(); + + // Make the request and wait for the operation to complete. + Key undeletedKey = apiKeysClient.undeleteKeyAsync(undeleteKeyRequest) + .get(3, TimeUnit.MINUTES); + + System.out.printf("Successfully undeleted the API key: %s", undeletedKey.getName()); + } + } +} +// [END apikeys_undelete_api_key] \ No newline at end of file diff --git a/auth/src/test/java/ApiKeySnippetsIT.java b/auth/src/test/java/ApiKeySnippetsIT.java index 46a059d2203..7f65313d0e1 100644 --- a/auth/src/test/java/ApiKeySnippetsIT.java +++ b/auth/src/test/java/ApiKeySnippetsIT.java @@ -36,7 +36,6 @@ public class ApiKeySnippetsIT { private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); - private static final String CREDENTIALS = System.getenv("GOOGLE_APPLICATION_CREDENTIALS"); private static Key API_KEY; private static String API_KEY_STRING; private ByteArrayOutputStream stdOut; @@ -79,8 +78,15 @@ public static void cleanup() String apiKeyId = getApiKeyId(API_KEY); DeleteApiKey.deleteApiKey(PROJECT_ID, apiKeyId); - String goal = String.format("Successfully deleted the API key: %s", API_KEY.getName()); - assertThat(stdOut.toString()).contains(goal); + + UndeleteApiKey.undeleteApiKey(PROJECT_ID, apiKeyId); + String undeletedKey = String.format("Successfully undeleted the API key: %s", + API_KEY.getName()); + assertThat(stdOut.toString()).contains(undeletedKey); + + DeleteApiKey.deleteApiKey(PROJECT_ID, apiKeyId); + String deletedKey = String.format("Successfully deleted the API key: %s", API_KEY.getName()); + assertThat(stdOut.toString()).contains(deletedKey); stdOut.close(); System.setOut(out); diff --git a/automl/src/main/java/com/google/cloud/vision/samples/automl/ClassificationDeployModel.java b/automl/src/main/java/com/google/cloud/vision/samples/automl/ClassificationDeployModel.java deleted file mode 100644 index 63da52ead0d..00000000000 --- a/automl/src/main/java/com/google/cloud/vision/samples/automl/ClassificationDeployModel.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2019 Google LLC - * - 
* Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.cloud.vision.samples.automl; - -// [START automl_vision_classification_deploy_model] -import com.google.api.gax.longrunning.OperationFuture; -import com.google.cloud.automl.v1beta1.AutoMlClient; -import com.google.cloud.automl.v1beta1.DeployModelRequest; -import com.google.cloud.automl.v1beta1.ModelName; -import com.google.cloud.automl.v1beta1.OperationMetadata; -import com.google.protobuf.Empty; -import java.io.IOException; -import java.util.concurrent.ExecutionException; - -class ClassificationDeployModel { - - // Deploy a model - static void classificationDeployModel(String projectId, String modelId) - throws IOException, ExecutionException, InterruptedException { - // String projectId = "YOUR_PROJECT_ID"; - // String modelId = "YOUR_MODEL_ID"; - - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources. - try (AutoMlClient client = AutoMlClient.create()) { - - // Get the full path of the model. - ModelName modelFullId = ModelName.of(projectId, "us-central1", modelId); - - // Build deploy model request. - DeployModelRequest deployModelRequest = - DeployModelRequest.newBuilder().setName(modelFullId.toString()).build(); - - // Deploy a model with the deploy model request. - OperationFuture future = - client.deployModelAsync(deployModelRequest); - - future.get(); - - // Display the deployment details of model. - System.out.println("Model deployment finished"); - } - } -} -// [END automl_vision_classification_deploy_model] diff --git a/automl/src/test/java/com/google/cloud/translate/automl/DatasetApiIT.java b/automl/src/test/java/com/google/cloud/translate/automl/DatasetApiIT.java deleted file mode 100644 index bd02bde649e..00000000000 --- a/automl/src/test/java/com/google/cloud/translate/automl/DatasetApiIT.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2018 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.google.cloud.translate.automl; - -import static com.google.common.truth.Truth.assertThat; - -import com.google.api.gax.rpc.NotFoundException; -import io.grpc.StatusRuntimeException; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.PrintStream; -import java.util.concurrent.ExecutionException; -import org.junit.After; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -/** Tests for Automl translation "Dataset API" sample. */ -@Ignore("This test is ignored because the legacy version of AutoML API is deprecated") -@RunWith(JUnit4.class) -@SuppressWarnings("checkstyle:abbreviationaswordinname") -public class DatasetApiIT { - - private static final String PROJECT_ID = "java-docs-samples-testing"; - private static final String BUCKET = PROJECT_ID + "-vcm"; - private static final String COMPUTE_REGION = "us-central1"; - private ByteArrayOutputStream bout; - private PrintStream originalPrintStream; - private String datasetId = "TEN0000000000000000000"; - - @Before - public void setUp() { - bout = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(bout); - originalPrintStream = System.out; - System.setOut(out); - } - - @After - public void tearDown() { - // restores print statements in the original method - System.out.flush(); - System.setOut(originalPrintStream); - } - - @Test - public void testCreateImportDeleteDataset() throws IOException, InterruptedException { - try { - DatasetApi.importData( - PROJECT_ID, COMPUTE_REGION, datasetId, "gs://" + BUCKET + "/en-ja-short.csv"); - String got = bout.toString(); - assertThat(got).contains("The Dataset doesn't exist "); - } catch (NotFoundException | ExecutionException | StatusRuntimeException ex) { - assertThat(ex.getMessage()).contains("The Dataset doesn't exist"); - } - } -} diff --git a/automl/src/test/java/com/google/cloud/vision/samples/automl/ClassificationDeployModelIT.java b/automl/src/test/java/com/google/cloud/vision/samples/automl/ClassificationDeployModelIT.java index 50095202fa3..7431265fa11 100644 --- a/automl/src/test/java/com/google/cloud/vision/samples/automl/ClassificationDeployModelIT.java +++ b/automl/src/test/java/com/google/cloud/vision/samples/automl/ClassificationDeployModelIT.java @@ -45,20 +45,6 @@ public void tearDown() { System.setOut(null); } - @Test - public void testClassificationDeployModelApi() { - // As model deployment can take a long time, instead try to deploy a - // nonexistent model and confirm that the model was not found, but other - // elements of the request were valid. 
- try { - ClassificationDeployModel.classificationDeployModel(PROJECT_ID, MODEL_ID); - String got = bout.toString(); - assertThat(got).contains("The model does not exist"); - } catch (IOException | ExecutionException | InterruptedException e) { - assertThat(e.getMessage()).contains("The model does not exist"); - } - } - @Test public void testClassificationDeployModelNodeCountApi() { // As model deployment can take a long time, instead try to deploy a diff --git a/batch/snippets/src/test/java/com/example/batch/CreateResourcesIT.java b/batch/snippets/src/test/java/com/example/batch/CreateResourcesIT.java index 03e6ab7ba64..8e4f8242e0b 100644 --- a/batch/snippets/src/test/java/com/example/batch/CreateResourcesIT.java +++ b/batch/snippets/src/test/java/com/example/batch/CreateResourcesIT.java @@ -36,6 +36,7 @@ import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; +import org.junit.Ignore; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @@ -135,6 +136,7 @@ private static void safeDeleteJob(String jobName) { } } + @Ignore("Canceling jobs not yet GA") @Test public void createBatchCustomServiceAccountTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -148,6 +150,7 @@ public void createBatchCustomServiceAccountTest() Assert.assertNotNull(job.getAllocationPolicy().getServiceAccount().getEmail()); } + @Ignore("Canceling jobs not yet GA") @Test public void createBatchUsingSecretManager() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -164,6 +167,7 @@ public void createBatchUsingSecretManager() -> taskGroup.getTaskSpec().getEnvironment().containsSecretVariables(variableName))); } + @Ignore("Canceling jobs not yet GA") @Test public void createGpuJobTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -181,6 +185,7 @@ public void createGpuJobTest() -> instance.getPolicy().getMachineType().contains(machineType))); } + @Ignore("Canceling jobs not yet GA") @Test public void createGpuJobN1Test() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -199,6 +204,7 @@ public void createGpuJobN1Test() -> accelerator.getType().contains(gpuType) && accelerator.getCount() == count))); } + @Ignore("Canceling jobs not yet GA") @Test public void createLocalSsdJobTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -217,6 +223,7 @@ public void createLocalSsdJobTest() -> attachedDisk.getDeviceName().contains(LOCAL_SSD_NAME)))); } + @Ignore("Canceling jobs not yet GA") @Test public void createPersistentDiskJobTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -243,6 +250,7 @@ public void createPersistentDiskJobTest() -> attachedDisk.getDeviceName().contains(NEW_PERSISTENT_DISK_NAME)))); } + @Ignore("Canceling jobs not yet GA") @Test public void createBatchNotificationTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -263,6 +271,7 @@ public void createBatchNotificationTest() && jobNotification.getMessage().getNewTaskState() == State.FAILED)); } + @Ignore("Canceling jobs not yet GA") @Test public void createBatchCustomEventTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -284,6 +293,7 @@ public void createBatchCustomEventTest() .anyMatch(runnable -> runnable.getDisplayName().equals(displayName)))); } + @Ignore("Canceling jobs not yet GA") @Test public 
void createScriptJobWithNfsTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -303,6 +313,7 @@ public void createScriptJobWithNfsTest() .anyMatch(volume -> volume.getNfs().getServer().equals(NFS_IP_ADDRESS)))); } + @Ignore("Canceling jobs not yet GA") @Test public void createBatchLabelJobTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -324,6 +335,7 @@ public void createBatchLabelJobTest() Assert.assertTrue(job.getLabelsMap().containsValue(labelValue2)); } + @Ignore("Canceling jobs not yet GA") @Test public void createBatchCustomNetworkTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -346,6 +358,7 @@ public void createBatchCustomNetworkTest() .anyMatch(AllocationPolicy.NetworkInterface::getNoExternalIpAddress)); } + @Ignore("Canceling jobs not yet GA") @Test public void createJobWithAllocationPolicyLabelTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -368,6 +381,7 @@ public void createJobWithAllocationPolicyLabelTest() Assert.assertTrue(job.getAllocationPolicy().getLabelsMap().containsValue(labelValue2)); } + @Ignore("Canceling jobs not yet GA") @Test public void createBatchRunnableLabelTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { diff --git a/batch/snippets/src/test/java/com/example/batch/Util.java b/batch/snippets/src/test/java/com/example/batch/Util.java index eb4342ac572..5a6635ff71b 100644 --- a/batch/snippets/src/test/java/com/example/batch/Util.java +++ b/batch/snippets/src/test/java/com/example/batch/Util.java @@ -109,7 +109,7 @@ public static void waitForJobCompletion(Job job) String[] jobName = job.getName().split("/"); Instant startTime = Instant.now(); while (WAIT_STATES.contains(job.getStatus().getState())) { - if (Instant.now().getEpochSecond() - startTime.getEpochSecond() > 900) { + if (Instant.now().getEpochSecond() - startTime.getEpochSecond() > 1200) { throw new Error("Timed out waiting for operation to complete."); } job = getJob(jobName[1], jobName[3], jobName[5]); diff --git a/bigtable/bigtable-proxy/.gitignore b/bigtable/bigtable-proxy/.gitignore new file mode 100644 index 00000000000..af665abb669 --- /dev/null +++ b/bigtable/bigtable-proxy/.gitignore @@ -0,0 +1,38 @@ +target/ +!.mvn/wrapper/maven-wrapper.jar +!**/src/main/**/target/ +!**/src/test/**/target/ + +### IntelliJ IDEA ### +.idea/modules.xml +.idea/jarRepositories.xml +.idea/compiler.xml +.idea/libraries/ +*.iws +*.iml +*.ipr + +### Eclipse ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache + +### NetBeans ### +/nbproject/private/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ +build/ +!**/src/main/**/build/ +!**/src/test/**/build/ + +### VS Code ### +.vscode/ + +### Mac OS ### +.DS_Store diff --git a/bigtable/bigtable-proxy/README.md b/bigtable/bigtable-proxy/README.md new file mode 100644 index 00000000000..d3e7b4d916e --- /dev/null +++ b/bigtable/bigtable-proxy/README.md @@ -0,0 +1,106 @@ +# Bigtable proxy + +## Overview + +A simple server meant to be used as a sidecar to maintain a persistent connection to Bigtable and +collect metrics. The primary purpose is to support applications that can't maintain a longlived +gRPC connection (ie. php in apache). + +The proxy is intended to be used as a local sidecar process. The proxy is intended to be shared by +all processes on the VM that it is running on. It's listening address is hardcoded to `localhost`. 
+The proxy will use [Application Default Credentials](https://cloud.google.com/docs/authentication/application-default-credentials)
+for all outbound RPCs.
+
+The proxy will accept local unencrypted connections from Bigtable clients, and:
+- attach credentials
+- export metrics
+- send the RPC over an encrypted channel pool to the Bigtable service
+
+## Features
+
+* Metrics - The proxy will track RPC metrics and export them to Google Cloud Monitoring
+* Multi-tenant - The proxy can be used to connect to many different Bigtable instances
+* Credential handling - The proxy has its own set of credentials. It will ignore any inbound
+  credentials from the client
+* Channel pooling - The proxy will maintain and autosize the outbound channel pool to properly
+  load balance RPCs.
+
+## Metrics
+
+The proxy is instrumented with OpenTelemetry and will export those metrics to Google Cloud Monitoring
+in a project of your choosing. The metrics will be published under the namespace
+`workload.googleapis.com`. Available metrics:
+
+* `bigtableproxy.server.call.started` The total number of RPCs started, including those that have
+  not completed.
+* `bigtableproxy.client.call.credential.duration` Latency of getting credentials
+* `bigtableproxy.client.call.queue.duration` Duration of how long the outbound side of the proxy had
+  the RPC queued
+* `bigtableproxy.client.call.sent_total_message_size` Total bytes sent per call to the Bigtable
+  service (excluding metadata, gRPC, and transport framing bytes)
+* `bigtableproxy.client.call.rcvd_total_message_size` Total bytes received per call from the
+  Bigtable service (excluding metadata, gRPC, and transport framing bytes)
+* `bigtableproxy.client.gfe.duration` Latency as measured by the Google load balancer from the time
+  it received the first byte of the request until it received the first byte of the response from
+  the Cloud Bigtable service.
+* `bigtableproxy.client.gfe.duration_missing.count` Count of calls missing GFE response headers
+* `bigtableproxy.client.call.duration` Total duration of how long the outbound call took
+* `bigtableproxy.server.write_wait.duration` Total amount of time spent waiting for the downstream
+  client to be ready for data.
+* `bigtableproxy.client.channel.count` Number of open channels
+* `bigtableproxy.client.channel_change_count` Number of channel transitions by previous and next
+  states.
+* `bigtableproxy.client.call.max_outstanding_count` Maximum number of concurrent RPCs in a single
+  minute window
+* `bigtableproxy.presence` Counts the number of proxy processes (emits 1 per process).
+
+## Requirements
+
+* JVM >= 11
+* Ensure that the service account includes the IAM roles:
+  * `Monitoring Metric Writer`
+  * `Bigtable User`
+* Ensure that the metrics project has the `Stackdriver Monitoring API` enabled
+
+## Expected usage
+
+```sh
+# Build the binary
+mvn package
+
+# Unpack the binary on the proxy host
+unzip target/bigtable-proxy-0.0.1-SNAPSHOT-bin.zip
+cd bigtable-proxy-0.0.1-SNAPSHOT
+
+# Verify that the proxy has the required permissions using an existing table. Please note that the
+# table data will not be modified; however, a test metric will be written.
+./bigtable-verify.sh \
+  --bigtable-project-id=$BIGTABLE_PROJECT_ID \
+  --bigtable-instance-id=$BIGTABLE_INSTANCE_ID \
+  --bigtable-table-id=$BIGTABLE_TABLE_ID \
+  --metrics-project-id=$METRICS_PROJECT_ID
+
+# Then start the proxy on the specified port. The proxy can forward requests for multiple
+# Bigtable projects/instances/tables.
However it will export health metrics to a single project +# specified by `metrics-project-id`. +./bigtable-proxy.sh \ + --listen-port=1234 \ + --metrics-project-id=SOME_GCP_PROJECT + +# Start your application, and redirect the bigtable client to connect to the local proxy. +export BIGTABLE_EMULATOR_HOST="localhost:1234" +path/to/application/with/bigtable/client +``` + +## Configuration + +Required options: +* `--listen-port=` The local port to listen for Bigtable client connections. This needs to + match port in the `BIGTABLE_EMULATOR_HOST="localhost:` environment variable passed to your + application. +* `--metrics-project-id=` The Google Cloud project that should be used to collect metrics + emitted from the proxy. + +Optional configuration: +* The environment variable `GOOGLE_APPLICATION_CREDENTIALS` can be used to use a non-default service + account. More details can be found here: https://cloud.google.com/docs/authentication/application-default-credentials diff --git a/bigtable/bigtable-proxy/pom.xml b/bigtable/bigtable-proxy/pom.xml new file mode 100644 index 00000000000..1eebfccb9a4 --- /dev/null +++ b/bigtable/bigtable-proxy/pom.xml @@ -0,0 +1,285 @@ + + + 4.0.0 + + + com.google.cloud.samples + shared-configuration + 1.2.2 + + + + com.google.cloud.bigtable + bigtable-proxy + 0.0.1-SNAPSHOT + + + 11 + 11 + UTF-8 + + + + 26.50.0 + + 1.44.1 + 1.41.0-alpha + 0.33.0 + 0.33.0 + + 2.0.16 + 1.5.12 + 1.11.0 + 4.7.6 + + 4.13.2 + 1.4.4 + + + + + + com.google.cloud + libraries-bom + ${libraries-bom.version} + pom + import + + + io.opentelemetry + opentelemetry-bom + ${otel.version} + pom + import + + + org.mockito + mockito-bom + 5.14.2 + pom + import + + + + + + + + io.grpc + grpc-api + + + io.grpc + grpc-core + + + io.grpc + grpc-netty-shaded + + + io.grpc + grpc-auth + + + com.google.auth + google-auth-library-oauth2-http + + + + + + com.google.api.grpc + grpc-google-cloud-bigtable-v2 + + + com.google.api.grpc + proto-google-cloud-bigtable-v2 + + + com.google.api.grpc + grpc-google-cloud-bigtable-admin-v2 + + + com.google.api.grpc + proto-google-cloud-bigtable-admin-v2 + + + com.google.api.grpc + grpc-google-common-protos + + + com.google.api.grpc + proto-google-common-protos + + + + + io.opentelemetry + opentelemetry-sdk + + + + io.opentelemetry + opentelemetry-sdk-metrics + + + + com.google.cloud.opentelemetry + exporter-metrics + ${exporter-metrics.version} + + + + com.google.cloud + google-cloud-core + + + io.opentelemetry.contrib + opentelemetry-gcp-resources + ${otel-contrib.version} + + + io.opentelemetry + opentelemetry-sdk-extension-autoconfigure-spi + + + com.google.cloud.opentelemetry + shared-resourcemapping + ${shared-resourcemapping.version} + + + + + org.slf4j + slf4j-api + ${slf4j.version} + + + org.slf4j + jul-to-slf4j + ${slf4j.version} + + + ch.qos.logback + logback-classic + ${logback.version} + + + + + com.google.guava + guava + + + + com.google.auto.value + auto-value-annotations + ${auto-value.version} + provided + + + info.picocli + picocli + ${picocli.version} + + + + + io.grpc + grpc-testing + test + + + junit + junit + ${junit.version} + test + + + com.google.truth + truth + ${truth.version} + test + + + org.mockito + mockito-core + + test + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.13.0 + + + + info.picocli + picocli-codegen + ${picocli.version} + + + com.google.auto.value + auto-value + ${auto-value.version} + + + + + -Aproject=${project.groupId}/${project.artifactId} + + + + + + maven-surefire-plugin + 3.5.2 + + + + 
org.apache.maven.plugins + maven-jar-plugin + 3.4.2 + + + + true + + lib/ + com.google.cloud.bigtable.examples.proxy.Main + + + + + + + org.apache.maven.plugins + maven-assembly-plugin + 3.7.1 + + + + + src/main/assembly/assembly.xml + + + + + + assemble + + single + + package + + + + + + diff --git a/bigtable/bigtable-proxy/src/main/assembly/assembly.xml b/bigtable/bigtable-proxy/src/main/assembly/assembly.xml new file mode 100644 index 00000000000..47126e8861f --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/assembly/assembly.xml @@ -0,0 +1,52 @@ + + bin + + + zip + + + + + + + false + lib + false + + + + + + + ${project.basedir} + + + README* + LICENSE* + NOTICE* + + + + + + ${project.build.scriptSourceDirectory} + + + *.sh + + true + + + + + + ${project.build.directory} + + + *.jar + + + + diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java new file mode 100644 index 00000000000..b480f3777d8 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java @@ -0,0 +1,37 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy; + +import com.google.cloud.bigtable.examples.proxy.commands.Serve; +import com.google.cloud.bigtable.examples.proxy.commands.Verify; +import org.slf4j.bridge.SLF4JBridgeHandler; +import picocli.CommandLine; +import picocli.CommandLine.Command; + +/** + * Main entry point for proxy commands under {@link + * com.google.cloud.bigtable.examples.proxy.commands}. + */ +@Command( + subcommands = {Serve.class, Verify.class}, + name = "bigtable-proxy") +public final class Main { + public static void main(String[] args) { + SLF4JBridgeHandler.install(); + new CommandLine(new Main()).execute(args); + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelFactory.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelFactory.java new file mode 100644 index 00000000000..10c68d7d9e7 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelFactory.java @@ -0,0 +1,35 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// Copied from +// https://github.com/googleapis/sdk-platform-java/blob/a333b0709023c971f12a85e5287b6d77d1b57c48/gax-java/gax-grpc/src/main/java/com/google/api/gax/grpc/ChannelFactory.java +// Changes: +// - package name +// - removed InternalApi annotation + +package com.google.cloud.bigtable.examples.proxy.channelpool; + +import io.grpc.ManagedChannel; +import java.io.IOException; + +/** + * This interface represents a factory for creating one ManagedChannel + * + *
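<p>Since {@link ChannelFactory} has a single abstract method, a lambda can serve as an
+ * implementation. The snippet below is an illustrative sketch added by the editor (the target
+ * address is a hypothetical example, not something this interface prescribes):
+ *
+ * <pre>{@code
+ * ChannelFactory factory =
+ *     () -> ManagedChannelBuilder.forTarget("bigtable.googleapis.com:443").build();
+ * }</pre>
+ *
+ *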
<p>
This is public only for technical reasons, for advanced usage. + */ +public interface ChannelFactory { + ManagedChannel createSingleChannel() throws IOException; +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPool.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPool.java new file mode 100644 index 00000000000..380d97c9418 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPool.java @@ -0,0 +1,591 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.channelpool; + +import com.google.api.core.InternalApi; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; +import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener; +import io.grpc.ManagedChannel; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.grpc.Status; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CancellationException; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; + +/** + * A {@link ManagedChannel} that will send requests round-robin via a set of channels. + * + *
<p>
In addition to spreading requests over a set of child connections, the pool will also actively + * manage the lifecycle of the channels. Currently, lifecycle management is limited to pre-emptively + * replacing channels every hour. In the future it will dynamically size the pool based on number of + * outstanding requests. + * + *
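<p>As an illustrative sketch (an editor's example, not part of the original documentation), a
+ * fixed-size pool could be assembled roughly like this, where the target and size are
+ * hypothetical:
+ *
+ * <pre>{@code
+ * ChannelPool pool =
+ *     ChannelPool.create(
+ *         ChannelPoolSettings.staticallySized(4),
+ *         () -> ManagedChannelBuilder.forTarget("bigtable.googleapis.com:443").build());
+ * }</pre>
+ *
+ *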
<p>
Package-private for internal use. + */ +public class ChannelPool extends ManagedChannel { + @VisibleForTesting static final Logger LOG = Logger.getLogger(ChannelPool.class.getName()); + private static final java.time.Duration REFRESH_PERIOD = java.time.Duration.ofMinutes(50); + + private final ChannelPoolSettings settings; + private final ChannelFactory channelFactory; + private final ScheduledExecutorService executor; + + private final Object entryWriteLock = new Object(); + @VisibleForTesting final AtomicReference> entries = new AtomicReference<>(); + private final AtomicInteger indexTicker = new AtomicInteger(); + private final String authority; + + public static ChannelPool create(ChannelPoolSettings settings, ChannelFactory channelFactory) + throws IOException { + return new ChannelPool(settings, channelFactory, Executors.newSingleThreadScheduledExecutor()); + } + + /** + * Initializes the channel pool. Assumes that all channels have the same authority. + * + * @param settings options for controling the ChannelPool sizing behavior + * @param channelFactory method to create the channels + * @param executor periodically refreshes the channels + */ + @VisibleForTesting + ChannelPool( + ChannelPoolSettings settings, + ChannelFactory channelFactory, + ScheduledExecutorService executor) + throws IOException { + this.settings = settings; + this.channelFactory = channelFactory; + + ImmutableList.Builder initialListBuilder = ImmutableList.builder(); + + for (int i = 0; i < settings.getInitialChannelCount(); i++) { + initialListBuilder.add(new Entry(channelFactory.createSingleChannel())); + } + + entries.set(initialListBuilder.build()); + authority = entries.get().get(0).channel.authority(); + this.executor = executor; + + if (!settings.isStaticSize()) { + executor.scheduleAtFixedRate( + this::resizeSafely, + ChannelPoolSettings.RESIZE_INTERVAL.getSeconds(), + ChannelPoolSettings.RESIZE_INTERVAL.getSeconds(), + TimeUnit.SECONDS); + } + if (settings.isPreemptiveRefreshEnabled()) { + executor.scheduleAtFixedRate( + this::refreshSafely, + REFRESH_PERIOD.getSeconds(), + REFRESH_PERIOD.getSeconds(), + TimeUnit.SECONDS); + } + } + + /** {@inheritDoc} */ + @Override + public String authority() { + return authority; + } + + /** + * Create a {@link ClientCall} on a Channel from the pool chosen in a round-robin fashion to the + * remote operation specified by the given {@link MethodDescriptor}. The returned {@link + * ClientCall} does not trigger any remote behavior until {@link + * ClientCall#start(ClientCall.Listener, io.grpc.Metadata)} is invoked. 
+ */ + @Override + public ClientCall newCall( + MethodDescriptor methodDescriptor, CallOptions callOptions) { + return getChannel(indexTicker.getAndIncrement()).newCall(methodDescriptor, callOptions); + } + + Channel getChannel(int affinity) { + return new AffinityChannel(affinity); + } + + /** {@inheritDoc} */ + @Override + public ManagedChannel shutdown() { + LOG.fine("Initiating graceful shutdown due to explicit request"); + + List localEntries = entries.get(); + for (Entry entry : localEntries) { + entry.channel.shutdown(); + } + if (executor != null) { + // shutdownNow will cancel scheduled tasks + executor.shutdownNow(); + } + return this; + } + + /** {@inheritDoc} */ + @Override + public boolean isShutdown() { + List localEntries = entries.get(); + for (Entry entry : localEntries) { + if (!entry.channel.isShutdown()) { + return false; + } + } + return executor == null || executor.isShutdown(); + } + + /** {@inheritDoc} */ + @Override + public boolean isTerminated() { + List localEntries = entries.get(); + for (Entry entry : localEntries) { + if (!entry.channel.isTerminated()) { + return false; + } + } + + return executor == null || executor.isTerminated(); + } + + /** {@inheritDoc} */ + @Override + public ManagedChannel shutdownNow() { + LOG.fine("Initiating immediate shutdown due to explicit request"); + + List localEntries = entries.get(); + for (Entry entry : localEntries) { + entry.channel.shutdownNow(); + } + if (executor != null) { + executor.shutdownNow(); + } + return this; + } + + /** {@inheritDoc} */ + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { + long endTimeNanos = System.nanoTime() + unit.toNanos(timeout); + List localEntries = entries.get(); + for (Entry entry : localEntries) { + long awaitTimeNanos = endTimeNanos - System.nanoTime(); + if (awaitTimeNanos <= 0) { + break; + } + entry.channel.awaitTermination(awaitTimeNanos, TimeUnit.NANOSECONDS); + } + if (executor != null) { + long awaitTimeNanos = endTimeNanos - System.nanoTime(); + executor.awaitTermination(awaitTimeNanos, TimeUnit.NANOSECONDS); + } + return isTerminated(); + } + + private void resizeSafely() { + try { + synchronized (entryWriteLock) { + resize(); + } + } catch (Exception e) { + LOG.log(Level.WARNING, "Failed to resize channel pool", e); + } + } + + /** + * Resize the number of channels based on the number of outstanding RPCs. + * + *
<p>
This method is expected to be called on a fixed interval. On every invocation it will: + * + *
<ul>
+ *   <li>Get the maximum number of outstanding RPCs since last invocation
+ *   <li>Determine a valid range of number of channels to handle that many outstanding RPCs
+ *   <li>If the current number of channels falls outside of that range, add or remove at most
+ *       {@link ChannelPoolSettings#MAX_RESIZE_DELTA} to get closer to the middle of that range.
+ * </ul>
+ *
+ *
<p>
Not threadsafe, must be called under the entryWriteLock monitor + */ + @VisibleForTesting + void resize() { + List localEntries = entries.get(); + // Estimate the peak of RPCs in the last interval by summing the peak of RPCs per channel + int actualOutstandingRpcs = + localEntries.stream().mapToInt(Entry::getAndResetMaxOutstanding).sum(); + + // Number of channels if each channel operated at max capacity + int minChannels = + (int) Math.ceil(actualOutstandingRpcs / (double) settings.getMaxRpcsPerChannel()); + // Limit the threshold to absolute range + if (minChannels < settings.getMinChannelCount()) { + minChannels = settings.getMinChannelCount(); + } + + // Number of channels if each channel operated at minimum capacity + // Note: getMinRpcsPerChannel() can return 0, but division by 0 shouldn't cause a problem. + int maxChannels = + (int) Math.ceil(actualOutstandingRpcs / (double) settings.getMinRpcsPerChannel()); + // Limit the threshold to absolute range + if (maxChannels > settings.getMaxChannelCount()) { + maxChannels = settings.getMaxChannelCount(); + } + if (maxChannels < minChannels) { + maxChannels = minChannels; + } + + // If the pool were to be resized, try to aim for the middle of the bound, but limit rate of + // change. + int tentativeTarget = (maxChannels + minChannels) / 2; + int currentSize = localEntries.size(); + int delta = tentativeTarget - currentSize; + int dampenedTarget = tentativeTarget; + if (Math.abs(delta) > ChannelPoolSettings.MAX_RESIZE_DELTA) { + dampenedTarget = + currentSize + (int) Math.copySign(ChannelPoolSettings.MAX_RESIZE_DELTA, delta); + } + + // Only resize the pool when thresholds are crossed + if (localEntries.size() < minChannels) { + LOG.fine( + String.format( + "Detected throughput peak of %d, expanding channel pool size: %d -> %d.", + actualOutstandingRpcs, currentSize, dampenedTarget)); + + expand(dampenedTarget); + } else if (localEntries.size() > maxChannels) { + LOG.fine( + String.format( + "Detected throughput drop to %d, shrinking channel pool size: %d -> %d.", + actualOutstandingRpcs, currentSize, dampenedTarget)); + + shrink(dampenedTarget); + } + } + + /** Not threadsafe, must be called under the entryWriteLock monitor */ + private void shrink(int desiredSize) { + ImmutableList localEntries = entries.get(); + Preconditions.checkState( + localEntries.size() >= desiredSize, "current size is already smaller than the desired"); + + // Set the new list + entries.set(localEntries.subList(0, desiredSize)); + // clean up removed entries + List removed = localEntries.subList(desiredSize, localEntries.size()); + removed.forEach(Entry::requestShutdown); + } + + /** Not threadsafe, must be called under the entryWriteLock monitor */ + private void expand(int desiredSize) { + List localEntries = entries.get(); + Preconditions.checkState( + localEntries.size() <= desiredSize, "current size is already bigger than the desired"); + + ImmutableList.Builder newEntries = ImmutableList.builder().addAll(localEntries); + + for (int i = 0; i < desiredSize - localEntries.size(); i++) { + try { + newEntries.add(new Entry(channelFactory.createSingleChannel())); + } catch (IOException e) { + LOG.log(Level.WARNING, "Failed to add channel", e); + } + } + + entries.set(newEntries.build()); + } + + private void refreshSafely() { + try { + refresh(); + } catch (Exception e) { + LOG.log(Level.WARNING, "Failed to pre-emptively refresh channnels", e); + } + } + + /** + * Replace all of the channels in the channel pool with fresh ones. 
This is meant to mitigate the + * hourly GFE disconnects by giving clients the ability to prime the channel on reconnect. + * + *
<p>
This is done on a best effort basis. If the replacement channel fails to construct, the old + * channel will continue to be used. + */ + @InternalApi("Visible for testing") + void refresh() { + // Note: synchronization is necessary in case refresh is called concurrently: + // - thread1 fails to replace a single entry + // - thread2 succeeds replacing an entry + // - thread1 loses the race to replace the list + // - then thread2 will shut down channel that thread1 will put back into circulation (after it + // replaces the list) + synchronized (entryWriteLock) { + LOG.fine("Refreshing all channels"); + ArrayList newEntries = new ArrayList<>(entries.get()); + + for (int i = 0; i < newEntries.size(); i++) { + try { + newEntries.set(i, new Entry(channelFactory.createSingleChannel())); + } catch (IOException e) { + LOG.log(Level.WARNING, "Failed to refresh channel, leaving old channel", e); + } + } + + ImmutableList replacedEntries = entries.getAndSet(ImmutableList.copyOf(newEntries)); + + // Shutdown the channels that were cycled out. + for (Entry e : replacedEntries) { + if (!newEntries.contains(e)) { + e.requestShutdown(); + } + } + } + } + + /** + * Get and retain a Channel Entry. The returned Entry will have its rpc count incremented, + * preventing it from getting recycled. + */ + Entry getRetainedEntry(int affinity) { + // The maximum number of concurrent calls to this method for any given time span is at most 2, + // so the loop can actually be 2 times. But going for 5 times for a safety margin for potential + // code evolving + for (int i = 0; i < 5; i++) { + Entry entry = getEntry(affinity); + if (entry.retain()) { + return entry; + } + } + // It is unlikely to reach here unless the pool code evolves to increase the maximum possible + // concurrent calls to this method. If it does, this is a bug in the channel pool implementation + // the number of retries above should be greater than the number of contending maintenance + // tasks. + throw new IllegalStateException("Bug: failed to retain a channel"); + } + + /** + * Returns one of the channels managed by this pool. The pool continues to "own" the channel, and + * the caller should not shut it down. + * + * @param affinity Two calls to this method with the same affinity returns the same channel most + * of the time, if the channel pool was refreshed since the last call, a new channel will be + * returned. The reverse is not true: Two calls with different affinities might return the + * same channel. However, the implementation should attempt to spread load evenly. + */ + private Entry getEntry(int affinity) { + List localEntries = entries.get(); + + int index = Math.abs(affinity % localEntries.size()); + + return localEntries.get(index); + } + + /** Bundles a gRPC {@link ManagedChannel} with some usage accounting. */ + static class Entry { + private final ManagedChannel channel; + + /** + * The primary purpose of keeping a count for outstanding RPCs is to track when a channel is + * safe to close. In grpc, initialization & starting of rpcs is split between 2 methods: + * Channel#newCall() and ClientCall#start. gRPC already has a mechanism to safely close channels + * that have rpcs that have been started. However, it does not protect calls that have been + * created but not started. In the sequence: Channel#newCall() Channel#shutdown() + * ClientCall#Start(), gRpc will error out the call telling the caller that the channel is + * shutdown. + * + *
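<p>As an editor's sketch of that failure mode (the method names are those described above; the
+ * request/response types and variables are placeholders):
+ *
+ * <pre>{@code
+ * ClientCall<Req, Resp> call = channel.newCall(method, callOptions); // created, not yet started
+ * channel.shutdown(); // without reference counting, the pool could recycle the channel here
+ * call.start(listener, new Metadata()); // gRPC would then fail the call as "channel shutdown"
+ * }</pre>
+ *
+ *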
<p>
Hence, the increment of outstanding RPCs has to happen when the ClientCall is initialized, + * as part of Channel#newCall(), not after the ClientCall is started. The decrement of + * outstanding RPCs has to happen when the ClientCall is closed or the ClientCall failed to + * start. + */ + @VisibleForTesting final AtomicInteger outstandingRpcs = new AtomicInteger(0); + + private final AtomicInteger maxOutstanding = new AtomicInteger(); + + // Flag that the channel should be closed once all of the outstanding RPC complete. + private final AtomicBoolean shutdownRequested = new AtomicBoolean(); + // Flag that the channel has been closed. + private final AtomicBoolean shutdownInitiated = new AtomicBoolean(); + + private Entry(ManagedChannel channel) { + this.channel = channel; + } + + int getAndResetMaxOutstanding() { + return maxOutstanding.getAndSet(outstandingRpcs.get()); + } + + /** + * Try to increment the outstanding RPC count. The method will return false if the channel is + * closing and the caller should pick a different channel. If the method returned true, the + * channel has been successfully retained and it is the responsibility of the caller to release + * it. + */ + private boolean retain() { + // register desire to start RPC + int currentOutstanding = outstandingRpcs.incrementAndGet(); + + // Rough book keeping + int prevMax = maxOutstanding.get(); + if (currentOutstanding > prevMax) { + maxOutstanding.incrementAndGet(); + } + + // abort if the channel is closing + if (shutdownRequested.get()) { + release(); + return false; + } + return true; + } + + /** + * Notify the channel that the number of outstanding RPCs has decreased. If shutdown has been + * previously requested, this method will shutdown the channel if its the last outstanding RPC. + */ + private void release() { + int newCount = outstandingRpcs.decrementAndGet(); + if (newCount < 0) { + LOG.log(Level.WARNING, "Bug! Reference count is negative (" + newCount + ")!"); + } + + // Must check outstandingRpcs after shutdownRequested (in reverse order of retain()) to ensure + // mutual exclusion. + if (shutdownRequested.get() && outstandingRpcs.get() == 0) { + shutdown(); + } + } + + /** + * Request a shutdown. The actual shutdown will be delayed until there are no more outstanding + * RPCs. + */ + private void requestShutdown() { + shutdownRequested.set(true); + if (outstandingRpcs.get() == 0) { + shutdown(); + } + } + + /** Ensure that shutdown is only called once. */ + private void shutdown() { + if (shutdownInitiated.compareAndSet(false, true)) { + channel.shutdown(); + } + } + } + + /** Thin wrapper to ensure that new calls are properly reference counted. */ + private class AffinityChannel extends Channel { + private final int affinity; + + public AffinityChannel(int affinity) { + this.affinity = affinity; + } + + @Override + public String authority() { + return authority; + } + + @Override + public ClientCall newCall( + MethodDescriptor methodDescriptor, CallOptions callOptions) { + + Entry entry = getRetainedEntry(affinity); + + return new ReleasingClientCall<>(entry.channel.newCall(methodDescriptor, callOptions), entry); + } + } + + /** ClientCall wrapper that makes sure to decrement the outstanding RPC count on completion. 
*/ + static class ReleasingClientCall extends SimpleForwardingClientCall { + @Nullable private CancellationException cancellationException; + final Entry entry; + private final AtomicBoolean wasClosed = new AtomicBoolean(); + private final AtomicBoolean wasReleased = new AtomicBoolean(); + + public ReleasingClientCall(ClientCall delegate, Entry entry) { + super(delegate); + this.entry = entry; + } + + @Override + public void start(Listener responseListener, Metadata headers) { + if (cancellationException != null) { + throw new IllegalStateException("Call is already cancelled", cancellationException); + } + try { + super.start( + new SimpleForwardingClientCallListener(responseListener) { + @Override + public void onClose(Status status, Metadata trailers) { + if (!wasClosed.compareAndSet(false, true)) { + LOG.log( + Level.WARNING, + "Call is being closed more than once. Please make sure that onClose() is" + + " not being manually called."); + return; + } + try { + super.onClose(status, trailers); + } finally { + if (wasReleased.compareAndSet(false, true)) { + entry.release(); + } else { + LOG.log( + Level.WARNING, + "Entry was released before the call is closed. This may be due to an" + + " exception on start of the call."); + } + } + } + }, + headers); + } catch (Exception e) { + // In case start failed, make sure to release + if (wasReleased.compareAndSet(false, true)) { + entry.release(); + } else { + LOG.log( + Level.WARNING, + "The entry is already released. This indicates that onClose() has already been" + + " called previously"); + } + throw e; + } + } + + @Override + public void cancel(@Nullable String message, @Nullable Throwable cause) { + this.cancellationException = new CancellationException(message); + super.cancel(message, cause); + } + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPoolSettings.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPoolSettings.java new file mode 100644 index 00000000000..6788e95f485 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPoolSettings.java @@ -0,0 +1,169 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.channelpool; + +import com.google.api.core.BetaApi; +import com.google.auto.value.AutoValue; +import com.google.common.base.Preconditions; +import java.time.Duration; + +/** + * Settings to control {@link ChannelPool} behavior. + * + *

To facilitate low latency/high throughput applications, gax provides a {@link ChannelPool}. + * By splitting load across multiple gRPC channels, the client can spread load across multiple + * frontends and overcome gRPC's limit of 100 concurrent RPCs per channel. However, oversizing the + * {@link ChannelPool} can lead to underutilized channels, which in turn leads to high tail latency + * due to GFEs disconnecting idle channels. + * + *

The {@link ChannelPool} is designed to adapt to varying traffic patterns by tracking + * outstanding RPCs and resizing the pool. This class configures that behavior. In general, + * clients should aim for fewer than 50 concurrent RPCs per channel and at least 1 outstanding + * RPC per channel per minute. + * + *
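To make these sizing rules concrete, here is a hypothetical, self-contained sketch of the once-per-minute resize decision that such settings drive. The real logic lives in ChannelPool, which is not part of this file; the method and parameter names here are invented for illustration, and the constants mirror RESIZE_INTERVAL and MAX_RESIZE_DELTA defined below.

```java
import java.time.Duration;

// Hypothetical sketch of the per-interval resize decision (invented names; the
// actual implementation is in ChannelPool, not shown in this diff).
public class ResizeSketch {
  static final Duration RESIZE_INTERVAL = Duration.ofMinutes(1);
  static final int MAX_RESIZE_DELTA = 2;

  static int nextChannelCount(
      int current, int peakOutstanding,
      int minRpcsPerChannel, int maxRpcsPerChannel,
      int minChannelCount, int maxChannelCount) {
    double perChannel = (double) peakOutstanding / current;
    int target = current;
    if (perChannel > maxRpcsPerChannel) {
      // Overloaded: spread the peak over more channels.
      target = (int) Math.ceil((double) peakOutstanding / maxRpcsPerChannel);
    } else if (minRpcsPerChannel > 0 && perChannel < minRpcsPerChannel) {
      // Underutilized: shrink so every remaining channel stays warm.
      target = (int) Math.ceil((double) peakOutstanding / minRpcsPerChannel);
    }
    // Dampen swings and respect the absolute bounds.
    target = Math.min(target, current + MAX_RESIZE_DELTA);
    target = Math.max(target, current - MAX_RESIZE_DELTA);
    target = Math.min(target, maxChannelCount);
    return Math.max(target, minChannelCount);
  }

  public static void main(String[] args) {
    // 2 channels saw a peak of 120 outstanding RPCs (60 per channel) against a
    // 50-RPC-per-channel ceiling: grow to ceil(120 / 50) = 3 channels.
    System.out.println(nextChannelCount(2, 120, 5, 50, 2, 20)); // 3
  }
}
```

The example shows the pool growing well before gRPC's hard limit of 100 concurrent RPCs per channel is reached.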

The settings in this class will be applied every minute. + */ +@BetaApi("surface for channel pool sizing is not yet stable") +@AutoValue +public abstract class ChannelPoolSettings { + /** How often to check and possibly resize the {@link ChannelPool}. */ + static final Duration RESIZE_INTERVAL = Duration.ofMinutes(1); + /** The maximum number of channels that can be added or removed at a time. */ + static final int MAX_RESIZE_DELTA = 2; + + /** + * Threshold to start scaling down the channel pool. + * + *

When the average of the maximum number of outstanding RPCs in a single minute drops below + * this threshold, channels will be removed from the pool. + */ + public abstract int getMinRpcsPerChannel(); + + /** + * Threshold to start scaling up the channel pool. + * + *

When the average of the maximum number of outstanding RPCs in a single minute surpasses this + * threshold, channels will be added to the pool. For Google services, gRPC channels will start + * locally queuing RPCs when there are 100 concurrent RPCs. + */ + public abstract int getMaxRpcsPerChannel(); + + /** + * The absolute minimum size of the channel pool. + * + *

Regardless of the current throughput, the number of channels will not drop below this limit. + */ + public abstract int getMinChannelCount(); + + /** + * The absolute maximum size of the channel pool. + * + *

Regardless of the current throughput, the number of channels will not exceed this limit. + */ + public abstract int getMaxChannelCount(); + + /** + * The initial size of the channel pool. + * + *

During client construction, the client opens this many connections. This will be scaled up or + * down in the next period. + */ + public abstract int getInitialChannelCount(); + + /** + * Whether all of the channels should be replaced on an hourly basis. + * + *

The GFE will forcibly disconnect active channels after an hour. To minimize the cost of + * reconnects, this will create a new channel asynchronously, prime it and then swap it with an + * old channel. + */ + public abstract boolean isPreemptiveRefreshEnabled(); + + /** Helper to check if the {@link ChannelPool} implementation can skip dynamic size logic. */ + boolean isStaticSize() { + // When the range is restricted to a single size + if (getMinChannelCount() == getMaxChannelCount()) { + return true; + } + // When the scaling thresholds are not set + if (getMinRpcsPerChannel() == 0 && getMaxRpcsPerChannel() == Integer.MAX_VALUE) { + return true; + } + + return false; + } + + public abstract Builder toBuilder(); + + public static ChannelPoolSettings staticallySized(int size) { + return builder() + .setInitialChannelCount(size) + .setMinRpcsPerChannel(0) + .setMaxRpcsPerChannel(Integer.MAX_VALUE) + .setMinChannelCount(size) + .setMaxChannelCount(size) + .build(); + } + + public static Builder builder() { + return new AutoValue_ChannelPoolSettings.Builder() + .setInitialChannelCount(1) + .setMinChannelCount(1) + .setMaxChannelCount(200) + .setMinRpcsPerChannel(0) + .setMaxRpcsPerChannel(Integer.MAX_VALUE) + .setPreemptiveRefreshEnabled(false); + } + + @AutoValue.Builder + public abstract static class Builder { + public abstract Builder setMinRpcsPerChannel(int count); + + public abstract Builder setMaxRpcsPerChannel(int count); + + public abstract Builder setMinChannelCount(int count); + + public abstract Builder setMaxChannelCount(int count); + + public abstract Builder setInitialChannelCount(int count); + + public abstract Builder setPreemptiveRefreshEnabled(boolean enabled); + + abstract ChannelPoolSettings autoBuild(); + + public ChannelPoolSettings build() { + ChannelPoolSettings s = autoBuild(); + + Preconditions.checkState( + s.getMinRpcsPerChannel() <= s.getMaxRpcsPerChannel(), "rpcsPerChannel range is invalid"); + Preconditions.checkState( + s.getMinChannelCount() > 0, "Minimum channel count must be at least 1"); + Preconditions.checkState( + s.getMinChannelCount() <= s.getMaxChannelCount(), "absolute channel range is invalid"); + Preconditions.checkState( + s.getMinChannelCount() <= s.getInitialChannelCount(), + "initial channel count must be at least minChannelCount"); + Preconditions.checkState( + s.getInitialChannelCount() <= s.getMaxChannelCount(), + "initial channel count must be at most maxChannelCount"); + Preconditions.checkState( + s.getInitialChannelCount() > 0, "Initial channel count must be greater than 0"); + return s; + } + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java new file mode 100644 index 00000000000..a2b3dd7fced --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java @@ -0,0 +1,387 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.channelpool; + +import com.google.bigtable.v2.BigtableGrpc; +import com.google.bigtable.v2.PingAndWarmRequest; +import com.google.bigtable.v2.PingAndWarmResponse; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels.PrimingKey; +import com.google.cloud.bigtable.examples.proxy.metrics.Metrics; +import com.google.cloud.bigtable.examples.proxy.metrics.Tracer; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; +import io.grpc.CallCredentials; +import io.grpc.CallOptions; +import io.grpc.ClientCall; +import io.grpc.ClientCall.Listener; +import io.grpc.ConnectivityState; +import io.grpc.Deadline; +import io.grpc.ExperimentalApi; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Metadata; +import io.grpc.Metadata.Key; +import io.grpc.MethodDescriptor; +import io.grpc.Status; +import java.time.Duration; +import java.util.List; +import java.util.Optional; +import java.util.Random; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Decorator for a Bigtable data plane connection to add channel warming via PingAndWarm. Channel + * warming will happen on creation and then every 3 minutes (with jitter). + */ +public class DataChannel extends ManagedChannel { + private static final Logger LOGGER = LoggerFactory.getLogger(DataChannel.class); + + private static final Metadata.Key GFE_DEBUG_REQ_HEADER = + Key.of("X-Return-Encrypted-Headers", Metadata.ASCII_STRING_MARSHALLER); + private static final Metadata.Key GFE_DEBUG_RESP_HEADER = + Key.of("X-Encrypted-Debug-Headers", Metadata.ASCII_STRING_MARSHALLER); + + private static final Duration WARM_PERIOD = Duration.ofMinutes(3); + private static final Duration MAX_JITTER = Duration.ofSeconds(10); + + private final Random random = new Random(); + private final ManagedChannel inner; + private final Metrics metrics; + private final ResourceCollector resourceCollector; + private final CallCredentials callCredentials; + private final ScheduledExecutorService warmingExecutor; + private volatile ScheduledFuture antiIdleTask; + + private final AtomicBoolean closed = new AtomicBoolean(); + private final Object scheduleLock = new Object(); + + public DataChannel( + ResourceCollector resourceCollector, + String userAgent, + CallCredentials callCredentials, + String endpoint, + int port, + ScheduledExecutorService warmingExecutor, + Metrics metrics) { + this.resourceCollector = resourceCollector; + + this.callCredentials = callCredentials; + inner = + ManagedChannelBuilder.forAddress(endpoint, port) + .userAgent(userAgent) + .disableRetry() + .maxInboundMessageSize(256 * 1024 * 1024) + .keepAliveTime(30, TimeUnit.SECONDS) + .keepAliveTimeout(10, TimeUnit.SECONDS) + .build(); + + this.warmingExecutor = warmingExecutor; + this.metrics = metrics; + + new StateTransitionWatcher().run(); + + try { + warm(); + } catch (RuntimeException e) { + try { + inner.shutdown(); + } catch (RuntimeException e2) { + e.addSuppressed(e2); + } + throw e; + } + + 
antiIdleTask = + warmingExecutor.schedule(this::warmTask, nextWarmup().toMillis(), TimeUnit.MILLISECONDS); + metrics.updateChannelCount(1); + } + + private Duration nextWarmup() { + return WARM_PERIOD.minus( + Duration.ofMillis((long) (MAX_JITTER.toMillis() * random.nextDouble()))); + } + + private void warmTask() { + try { + warm(); + } catch (RuntimeException e) { + LOGGER.warn("anti idle ping failed, forcing reconnect", e); + inner.enterIdle(); + } finally { + synchronized (scheduleLock) { + if (!closed.get()) { + antiIdleTask = + warmingExecutor.schedule( + this::warmTask, nextWarmup().toMillis(), TimeUnit.MILLISECONDS); + } + } + } + } + + private void warm() { + List<PrimingKey> primingKeys = resourceCollector.getPrimingKeys(); + if (primingKeys.isEmpty()) { + return; + } + + LOGGER.debug("Warming channel {} with: {}", inner, primingKeys); + + List<ListenableFuture<PingAndWarmResponse>> futures = + primingKeys.stream().map(this::sendPingAndWarm).collect(Collectors.toList()); + + int successCount = 0; + int failures = 0; + for (ListenableFuture<PingAndWarmResponse> future : futures) { + PrimingKey request = primingKeys.get(successCount + failures); + try { + future.get(); + successCount++; + } catch (ExecutionException e) { + // All permanent errors are ignored and treated as a success. + // The priming request that generated the error will be dropped. + if (e.getCause() instanceof PingAndWarmException) { + PingAndWarmException se = (PingAndWarmException) e.getCause(); + + switch (se.getStatus().getCode()) { + case INTERNAL: + case PERMISSION_DENIED: + case NOT_FOUND: + case UNAUTHENTICATED: + successCount++; + // drop the priming request for permanent errors + resourceCollector.evict(request); + continue; + default: + // noop + } + LOGGER.warn( + "Failed to prime channel with request: {}, status: {}, debug response headers: {}", + request, + se.getStatus(), + Optional.ofNullable(se.getDebugHeaders()).orElse("")); + } else { + LOGGER.warn("Unexpected failure priming channel with request: {}", request, e.getCause()); + } + + failures++; + } catch (InterruptedException e) { + throw new RuntimeException("Interrupted while priming channel with request: " + request, e); + } + } + if (successCount < failures) { + throw new RuntimeException("Most of the priming requests failed"); + } + } + + private ListenableFuture<PingAndWarmResponse> sendPingAndWarm(PrimingKey primingKey) { + Metadata metadata = primingKey.composeMetadata(); + metadata.put(GFE_DEBUG_REQ_HEADER, "gfe_response_only"); + PingAndWarmRequest request = primingKey.composeProto(); + request = request.toBuilder().setName(request.getName()).build(); + + CallLabels callLabels = CallLabels.create(BigtableGrpc.getPingAndWarmMethod(), metadata); + Tracer tracer = new Tracer(metrics, callLabels); + + CallOptions callOptions = + CallOptions.DEFAULT + .withCallCredentials(callCredentials) + .withDeadline(Deadline.after(1, TimeUnit.MINUTES)); + callOptions = tracer.injectIntoCallOptions(callOptions); + + ClientCall<PingAndWarmRequest, PingAndWarmResponse> call = + inner.newCall(BigtableGrpc.getPingAndWarmMethod(), callOptions); + + SettableFuture<PingAndWarmResponse> f = SettableFuture.create(); + call.start( + new Listener<>() { + String debugHeaders = null; + + @Override + public void onMessage(PingAndWarmResponse response) { + if (!f.set(response)) { + // TODO: set a metric + LOGGER.warn("PingAndWarm returned multiple responses"); + } + } + + @Override + public void onHeaders(Metadata headers) { + debugHeaders = headers.get(GFE_DEBUG_RESP_HEADER); + } + + @Override + public void onClose(Status status, Metadata trailers) { + tracer.onCallFinished(status); + + if (status.isOk()) { +
// The server closed OK without a message. If a response was already + // delivered, f is already set and this setException() is a no-op. + f.setException( + new PingAndWarmException( + "PingAndWarm was missing a response", debugHeaders, trailers, status)); + } else { + f.setException( + new PingAndWarmException("PingAndWarm failed", debugHeaders, trailers, status)); + } + } + }, + metadata); + call.sendMessage(request); + call.halfClose(); + call.request(Integer.MAX_VALUE); + + return f; + } + + static class PingAndWarmException extends RuntimeException { + + private final String debugHeaders; + private final Metadata trailers; + private final Status status; + + public PingAndWarmException( + String message, String debugHeaders, Metadata trailers, Status status) { + super(String.format("%s, status: %s", message, status)); + this.debugHeaders = debugHeaders; + this.trailers = trailers; + this.status = status; + } + + public String getDebugHeaders() { + return debugHeaders; + } + + public Metadata getTrailers() { + return trailers; + } + + public Status getStatus() { + return status; + } + } + + @Override + public ManagedChannel shutdown() { + final boolean closing; + + synchronized (scheduleLock) { + closing = closed.compareAndSet(false, true); + antiIdleTask.cancel(true); + } + if (closing) { + metrics.updateChannelCount(-1); + } + + return inner.shutdown(); + } + + @Override + public boolean isShutdown() { + return inner.isShutdown(); + } + + @Override + public boolean isTerminated() { + return inner.isTerminated(); + } + + @Override + public ManagedChannel shutdownNow() { + final boolean closing; + + synchronized (scheduleLock) { + closing = closed.compareAndSet(false, true); + antiIdleTask.cancel(true); + } + + if (closing) { + metrics.updateChannelCount(-1); + } + + return inner.shutdownNow(); + } + + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { + return inner.awaitTermination(timeout, unit); + } + + @ExperimentalApi("https://github.com/grpc/grpc-java/issues/4359") + @Override + public ConnectivityState getState(boolean requestConnection) { + return inner.getState(requestConnection); + } + + @ExperimentalApi("https://github.com/grpc/grpc-java/issues/4359") + @Override + public void notifyWhenStateChanged(ConnectivityState source, Runnable callback) { + inner.notifyWhenStateChanged(source, callback); + } + + @ExperimentalApi("https://github.com/grpc/grpc-java/issues/4056") + @Override + public void resetConnectBackoff() { + inner.resetConnectBackoff(); + } + + @ExperimentalApi("https://github.com/grpc/grpc-java/issues/4056") + @Override + public void enterIdle() { + inner.enterIdle(); + } + + @Override + public <ReqT, RespT> ClientCall<ReqT, RespT> newCall( + MethodDescriptor<ReqT, RespT> methodDescriptor, CallOptions callOptions) { + Tracer tracer = + Optional.ofNullable(Tracer.extractTracerFromCallOptions(callOptions)) + .orElseThrow( + () -> + new IllegalStateException( + "DataChannel failed to extract Tracer from CallOptions")); + resourceCollector.collect(tracer.getCallLabels()); + + return inner.newCall(methodDescriptor, callOptions); + } + + @Override + public String authority() { + return inner.authority(); + } + + class StateTransitionWatcher implements Runnable { + private ConnectivityState prevState = null; + + @Override + public void run() { + if (closed.get()) { + return; + } + + ConnectivityState newState = inner.getState(false); + metrics.recordChannelStateChange(prevState, newState); + prevState = newState; + inner.notifyWhenStateChanged(prevState, this); + } + } +}
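One subtlety in warm() above is worth a standalone illustration: permanent per-key errors (INTERNAL, PERMISSION_DENIED, NOT_FOUND, UNAUTHENTICATED) are counted as successes and merely evict the offending priming key, while the warm-up as a whole only fails (forcing a reconnect via enterIdle()) when failures outnumber successes. A minimal model of that accounting, with the Outcome enum and class name invented for illustration:

```java
import java.util.List;

// Standalone model of warm()'s success/failure accounting (illustrative only).
public class WarmAccounting {
  enum Outcome { OK, PERMANENT_ERROR, TRANSIENT_ERROR }

  static void warm(List<Outcome> outcomes) {
    int successCount = 0;
    int failures = 0;
    for (Outcome o : outcomes) {
      if (o == Outcome.TRANSIENT_ERROR) {
        failures++;
      } else {
        // OK, or a permanent error whose priming key would simply be evicted.
        successCount++;
      }
    }
    if (successCount < failures) {
      throw new RuntimeException("Most of the priming requests failed");
    }
  }

  public static void main(String[] args) {
    warm(List.of(Outcome.OK, Outcome.PERMANENT_ERROR)); // passes
    try {
      warm(List.of(Outcome.OK, Outcome.TRANSIENT_ERROR, Outcome.TRANSIENT_ERROR));
    } catch (RuntimeException e) {
      System.out.println(e.getMessage()); // Most of the priming requests failed
    }
  }
}
```

diff --git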
a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java new file mode 100644 index 00000000000..d36fb630ef3 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java @@ -0,0 +1,51 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.channelpool; + +import com.google.cloud.bigtable.examples.proxy.core.CallLabels; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels.ParsingException; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels.PrimingKey; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.collect.ImmutableList; +import java.time.Duration; +import java.util.List; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ResourceCollector { + private static final Logger LOG = LoggerFactory.getLogger(ResourceCollector.class); + + private final Cache primingKeys = + CacheBuilder.newBuilder().expireAfterWrite(Duration.ofHours(1)).maximumSize(100).build(); + + public void collect(CallLabels labels) { + try { + PrimingKey.from(labels).ifPresent(k -> primingKeys.put(k, true)); + } catch (ParsingException e) { + LOG.warn("Failed to collect priming request for {}", labels, e); + } + } + + public List getPrimingKeys() { + return ImmutableList.copyOf(primingKeys.asMap().keySet()); + } + + public void evict(PrimingKey request) { + primingKeys.invalidate(request); + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Endpoint.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Endpoint.java new file mode 100644 index 00000000000..4319cdbfcfe --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Endpoint.java @@ -0,0 +1,49 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.commands; + +import com.google.auto.value.AutoValue; +import com.google.common.base.Preconditions; +import picocli.CommandLine.ITypeConverter; + +@AutoValue +abstract class Endpoint { + abstract String getName(); + + abstract int getPort(); + + @Override + public String toString() { + return String.format("%s:%d", getName(), getPort()); + } + + static Endpoint create(String name, int port) { + return new AutoValue_Endpoint(name, port); + } + + static class ArgConverter implements ITypeConverter<Endpoint> { + @Override + public Endpoint convert(String s) throws Exception { + int i = s.lastIndexOf(":"); + Preconditions.checkArgument(i > 0, "endpoint must be of the form `name:port`"); + + String name = s.substring(0, i); + int port = Integer.parseInt(s.substring(i + 1)); + return Endpoint.create(name, port); + } + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java new file mode 100644 index 00000000000..797c861632d --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java @@ -0,0 +1,178 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.google.cloud.bigtable.examples.proxy.commands; + +import com.google.auth.Credentials; +import com.google.auth.oauth2.GoogleCredentials; +import com.google.bigtable.admin.v2.BigtableInstanceAdminGrpc; +import com.google.bigtable.admin.v2.BigtableTableAdminGrpc; +import com.google.bigtable.v2.BigtableGrpc; +import com.google.cloud.bigtable.examples.proxy.channelpool.ChannelPool; +import com.google.cloud.bigtable.examples.proxy.channelpool.ChannelPoolSettings; +import com.google.cloud.bigtable.examples.proxy.channelpool.DataChannel; +import com.google.cloud.bigtable.examples.proxy.channelpool.ResourceCollector; +import com.google.cloud.bigtable.examples.proxy.core.ProxyHandler; +import com.google.cloud.bigtable.examples.proxy.core.Registry; +import com.google.cloud.bigtable.examples.proxy.metrics.InstrumentedCallCredentials; +import com.google.cloud.bigtable.examples.proxy.metrics.Metrics; +import com.google.cloud.bigtable.examples.proxy.metrics.MetricsImpl; +import com.google.common.collect.ImmutableMap; +import com.google.longrunning.OperationsGrpc; +import io.grpc.CallCredentials; +import io.grpc.InsecureServerCredentials; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Server; +import io.grpc.ServerCallHandler; +import io.grpc.auth.MoreCallCredentials; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import picocli.CommandLine.Command; +import picocli.CommandLine.Help.Visibility; +import picocli.CommandLine.Option; + +@Command(name = "serve", description = "Start the proxy server") +public class Serve implements Callable<Void> { + private static final Logger LOGGER = LoggerFactory.getLogger(Serve.class); + + @Option( + names = "--listen-port", + required = true, + description = "Local port to accept connections on") + int listenPort; + + @Option(names = "--useragent", showDefaultValue = Visibility.ALWAYS) + String userAgent = "bigtable-java-proxy"; + + @Option( + names = "--bigtable-data-endpoint", + converter = Endpoint.ArgConverter.class, + showDefaultValue = Visibility.ALWAYS) + Endpoint dataEndpoint = Endpoint.create("bigtable.googleapis.com", 443); + + @Option( + names = "--bigtable-admin-endpoint", + converter = Endpoint.ArgConverter.class, + showDefaultValue = Visibility.ALWAYS) + Endpoint adminEndpoint = Endpoint.create("bigtableadmin.googleapis.com", 443); + + @Option( + names = "--metrics-project-id", + required = true, + description = "The project id where metrics should be exported") + String metricsProjectId = null; + + ManagedChannel adminChannel = null; + ManagedChannel dataChannel = null; + Credentials credentials = null; + Server server; + Metrics metrics; + private ScheduledExecutorService refreshExecutor; + + @Override + public Void call() throws Exception { + start(); + server.awaitTermination(); + cleanup(); + return null; + } + + void start() throws IOException { + if (credentials == null) { + credentials = GoogleCredentials.getApplicationDefault(); + } + CallCredentials callCredentials = + new InstrumentedCallCredentials(MoreCallCredentials.from(credentials)); + + if (metrics == null) { + // InstrumentedCallCredentials expects to only be called when a Tracer is available in the + // CallOptions.
This is only true for DataChannel's PingAndWarm priming calls and for calls proxied by + // ProxyHandler. MetricsImpl does not do this, so it must get undecorated credentials. + metrics = new MetricsImpl(credentials, metricsProjectId); + } + + ResourceCollector resourceCollector = new ResourceCollector(); + refreshExecutor = Executors.newSingleThreadScheduledExecutor(); + + ChannelPoolSettings poolSettings = + ChannelPoolSettings.builder() + .setInitialChannelCount(10) + .setMinChannelCount(2) + .setMaxChannelCount(20) + .setMinRpcsPerChannel(5) + .setMaxRpcsPerChannel(50) + .setPreemptiveRefreshEnabled(true) + .build(); + + if (dataChannel == null) { + dataChannel = + ChannelPool.create( + poolSettings, + () -> + new DataChannel( + resourceCollector, + userAgent, + callCredentials, + dataEndpoint.getName(), + dataEndpoint.getPort(), + refreshExecutor, + metrics)); + } + + if (adminChannel == null) { + adminChannel = + ManagedChannelBuilder.forAddress(adminEndpoint.getName(), adminEndpoint.getPort()) + .userAgent(userAgent) + .disableRetry() + .build(); + } + + Map<String, ServerCallHandler<byte[], byte[]>> serviceMap = + ImmutableMap.of( + BigtableGrpc.SERVICE_NAME, + new ProxyHandler<>(metrics, dataChannel, callCredentials), + BigtableInstanceAdminGrpc.SERVICE_NAME, + new ProxyHandler<>(metrics, adminChannel, callCredentials), + BigtableTableAdminGrpc.SERVICE_NAME, + new ProxyHandler<>(metrics, adminChannel, callCredentials), + OperationsGrpc.SERVICE_NAME, + new ProxyHandler<>(metrics, adminChannel, callCredentials)); + + server = + NettyServerBuilder.forAddress( + new InetSocketAddress("localhost", listenPort), InsecureServerCredentials.create()) + .fallbackHandlerRegistry(new Registry(serviceMap)) + .maxInboundMessageSize(256 * 1024 * 1024) + .build(); + + server.start(); + LOGGER.info("Listening on port {}", server.getPort()); + } + + void cleanup() throws InterruptedException { + refreshExecutor.shutdown(); + dataChannel.shutdown(); + adminChannel.shutdown(); + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java new file mode 100644 index 00000000000..669385e4421 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java @@ -0,0 +1,229 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.google.cloud.bigtable.examples.proxy.commands; + +import com.google.auth.Credentials; +import com.google.auth.oauth2.GoogleCredentials; +import com.google.bigtable.v2.BigtableGrpc; +import com.google.bigtable.v2.BigtableGrpc.BigtableBlockingStub; +import com.google.bigtable.v2.CheckAndMutateRowRequest; +import com.google.bigtable.v2.CheckAndMutateRowResponse; +import com.google.bigtable.v2.Mutation; +import com.google.bigtable.v2.Mutation.DeleteFromRow; +import com.google.bigtable.v2.ReadRowsRequest; +import com.google.bigtable.v2.ReadRowsResponse; +import com.google.bigtable.v2.RowFilter; +import com.google.bigtable.v2.RowFilter.Chain; +import com.google.bigtable.v2.RowSet; +import com.google.cloud.bigtable.examples.proxy.metrics.MetricsImpl; +import com.google.cloud.opentelemetry.metric.GoogleCloudMetricExporter; +import com.google.cloud.opentelemetry.metric.MetricConfiguration; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.ByteString; +import io.grpc.CallCredentials; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.Deadline; +import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Metadata; +import io.grpc.Metadata.Key; +import io.grpc.MethodDescriptor; +import io.grpc.StatusRuntimeException; +import io.grpc.auth.MoreCallCredentials; +import io.opentelemetry.contrib.gcp.resource.GCPResourceProvider; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.resources.Resource; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.util.Iterator; +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; +import picocli.CommandLine.Command; +import picocli.CommandLine.Help.Visibility; +import picocli.CommandLine.Option; + +@Command(name = "verify", description = "Verify environment is properly set up") +public class Verify implements Callable { + @Option( + names = "--bigtable-project-id", + required = true, + description = "Project that contains a Bigtable instance to use for connectivity test") + String bigtableProjectId; + + @Option( + names = "--bigtable-instance-id", + required = true, + description = "Bigtable instance to use for connectivity test") + String bigtableInstanceId; + + @Option( + names = "--bigtable-table-id", + required = true, + description = "Bigtable table to use for connectivity test") + String bigtableTableId; + + @Option( + names = "--metrics-project-id", + required = true, + description = "The project id where metrics should be exported") + String metricsProjectId = null; + + @Option( + names = "--bigtable-data-endpoint", + converter = Endpoint.ArgConverter.class, + showDefaultValue = Visibility.ALWAYS) + Endpoint dataEndpoint = Endpoint.create("bigtable.googleapis.com", 443); + + Credentials credentials = null; + + @Override + public Void call() throws Exception { + if (credentials == null) { + credentials = GoogleCredentials.getApplicationDefault(); + } + checkBigtable( + MoreCallCredentials.from(credentials), + String.format( + "projects/%s/instances/%s/tables/%s", + bigtableProjectId, bigtableInstanceId, bigtableTableId)); + + checkMetrics(credentials); + return null; + } + + private void checkBigtable(CallCredentials callCredentials, String 
tableName) { + ManagedChannel channel = + ManagedChannelBuilder.forAddress(dataEndpoint.getName(), dataEndpoint.getPort()).build(); + + try { + Metadata md = new Metadata(); + + md.put( + Key.of("x-goog-request-params", Metadata.ASCII_STRING_MARSHALLER), + String.format( + "table_name=%s&app_profile_id=%s", + URLEncoder.encode(tableName, StandardCharsets.UTF_8), "")); + + BigtableBlockingStub stub = + BigtableGrpc.newBlockingStub(channel) + .withCallCredentials(callCredentials) + .withInterceptors(new MetadataInterceptor(md)); + + ReadRowsRequest readRequest = + ReadRowsRequest.newBuilder() + .setTableName( + String.format( + "projects/%s/instances/%s/tables/%s", + bigtableProjectId, bigtableInstanceId, bigtableTableId)) + .setRowsLimit(1) + .setRows( + RowSet.newBuilder().addRowKeys(ByteString.copyFromUtf8("some-nonexistent-row"))) + .setFilter( + RowFilter.newBuilder() + .setChain( + Chain.newBuilder() + .addFilters(RowFilter.newBuilder().setCellsPerRowLimitFilter(1)) + .addFilters( + RowFilter.newBuilder().setStripValueTransformer(true).build()))) + .build(); + + Iterator<ReadRowsResponse> readIt = + stub.withDeadline(Deadline.after(1, TimeUnit.SECONDS)).readRows(readRequest); + + try { + while (readIt.hasNext()) { + readIt.next(); + } + System.out.println("Bigtable Read: OK"); + } catch (StatusRuntimeException e) { + System.out.println("Bigtable Read: Failed - " + e.getStatus()); + return; + } + + CheckAndMutateRowRequest rwReq = + CheckAndMutateRowRequest.newBuilder() + .setTableName(tableName) + .setRowKey(ByteString.copyFromUtf8("some-nonexistent-row")) + .setPredicateFilter(RowFilter.newBuilder().setBlockAllFilter(true)) + .addTrueMutations( + Mutation.newBuilder().setDeleteFromRow(DeleteFromRow.getDefaultInstance())) + .build(); + + try { + CheckAndMutateRowResponse ignored = stub.checkAndMutateRow(rwReq); + System.out.println("Bigtable Read/Write: OK"); + } catch (StatusRuntimeException e) { + System.out.println("Bigtable Read/Write: Failed - " + e.getStatus()); + return; + } + } finally { + channel.shutdown(); + } + } + + void checkMetrics(Credentials creds) { + MetricConfiguration config = + MetricConfiguration.builder() + .setCredentials(creds) + .setProjectId(metricsProjectId) + .setInstrumentationLibraryLabelsEnabled(false) + .build(); + + GCPResourceProvider resourceProvider = new GCPResourceProvider(); + Resource resource = Resource.create(resourceProvider.getAttributes()); + ImmutableList<MetricData> metricData = + ImmutableList.of(MetricsImpl.generateTestPresenceMeasurement(resource)); + + try (MetricExporter exporter = GoogleCloudMetricExporter.createWithConfiguration(config)) { + CompletableResultCode result = exporter.export(metricData); + result.join(1, TimeUnit.MINUTES); + + System.out.println("Metrics resource: " + resource); + if (result.isSuccess()) { + System.out.println("Metrics write: OK"); + } else { + System.out.println("Metrics write: FAILED: " + result.getFailureThrowable().getMessage()); + } + } + } + + private static class MetadataInterceptor implements ClientInterceptor { + private final Metadata metadata; + + private MetadataInterceptor(Metadata metadata) { + this.metadata = metadata; + } + + @Override + public <ReqT, RespT> ClientCall<ReqT, RespT> interceptCall( + MethodDescriptor<ReqT, RespT> method, CallOptions callOptions, Channel next) { + return new SimpleForwardingClientCall<>(next.newCall(method, callOptions)) { + @Override + public void start(Listener<RespT> responseListener, Metadata headers) { + headers.merge(metadata); + super.start(responseListener, headers); + } + }; + } + } +}
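The x-goog-request-params header composed in checkBigtable() above is the same header that CallLabels (below) parses on the proxy's receiving side. Here is a small self-contained sketch of that round trip under the same encoding rules ('&'-separated pairs with URL-encoded values); the class and helper names are invented for illustration:

```java
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative round trip for the x-goog-request-params header format.
public class RequestParamsDemo {
  static String encode(Map<String, String> params) {
    StringBuilder sb = new StringBuilder();
    for (Map.Entry<String, String> e : params.entrySet()) {
      if (sb.length() > 0) {
        sb.append('&');
      }
      sb.append(e.getKey())
          .append('=')
          .append(URLEncoder.encode(e.getValue(), StandardCharsets.UTF_8));
    }
    return sb.toString();
  }

  static Map<String, String> decode(String header) {
    Map<String, String> out = new LinkedHashMap<>();
    for (String pair : header.split("&")) {
      String[] kv = pair.split("=", 2);
      // Like CallLabels, skip malformed or empty pairs instead of failing.
      if (kv.length == 2 && !kv[0].isEmpty() && !kv[1].isEmpty()) {
        out.put(kv[0], URLDecoder.decode(kv[1], StandardCharsets.UTF_8));
      }
    }
    return out;
  }

  public static void main(String[] args) {
    Map<String, String> params = new LinkedHashMap<>();
    params.put("table_name", "projects/p/instances/i/tables/t");
    params.put("app_profile_id", "default");
    String header = encode(params);
    System.out.println(header);         // table_name=projects%2Fp%2F...&app_profile_id=default
    System.out.println(decode(header)); // {table_name=projects/p/instances/i/tables/t, app_profile_id=default}
  }
}
```

diff --git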
a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/package-info.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/package-info.java new file mode 100644 index 00000000000..e3b143a9fe9 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/package-info.java @@ -0,0 +1,18 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Contains all the command implementations for the proxy server. */ +package com.google.cloud.bigtable.examples.proxy.commands; diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ByteMarshaller.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ByteMarshaller.java new file mode 100644 index 00000000000..e8d3611045f --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ByteMarshaller.java @@ -0,0 +1,40 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.core; + +import com.google.common.io.ByteStreams; +import io.grpc.MethodDescriptor; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; + +class ByteMarshaller implements MethodDescriptor.Marshaller { + + @Override + public byte[] parse(InputStream stream) { + try { + return ByteStreams.toByteArray(stream); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + } + + @Override + public InputStream stream(byte[] value) { + return new ByteArrayInputStream(value); + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java new file mode 100644 index 00000000000..cdd3c6f5e38 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java @@ -0,0 +1,291 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.core; + +import com.google.auto.value.AutoValue; +import com.google.bigtable.v2.PingAndWarmRequest; +import com.google.bigtable.v2.PingAndWarmRequest.Builder; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableMap; +import io.grpc.Metadata; +import io.grpc.Metadata.Key; +import io.grpc.MethodDescriptor; +import java.net.URLDecoder; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Optional; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A value class to encapsulate call identity. + * + *

This class extracts relevant information from request headers and makes it accessible to + * metrics and the upstream client. The primary headers consulted are: + * + *

    + *
  • {@code x-goog-request-params} - contains the resource and app profile id + *
  • {@code google-cloud-resource-prefix} - the previous version of {@code + * x-goog-request-params}, used as a fallback + *
  • {@code x-goog-cbt-cookie-routing} - an opaque blob used to route RPCs on the server side + *
  • {@code bigtable-features} - the client's available features + *
  • {@code x-goog-api-client} - contains the client info of the downstream client + *
+ */ +@AutoValue +public abstract class CallLabels { + private static final Logger LOG = LoggerFactory.getLogger(CallLabels.class); + + // All RLS headers + static final Key<String> REQUEST_PARAMS = + Key.of("x-goog-request-params", Metadata.ASCII_STRING_MARSHALLER); + static final Key<String> LEGACY_RESOURCE_PREFIX = + Key.of("google-cloud-resource-prefix", Metadata.ASCII_STRING_MARSHALLER); + static final Key<String> ROUTING_COOKIE = + Key.of("x-goog-cbt-cookie-routing", Metadata.ASCII_STRING_MARSHALLER); + static final Key<String> FEATURE_FLAGS = + Key.of("bigtable-features", Metadata.ASCII_STRING_MARSHALLER); + static final Key<String> API_CLIENT = + Key.of("x-goog-api-client", Metadata.ASCII_STRING_MARSHALLER); + + enum ResourceNameType { + Parent("parent", 0), + Name("name", 1), + TableName("table_name", 2); + + private final String name; + private final int priority; + + ResourceNameType(String name, int priority) { + this.name = name; + this.priority = priority; + } + } + + @AutoValue + abstract static class ResourceName { + + abstract ResourceNameType getType(); + + abstract String getValue(); + + static ResourceName create(ResourceNameType type, String value) { + return new AutoValue_CallLabels_ResourceName(type, value); + } + } + + public abstract String getMethodName(); + + abstract Optional<String> getRequestParams(); + + abstract Optional<String> getLegacyResourcePrefix(); + + abstract Optional<String> getRoutingCookie(); + + abstract Optional<String> getEncodedFeatures(); + + public abstract Optional<String> getApiClient(); + + public static CallLabels create(MethodDescriptor<?, ?> method, Metadata headers) { + Optional<String> apiClient = Optional.ofNullable(headers.get(API_CLIENT)); + + Optional<String> requestParams = Optional.ofNullable(headers.get(REQUEST_PARAMS)); + Optional<String> legacyResourcePrefix = + Optional.ofNullable(headers.get(LEGACY_RESOURCE_PREFIX)); + Optional<String> routingCookie = Optional.ofNullable(headers.get(ROUTING_COOKIE)); + Optional<String> encodedFeatures = Optional.ofNullable(headers.get(FEATURE_FLAGS)); + + return create( + method, requestParams, legacyResourcePrefix, routingCookie, encodedFeatures, apiClient); + } + + @SuppressWarnings("OptionalUsedAsFieldOrParameterType") + @VisibleForTesting + public static CallLabels create( + MethodDescriptor<?, ?> method, + Optional<String> requestParams, + Optional<String> legacyResourcePrefix, + Optional<String> routingCookie, + Optional<String> encodedFeatures, + Optional<String> apiClient) { + + return new AutoValue_CallLabels( + method.getFullMethodName(), + requestParams, + legacyResourcePrefix, + routingCookie, + encodedFeatures, + apiClient); + } + + /** + * Extracts the resource name. This will use {@link #getRequestParams()} if present; otherwise it + * falls back on {@link #getLegacyResourcePrefix()}. If neither is present, {@link + * Optional#empty()} is returned. If there was an issue extracting, a {@link ParsingException} is + * thrown. In the primary case, the value will be URL-decoded.
*/ + public Optional<String> extractResourceName() throws ParsingException { + if (getRequestParams().isEmpty()) { + return getLegacyResourcePrefix(); + } + + String requestParams = getRequestParams().orElse(""); + String[] encodedKvPairs = requestParams.split("&"); + Optional<ResourceName> resourceName = Optional.empty(); + + for (String encodedKv : encodedKvPairs) { + String[] split = encodedKv.split("=", 2); + if (split.length != 2) { + continue; + } + String encodedKey = split[0]; + String encodedValue = split[1]; + if (encodedKey.isEmpty() || encodedValue.isEmpty()) { + continue; + } + + Optional<ResourceNameType> newType = findType(encodedKey); + + if (newType.isEmpty()) { + continue; + } + // Skip if we previously found a resource name and the new resource name type has a lower + // priority + if (resourceName.isPresent() + && newType.get().priority <= resourceName.get().getType().priority) { + continue; + } + String decodedValue = percentDecode(encodedValue); + + resourceName = Optional.of(ResourceName.create(newType.get(), decodedValue)); + } + return resourceName.map(ResourceName::getValue); + } + + private static Optional<ResourceNameType> findType(String key) { + for (ResourceNameType type : ResourceNameType.values()) { + if (type.name.equals(key)) { + return Optional.of(type); + } + } + return Optional.empty(); + } + + /** + * Extracts the app profile id from {@link #getRequestParams()}. Returns {@link Optional#empty()} + * if the key is missing. The value will be URL-decoded. + */ + public Optional<String> extractAppProfileId() throws ParsingException { + String requestParams = getRequestParams().orElse(""); + + for (String encodedPair : requestParams.split("&")) { + if (!encodedPair.startsWith("app_profile_id=")) { + continue; + } + String[] parts = encodedPair.split("=", 2); + String encodedValue = parts.length > 1 ? parts[1] : ""; + return Optional.of(percentDecode(encodedValue)); + } + return Optional.empty(); + } + + private static String percentDecode(String s) throws ParsingException { + try { + return URLDecoder.decode(s, StandardCharsets.UTF_8); + } catch (RuntimeException e) { + throw new ParsingException("Failed to url decode " + s, e); + } + } + + /** + * A key that can be derived from {@link CallLabels} to create a priming request that keeps the + * channel active for future RPCs.
*/ + @AutoValue + public abstract static class PrimingKey { + protected abstract Map<String, String> getMetadata(); + + protected abstract String getName(); + + protected abstract Optional<String> getAppProfileId(); + + public static Optional<PrimingKey> from(CallLabels labels) throws ParsingException { + final ImmutableMap.Builder<String, String> md = ImmutableMap.builder(); + + Optional<String> resourceName = labels.extractResourceName(); + if (resourceName.isEmpty()) { + return Optional.empty(); + } + String[] resourceNameParts = resourceName.get().split("/", 5); + if (resourceNameParts.length < 4 + || !resourceNameParts[0].equals("projects") + || !resourceNameParts[2].equals("instances")) { + return Optional.empty(); + } + String instanceName = + "projects/" + resourceNameParts[1] + "/instances/" + resourceNameParts[3]; + StringBuilder reqParams = + new StringBuilder() + .append("name=") + .append(URLEncoder.encode(instanceName, StandardCharsets.UTF_8)); + + Optional<String> appProfileId = labels.extractAppProfileId(); + // Re-encode the previously decoded value when composing the header. + appProfileId.ifPresent( + val -> + reqParams + .append("&app_profile_id=") + .append(URLEncoder.encode(val, StandardCharsets.UTF_8))); + md.put(REQUEST_PARAMS.name(), reqParams.toString()); + + labels + .getLegacyResourcePrefix() + .ifPresent(ignored -> md.put(LEGACY_RESOURCE_PREFIX.name(), instanceName)); + + labels.getRoutingCookie().ifPresent(c -> md.put(ROUTING_COOKIE.name(), c)); + + labels.getEncodedFeatures().ifPresent(c -> md.put(FEATURE_FLAGS.name(), c)); + + labels.getApiClient().ifPresent(c -> md.put(API_CLIENT.name(), c)); + + return Optional.of( + new AutoValue_CallLabels_PrimingKey(md.build(), instanceName, appProfileId)); + } + + public Metadata composeMetadata() { + Metadata md = new Metadata(); + for (Entry<String, String> e : getMetadata().entrySet()) { + md.put(Key.of(e.getKey(), Metadata.ASCII_STRING_MARSHALLER), e.getValue()); + } + return md; + } + + public PingAndWarmRequest composeProto() { + Builder builder = PingAndWarmRequest.newBuilder().setName(getName()); + getAppProfileId().ifPresent(builder::setAppProfileId); + return builder.build(); + } + } + + public static class ParsingException extends Exception { + + public ParsingException(String message) { + super(message); + } + + public ParsingException(String message, Throwable cause) { + super(message, cause); + } + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallProxy.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallProxy.java new file mode 100644 index 00000000000..6285bc5896f --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallProxy.java @@ -0,0 +1,186 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.google.cloud.bigtable.examples.proxy.core; + +import com.google.cloud.bigtable.examples.proxy.metrics.Tracer; +import com.google.common.base.Stopwatch; +import io.grpc.ClientCall; +import io.grpc.Metadata; +import io.grpc.ServerCall; +import io.grpc.Status; +import javax.annotation.concurrent.GuardedBy; + +/** A per-RPC gRPC proxy. */ +class CallProxy<ReqT, RespT> { + + private final Tracer tracer; + final RequestProxy serverCallListener; + final ResponseProxy clientCallListener; + + private final Stopwatch downstreamStopwatch = Stopwatch.createUnstarted(); + + /** + * @param tracer a lifecycle observer to publish metrics. + * @param serverCall the incoming server call. This will be triggered by a customer's client. + * @param clientCall the outgoing call to the Bigtable service. This will be created by {@link + * ProxyHandler} + */ + public CallProxy( + Tracer tracer, ServerCall<ReqT, RespT> serverCall, ClientCall<ReqT, RespT> clientCall) { + this.tracer = tracer; + // Listen for incoming request messages and send them to the upstream ClientCall. + // The RequestProxy will respect back pressure from the ClientCall and only request a new + // message from the incoming RPC when the upstream client call is ready. + serverCallListener = new RequestProxy(clientCall); + + // Listen for response messages from the upstream ClientCall and relay them to the customer's + // client. This will respect backpressure and request new messages from the upstream when the + // customer's client is ready. + clientCallListener = new ResponseProxy(serverCall); + } + + /** + * Back pressure aware message pump of request messages from a customer's downstream client to + * the upstream Bigtable service. + * + *

Additional messages are requested from the downstream while the upstream's isReady() flag is + * set. As soon as the upstream signals that it is full by returning false from isReady(), {@link + * RequestProxy} will remember that it needs to get more messages from the downstream and then + * wait until the upstream signals readiness again via onClientReady(). + * + *
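The isReady()/onReady() handshake described here is gRPC's standard flow-control pattern. A stripped-down model of the pump, with no gRPC types and invented names, purely for illustration:

```java
// Minimal model of the backpressure pump: only ask the source for the next message
// while the sink reports ready; otherwise remember the debt and settle it in onReady().
public class PumpDemo {
  interface Sink {
    boolean isReady();

    void send(String msg);
  }

  private final Sink sink;
  private boolean needToRequest; // guarded by synchronized, as in RequestProxy

  PumpDemo(Sink sink) {
    this.sink = sink;
  }

  // Mirrors RequestProxy.onMessage(): forward, then decide whether to request more.
  synchronized void onMessage(String msg) {
    sink.send(msg);
    if (sink.isReady()) {
      requestOneMore();
    } else {
      needToRequest = true; // sink is full; wait for onReady()
    }
  }

  // Mirrors onClientReady(): settle the deferred request once the sink drains.
  synchronized void onReady() {
    if (needToRequest) {
      needToRequest = false;
      requestOneMore();
    }
  }

  private void requestOneMore() {
    System.out.println("request(1)");
  }

  public static void main(String[] args) {
    Sink fullSink =
        new Sink() {
          @Override
          public boolean isReady() {
            return false;
          }

          @Override
          public void send(String msg) {}
        };
    PumpDemo pump = new PumpDemo(fullSink);
    pump.onMessage("row-1"); // sink is full: no request issued yet
    pump.onReady();          // prints "request(1)"
  }
}
```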

Please note that in the current Bigtable protocol, all client RPCs are unary. Until that + * changes, this proxy will only have a single iteration. However, it's designed generically to + * support future use cases. + */ + private class RequestProxy extends ServerCall.Listener<ReqT> { + + private final ClientCall<ReqT, RespT> clientCall; + + @GuardedBy("this") + private boolean needToRequest; + + public RequestProxy(ClientCall<ReqT, RespT> clientCall) { + this.clientCall = clientCall; + } + + @Override + public void onCancel() { + clientCall.cancel("Server cancelled", null); + } + + @Override + public void onHalfClose() { + clientCall.halfClose(); + } + + @Override + public void onMessage(ReqT message) { + clientCall.sendMessage(message); + synchronized (this) { + if (clientCall.isReady()) { + clientCallListener.serverCall.request(1); + } else { + // The outgoing call is not ready for more requests. Stop requesting additional data and + // wait for it to catch up. + needToRequest = true; + } + } + } + + @Override + public void onReady() { + clientCallListener.onServerReady(); + } + + // Called from ResponseProxy, which is a different thread than the ServerCall.Listener + // callbacks. + synchronized void onClientReady() { + if (needToRequest) { + // When the upstream client is ready for another request message from the customer's client, + // ask for one more message. + clientCallListener.serverCall.request(1); + needToRequest = false; + } + } + } + + /** + * Back pressure aware message pump of response messages from the upstream Bigtable service to a + * customer's downstream client. + * + *

Additional messages are requested from the upstream while the downstream's isReady() flag is + * set. As soon as the downstream signals that it is full by returning false from isReady(), + * {@link ResponseProxy} will remember that it needs to get more messages from the upstream and + * then wait until the downstream signals readiness again via onServerReady(). + */ + private class ResponseProxy extends ClientCall.Listener<RespT> { + + private final ServerCall<ReqT, RespT> serverCall; + + @GuardedBy("this") + private boolean needToRequest; + + public ResponseProxy(ServerCall<ReqT, RespT> serverCall) { + this.serverCall = serverCall; + } + + @Override + public void onClose(Status status, Metadata trailers) { + tracer.onCallFinished(status); + + serverCall.close(status, trailers); + } + + @Override + public void onHeaders(Metadata headers) { + serverCall.sendHeaders(headers); + } + + @Override + public void onMessage(RespT message) { + serverCall.sendMessage(message); + synchronized (this) { + if (serverCall.isReady()) { + serverCallListener.clientCall.request(1); + } else { + // The incoming call is not ready for more responses. Stop requesting additional data + // and wait for it to catch up. + needToRequest = true; + downstreamStopwatch.reset().start(); + } + } + } + + @Override + public void onReady() { + serverCallListener.onClientReady(); + } + + // Called from RequestProxy, which is a different thread than the ClientCall.Listener + // callbacks. + synchronized void onServerReady() { + if (downstreamStopwatch.isRunning()) { + tracer.onDownstreamLatency(downstreamStopwatch.elapsed()); + downstreamStopwatch.stop(); + } + if (needToRequest) { + serverCallListener.clientCall.request(1); + needToRequest = false; + } + } + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java new file mode 100644 index 00000000000..dfdbdd24ba2 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java @@ -0,0 +1,65 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.core; + +import com.google.cloud.bigtable.examples.proxy.metrics.Metrics; +import com.google.cloud.bigtable.examples.proxy.metrics.Tracer; +import io.grpc.CallCredentials; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.Metadata; +import io.grpc.ServerCall; +import io.grpc.ServerCallHandler; + +/** A factory pairing of an incoming server call to an outgoing client call.
*/ +public final class ProxyHandler<ReqT, RespT> implements ServerCallHandler<ReqT, RespT> { + private static final Metadata.Key<String> AUTHORIZATION_KEY = + Metadata.Key.of("Authorization", Metadata.ASCII_STRING_MARSHALLER); + + private final Metrics metrics; + private final Channel channel; + private final CallCredentials callCredentials; + + public ProxyHandler(Metrics metrics, Channel channel, CallCredentials callCredentials) { + this.metrics = metrics; + this.channel = channel; + this.callCredentials = callCredentials; + } + + @Override + public ServerCall.Listener<ReqT> startCall(ServerCall<ReqT, RespT> serverCall, Metadata headers) { + CallLabels callLabels = CallLabels.create(serverCall.getMethodDescriptor(), headers); + Tracer tracer = new Tracer(metrics, callLabels); + + // Inject proxy credentials + CallOptions callOptions = CallOptions.DEFAULT.withCallCredentials(callCredentials); + callOptions = tracer.injectIntoCallOptions(callOptions); + + // Strip incoming credentials + headers.removeAll(AUTHORIZATION_KEY); + + ClientCall<ReqT, RespT> clientCall = + channel.newCall(serverCall.getMethodDescriptor(), callOptions); + + CallProxy<ReqT, RespT> proxy = new CallProxy<>(tracer, serverCall, clientCall); + clientCall.start(proxy.clientCallListener, headers); + serverCall.request(1); + clientCall.request(1); + return proxy.serverCallListener; + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/Registry.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/Registry.java new file mode 100644 index 00000000000..bed62c292e0 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/Registry.java @@ -0,0 +1,54 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.core; + +import com.google.common.collect.ImmutableMap; +import io.grpc.HandlerRegistry; +import io.grpc.MethodDescriptor; +import io.grpc.ServerCallHandler; +import io.grpc.ServerMethodDefinition; +import java.util.Map; + +/** + * Contains the service name -> handler mapping. This acts as an aggregate service. + * + *

The handlers treat requests and responses as raw byte arrays. + */ +public class Registry extends HandlerRegistry { + private final MethodDescriptor.Marshaller byteMarshaller = new ByteMarshaller(); + private final Map> serviceMap; + + public Registry(Map> serviceMap) { + this.serviceMap = ImmutableMap.copyOf(serviceMap); + } + + @Override + public ServerMethodDefinition lookupMethod(String methodName, String authority) { + MethodDescriptor methodDescriptor = + MethodDescriptor.newBuilder(byteMarshaller, byteMarshaller) + .setFullMethodName(methodName) + .setType(MethodDescriptor.MethodType.UNKNOWN) + .build(); + + ServerCallHandler handler = serviceMap.get(methodDescriptor.getServiceName()); + if (handler == null) { + return null; + } + + return ServerMethodDefinition.create(methodDescriptor, handler); + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/InstrumentedCallCredentials.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/InstrumentedCallCredentials.java new file mode 100644 index 00000000000..14d1454a22f --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/InstrumentedCallCredentials.java @@ -0,0 +1,105 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.metrics; + +import com.google.cloud.bigtable.examples.proxy.channelpool.DataChannel; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels.PrimingKey; +import com.google.cloud.bigtable.examples.proxy.core.ProxyHandler; +import com.google.common.base.Stopwatch; +import io.grpc.CallCredentials; +import io.grpc.CallOptions; +import io.grpc.InternalMayRequireSpecificExecutor; +import io.grpc.Metadata; +import io.grpc.ServerCall; +import io.grpc.Status; +import java.time.Duration; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import javax.annotation.Nullable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * {@link CallCredentials} decorator that tracks latency for fetching credentials. + * + *

This expects that all RPCs that use these credentials embed a {@link Tracer} in the {@link + * io.grpc.CallOptions} using {@link Tracer#injectIntoCallOptions(CallOptions)}. + * + *
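+ * <p>A sketch of that contract, assuming {@code metrics}, {@code callLabels}, and an underlying
+ * {@code credentials} instance:
+ *
+ * <pre>{@code
+ * Tracer tracer = new Tracer(metrics, callLabels);
+ * CallOptions options =
+ *     tracer.injectIntoCallOptions(
+ *         CallOptions.DEFAULT.withCallCredentials(new InstrumentedCallCredentials(credentials)));
+ * }</pre>
+ *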

Known callers: + * + *

+ * <ul>
+ *   <li>{@link DataChannel#sendPingAndWarm(PrimingKey)}
+ *   <li>{@link ProxyHandler#startCall(ServerCall, Metadata)}
+ * </ul>
+ */
+public class InstrumentedCallCredentials extends CallCredentials
+    implements InternalMayRequireSpecificExecutor {
+  private static final Logger LOG = LoggerFactory.getLogger(InstrumentedCallCredentials.class);
+
+  private final CallCredentials inner;
+  private final boolean specificExecutorRequired;
+
+  public InstrumentedCallCredentials(CallCredentials inner) {
+    this.inner = inner;
+    this.specificExecutorRequired =
+        (inner instanceof InternalMayRequireSpecificExecutor)
+            && ((InternalMayRequireSpecificExecutor) inner).isSpecificExecutorRequired();
+  }
+
+  @Override
+  public void applyRequestMetadata(
+      RequestInfo requestInfo, Executor appExecutor, MetadataApplier applier) {
+    @Nullable Tracer tracer = Tracer.extractTracerFromCallOptions(requestInfo.getCallOptions());
+    if (tracer == null) {
+      applier.fail(
+          Status.INTERNAL.withDescription(
+              "InstrumentedCallCredentials failed to extract tracer from CallOptions"));
+      return;
+    }
+    final Stopwatch stopwatch = Stopwatch.createStarted();
+
+    inner.applyRequestMetadata(
+        requestInfo,
+        appExecutor,
+        new MetadataApplier() {
+          @Override
+          public void apply(Metadata headers) {
+            Duration latency = Duration.ofMillis(stopwatch.elapsed(TimeUnit.MILLISECONDS));
+            // Most credential fetches should be very fast because they are cached
+            if (latency.compareTo(Duration.ofMillis(1)) >= 1) {
+              LOG.debug("Fetching Credentials took {}", latency);
+            }
+            tracer.onCredentialsFetch(Status.OK, latency);
+            applier.apply(headers);
+          }
+
+          @Override
+          public void fail(Status status) {
+            Duration latency = Duration.ofMillis(stopwatch.elapsed(TimeUnit.MILLISECONDS));
+
+            LOG.warn("Failed to fetch Credentials after {}: {}", latency, status);
+            tracer.onCredentialsFetch(status, latency);
+            applier.fail(status);
+          }
+        });
+  }
+
+  @Override
+  public boolean isSpecificExecutorRequired() {
+    return specificExecutorRequired;
+  }
+}
diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java
new file mode 100644
index 00000000000..007d84471e9
--- /dev/null
+++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.bigtable.examples.proxy.metrics;
+
+import com.google.cloud.bigtable.examples.proxy.core.CallLabels;
+import com.google.cloud.bigtable.examples.proxy.metrics.Metrics.MetricsAttributes;
+import io.grpc.ConnectivityState;
+import io.grpc.Status;
+import java.time.Duration;
+
+/** Interface for tracking measurements across the application.
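+ *
+ * <p>A typical call-lifecycle sequence, sketched assuming {@code callLabels} was extracted from
+ * the incoming call:
+ *
+ * <pre>{@code
+ * MetricsAttributes attrs = metrics.createAttributes(callLabels);
+ * metrics.recordCallStarted(attrs);
+ * // ... proxy the call ...
+ * metrics.recordCallLatency(attrs, Status.OK, elapsed);
+ * }</pre>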
*/ +public interface Metrics { + MetricsAttributes createAttributes(CallLabels callLabels); + + void recordCallStarted(MetricsAttributes attrs); + + void recordCredLatency(MetricsAttributes attrs, Status status, Duration duration); + + void recordQueueLatency(MetricsAttributes attrs, Duration duration); + + void recordRequestSize(MetricsAttributes attrs, long size); + + void recordResponseSize(MetricsAttributes attrs, long size); + + void recordGfeLatency(MetricsAttributes attrs, Duration duration); + + void recordGfeHeaderMissing(MetricsAttributes attrs); + + void recordCallLatency(MetricsAttributes attrs, Status status, Duration duration); + + void recordFirstByteLatency(MetricsAttributes attrs, Duration duration); + + void updateChannelCount(int delta); + + void recordChannelStateChange(ConnectivityState prevState, ConnectivityState newState); + + void recordDownstreamLatency(MetricsAttributes attrs, Duration latency); + + interface MetricsAttributes {} +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java new file mode 100644 index 00000000000..a5f9a2ce409 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java @@ -0,0 +1,406 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.metrics; + +import com.google.auth.Credentials; +import com.google.auto.value.AutoValue; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels.ParsingException; +import com.google.cloud.opentelemetry.metric.GoogleCloudMetricExporter; +import com.google.cloud.opentelemetry.metric.MetricConfiguration; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import io.grpc.ConnectivityState; +import io.grpc.Status; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.LongCounter; +import io.opentelemetry.api.metrics.LongHistogram; +import io.opentelemetry.api.metrics.LongUpDownCounter; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.api.metrics.MeterProvider; +import io.opentelemetry.api.metrics.ObservableLongGauge; +import io.opentelemetry.contrib.gcp.resource.GCPResourceProvider; +import io.opentelemetry.sdk.common.InstrumentationScopeInfo; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableGaugeData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableLongPointData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableMetricData; +import io.opentelemetry.sdk.resources.Resource; +import java.io.Closeable; +import java.io.IOException; +import java.time.Duration; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Central definition of all the {@link OpenTelemetry} metrics in this application. + * + *
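+ * <p>A construction sketch, assuming Application Default Credentials and a known project id;
+ * the try-with-resources reflects that this class is {@link Closeable}:
+ *
+ * <pre>{@code
+ * Credentials creds = GoogleCredentials.getApplicationDefault();
+ * try (MetricsImpl metrics = new MetricsImpl(creds, "my-project")) {
+ *   // hand it to the ProxyHandler / Tracer wiring
+ * }
+ * }</pre>
+ *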

The metric definitions themselves are only accessible via type-safe record methods.
+ */
+@SuppressWarnings("ClassEscapesDefinedScope")
+public class MetricsImpl implements Closeable, Metrics {
+  private static final Logger LOG = LoggerFactory.getLogger(MetricsImpl.class);
+
+  private static final InstrumentationScopeInfo INSTRUMENTATION_SCOPE_INFO =
+      InstrumentationScopeInfo.builder("bigtable-proxy").setVersion("0.0.1").build();
+
+  private static final String METRIC_PREFIX = "bigtableproxy.";
+
+  private static final AttributeKey<String> API_CLIENT_KEY = AttributeKey.stringKey("api_client");
+  private static final AttributeKey<String> RESOURCE_KEY = AttributeKey.stringKey("resource");
+  private static final AttributeKey<String> APP_PROFILE_KEY = AttributeKey.stringKey("app_profile");
+  private static final AttributeKey<String> METHOD_KEY = AttributeKey.stringKey("method");
+  private static final AttributeKey<String> STATUS_KEY = AttributeKey.stringKey("status");
+
+  private static final AttributeKey<String> PREV_CHANNEL_STATE =
+      AttributeKey.stringKey("prev_state");
+  private static final AttributeKey<String> CURRENT_CHANNEL_STATE =
+      AttributeKey.stringKey("current_state");
+
+  private static final String METRIC_PRESENCE_NAME = METRIC_PREFIX + "presence";
+  private static final String METRIC_PRESENCE_DESC = "Number of proxy processes";
+  private static final String METRIC_PRESENCE_UNIT = "{process}";
+
+  private final MeterProvider meterProvider;
+
+  private final DoubleHistogram gfeLatency;
+  private final LongCounter gfeResponseHeadersMissing;
+  private final DoubleHistogram clientCredLatencies;
+  private final DoubleHistogram clientQueueLatencies;
+  private final DoubleHistogram clientCallLatencies;
+  private final DoubleHistogram clientCallFirstByteLatencies;
+  private final DoubleHistogram downstreamLatencies;
+  private final LongCounter serverCallsStarted;
+  private final LongHistogram requestSizes;
+  private final LongHistogram responseSizes;
+  private final LongCounter channelStateChangeCounter;
+
+  private final ObservableLongGauge outstandingRpcCountGauge;
+  private final ObservableLongGauge presenceGauge;
+
+  private final LongUpDownCounter channelCounter;
+  private final AtomicInteger numOutstandingRpcs = new AtomicInteger();
+  private final AtomicInteger maxSeen = new AtomicInteger();
+
+  public MetricsImpl(Credentials credentials, String projectId) throws IOException {
+    this(createMeterProvider(credentials, projectId));
+  }
+
+  private static SdkMeterProvider createMeterProvider(Credentials credentials, String projectId) {
+    MetricConfiguration config =
+        MetricConfiguration.builder()
+            .setProjectId(projectId)
+            .setCredentials(credentials)
+            .setInstrumentationLibraryLabelsEnabled(false)
+            .build();
+
+    MetricExporter exporter = GoogleCloudMetricExporter.createWithConfiguration(config);
+
+    return SdkMeterProvider.builder()
+        .setResource(Resource.create(new GCPResourceProvider().getAttributes()))
+        .registerMetricReader(
+            PeriodicMetricReader.builder(exporter).setInterval(Duration.ofMinutes(1)).build())
+        .build();
+  }
+
+  MetricsImpl(MeterProvider meterProvider) {
+    this.meterProvider = meterProvider;
+    @SuppressWarnings("DataFlowIssue")
+    Meter meter =
+        meterProvider
+            .meterBuilder(INSTRUMENTATION_SCOPE_INFO.getName())
+            .setInstrumentationVersion(INSTRUMENTATION_SCOPE_INFO.getVersion())
+            .build();
+
+    serverCallsStarted =
+        meter
+            .counterBuilder(METRIC_PREFIX + "server.call.started")
+            .setDescription(
+                "The total number of RPCs started, including those that have not completed.")
+            .setUnit("{call}")
+            .build();
+
+ clientCredLatencies = + meter + .histogramBuilder(METRIC_PREFIX + "client.call.credential.duration") + .setDescription("Latency of getting credentials") + .setUnit("ms") + .build(); + + clientQueueLatencies = + meter + .histogramBuilder(METRIC_PREFIX + "client.call.queue.duration") + .setDescription( + "Duration of how long the outbound side of the proxy had the RPC queued") + .setUnit("ms") + .build(); + + requestSizes = + meter + .histogramBuilder(METRIC_PREFIX + "client.call.sent_total_message_size") + .setDescription( + "Total bytes sent per call to Bigtable service (excluding metadata, grpc and" + + " transport framing bytes)") + .setUnit("by") + .ofLongs() + .build(); + + responseSizes = + meter + .histogramBuilder(METRIC_PREFIX + "client.call.rcvd_total_message_size") + .setDescription( + "Total bytes received per call from Bigtable service (excluding metadata, grpc and" + + " transport framing bytes)") + .setUnit("by") + .ofLongs() + .build(); + + gfeLatency = + meter + .histogramBuilder(METRIC_PREFIX + "client.gfe.duration") + .setDescription( + "Latency as measured by Google load balancer from the time it " + + "received the first byte of the request until it received the first byte of" + + " the response from the Cloud Bigtable service.") + .setUnit("ms") + .build(); + + gfeResponseHeadersMissing = + meter + .counterBuilder(METRIC_PREFIX + "client.gfe.duration_missing.count") + .setDescription("Count of calls missing gfe response headers") + .setUnit("{call}") + .build(); + + clientCallLatencies = + meter + .histogramBuilder(METRIC_PREFIX + "client.call.duration") + .setDescription("Total duration of how long the outbound call took") + .setUnit("ms") + .build(); + + clientCallFirstByteLatencies = + meter + .histogramBuilder(METRIC_PREFIX + "client.first_byte.duration") + .setDescription("Latency from start of request until first response is received") + .setUnit("ms") + .build(); + + downstreamLatencies = + meter + .histogramBuilder(METRIC_PREFIX + "server.write_wait.duration") + .setDescription( + "Total amount of time spent waiting for the downstream client to be" + + " ready for data") + .setUnit("ms") + .build(); + + channelCounter = + meter + .upDownCounterBuilder(METRIC_PREFIX + "client.channel.count") + .setDescription("Number of open channels") + .setUnit("{channel}") + .build(); + + outstandingRpcCountGauge = + meter + .gaugeBuilder(METRIC_PREFIX + "client.call.max_outstanding_count") + .setDescription("Maximum number of concurrent RPCs in a single minute window") + .setUnit("{call}") + .ofLongs() + .buildWithCallback(o -> o.record(maxSeen.getAndSet(0))); + + presenceGauge = + meter + .gaugeBuilder(METRIC_PRESENCE_NAME) + .setDescription(METRIC_PRESENCE_DESC) + .setUnit(METRIC_PRESENCE_UNIT) + .ofLongs() + .buildWithCallback(o -> o.record(1)); + + channelStateChangeCounter = + meter + .counterBuilder(METRIC_PREFIX + "client.channel_change_count") + .setDescription("Counter of channel state transitions") + .setUnit("{change}") + .build(); + } + + @Override + public void close() throws IOException { + outstandingRpcCountGauge.close(); + presenceGauge.close(); + + if (meterProvider instanceof Closeable) { + ((Closeable) meterProvider).close(); + } + } + + @Override + public MetricsAttributesImpl createAttributes(CallLabels callLabels) { + AttributesBuilder attrs = + Attributes.builder() + .put(METHOD_KEY, callLabels.getMethodName()) + .put(API_CLIENT_KEY, callLabels.getApiClient().orElse("")); + + String resourceValue; + try { + resourceValue = 
callLabels.extractResourceName().orElse(""); + } catch (ParsingException e) { + LOG.warn("Failed to extract resource from callLabels: {}", callLabels, e); + resourceValue = ""; + } + attrs.put(MetricsImpl.RESOURCE_KEY, resourceValue); + + String appProfile; + try { + appProfile = callLabels.extractAppProfileId().orElse(""); + } catch (ParsingException e) { + LOG.warn("Failed to extract app profile from callLabels: {}", callLabels, e); + appProfile = ""; + } + attrs.put(MetricsImpl.APP_PROFILE_KEY, appProfile); + + return new AutoValue_MetricsImpl_MetricsAttributesImpl(attrs.build()); + } + + @Override + public void recordCallStarted(MetricsAttributes attrs) { + serverCallsStarted.add(1, unwrap(attrs)); + + int outstanding = numOutstandingRpcs.incrementAndGet(); + maxSeen.updateAndGet(n -> Math.max(outstanding, n)); + } + + @Override + public void recordCredLatency(MetricsAttributes attrs, Status status, Duration duration) { + Attributes attributes = + unwrap(attrs).toBuilder().put(STATUS_KEY, status.getCode().name()).build(); + clientCredLatencies.record(toMs(duration), attributes); + } + + @Override + public void recordQueueLatency(MetricsAttributes attrs, Duration duration) { + clientQueueLatencies.record(toMs(duration), unwrap(attrs)); + } + + @Override + public void recordRequestSize(MetricsAttributes attrs, long size) { + requestSizes.record(size, unwrap(attrs)); + } + + @Override + public void recordResponseSize(MetricsAttributes attrs, long size) { + responseSizes.record(size, unwrap(attrs)); + } + + @Override + public void recordGfeLatency(MetricsAttributes attrs, Duration duration) { + gfeLatency.record(toMs(duration), unwrap(attrs)); + } + + @Override + public void recordGfeHeaderMissing(MetricsAttributes attrs) { + gfeResponseHeadersMissing.add(1, unwrap(attrs)); + } + + @Override + public void recordCallLatency(MetricsAttributes attrs, Status status, Duration duration) { + Attributes attributes = + unwrap(attrs).toBuilder().put(STATUS_KEY, status.getCode().name()).build(); + + clientCallLatencies.record(toMs(duration), attributes); + numOutstandingRpcs.decrementAndGet(); + } + + @Override + public void recordFirstByteLatency(MetricsAttributes attrs, Duration duration) { + clientCallFirstByteLatencies.record(toMs(duration), unwrap(attrs)); + } + + @Override + public void updateChannelCount(int delta) { + channelCounter.add(delta); + } + + @Override + public void recordChannelStateChange(ConnectivityState prevState, ConnectivityState newState) { + Attributes attributes = + Attributes.builder() + .put( + PREV_CHANNEL_STATE, Optional.ofNullable(prevState).map(Enum::name).orElse("")) + .put( + CURRENT_CHANNEL_STATE, + Optional.ofNullable(newState).map(Enum::name).orElse("")) + .build(); + channelStateChangeCounter.add(1, attributes); + } + + @Override + public void recordDownstreamLatency(MetricsAttributes attrs, Duration latency) { + downstreamLatencies.record(toMs(latency), unwrap(attrs)); + } + + private static double toMs(Duration duration) { + return duration.toNanos() / 1_000_000.0; + } + + private static Attributes unwrap(MetricsAttributes wrapped) { + return ((MetricsAttributesImpl) wrapped).getAttributes(); + } + + /** + * Generate a test data point to test permissions for exporting metrics. Used in {@link + * com.google.cloud.bigtable.examples.proxy.commands.Verify}. 
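+ * The point covers the most recently completed minute and always carries the value 1.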
+ */ + public static MetricData generateTestPresenceMeasurement(Resource resource) { + Instant end = Instant.now().truncatedTo(ChronoUnit.MINUTES); + Instant start = end.minus(Duration.ofMinutes(1)); + + return ImmutableMetricData.createLongGauge( + resource, + INSTRUMENTATION_SCOPE_INFO, + METRIC_PRESENCE_NAME, + METRIC_PRESENCE_DESC, + METRIC_PRESENCE_UNIT, + ImmutableGaugeData.create( + ImmutableList.of( + ImmutableLongPointData.create( + TimeUnit.MILLISECONDS.toNanos(start.toEpochMilli()), + TimeUnit.MILLISECONDS.toNanos(end.toEpochMilli()), + Attributes.empty(), + 1L)))); + } + + @VisibleForTesting + @AutoValue + abstract static class MetricsAttributesImpl implements MetricsAttributes { + abstract Attributes getAttributes(); + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java new file mode 100644 index 00000000000..b0162ede05f --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java @@ -0,0 +1,137 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.metrics; + +import com.google.cloud.bigtable.examples.proxy.core.CallLabels; +import com.google.cloud.bigtable.examples.proxy.metrics.Metrics.MetricsAttributes; +import com.google.common.base.Stopwatch; +import io.grpc.CallOptions; +import io.grpc.CallOptions.Key; +import io.grpc.ClientStreamTracer; +import io.grpc.Metadata; +import io.grpc.Status; +import java.time.Duration; +import java.time.temporal.ChronoUnit; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * RPC lifecycle tracer. + * + *
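+ * <p>GFE latency is read from the {@code server-timing} response header; for example, a header
+ * value of {@code gfet4t7; dur=42} would be recorded as 42 ms (the header shape is assumed from
+ * the parsing pattern in this class).
+ *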

It hooks into both the gRPC RPC lifecycle and this application's request handling. It combines
+ * the extracted {@link CallLabels} with {@link Metrics} recording.
+ */
+public class Tracer extends ClientStreamTracer {
+  private static final Key<Tracer> CALL_OPTION_KEY = Key.create("bigtable-proxy-tracer");
+
+  private static final Metadata.Key<String> SERVER_TIMING_HEADER_KEY =
+      Metadata.Key.of("server-timing", Metadata.ASCII_STRING_MARSHALLER);
+  private static final Pattern SERVER_TIMING_HEADER_PATTERN =
+      Pattern.compile(".*dur=(?<dur>\\d+)");
+
+  private final Metrics metrics;
+  private final CallLabels callLabels;
+  private final MetricsAttributes attrs;
+  private final Stopwatch stopwatch;
+  private volatile Optional<Duration> grpcQueueDuration = Optional.empty();
+  private final AtomicLong responseSize = new AtomicLong();
+  private volatile Duration downstreamLatency = Duration.ZERO;
+
+  public Tracer(Metrics metrics, CallLabels callLabels) {
+    this.metrics = metrics;
+    this.callLabels = callLabels;
+    this.attrs = metrics.createAttributes(callLabels);
+
+    stopwatch = Stopwatch.createStarted();
+
+    metrics.recordCallStarted(attrs);
+  }
+
+  public CallOptions injectIntoCallOptions(CallOptions callOptions) {
+    return callOptions
+        .withOption(CALL_OPTION_KEY, this)
+        .withStreamTracerFactory(
+            new Factory() {
+              @Override
+              public ClientStreamTracer newClientStreamTracer(StreamInfo info, Metadata headers) {
+                return Tracer.this;
+              }
+            });
+  }
+
+  public static Tracer extractTracerFromCallOptions(CallOptions callOptions) {
+    return callOptions.getOption(CALL_OPTION_KEY);
+  }
+
+  @Override
+  public void outboundMessageSent(int seqNo, long optionalWireSize, long optionalUncompressedSize) {
+    grpcQueueDuration =
+        Optional.of(Duration.of(stopwatch.elapsed(TimeUnit.MICROSECONDS), ChronoUnit.MICROS));
+  }
+
+  @Override
+  public void outboundUncompressedSize(long bytes) {
+    metrics.recordRequestSize(attrs, bytes);
+  }
+
+  @Override
+  public void inboundUncompressedSize(long bytes) {
+    responseSize.addAndGet(bytes);
+  }
+
+  @Override
+  public void inboundHeaders(Metadata headers) {
+    Optional.ofNullable(headers.get(SERVER_TIMING_HEADER_KEY))
+        .map(SERVER_TIMING_HEADER_PATTERN::matcher)
+        .filter(Matcher::find)
+        .map(m -> m.group("dur"))
+        .map(Long::parseLong)
+        .map(Duration::ofMillis)
+        .ifPresentOrElse(
+            d -> metrics.recordGfeLatency(attrs, d), () -> metrics.recordGfeHeaderMissing(attrs));
+  }
+
+  @Override
+  public void inboundMessage(int seqNo) {
+    if (seqNo == 0) {
+      metrics.recordFirstByteLatency(
+          attrs, Duration.ofMillis(stopwatch.elapsed(TimeUnit.MILLISECONDS)));
+    }
+  }
+
+  public void onCallFinished(Status status) {
+    grpcQueueDuration.ifPresent(d -> metrics.recordQueueLatency(attrs, d));
+    metrics.recordDownstreamLatency(attrs, downstreamLatency);
+    metrics.recordResponseSize(attrs, responseSize.get());
+    metrics.recordCallLatency(
+        attrs, status, Duration.ofMillis(stopwatch.elapsed(TimeUnit.MILLISECONDS)));
+  }
+
+  public void onCredentialsFetch(Status status, Duration duration) {
+    metrics.recordCredLatency(attrs, status, duration);
+  }
+
+  public CallLabels getCallLabels() {
+    return callLabels;
+  }
+
+  public void onDownstreamLatency(Duration latency) {
+    downstreamLatency = downstreamLatency.plus(latency);
+  }
+}
diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/package-info.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/package-info.java
new file mode 100644
index 00000000000..6175827d83f
--- /dev/null
+++
b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/package-info.java @@ -0,0 +1,17 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy; diff --git a/bigtable/bigtable-proxy/src/main/resources/logback.xml b/bigtable/bigtable-proxy/src/main/resources/logback.xml new file mode 100644 index 00000000000..b2f4edd122e --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/resources/logback.xml @@ -0,0 +1,21 @@ + + + + + + + %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + + + + + + + + + + + + + + diff --git a/bigtable/bigtable-proxy/src/main/scripts/bigtable-proxy.sh b/bigtable/bigtable-proxy/src/main/scripts/bigtable-proxy.sh new file mode 100755 index 00000000000..58b35e9c0a9 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/scripts/bigtable-proxy.sh @@ -0,0 +1,16 @@ +#!/bin/sh + # Copyright 2024 Google LLC + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + +java -jar ${project.build.finalName}.jar serve "$@" diff --git a/bigtable/bigtable-proxy/src/main/scripts/bigtable-verify.sh b/bigtable/bigtable-proxy/src/main/scripts/bigtable-verify.sh new file mode 100755 index 00000000000..380cb84100b --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/scripts/bigtable-verify.sh @@ -0,0 +1,16 @@ +#!/bin/sh + # Copyright 2024 Google LLC + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
+ +java -jar ${project.build.finalName}.jar verify "$@" diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPoolTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPoolTest.java new file mode 100644 index 00000000000..bc1ecc83acd --- /dev/null +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPoolTest.java @@ -0,0 +1,804 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.channelpool; + +import static com.google.common.truth.Truth.assertThat; +import static io.grpc.MethodDescriptor.generateFullMethodName; + +import com.google.bigtable.v2.BigtableGrpc; +import com.google.bigtable.v2.MutateRowRequest; +import com.google.bigtable.v2.MutateRowResponse; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.type.Color; +import com.google.type.Money; +import io.grpc.CallOptions; +import io.grpc.ClientCall; +import io.grpc.ClientCall.Listener; +import io.grpc.ManagedChannel; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.grpc.Status; +import io.grpc.protobuf.ProtoUtils; +import io.grpc.stub.ClientCalls; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Handler; +import java.util.logging.LogRecord; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import org.junit.After; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import org.mockito.stubbing.Answer; + +@RunWith(JUnit4.class) +public class ChannelPoolTest { + private static final int DEFAULT_AWAIT_TERMINATION_SEC = 10; + private ChannelPool pool; + + @After + public void cleanup() throws InterruptedException { + Preconditions.checkNotNull(pool, "Channel pool was never created"); + pool.shutdown(); + pool.awaitTermination(DEFAULT_AWAIT_TERMINATION_SEC, TimeUnit.SECONDS); + } + + @Test + public void testAuthority() throws IOException { + ManagedChannel sub1 = Mockito.mock(ManagedChannel.class); + ManagedChannel sub2 = Mockito.mock(ManagedChannel.class); + + Mockito.when(sub1.authority()).thenReturn("myAuth"); + + pool = + ChannelPool.create( + ChannelPoolSettings.staticallySized(2), + new 
FakeChannelFactory(Arrays.asList(sub1, sub2))); + assertThat(pool.authority()).isEqualTo("myAuth"); + } + + @Test + public void testRoundRobin() throws IOException { + ManagedChannel sub1 = Mockito.mock(ManagedChannel.class); + ManagedChannel sub2 = Mockito.mock(ManagedChannel.class); + + Mockito.when(sub1.authority()).thenReturn("myAuth"); + + ArrayList channels = Lists.newArrayList(sub1, sub2); + pool = + ChannelPool.create( + ChannelPoolSettings.staticallySized(channels.size()), new FakeChannelFactory(channels)); + + verifyTargetChannel(pool, channels, sub1); + verifyTargetChannel(pool, channels, sub2); + verifyTargetChannel(pool, channels, sub1); + } + + private void verifyTargetChannel( + ChannelPool pool, List channels, ManagedChannel targetChannel) { + MethodDescriptor methodDescriptor = + BigtableGrpc.getMutateRowMethod(); + CallOptions callOptions = CallOptions.DEFAULT; + @SuppressWarnings("unchecked") + ClientCall expectedClientCall = + Mockito.mock(ClientCall.class); + + channels.forEach(Mockito::reset); + Mockito.doReturn(expectedClientCall).when(targetChannel).newCall(methodDescriptor, callOptions); + + ClientCall actualCall = + pool.newCall(methodDescriptor, callOptions); + Mockito.verify(targetChannel, Mockito.times(1)).newCall(methodDescriptor, callOptions); + actualCall.start(null, null); + Mockito.verify(expectedClientCall, Mockito.times(1)).start(Mockito.any(), Mockito.any()); + + for (ManagedChannel otherChannel : channels) { + if (otherChannel != targetChannel) { + Mockito.verify(otherChannel, Mockito.never()).newCall(methodDescriptor, callOptions); + } + } + } + + @Test + public void ensureEvenDistribution() throws InterruptedException, IOException { + int numChannels = 10; + final ManagedChannel[] channels = new ManagedChannel[numChannels]; + final AtomicInteger[] counts = new AtomicInteger[numChannels]; + + MethodDescriptor methodDescriptor = + BigtableGrpc.getMutateRowMethod(); + final CallOptions callOptions = CallOptions.DEFAULT; + @SuppressWarnings("unchecked") + final ClientCall clientCall = + Mockito.mock(ClientCall.class); + + for (int i = 0; i < numChannels; i++) { + final int index = i; + + counts[i] = new AtomicInteger(); + + channels[i] = Mockito.mock(ManagedChannel.class); + Mockito.when(channels[i].newCall(methodDescriptor, callOptions)) + .thenAnswer( + (ignored) -> { + counts[index].incrementAndGet(); + return clientCall; + }); + } + + pool = + ChannelPool.create( + ChannelPoolSettings.staticallySized(numChannels), + new FakeChannelFactory(Arrays.asList(channels))); + + int numThreads = 20; + final int numPerThread = 1000; + + ExecutorService executor = Executors.newFixedThreadPool(numThreads); + for (int i = 0; i < numThreads; i++) { + executor.submit( + () -> { + for (int j = 0; j < numPerThread; j++) { + pool.newCall(methodDescriptor, callOptions); + } + }); + } + executor.shutdown(); + boolean shutdown = executor.awaitTermination(1, TimeUnit.MINUTES); + assertThat(shutdown).isTrue(); + + int expectedCount = (numThreads * numPerThread) / numChannels; + for (AtomicInteger count : counts) { + assertThat(count.get()).isAnyOf(expectedCount, expectedCount + 1); + } + } + + // Test channelPrimer is called same number of times as poolSize if executorService is set to null + @Test + public void channelPrimerShouldCallPoolConstruction() throws IOException { + ChannelPrimer mockChannelPrimer = Mockito.mock(ChannelPrimer.class); + ManagedChannel channel1 = Mockito.mock(ManagedChannel.class); + ManagedChannel channel2 = Mockito.mock(ManagedChannel.class); + + 
pool = + ChannelPool.create( + ChannelPoolSettings.staticallySized(2).toBuilder() + .setPreemptiveRefreshEnabled(true) + .build(), + new FakeChannelFactory(Arrays.asList(channel1, channel2), mockChannelPrimer)); + Mockito.verify(mockChannelPrimer, Mockito.times(2)) + .primeChannel(Mockito.any(ManagedChannel.class)); + } + + // Test channelPrimer is called periodically, if there's an executorService + @Test + public void channelPrimerIsCalledPeriodically() throws IOException { + ChannelPrimer mockChannelPrimer = Mockito.mock(ChannelPrimer.class); + ManagedChannel channel1 = Mockito.mock(ManagedChannel.class); + ManagedChannel channel2 = Mockito.mock(ManagedChannel.class); + ManagedChannel channel3 = Mockito.mock(ManagedChannel.class); + + List channelRefreshers = new ArrayList<>(); + + ScheduledExecutorService scheduledExecutorService = + Mockito.mock(ScheduledExecutorService.class); + + Answer extractChannelRefresher = + invocation -> { + channelRefreshers.add(invocation.getArgument(0)); + return Mockito.mock(ScheduledFuture.class); + }; + + Mockito.doAnswer(extractChannelRefresher) + .when(scheduledExecutorService) + .scheduleAtFixedRate( + Mockito.any(Runnable.class), Mockito.anyLong(), Mockito.anyLong(), Mockito.any()); + + FakeChannelFactory channelFactory = + new FakeChannelFactory(Arrays.asList(channel1, channel2, channel3), mockChannelPrimer); + + pool = + new ChannelPool( + ChannelPoolSettings.staticallySized(1).toBuilder() + .setPreemptiveRefreshEnabled(true) + .build(), + channelFactory, + scheduledExecutorService); + // 1 call during the creation + Mockito.verify(mockChannelPrimer, Mockito.times(1)) + .primeChannel(Mockito.any(ManagedChannel.class)); + + channelRefreshers.get(0).run(); + // 1 more call during channel refresh + Mockito.verify(mockChannelPrimer, Mockito.times(2)) + .primeChannel(Mockito.any(ManagedChannel.class)); + + channelRefreshers.get(0).run(); + // 1 more call during channel refresh + Mockito.verify(mockChannelPrimer, Mockito.times(3)) + .primeChannel(Mockito.any(ManagedChannel.class)); + } + + // ---- + // call should be allowed to complete and the channel should not be shutdown + @Test + public void callShouldCompleteAfterCreation() throws IOException { + ManagedChannel underlyingChannel = Mockito.mock(ManagedChannel.class); + ManagedChannel replacementChannel = Mockito.mock(ManagedChannel.class); + FakeChannelFactory channelFactory = + new FakeChannelFactory(ImmutableList.of(underlyingChannel, replacementChannel)); + pool = ChannelPool.create(ChannelPoolSettings.staticallySized(1), channelFactory); + + // create a mock call when new call comes to the underlying channel + MockClientCall mockClientCall = new MockClientCall<>(1, Status.OK); + MockClientCall spyClientCall = Mockito.spy(mockClientCall); + Mockito.when( + underlyingChannel.newCall( + Mockito.>any(), Mockito.any(CallOptions.class))) + .thenReturn(spyClientCall); + + Answer verifyChannelNotShutdown = + invocation -> { + Mockito.verify(underlyingChannel, Mockito.never()).shutdown(); + return invocation.callRealMethod(); + }; + + // verify that underlying channel is not shutdown when clientCall is still sending message + Mockito.doAnswer(verifyChannelNotShutdown).when(spyClientCall).sendMessage(Mockito.anyString()); + + // create a new call on entry + @SuppressWarnings("unchecked") + ClientCall.Listener listener = Mockito.mock(ClientCall.Listener.class); + ClientCall call = + pool.newCall(FakeMethodDescriptor.create(), CallOptions.DEFAULT); + + pool.refresh(); + // shutdown is not called because 
there is still an outstanding call, even if it hasn't started + Mockito.verify(underlyingChannel, Mockito.after(200).never()).shutdown(); + + // start clientCall + call.start(listener, new Metadata()); + // send message and end the call + call.sendMessage("message"); + // shutdown is called because the outstanding call has completed + Mockito.verify(underlyingChannel, Mockito.atLeastOnce()).shutdown(); + + // Replacement channel shouldn't be touched + Mockito.verify(replacementChannel, Mockito.never()).shutdown(); + Mockito.verify(replacementChannel, Mockito.never()).newCall(Mockito.any(), Mockito.any()); + } + + // call should be allowed to complete and the channel should not be shutdown + @Test + public void callShouldCompleteAfterStarted() throws IOException { + final ManagedChannel underlyingChannel = Mockito.mock(ManagedChannel.class); + ManagedChannel replacementChannel = Mockito.mock(ManagedChannel.class); + + FakeChannelFactory channelFactory = + new FakeChannelFactory(ImmutableList.of(underlyingChannel, replacementChannel)); + pool = ChannelPool.create(ChannelPoolSettings.staticallySized(1), channelFactory); + + // create a mock call when new call comes to the underlying channel + MockClientCall mockClientCall = new MockClientCall<>(1, Status.OK); + MockClientCall spyClientCall = Mockito.spy(mockClientCall); + Mockito.when( + underlyingChannel.newCall( + Mockito.>any(), Mockito.any(CallOptions.class))) + .thenReturn(spyClientCall); + + Answer verifyChannelNotShutdown = + invocation -> { + Mockito.verify(underlyingChannel, Mockito.never()).shutdown(); + return invocation.callRealMethod(); + }; + + // verify that underlying channel is not shutdown when clientCall is still sending message + Mockito.doAnswer(verifyChannelNotShutdown).when(spyClientCall).sendMessage(Mockito.anyString()); + + // create a new call on safeShutdownManagedChannel + @SuppressWarnings("unchecked") + ClientCall.Listener listener = Mockito.mock(ClientCall.Listener.class); + ClientCall call = + pool.newCall(FakeMethodDescriptor.create(), CallOptions.DEFAULT); + + // start clientCall + call.start(listener, new Metadata()); + pool.refresh(); + + // shutdown is not called because there is still an outstanding call + Mockito.verify(underlyingChannel, Mockito.after(200).never()).shutdown(); + // send message and end the call + call.sendMessage("message"); + // shutdown is called because the outstanding call has completed + Mockito.verify(underlyingChannel, Mockito.atLeastOnce()).shutdown(); + } + + // Channel should be shutdown after a refresh all the calls have completed + @Test + public void channelShouldShutdown() throws IOException { + ManagedChannel underlyingChannel = Mockito.mock(ManagedChannel.class); + ManagedChannel replacementChannel = Mockito.mock(ManagedChannel.class); + + FakeChannelFactory channelFactory = + new FakeChannelFactory(ImmutableList.of(underlyingChannel, replacementChannel)); + pool = ChannelPool.create(ChannelPoolSettings.staticallySized(1), channelFactory); + + // create a mock call when new call comes to the underlying channel + MockClientCall mockClientCall = new MockClientCall<>(1, Status.OK); + MockClientCall spyClientCall = Mockito.spy(mockClientCall); + Mockito.when( + underlyingChannel.newCall( + Mockito.>any(), Mockito.any(CallOptions.class))) + .thenReturn(spyClientCall); + + Answer verifyChannelNotShutdown = + invocation -> { + Mockito.verify(underlyingChannel, Mockito.never()).shutdown(); + return invocation.callRealMethod(); + }; + + // verify that underlying channel is not 
shutdown when clientCall is still sending message + Mockito.doAnswer(verifyChannelNotShutdown).when(spyClientCall).sendMessage(Mockito.anyString()); + + // create a new call on safeShutdownManagedChannel + @SuppressWarnings("unchecked") + ClientCall.Listener listener = Mockito.mock(ClientCall.Listener.class); + ClientCall call = + pool.newCall(FakeMethodDescriptor.create(), CallOptions.DEFAULT); + + // start clientCall + call.start(listener, new Metadata()); + // send message and end the call + call.sendMessage("message"); + // shutdown is not called because it has not been shutdown yet + Mockito.verify(underlyingChannel, Mockito.after(200).never()).shutdown(); + pool.refresh(); + // shutdown is called because the outstanding call has completed + Mockito.verify(underlyingChannel, Mockito.atLeastOnce()).shutdown(); + } + + @Test + public void channelRefreshShouldSwapChannels() throws IOException { + ManagedChannel underlyingChannel1 = Mockito.mock(ManagedChannel.class); + ManagedChannel underlyingChannel2 = Mockito.mock(ManagedChannel.class); + + // mock executor service to capture the runnable scheduled, so we can invoke it when we want to + ScheduledExecutorService scheduledExecutorService = + Mockito.mock(ScheduledExecutorService.class); + + Mockito.doReturn(null) + .when(scheduledExecutorService) + .schedule( + Mockito.any(Runnable.class), Mockito.anyLong(), Mockito.eq(TimeUnit.MILLISECONDS)); + + FakeChannelFactory channelFactory = + new FakeChannelFactory(ImmutableList.of(underlyingChannel1, underlyingChannel2)); + pool = + new ChannelPool( + ChannelPoolSettings.staticallySized(1).toBuilder() + .setPreemptiveRefreshEnabled(true) + .build(), + channelFactory, + scheduledExecutorService); + Mockito.reset(underlyingChannel1); + + pool.newCall(FakeMethodDescriptor.create(), CallOptions.DEFAULT); + + Mockito.verify(underlyingChannel1, Mockito.only()) + .newCall(Mockito.>any(), Mockito.any(CallOptions.class)); + + // swap channel + pool.refresh(); + + pool.newCall(FakeMethodDescriptor.create(), CallOptions.DEFAULT); + + Mockito.verify(underlyingChannel2, Mockito.only()) + .newCall(Mockito.>any(), Mockito.any(CallOptions.class)); + } + + @Test + public void channelCountShouldNotChangeWhenOutstandingRpcsAreWithinLimits() throws Exception { + ScheduledExecutorService executor = Mockito.mock(ScheduledExecutorService.class); + + List> startedCalls = new ArrayList<>(); + + ChannelFactory channelFactory = + () -> { + ManagedChannel channel = Mockito.mock(ManagedChannel.class); + Mockito.when(channel.newCall(Mockito.any(), Mockito.any())) + .thenAnswer( + invocation -> { + @SuppressWarnings("unchecked") + ClientCall clientCall = Mockito.mock(ClientCall.class); + startedCalls.add(clientCall); + return clientCall; + }); + return channel; + }; + + pool = + new ChannelPool( + ChannelPoolSettings.builder() + .setInitialChannelCount(2) + .setMinRpcsPerChannel(1) + .setMaxRpcsPerChannel(2) + .build(), + channelFactory, + executor); + assertThat(pool.entries.get()).hasSize(2); + + // Start the minimum number of + for (int i = 0; i < 2; i++) { + ClientCalls.futureUnaryCall( + pool.newCall(BigtableGrpc.getMutateRowMethod(), CallOptions.DEFAULT), + MutateRowRequest.getDefaultInstance()); + } + pool.resize(); + assertThat(pool.entries.get()).hasSize(2); + + // Add enough RPCs to be just at the brink of expansion + for (int i = startedCalls.size(); i < 4; i++) { + ClientCalls.futureUnaryCall( + pool.newCall(BigtableGrpc.getMutateRowMethod(), CallOptions.DEFAULT), + MutateRowRequest.getDefaultInstance()); + } + 
pool.resize(); + assertThat(pool.entries.get()).hasSize(2); + + // Add another RPC to push expansion + pool.newCall(BigtableGrpc.getMutateRowMethod(), CallOptions.DEFAULT); + pool.resize(); + assertThat(pool.entries.get()).hasSize(4); // += ChannelPool::MAX_RESIZE_DELTA + assertThat(startedCalls).hasSize(5); + + // Complete RPCs to the brink of shrinking + @SuppressWarnings("unchecked") + ArgumentCaptor> captor = + ArgumentCaptor.forClass(ClientCall.Listener.class); + Mockito.verify(startedCalls.remove(0)).start(captor.capture(), Mockito.any()); + captor.getValue().onClose(Status.ABORTED, new Metadata()); + // Resize twice: the first round maintains the peak from the last cycle + pool.resize(); + pool.resize(); + assertThat(pool.entries.get()).hasSize(4); + assertThat(startedCalls).hasSize(4); + + // Complete another RPC to trigger shrinking + Mockito.verify(startedCalls.remove(0)).start(captor.capture(), Mockito.any()); + captor.getValue().onClose(Status.ABORTED, new Metadata()); + // Resize twice: the first round maintains the peak from the last cycle + pool.resize(); + pool.resize(); + assertThat(startedCalls).hasSize(3); + // range of channels is [2-3] rounded down average is 2 + assertThat(pool.entries.get()).hasSize(2); + } + + @Test + public void removedIdleChannelsAreShutdown() throws Exception { + ScheduledExecutorService executor = Mockito.mock(ScheduledExecutorService.class); + + List channels = new ArrayList<>(); + + ChannelFactory channelFactory = + () -> { + ManagedChannel channel = Mockito.mock(ManagedChannel.class); + Mockito.when(channel.newCall(Mockito.any(), Mockito.any())) + .thenAnswer( + invocation -> { + @SuppressWarnings("unchecked") + ClientCall clientCall = Mockito.mock(ClientCall.class); + return clientCall; + }); + + channels.add(channel); + return channel; + }; + + pool = + new ChannelPool( + ChannelPoolSettings.builder() + .setInitialChannelCount(2) + .setMinRpcsPerChannel(1) + .setMaxRpcsPerChannel(2) + .build(), + channelFactory, + executor); + assertThat(pool.entries.get()).hasSize(2); + + // With no outstanding RPCs, the pool should shrink + pool.resize(); + assertThat(pool.entries.get()).hasSize(1); + Mockito.verify(channels.get(1), Mockito.times(1)).shutdown(); + } + + @Test + public void removedActiveChannelsAreShutdown() throws Exception { + ScheduledExecutorService executor = Mockito.mock(ScheduledExecutorService.class); + + List channels = new ArrayList<>(); + List> startedCalls = new ArrayList<>(); + + ChannelFactory channelFactory = + () -> { + ManagedChannel channel = Mockito.mock(ManagedChannel.class); + Mockito.when(channel.newCall(Mockito.any(), Mockito.any())) + .thenAnswer( + invocation -> { + @SuppressWarnings("unchecked") + ClientCall clientCall = Mockito.mock(ClientCall.class); + startedCalls.add(clientCall); + return clientCall; + }); + + channels.add(channel); + return channel; + }; + + pool = + new ChannelPool( + ChannelPoolSettings.builder() + .setInitialChannelCount(2) + .setMinRpcsPerChannel(1) + .setMaxRpcsPerChannel(2) + .build(), + channelFactory, + executor); + assertThat(pool.entries.get()).hasSize(2); + + // Start 2 RPCs + for (int i = 0; i < 2; i++) { + ClientCalls.futureUnaryCall( + pool.newCall(BigtableGrpc.getMutateRowMethod(), CallOptions.DEFAULT), + MutateRowRequest.getDefaultInstance()); + } + // Complete the first one + @SuppressWarnings("unchecked") + ArgumentCaptor> captor = + ArgumentCaptor.forClass(ClientCall.Listener.class); + Mockito.verify(startedCalls.get(0)).start(captor.capture(), Mockito.any()); + 
captor.getValue().onClose(Status.ABORTED, new Metadata());
+
+    // With a single RPC, the pool should shrink
+    pool.resize();
+    pool.resize();
+    assertThat(pool.entries.get()).hasSize(1);
+
+    // While the RPC is outstanding, the channel should still be open
+    Mockito.verify(channels.get(1), Mockito.never()).shutdown();
+
+    // Complete the RPC
+    Mockito.verify(startedCalls.get(1)).start(captor.capture(), Mockito.any());
+    captor.getValue().onClose(Status.ABORTED, new Metadata());
+    // Now the channel should be closed
+    Mockito.verify(channels.get(1), Mockito.times(1)).shutdown();
+  }
+
+  @Test
+  public void testReleasingClientCallCancelEarly() throws IOException {
+    @SuppressWarnings("unchecked")
+    ClientCall<MutateRowRequest, MutateRowResponse> mockClientCall =
+        Mockito.mock(ClientCall.class);
+    Mockito.doAnswer(invocation -> null).when(mockClientCall).cancel(Mockito.any(), Mockito.any());
+    ManagedChannel fakeChannel = Mockito.mock(ManagedChannel.class);
+    Mockito.when(fakeChannel.newCall(Mockito.any(), Mockito.any())).thenReturn(mockClientCall);
+    ChannelPoolSettings channelPoolSettings = ChannelPoolSettings.staticallySized(1);
+    ChannelFactory factory = new FakeChannelFactory(ImmutableList.of(fakeChannel));
+    pool = ChannelPool.create(channelPoolSettings, factory);
+
+    ClientCall<MutateRowRequest, MutateRowResponse> call =
+        pool.newCall(BigtableGrpc.getMutateRowMethod(), CallOptions.DEFAULT);
+    call.cancel(null, null);
+
+    IllegalStateException e =
+        Assert.assertThrows(
+            IllegalStateException.class, () -> call.start(new Listener<>() {}, new Metadata()));
+    assertThat(e.getCause()).isInstanceOf(CancellationException.class);
+    assertThat(e.getMessage()).isEqualTo("Call is already cancelled");
+  }
+
+  @Test
+  public void testDoubleRelease() throws Exception {
+    FakeLogHandler logHandler = new FakeLogHandler();
+    ChannelPool.LOG.addHandler(logHandler);
+
+    try {
+      // Create a fake channel pool that's backed by mock channels that simply record invocations
+      @SuppressWarnings("unchecked")
+      ClientCall<MutateRowRequest, MutateRowResponse> mockClientCall =
+          Mockito.mock(ClientCall.class);
+      ManagedChannel fakeChannel = Mockito.mock(ManagedChannel.class);
+      Mockito.when(
+              fakeChannel.newCall(
+                  Mockito.eq(BigtableGrpc.getMutateRowMethod()), Mockito.any(CallOptions.class)))
+          .thenReturn(mockClientCall);
+      ChannelPoolSettings channelPoolSettings = ChannelPoolSettings.staticallySized(1);
+      ChannelFactory factory = new FakeChannelFactory(ImmutableList.of(fakeChannel));
+
+      pool = ChannelPool.create(channelPoolSettings, factory);
+
+      // Start the RPC
+      ListenableFuture<MutateRowResponse> rpcFuture =
+          BigtableGrpc.newFutureStub(pool).mutateRow(MutateRowRequest.getDefaultInstance());
+
+      // Get the server side listener and intentionally close it twice
+      @SuppressWarnings("unchecked")
+      ArgumentCaptor<ClientCall.Listener<MutateRowResponse>> clientCallListenerCaptor =
+          ArgumentCaptor.forClass(ClientCall.Listener.class);
+
+      Mockito.verify(mockClientCall).start(clientCallListenerCaptor.capture(), Mockito.any());
+      clientCallListenerCaptor.getValue().onClose(Status.INTERNAL, new Metadata());
+      clientCallListenerCaptor.getValue().onClose(Status.UNKNOWN, new Metadata());
+
+      // Ensure that the channel pool properly logged the double call and kept the refCount correct
+      assertThat(logHandler.getAllMessages())
+          .contains(
+              "Call is being closed more than once. Please make sure that onClose() is not being"
+                  + " manually called.");
+      assertThat(pool.entries.get()).hasSize(1);
+      ChannelPool.Entry entry = pool.entries.get().get(0);
+      assertThat(entry.outstandingRpcs.get()).isEqualTo(0);
+    } finally {
+      ChannelPool.LOG.removeHandler(logHandler);
+    }
+  }
+
+  static class FakeChannelFactory implements ChannelFactory {
+    private int called = 0;
+    private final List<ManagedChannel> channels;
+    private ChannelPrimer channelPrimer;
+
+    public FakeChannelFactory(List<ManagedChannel> channels) {
+      this.channels = channels;
+    }
+
+    public FakeChannelFactory(List<ManagedChannel> channels, ChannelPrimer channelPrimer) {
+      this.channels = channels;
+      this.channelPrimer = channelPrimer;
+    }
+
+    public ManagedChannel createSingleChannel() {
+      ManagedChannel managedChannel = channels.get(called++);
+      if (this.channelPrimer != null) {
+        this.channelPrimer.primeChannel(managedChannel);
+      }
+      return managedChannel;
+    }
+  }
+
+  static class FakeLogHandler extends Handler {
+    List<LogRecord> records = new ArrayList<>();
+
+    @Override
+    public void publish(LogRecord record) {
+      records.add(record);
+    }
+
+    @Override
+    public void flush() {}
+
+    @Override
+    public void close() throws SecurityException {}
+
+    public List<String> getAllMessages() {
+      return records.stream().map(LogRecord::getMessage).collect(Collectors.toList());
+    }
+  }
+
+  public interface ChannelPrimer {
+    void primeChannel(ManagedChannel managedChannel);
+  }
+
+  static class MockClientCall<RequestT, ResponseT> extends ClientCall<RequestT, ResponseT> {
+
+    private final ResponseT response;
+    private Listener<ResponseT> responseListener;
+    private Metadata headers;
+    private final Status status;
+
+    public MockClientCall(ResponseT response, Status status) {
+      this.response = response;
+      this.status = status;
+    }
+
+    @Override
+    public synchronized void start(Listener<ResponseT> responseListener, Metadata headers) {
+      this.responseListener = responseListener;
+      this.headers = headers;
+    }
+
+    @Override
+    public void request(int numMessages) {}
+
+    @Override
+    public void cancel(@Nullable String message, @Nullable Throwable cause) {}
+
+    @Override
+    public void halfClose() {}
+
+    @Override
+    public void sendMessage(RequestT message) {
+      responseListener.onHeaders(headers);
+      responseListener.onMessage(response);
+      responseListener.onClose(status, headers);
+    }
+  }
+
+  static class FakeMethodDescriptor {
+    // Utility class, uninstantiable.
+    private FakeMethodDescriptor() {}
+
+    public static <T> MethodDescriptor<T, T> create() {
+      return create(MethodDescriptor.MethodType.UNARY, "FakeClient/fake-method");
+    }
+
+    public static <T> MethodDescriptor<T, T> create(
+        MethodDescriptor.MethodType type, String name) {
+      return MethodDescriptor.<T, T>newBuilder()
+          .setType(type)
+          .setFullMethodName(name)
+          .setRequestMarshaller(new FakeMarshaller<T>())
+          .setResponseMarshaller(new FakeMarshaller<T>())
+          .build();
+    }
+
+    private static class FakeMarshaller<T> implements MethodDescriptor.Marshaller<T> {
+      @Override
+      public T parse(InputStream stream) {
+        throw new UnsupportedOperationException("FakeMarshaller doesn't actually do anything");
+      }
+
+      @Override
+      public InputStream stream(T value) {
+        throw new UnsupportedOperationException("FakeMarshaller doesn't actually do anything");
+      }
+    }
+  }
+
+  static final MethodDescriptor<Color, Money> METHOD_RECOGNIZE =
+      MethodDescriptor.<Color, Money>newBuilder()
+          .setType(MethodDescriptor.MethodType.UNARY)
+          .setFullMethodName(generateFullMethodName("google.gax.FakeService", "Recognize"))
+          .setRequestMarshaller(ProtoUtils.marshaller(Color.getDefaultInstance()))
+          .setResponseMarshaller(ProtoUtils.marshaller(Money.getDefaultInstance()))
+          .build();
+
+  public static final MethodDescriptor<Color, Money> METHOD_SERVER_STREAMING_RECOGNIZE =
+      MethodDescriptor.<Color, Money>newBuilder()
+          .setType(MethodDescriptor.MethodType.SERVER_STREAMING)
+          .setFullMethodName(
+              generateFullMethodName("google.gax.FakeService", "ServerStreamingRecognize"))
+          .setRequestMarshaller(ProtoUtils.marshaller(Color.getDefaultInstance()))
+          .setResponseMarshaller(ProtoUtils.marshaller(Money.getDefaultInstance()))
+          .build();
+}
diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/EndpointTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/EndpointTest.java
new file mode 100644
index 00000000000..999b081a246
--- /dev/null
+++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/EndpointTest.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.cloud.bigtable.examples.proxy.commands; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.bigtable.examples.proxy.commands.Endpoint.ArgConverter; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class EndpointTest { + @Test + public void testOk() throws Exception { + ArgConverter argConverter = new ArgConverter(); + Endpoint result = argConverter.convert("some-endpoint:1234"); + assertThat(result).isEqualTo(Endpoint.create("some-endpoint", 1234)); + } + + @Test + public void testMissingPort() throws Exception { + ArgConverter argConverter = new ArgConverter(); + assertThrows(IllegalArgumentException.class, () -> argConverter.convert("some-endpoint:")); + assertThrows(IllegalArgumentException.class, () -> argConverter.convert("some-endpoint")); + } + + @Test + public void testMissingName() throws Exception { + ArgConverter argConverter = new ArgConverter(); + assertThrows(IllegalArgumentException.class, () -> argConverter.convert(":1234")); + } + + @Test + public void testIpv6() throws Exception { + ArgConverter argConverter = new ArgConverter(); + Endpoint result = argConverter.convert("[2561:1900:4545:0003:0200:F8FF:FE21:67CF]:1234"); + assertThat(result) + .isEqualTo(Endpoint.create("[2561:1900:4545:0003:0200:F8FF:FE21:67CF]", 1234)); + } +} diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java new file mode 100644 index 00000000000..23479c25b90 --- /dev/null +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java @@ -0,0 +1,441 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.commands; + +import static org.junit.Assert.assertThrows; +import static org.mockito.AdditionalMatchers.geq; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.verify; + +import com.google.auth.Credentials; +import com.google.bigtable.v2.BigtableGrpc; +import com.google.bigtable.v2.BigtableGrpc.BigtableBlockingStub; +import com.google.bigtable.v2.BigtableGrpc.BigtableImplBase; +import com.google.bigtable.v2.CheckAndMutateRowRequest; +import com.google.bigtable.v2.CheckAndMutateRowResponse; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels; +import com.google.cloud.bigtable.examples.proxy.metrics.Metrics; +import com.google.cloud.bigtable.examples.proxy.metrics.Metrics.MetricsAttributes; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; +import io.grpc.ForwardingServerCall.SimpleForwardingServerCall; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Metadata; +import io.grpc.Metadata.Key; +import io.grpc.MethodDescriptor; +import io.grpc.Server; +import io.grpc.ServerBuilder; +import io.grpc.ServerCall; +import io.grpc.ServerCall.Listener; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.stub.StreamObserver; +import io.grpc.testing.GrpcCleanupRule; +import java.io.IOException; +import java.net.ServerSocket; +import java.net.URI; +import java.time.Duration; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Optional; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingDeque; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.Mock; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; + +@RunWith(JUnit4.class) +public class ServeMetricsTest { + @Rule public final MockitoRule mockitoTestRule = MockitoJUnit.rule(); + + @Mock Metrics mockMetrics; + + @Rule + public final GrpcCleanupRule grpcCleanup = new GrpcCleanupRule().setTimeout(1, TimeUnit.MINUTES); + + private MetadataInterceptor serverMetadataInterceptor = new MetadataInterceptor(); + @Spy FakeDataService dataService = new FakeDataService(); + @Spy FakeCredentials fakeCredentials = new FakeCredentials(); + private ManagedChannel fakeServiceChannel; + private Serve serve; + private ManagedChannel proxyChannel; + + @Before + public void setUp() throws Exception { + Server server = grpcCleanup.register(createServer()); + + fakeServiceChannel = + grpcCleanup.register( + ManagedChannelBuilder.forAddress("localhost", server.getPort()).usePlaintext().build()); + + serve = createAndStartCommand(fakeServiceChannel, fakeCredentials, mockMetrics); + + proxyChannel = + grpcCleanup.register( + ManagedChannelBuilder.forAddress("localhost", serve.listenPort).usePlaintext().build()); + } + + @After + public void tearDown() throws 
Exception { + if (serve != null) { + serve.cleanup(); + } + } + + private Server createServer() throws IOException { + for (int i = 10; i >= 0; i--) { + int port; + try (ServerSocket serverSocket = new ServerSocket(0)) { + port = serverSocket.getLocalPort(); + } + try { + return ServerBuilder.forPort(port) + .intercept(serverMetadataInterceptor) + .addService(dataService) + .build() + .start(); + } catch (IOException e) { + if (i == 0) { + throw e; + } + } + } + throw new IllegalStateException( + "Should never happen, if the server could be started it should've been returned or the last" + + " attempt threw an exception"); + } + + private static Serve createAndStartCommand( + ManagedChannel targetChannel, FakeCredentials targetCredentials, Metrics metrics) + throws IOException { + for (int i = 10; i >= 0; i--) { + Serve s = new Serve(); + s.dataChannel = targetChannel; + s.adminChannel = targetChannel; + s.credentials = targetCredentials; + s.metrics = metrics; + + try (ServerSocket serverSocket = new ServerSocket(0)) { + s.listenPort = serverSocket.getLocalPort(); + } + + try { + s.start(); + return s; + } catch (IOException e) { + if (i == 0) { + throw e; + } + } + } + throw new IllegalStateException( + "Should never happen, if the server could be started it should've been returned or the last" + + " attempt threw an exception"); + } + + @Test + public void testHappyPath() throws IOException { + serverMetadataInterceptor.responseHeaders = + () -> { + Metadata md = new Metadata(); + md.put(Key.of("server-timing", Metadata.ASCII_STRING_MARSHALLER), "dur=1234"); + return md; + }; + + BigtableBlockingStub stub = + BigtableGrpc.newBlockingStub(proxyChannel) + .withInterceptors( + new OutgoingMetadataInterceptor( + ImmutableMap.of( + "x-goog-request-params", + String.format( + "table_name=projects/%s/instances/%s/tables/%s&app_profile_id=%s", + "fake-project", "fake-instance", "fake-table", "fake-profile") + .replaceAll("/", "%2F"), + "x-goog-api-client", + "fake-client"))); + + MetricsAttributes fakeAttrs = new MetricsAttributes() {}; + + doReturn(fakeAttrs).when(mockMetrics).createAttributes(any()); + doAnswer( + invocation -> { + Thread.sleep(10); + return invocation.callRealMethod(); + }) + .when(dataService) + .checkAndMutateRow(any(), any()); + + doAnswer( + invocation -> { + Thread.sleep(10); + return invocation.callRealMethod(); + }) + .when(fakeCredentials) + .getRequestMetadata(any()); + + CheckAndMutateRowRequest request = + CheckAndMutateRowRequest.newBuilder() + .setTableName("project/fake-project/instances/fake-instance/tables/fake-table") + .build(); + CheckAndMutateRowResponse response = stub.checkAndMutateRow(request); + + verify(mockMetrics) + .createAttributes( + eq( + CallLabels.create( + BigtableGrpc.getCheckAndMutateRowMethod(), + Optional.of( + String.format( + "table_name=projects/%s/instances/%s/tables/%s&app_profile_id=%s", + "fake-project", "fake-instance", "fake-table", "fake-profile") + .replaceAll("/", "%2F")), + Optional.empty(), + Optional.empty(), + Optional.empty(), + Optional.of("fake-client")))); + + verify(mockMetrics).recordCallStarted(eq(fakeAttrs)); + verify(mockMetrics).recordCredLatency(eq(fakeAttrs), eq(Status.OK), geq(Duration.ofMillis(10))); + verify(mockMetrics).recordGfeLatency(eq(fakeAttrs), eq(Duration.ofMillis(1234))); + verify(mockMetrics).recordQueueLatency(eq(fakeAttrs), geq(Duration.ZERO)); + verify(mockMetrics).recordRequestSize(eq(fakeAttrs), eq((long) request.getSerializedSize())); + verify(mockMetrics).recordResponseSize(eq(fakeAttrs), 
eq((long) response.getSerializedSize())); + verify(mockMetrics).recordCallLatency(eq(fakeAttrs), eq(Status.OK), geq(Duration.ofMillis(20))); + } + + @Test + public void testMissingGfe() throws IOException { + BigtableBlockingStub stub = + BigtableGrpc.newBlockingStub(proxyChannel) + .withInterceptors( + new OutgoingMetadataInterceptor( + ImmutableMap.of( + "x-goog-request-params", + String.format( + "table_name=projects/%s/instances/%s/tables/%s&app_profile_id=%s", + "fake-project", "fake-instance", "fake-table", "fake-profile") + .replaceAll("/", "%2F"), + "x-goog-api-client", + "fake-client"))); + + MetricsAttributes fakeAttrs = new MetricsAttributes() {}; + doReturn(fakeAttrs).when(mockMetrics).createAttributes(any()); + + CheckAndMutateRowRequest request = + CheckAndMutateRowRequest.newBuilder() + .setTableName("project/fake-project/instances/fake-instance/tables/fake-table") + .build(); + CheckAndMutateRowResponse response = stub.checkAndMutateRow(request); + + verify(mockMetrics) + .createAttributes( + eq( + CallLabels.create( + BigtableGrpc.getCheckAndMutateRowMethod(), + Optional.of( + String.format( + "table_name=projects/%s/instances/%s/tables/%s&app_profile_id=%s", + "fake-project", "fake-instance", "fake-table", "fake-profile") + .replaceAll("/", "%2F")), + Optional.empty(), + Optional.empty(), + Optional.empty(), + Optional.of("fake-client")))); + + verify(mockMetrics).recordGfeHeaderMissing(eq(fakeAttrs)); + } + + @Test + public void testError() throws IOException { + final BigtableBlockingStub stub = + BigtableGrpc.newBlockingStub(proxyChannel) + .withInterceptors( + new OutgoingMetadataInterceptor( + ImmutableMap.of( + "x-goog-request-params", + String.format( + "table_name=projects/%s/instances/%s/tables/%s&app_profile_id=%s", + "fake-project", "fake-instance", "fake-table", "fake-profile") + .replaceAll("/", "%2F"), + "x-goog-api-client", + "fake-client"))); + + doAnswer( + invocation -> { + Thread.sleep(10); + return invocation.callRealMethod(); + }) + .when(fakeCredentials) + .getRequestMetadata(any()); + + doAnswer( + invocation -> { + Thread.sleep(10); + invocation + .getArgument(1, StreamObserver.class) + .onError(Status.INTERNAL.asRuntimeException()); + return null; + }) + .when(dataService) + .checkAndMutateRow(any(), any()); + + MetricsAttributes fakeAttrs = new MetricsAttributes() {}; + doReturn(fakeAttrs).when(mockMetrics).createAttributes(any()); + + CheckAndMutateRowRequest request = + CheckAndMutateRowRequest.newBuilder() + .setTableName("project/fake-project/instances/fake-instance/tables/fake-table") + .build(); + assertThrows(StatusRuntimeException.class, () -> stub.checkAndMutateRow(request)); + + verify(mockMetrics) + .createAttributes( + eq( + CallLabels.create( + BigtableGrpc.getCheckAndMutateRowMethod(), + Optional.of( + String.format( + "table_name=projects/%s/instances/%s/tables/%s&app_profile_id=%s", + "fake-project", "fake-instance", "fake-table", "fake-profile") + .replaceAll("/", "%2F")), + Optional.empty(), + Optional.empty(), + Optional.empty(), + Optional.of("fake-client")))); + + verify(mockMetrics).recordCallStarted(eq(fakeAttrs)); + verify(mockMetrics).recordCredLatency(eq(fakeAttrs), eq(Status.OK), geq(Duration.ofMillis(10))); + verify(mockMetrics).recordQueueLatency(eq(fakeAttrs), geq(Duration.ZERO)); + verify(mockMetrics).recordRequestSize(eq(fakeAttrs), eq((long) request.getSerializedSize())); + verify(mockMetrics).recordResponseSize(eq(fakeAttrs), eq(0L)); + verify(mockMetrics) + .recordCallLatency(eq(fakeAttrs), eq(Status.INTERNAL), 
geq(Duration.ofMillis(20))); + } + + static class MetadataInterceptor implements ServerInterceptor { + private BlockingQueue requestHeaders = new LinkedBlockingDeque<>(); + volatile Supplier responseHeaders = Metadata::new; + volatile Supplier responseTrailers = Metadata::new; + + @Override + public Listener interceptCall( + ServerCall call, Metadata metadata, ServerCallHandler next) { + requestHeaders.add(metadata); + return next.startCall( + new SimpleForwardingServerCall(call) { + @Override + public void sendHeaders(Metadata headers) { + headers.merge(responseHeaders.get()); + super.sendHeaders(headers); + } + + @Override + public void close(Status status, Metadata trailers) { + trailers.merge(responseTrailers.get()); + super.close(status, trailers); + } + }, + metadata); + } + } + + private static class FakeDataService extends BigtableImplBase { + + @Override + public void checkAndMutateRow( + CheckAndMutateRowRequest request, + StreamObserver responseObserver) { + responseObserver.onNext( + CheckAndMutateRowResponse.newBuilder().setPredicateMatched(true).build()); + responseObserver.onCompleted(); + } + } + + private static class FakeCredentials extends Credentials { + private static final String HEADER_NAME = "authorization"; + private String fakeValue = "fake-token"; + + @Override + public String getAuthenticationType() { + return "fake"; + } + + @Override + public Map> getRequestMetadata(URI uri) throws IOException { + return Map.of(HEADER_NAME, Lists.newArrayList(fakeValue)); + } + + @Override + public boolean hasRequestMetadata() { + return true; + } + + @Override + public boolean hasRequestMetadataOnly() { + return true; + } + + @Override + public void refresh() throws IOException { + // noop + } + } + + private static class OutgoingMetadataInterceptor implements ClientInterceptor { + private final Map metadata; + + private OutgoingMetadataInterceptor(Map metadata) { + this.metadata = metadata; + } + + @Override + public ClientCall interceptCall( + MethodDescriptor methodDescriptor, CallOptions callOptions, Channel channel) { + return new SimpleForwardingClientCall<>(channel.newCall(methodDescriptor, callOptions)) { + @Override + public void start(Listener responseListener, Metadata headers) { + for (Entry entry : metadata.entrySet()) { + headers.put(Key.of(entry.getKey(), Metadata.ASCII_STRING_MARSHALLER), entry.getValue()); + } + super.start(responseListener, headers); + } + }; + } + } +} diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeParsingTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeParsingTest.java new file mode 100644 index 00000000000..d3c458ae2d4 --- /dev/null +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeParsingTest.java @@ -0,0 +1,73 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.commands; + +import static com.google.common.truth.Truth.assertThat; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import picocli.CommandLine; + +@RunWith(JUnit4.class) +public class ServeParsingTest { + @Test + public void testMinimalArgs() { + Serve serve = new Serve(); + new CommandLine(serve).parseArgs("--listen-port=1234", "--metrics-project-id=fake-project"); + + assertThat(serve.listenPort).isEqualTo(1234); + assertThat(serve.metricsProjectId).isEqualTo("fake-project"); + assertThat(serve.userAgent).isEqualTo("bigtable-java-proxy"); + assertThat(serve.dataEndpoint).isEqualTo(Endpoint.create("bigtable.googleapis.com", 443)); + assertThat(serve.adminEndpoint).isEqualTo(Endpoint.create("bigtableadmin.googleapis.com", 443)); + } + + @Test + public void testDataEndpointOverride() { + Serve serve = new Serve(); + new CommandLine(serve) + .parseArgs( + "--listen-port=1234", + "--metrics-project-id=fake-project", + "--bigtable-data-endpoint=example.com:1234"); + + assertThat(serve.listenPort).isEqualTo(1234); + assertThat(serve.dataEndpoint).isEqualTo(Endpoint.create("example.com", 1234)); + } + + @Test + public void testAdminDataEndpointOverride() { + Serve serve = new Serve(); + new CommandLine(serve) + .parseArgs( + "--listen-port=1234", + "--metrics-project-id=fake-project", + "--bigtable-admin-endpoint=example.com:1234"); + + assertThat(serve.listenPort).isEqualTo(1234); + assertThat(serve.adminEndpoint).isEqualTo(Endpoint.create("example.com", 1234)); + } + + @Test + public void testMetricsProjectIdOverride() { + Serve serve = new Serve(); + new CommandLine(serve) + .parseArgs("--listen-port=1234", "--metrics-project-id=other-fake-project"); + assertThat(serve.metricsProjectId).isEqualTo("other-fake-project"); + } +} diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeTest.java new file mode 100644 index 00000000000..69be009dd5b --- /dev/null +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeTest.java @@ -0,0 +1,597 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.commands; + +import static com.google.cloud.bigtable.examples.proxy.utils.ContextSubject.assertThat; +import static com.google.cloud.bigtable.examples.proxy.utils.MetadataSubject.assertThat; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.auth.Credentials; +import com.google.bigtable.admin.v2.BigtableInstanceAdminGrpc; +import com.google.bigtable.admin.v2.BigtableInstanceAdminGrpc.BigtableInstanceAdminFutureStub; +import com.google.bigtable.admin.v2.BigtableInstanceAdminGrpc.BigtableInstanceAdminImplBase; +import com.google.bigtable.admin.v2.BigtableTableAdminGrpc; +import com.google.bigtable.admin.v2.BigtableTableAdminGrpc.BigtableTableAdminFutureStub; +import com.google.bigtable.admin.v2.BigtableTableAdminGrpc.BigtableTableAdminImplBase; +import com.google.bigtable.admin.v2.GetInstanceRequest; +import com.google.bigtable.admin.v2.GetTableRequest; +import com.google.bigtable.admin.v2.Instance; +import com.google.bigtable.admin.v2.Table; +import com.google.bigtable.v2.BigtableGrpc; +import com.google.bigtable.v2.BigtableGrpc.BigtableFutureStub; +import com.google.bigtable.v2.BigtableGrpc.BigtableImplBase; +import com.google.bigtable.v2.CheckAndMutateRowRequest; +import com.google.bigtable.v2.CheckAndMutateRowResponse; +import com.google.cloud.bigtable.examples.proxy.metrics.NoopMetrics; +import com.google.common.collect.Lists; +import com.google.common.collect.Range; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.longrunning.GetOperationRequest; +import com.google.longrunning.Operation; +import com.google.longrunning.OperationsGrpc; +import com.google.longrunning.OperationsGrpc.OperationsFutureStub; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.Context; +import io.grpc.Deadline; +import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; +import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener; +import io.grpc.ForwardingServerCall.SimpleForwardingServerCall; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Metadata; +import io.grpc.Metadata.Key; +import io.grpc.MethodDescriptor; +import io.grpc.ServerCall; +import io.grpc.ServerCall.Listener; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; +import io.grpc.Status; +import io.grpc.inprocess.InProcessChannelBuilder; +import io.grpc.inprocess.InProcessServerBuilder; +import io.grpc.stub.StreamObserver; +import io.grpc.testing.GrpcCleanupRule; +import java.io.IOException; +import java.net.ServerSocket; +import java.net.URI; +import java.time.Duration; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.BlockingDeque; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.LinkedBlockingDeque; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ServeTest { + private final String targetServerName = 
UUID.randomUUID().toString(); + + @Rule + public final GrpcCleanupRule grpcCleanup = new GrpcCleanupRule().setTimeout(1, TimeUnit.MINUTES); + + // Fake targets + private CallContextInterceptor callContextInterceptor; + private MetadataInterceptor metadataInterceptor; + private FakeDataService dataService; + private FakeInstanceAdminService instanceAdminService; + private FakeTableAdminService tableAdminService; + private OperationService operationService; + private ManagedChannel fakeServiceChannel; + private FakeCredentials fakeCredentials; + + // Proxy + private Serve serve; + private ManagedChannel proxyChannel; + + @Before + public void setUp() throws IOException { + // Create the fake target + callContextInterceptor = new CallContextInterceptor(); + metadataInterceptor = new MetadataInterceptor(); + dataService = new FakeDataService(); + instanceAdminService = new FakeInstanceAdminService(); + tableAdminService = new FakeTableAdminService(); + operationService = new OperationService(); + + fakeCredentials = new FakeCredentials(); + + grpcCleanup.register( + InProcessServerBuilder.forName(targetServerName) + .intercept(callContextInterceptor) + .intercept(metadataInterceptor) + .addService(dataService) + .addService(instanceAdminService) + .addService(tableAdminService) + .addService(operationService) + .build() + .start()); + + fakeServiceChannel = + grpcCleanup.register( + InProcessChannelBuilder.forName(targetServerName).usePlaintext().build()); + + // Create the proxy + // Inject fakes for upstream calls. For unit tests we want to shim communications to the + // bigtable service. + serve = createAndStartCommand(fakeServiceChannel, fakeCredentials); + + proxyChannel = + grpcCleanup.register( + ManagedChannelBuilder.forAddress("localhost", serve.listenPort).usePlaintext().build()); + } + + @After + public void tearDown() throws InterruptedException { + if (serve != null) { + serve.cleanup(); + } + } + + @Test + public void testDataRpcOk() throws InterruptedException, ExecutionException, TimeoutException { + BigtableFutureStub proxyStub = BigtableGrpc.newFutureStub(proxyChannel); + + CheckAndMutateRowRequest request = + CheckAndMutateRowRequest.newBuilder().setTableName("some-table").build(); + final ListenableFuture proxyFuture = + proxyStub.checkAndMutateRow(request); + StreamObserver serverObserver = + dataService + .calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .poll(1, TimeUnit.SECONDS); + + assertWithMessage("Timed out waiting for the proxied RPC on the fake server") + .that(serverObserver) + .isNotNull(); + + CheckAndMutateRowResponse expectedResponse = + CheckAndMutateRowResponse.newBuilder().setPredicateMatched(true).build(); + + serverObserver.onNext(expectedResponse); + serverObserver.onCompleted(); + + CheckAndMutateRowResponse r = proxyFuture.get(1, TimeUnit.SECONDS); + assertThat(r).isEqualTo(expectedResponse); + } + + @Test + public void testInstanceRpcOk() + throws InterruptedException, ExecutionException, TimeoutException { + BigtableInstanceAdminFutureStub proxyStub = + BigtableInstanceAdminGrpc.newFutureStub(proxyChannel); + + GetInstanceRequest request = GetInstanceRequest.newBuilder().setName("some-instance").build(); + final ListenableFuture proxyFuture = proxyStub.getInstance(request); + StreamObserver serverObserver = + instanceAdminService + .calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .poll(1, TimeUnit.SECONDS); + + assertWithMessage("Timed out waiting for the proxied RPC on the fake server") + 
.that(serverObserver) + .isNotNull(); + + Instance expectedResponse = Instance.newBuilder().setName("some-instance").build(); + + serverObserver.onNext(expectedResponse); + serverObserver.onCompleted(); + + Instance r = proxyFuture.get(1, TimeUnit.SECONDS); + assertThat(r).isEqualTo(expectedResponse); + } + + @Test + public void testTableRpcOk() throws InterruptedException, ExecutionException, TimeoutException { + BigtableTableAdminFutureStub proxyStub = BigtableTableAdminGrpc.newFutureStub(proxyChannel); + + GetTableRequest request = GetTableRequest.newBuilder().setName("some-table").build(); + final ListenableFuture proxyFuture = proxyStub.getTable(request); + StreamObserver
serverObserver = + tableAdminService + .calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .poll(1, TimeUnit.SECONDS); + + assertWithMessage("Timed out waiting for the proxied RPC on the fake server") + .that(serverObserver) + .isNotNull(); + + Table expectedResponse = Table.newBuilder().setName("some-table").build(); + + serverObserver.onNext(expectedResponse); + serverObserver.onCompleted(); + + Table r = proxyFuture.get(1, TimeUnit.SECONDS); + assertThat(r).isEqualTo(expectedResponse); + } + + @Test + public void testOpRpcOk() throws InterruptedException, ExecutionException, TimeoutException { + OperationsFutureStub proxyStub = OperationsGrpc.newFutureStub(proxyChannel); + + GetOperationRequest request = GetOperationRequest.newBuilder().setName("some-table").build(); + final ListenableFuture proxyFuture = proxyStub.getOperation(request); + StreamObserver serverObserver = + operationService + .calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .poll(1, TimeUnit.SECONDS); + + if (proxyFuture.isDone()) { + proxyFuture.get(); + } + assertWithMessage("Timed out waiting for the proxied RPC on the fake server") + .that(serverObserver) + .isNotNull(); + + Operation expectedResponse = Operation.newBuilder().setName("some-table").build(); + + serverObserver.onNext(expectedResponse); + serverObserver.onCompleted(); + + Operation r = proxyFuture.get(1, TimeUnit.SECONDS); + assertThat(r).isEqualTo(expectedResponse); + } + + @Test + public void testMetadataProxy() + throws InterruptedException, ExecutionException, TimeoutException { + Metadata responseMetadata = new Metadata(); + responseMetadata.put(Key.of("resp-header", Metadata.ASCII_STRING_MARSHALLER), "resp-value"); + metadataInterceptor.responseHeaders = () -> responseMetadata; + + Metadata trailers = new Metadata(); + trailers.put(Key.of("trailer", Metadata.ASCII_STRING_MARSHALLER), "trailer-value"); + metadataInterceptor.responseTrailers = () -> trailers; + + AtomicReference clientRecvHeader = new AtomicReference<>(); + AtomicReference clientRecvTrailer = new AtomicReference<>(); + + BigtableFutureStub proxyStub = + BigtableGrpc.newFutureStub(proxyChannel) + .withInterceptors( + new ClientInterceptor() { + @Override + public ClientCall interceptCall( + MethodDescriptor methodDescriptor, + CallOptions callOptions, + Channel channel) { + return new SimpleForwardingClientCall<>( + channel.newCall(methodDescriptor, callOptions)) { + @Override + public void start(Listener responseListener, Metadata headers) { + headers.put( + Key.of("client-sent-header", Metadata.ASCII_STRING_MARSHALLER), + "client-sent-header-value"); + super.start( + new SimpleForwardingClientCallListener(responseListener) { + @Override + public void onHeaders(Metadata headers) { + clientRecvHeader.set(headers); + super.onHeaders(headers); + } + + @Override + public void onClose(Status status, Metadata trailers) { + clientRecvTrailer.set(trailers); + super.onClose(status, trailers); + } + }, + headers); + } + }; + } + }); + + CheckAndMutateRowRequest request = + CheckAndMutateRowRequest.newBuilder().setTableName("some-table").build(); + final ListenableFuture proxyFuture = + proxyStub.checkAndMutateRow(request); + StreamObserver serverObserver = + dataService + .calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .poll(1, TimeUnit.SECONDS); + + assertWithMessage("Timed out waiting for the proxied RPC on the fake server") + .that(serverObserver) + .isNotNull(); + + 
serverObserver.onNext(CheckAndMutateRowResponse.newBuilder().setPredicateMatched(true).build()); + serverObserver.onCompleted(); + + proxyFuture.get(1, TimeUnit.SECONDS); + + assertThat(metadataInterceptor.requestHeaders.poll(1, TimeUnit.SECONDS)) + .hasValue("client-sent-header", "client-sent-header-value"); + + assertThat(clientRecvHeader.get()).hasValue("resp-header", "resp-value"); + assertThat(clientRecvTrailer.get()).hasValue("trailer", "trailer-value"); + } + + @Test + public void testDeadlinePropagation() + throws InterruptedException, ExecutionException, TimeoutException { + + Deadline originalDeadline = Deadline.after(10, TimeUnit.MINUTES); + + BigtableFutureStub proxyStub = + BigtableGrpc.newFutureStub(proxyChannel).withDeadline(originalDeadline); + + CheckAndMutateRowRequest request = + CheckAndMutateRowRequest.newBuilder().setTableName("some-table").build(); + final ListenableFuture proxyFuture = + proxyStub.checkAndMutateRow(request); + StreamObserver serverObserver = + dataService + .calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .poll(1, TimeUnit.SECONDS); + + assertWithMessage("Timed out waiting for the proxied RPC on the fake server") + .that(serverObserver) + .isNotNull(); + + serverObserver.onNext(CheckAndMutateRowResponse.newBuilder().setPredicateMatched(true).build()); + serverObserver.onCompleted(); + + proxyFuture.get(1, TimeUnit.SECONDS); + + Context serverContext = callContextInterceptor.contexts.poll(1, TimeUnit.SECONDS); + assertThat(serverContext) + .hasRemainingDeadlineThat() + .isIn(Range.closed(Duration.ofMinutes(9), Duration.ofMinutes(10))); + } + + @Test + public void testCredentials() throws InterruptedException, ExecutionException, TimeoutException { + BigtableFutureStub proxyStub = BigtableGrpc.newFutureStub(proxyChannel); + + CheckAndMutateRowRequest request = + CheckAndMutateRowRequest.newBuilder().setTableName("some-table").build(); + final ListenableFuture proxyFuture = + proxyStub.checkAndMutateRow(request); + StreamObserver serverObserver = + dataService + .calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .poll(1, TimeUnit.SECONDS); + + assertWithMessage("Timed out waiting for the proxied RPC on the fake server") + .that(serverObserver) + .isNotNull(); + + serverObserver.onNext(CheckAndMutateRowResponse.newBuilder().setPredicateMatched(true).build()); + serverObserver.onCompleted(); + proxyFuture.get(1, TimeUnit.SECONDS); + + assertThat(metadataInterceptor.requestHeaders.poll(1, TimeUnit.SECONDS)) + .hasValue("authorization", "fake-token"); + } + + @Test + public void testCredentialsClobber() + throws InterruptedException, ExecutionException, TimeoutException { + BigtableFutureStub proxyStub = + BigtableGrpc.newFutureStub(proxyChannel) + .withInterceptors( + new ClientInterceptor() { + @Override + public ClientCall interceptCall( + MethodDescriptor methodDescriptor, + CallOptions callOptions, + Channel channel) { + return new SimpleForwardingClientCall( + channel.newCall(methodDescriptor, callOptions)) { + @Override + public void start(Listener responseListener, Metadata headers) { + headers.put( + Metadata.Key.of("authorization", Metadata.ASCII_STRING_MARSHALLER), + "pre-proxied-value"); + super.start(responseListener, headers); + } + }; + } + }); + + CheckAndMutateRowRequest request = + CheckAndMutateRowRequest.newBuilder().setTableName("some-table").build(); + final ListenableFuture proxyFuture = + proxyStub.checkAndMutateRow(request); + StreamObserver serverObserver = + dataService + 
.calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .poll(1, TimeUnit.SECONDS); + + assertWithMessage("Timed out waiting for the proxied RPC on the fake server") + .that(serverObserver) + .isNotNull(); + + serverObserver.onNext(CheckAndMutateRowResponse.newBuilder().setPredicateMatched(true).build()); + serverObserver.onCompleted(); + proxyFuture.get(1, TimeUnit.SECONDS); + + Metadata serverRequestHeaders = metadataInterceptor.requestHeaders.poll(1, TimeUnit.SECONDS); + assertThat(serverRequestHeaders).hasValue("authorization", "fake-token"); + } + + private static Serve createAndStartCommand( + ManagedChannel targetChannel, FakeCredentials targetCredentials) throws IOException { + for (int i = 10; i >= 0; i--) { + Serve s = new Serve(); + s.dataChannel = targetChannel; + s.adminChannel = targetChannel; + s.credentials = targetCredentials; + s.metrics = new NoopMetrics(); + + try (ServerSocket serverSocket = new ServerSocket(0)) { + s.listenPort = serverSocket.getLocalPort(); + } + + try { + s.start(); + return s; + } catch (IOException e) { + if (i == 0) { + throw e; + } + } + } + throw new IllegalStateException( + "Should never happen, if the server could be started it should've been returned or the last" + + " attempt threw an exception"); + } + + static class CallContextInterceptor implements ServerInterceptor { + BlockingQueue contexts = new LinkedBlockingDeque<>(); + + @Override + public Listener interceptCall( + ServerCall call, Metadata headers, ServerCallHandler next) { + + contexts.add(Context.current()); + return next.startCall(call, headers); + } + } + + static class MetadataInterceptor implements ServerInterceptor { + private BlockingQueue requestHeaders = new LinkedBlockingDeque<>(); + volatile Supplier responseHeaders = Metadata::new; + volatile Supplier responseTrailers = Metadata::new; + + @Override + public Listener interceptCall( + ServerCall call, Metadata metadata, ServerCallHandler next) { + requestHeaders.add(metadata); + return next.startCall( + new SimpleForwardingServerCall(call) { + @Override + public void sendHeaders(Metadata headers) { + headers.merge(responseHeaders.get()); + super.sendHeaders(headers); + } + + @Override + public void close(Status status, Metadata trailers) { + trailers.merge(responseTrailers.get()); + super.close(status, trailers); + } + }, + metadata); + } + } + + private static class FakeDataService extends BigtableImplBase { + private final ConcurrentHashMap< + CheckAndMutateRowRequest, BlockingDeque>> + calls = new ConcurrentHashMap<>(); + + @Override + public void checkAndMutateRow( + CheckAndMutateRowRequest request, + StreamObserver responseObserver) { + calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .add(responseObserver); + } + } + + private static class FakeInstanceAdminService extends BigtableInstanceAdminImplBase { + private final ConcurrentHashMap>> + calls = new ConcurrentHashMap<>(); + + @Override + public void getInstance(GetInstanceRequest request, StreamObserver responseObserver) { + calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .add(responseObserver); + } + } + + private static class FakeTableAdminService extends BigtableTableAdminImplBase { + private final ConcurrentHashMap>> calls = + new ConcurrentHashMap<>(); + + @Override + public void getTable(GetTableRequest request, StreamObserver
<Table> responseObserver) {
+      calls
+          .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>())
+          .add(responseObserver);
+    }
+  }
+
+  private static class OperationService extends OperationsGrpc.OperationsImplBase {
+    private final ConcurrentHashMap<GetOperationRequest, BlockingDeque<StreamObserver<Operation>>>
+        calls = new ConcurrentHashMap<>();
+
+    @Override
+    public void getOperation(
+        GetOperationRequest request, StreamObserver<Operation> responseObserver) {
+      calls
+          .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>())
+          .add(responseObserver);
+    }
+  }
+
+  private static class FakeCredentials extends Credentials {
+    private static final String HEADER_NAME = "authorization";
+    private String fakeValue = "fake-token";
+
+    @Override
+    public String getAuthenticationType() {
+      return "fake";
+    }
+
+    @Override
+    public Map<String, List<String>> getRequestMetadata(URI uri) throws IOException {
+      return Map.of(HEADER_NAME, Lists.newArrayList(fakeValue));
+    }
+
+    @Override
+    public boolean hasRequestMetadata() {
+      return true;
+    }
+
+    @Override
+    public boolean hasRequestMetadataOnly() {
+      return true;
+    }
+
+    @Override
+    public void refresh() throws IOException {
+      // noop
+    }
+  }
+}
diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/core/CallLabelsTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/core/CallLabelsTest.java
new file mode 100644
index 00000000000..c17278c2e8d
--- /dev/null
+++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/core/CallLabelsTest.java
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.cloud.bigtable.examples.proxy.core; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.bigtable.v2.BigtableGrpc; +import com.google.bigtable.v2.PingAndWarmRequest; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels.ParsingException; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels.PrimingKey; +import io.grpc.Metadata; +import java.util.Optional; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class CallLabelsTest { + @Test + public void testAllBasic() throws ParsingException { + Metadata md = new Metadata(); + md.put( + CallLabels.REQUEST_PARAMS, + "table_name=projects/p/instances/i/tables/t&app_profile_id=a".replaceAll("/", "%2F")); + md.put(CallLabels.LEGACY_RESOURCE_PREFIX, "projects/p/instances/i/tables/t"); + md.put(CallLabels.ROUTING_COOKIE, "some-opaque-string"); + md.put(CallLabels.FEATURE_FLAGS, "some-serialized-features-string"); + md.put(CallLabels.API_CLIENT, "some-client"); + CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); + + assertThat(callLabels.getRequestParams()) + .isEqualTo( + Optional.of("table_name=projects%2Fp%2Finstances%2Fi%2Ftables%2Ft&app_profile_id=a")); + assertThat(callLabels.getLegacyResourcePrefix()) + .isEqualTo(Optional.of("projects/p/instances/i/tables/t")); + assertThat(callLabels.getRoutingCookie()).isEqualTo(Optional.of("some-opaque-string")); + assertThat(callLabels.getEncodedFeatures()) + .isEqualTo(Optional.of("some-serialized-features-string")); + assertThat(callLabels.getApiClient()).isEqualTo(Optional.of("some-client")); + + assertThat(callLabels.extractAppProfileId()).isEqualTo(Optional.of("a")); + assertThat(callLabels.extractResourceName()) + .isEqualTo(Optional.of("projects/p/instances/i/tables/t")); + } + + @Test + public void testResourceEscaped() throws ParsingException { + Metadata md = new Metadata(); + md.put( + CallLabels.REQUEST_PARAMS, + "table_name=projects/p/instances/i/tables/t".replace("/", "%2F")); + CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); + + assertThat(callLabels.extractResourceName()) + .isEqualTo(Optional.of("projects/p/instances/i/tables/t")); + } + + @Test + public void testEmpty() throws ParsingException { + Metadata md = new Metadata(); + CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); + + assertThat(callLabels.extractResourceName()).isEqualTo(Optional.empty()); + assertThat(callLabels.extractAppProfileId()).isEqualTo(Optional.empty()); + } + + @Test + public void testLegacyFallback() throws ParsingException { + Metadata md = new Metadata(); + md.put(CallLabels.LEGACY_RESOURCE_PREFIX, "projects/p/instances/i/tables/t"); + CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); + + assertThat(callLabels.extractResourceName()) + .isEqualTo(Optional.of("projects/p/instances/i/tables/t")); + } + + @Test + public void testMalformed1() throws ParsingException { + Metadata md = new Metadata(); + md.put(CallLabels.REQUEST_PARAMS, "table_name="); + CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); + + assertThat(callLabels.extractResourceName()).isEqualTo(Optional.empty()); + } + + @Test + public void testMalformed2() throws ParsingException { + Metadata md = new Metadata(); + md.put(CallLabels.REQUEST_PARAMS, "&"); + CallLabels callLabels = 
CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); + + assertThat(callLabels.extractResourceName()).isEqualTo(Optional.empty()); + } + + @Test + public void testMalformed3() throws ParsingException { + Metadata md = new Metadata(); + md.put(CallLabels.REQUEST_PARAMS, "table_name=&"); + CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); + + assertThat(callLabels.extractResourceName()).isEqualTo(Optional.empty()); + } + + @Test + public void testMalformed4() throws ParsingException { + Metadata md = new Metadata(); + md.put(CallLabels.REQUEST_PARAMS, "table_name=%s"); + CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); + + assertThrows(ParsingException.class, callLabels::extractResourceName); + } + + @Test + public void testPrimingKey() throws ParsingException { + final String tableName = "projects/myp/instances/myi/tables/myt"; + final String encodedTableName = "projects%2Fmyp%2Finstances%2Fmyi%2Ftables%2Fmyt"; + final String instanceName = "projects/myp/instances/myi"; + final String encodedInstanceName = "projects%2Fmyp%2Finstances%2Fmyi"; + final String appProfileId = "mya"; + + CallLabels callLabels = + CallLabels.create( + BigtableGrpc.getMutateRowMethod(), + Optional.of( + String.format("table_name=%s&app_profile_id=%s", encodedTableName, appProfileId)), + Optional.of(tableName), + Optional.of("opaque-cookie"), + Optional.of("encoded-features"), + Optional.of("some-client")); + PrimingKey key = PrimingKey.from(callLabels).get(); + + assertThat(key.getAppProfileId()).isEqualTo(Optional.of("mya")); + assertThat(key.getName()).isEqualTo(instanceName); + + Metadata m = new Metadata(); + + m.put( + CallLabels.REQUEST_PARAMS, + String.format("name=%s&app_profile_id=%s", encodedInstanceName, appProfileId)); + m.put(CallLabels.LEGACY_RESOURCE_PREFIX, instanceName); + m.put(CallLabels.ROUTING_COOKIE, "opaque-cookie"); + m.put(CallLabels.FEATURE_FLAGS, "encoded-features"); + m.put(CallLabels.API_CLIENT, "some-client"); + + assertThat(key.composeMetadata().toString()).isEqualTo(m.toString()); + + assertThat(key.composeProto()) + .isEqualTo( + PingAndWarmRequest.newBuilder() + .setName(instanceName) + .setAppProfileId(appProfileId) + .build()); + } +} diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImplTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImplTest.java new file mode 100644 index 00000000000..7fd741a5445 --- /dev/null +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImplTest.java @@ -0,0 +1,91 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.metrics; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.bigtable.v2.BigtableGrpc; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.MeterProvider; +import java.util.Optional; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.Answers; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; + +@RunWith(JUnit4.class) +public class MetricsImplTest { + @Rule public final MockitoRule mockitoTestRule = MockitoJUnit.rule(); + + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + MeterProvider mockMeterProvider; + + private MetricsImpl metrics; + + @Before + public void setUp() throws Exception { + metrics = new MetricsImpl(mockMeterProvider); + } + + @Test + public void testBasic() { + CallLabels callLabels = + CallLabels.create( + BigtableGrpc.getMutateRowMethod(), + Optional.of( + "table_name=projects/p/instances/i/tables/t&app_profile_id=a" + .replaceAll("/", "%2F")), + Optional.of("projects/p/instances/i/tables/t"), + Optional.of("opaque-cookie"), + Optional.of("encoded-features"), + Optional.of("some-client")); + + Attributes attrs = metrics.createAttributes(callLabels).getAttributes(); + assertThat(attrs.asMap()) + .containsAtLeast( + AttributeKey.stringKey("api_client"), "some-client", + AttributeKey.stringKey("resource"), "projects/p/instances/i/tables/t", + AttributeKey.stringKey("app_profile"), "a", + AttributeKey.stringKey("method"), "google.bigtable.v2.Bigtable/MutateRow"); + } + + @Test + public void testMissing() { + CallLabels callLabels = + CallLabels.create( + BigtableGrpc.getMutateRowMethod(), + Optional.empty(), + Optional.empty(), + Optional.empty(), + Optional.empty(), + Optional.empty()); + Attributes attrs = metrics.createAttributes(callLabels).getAttributes(); + assertThat(attrs.asMap()) + .containsAtLeast( + AttributeKey.stringKey("api_client"), "", + AttributeKey.stringKey("resource"), "", + AttributeKey.stringKey("app_profile"), "", + AttributeKey.stringKey("method"), "google.bigtable.v2.Bigtable/MutateRow"); + } +} diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java new file mode 100644 index 00000000000..0fb2b33289f --- /dev/null +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java @@ -0,0 +1,66 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.metrics; + +import com.google.cloud.bigtable.examples.proxy.core.CallLabels; +import io.grpc.ConnectivityState; +import io.grpc.Status; +import java.time.Duration; + +public class NoopMetrics implements Metrics { + + @Override + public MetricsAttributes createAttributes(CallLabels callLabels) { + return null; + } + + @Override + public void recordCallStarted(MetricsAttributes attrs) {} + + @Override + public void recordCredLatency(MetricsAttributes attrs, Status status, Duration duration) {} + + @Override + public void recordQueueLatency(MetricsAttributes attrs, Duration duration) {} + + @Override + public void recordRequestSize(MetricsAttributes attrs, long size) {} + + @Override + public void recordResponseSize(MetricsAttributes attrs, long size) {} + + @Override + public void recordGfeLatency(MetricsAttributes attrs, Duration duration) {} + + @Override + public void recordGfeHeaderMissing(MetricsAttributes attrs) {} + + @Override + public void recordCallLatency(MetricsAttributes attrs, Status status, Duration duration) {} + + @Override + public void recordFirstByteLatency(MetricsAttributes attrs, Duration duration) {} + + @Override + public void recordDownstreamLatency(MetricsAttributes attrs, Duration latency) {} + + @Override + public void updateChannelCount(int delta) {} + + @Override + public void recordChannelStateChange(ConnectivityState prevState, ConnectivityState newState) {} +} diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/utils/ContextSubject.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/utils/ContextSubject.java new file mode 100644 index 00000000000..0babab53c6c --- /dev/null +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/utils/ContextSubject.java @@ -0,0 +1,51 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.utils; + +import static com.google.common.truth.Truth.assertAbout; + +import com.google.common.truth.ComparableSubject; +import com.google.common.truth.FailureMetadata; +import com.google.common.truth.Subject; +import io.grpc.Context; +import java.time.Duration; +import java.util.concurrent.TimeUnit; +import org.jspecify.annotations.Nullable; + +public class ContextSubject extends Subject { + private final Context context; + + public ContextSubject(FailureMetadata metadata, @Nullable Context actual) { + super(metadata, actual); + this.context = actual; + } + + public static Factory context() { + return ContextSubject::new; + } + + public static ContextSubject assertThat(Context context) { + return assertAbout(context()).that(context); + } + + public ComparableSubject hasRemainingDeadlineThat() { + Duration remaining = + Duration.ofMillis(context.getDeadline().timeRemaining(TimeUnit.MILLISECONDS)); + + return check("getDeadline().timeRemaining()").that(remaining); + } +} diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/utils/MetadataSubject.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/utils/MetadataSubject.java new file mode 100644 index 00000000000..4494c52dc94 --- /dev/null +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/utils/MetadataSubject.java @@ -0,0 +1,70 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.utils; + +import static com.google.common.truth.Truth.assertAbout; + +import com.google.common.truth.FailureMetadata; +import com.google.common.truth.Subject; +import io.grpc.Metadata; +import java.util.ArrayList; +import java.util.Optional; +import org.jspecify.annotations.Nullable; + +public class MetadataSubject extends Subject { + private final Metadata metadata; + + public MetadataSubject(FailureMetadata metadata, @Nullable Metadata actual) { + super(metadata, actual); + this.metadata = actual; + } + + public static Factory metadata() { + return MetadataSubject::new; + } + + public static MetadataSubject assertThat(Metadata metadata) { + return assertAbout(metadata()).that(metadata); + } + + public void hasKey(String key) { + hasKey(Metadata.Key.of(key, Metadata.ASCII_STRING_MARSHALLER)); + } + + public void hasKey(Metadata.Key key) { + check("keys()").that(metadata.keys()).contains(key); + } + + public void hasValue(String key, String value) { + hasValue(Metadata.Key.of(key, Metadata.ASCII_STRING_MARSHALLER), value); + } + + public void hasValue(Metadata.Key key, T value) { + Iterable actualValues = Optional.ofNullable(metadata.getAll(key)).orElse(new ArrayList<>()); + check("get(" + key + ")").that(actualValues).containsExactly(value); + } + + public void containsValue(String key, String value) { + check("get(" + key + ")") + .that(metadata.getAll(Metadata.Key.of(key, Metadata.ASCII_STRING_MARSHALLER))) + .contains(value); + } + + public void containsValue(Metadata.Key key, T value) { + check("get(" + key + ")").that(metadata.getAll(key)).contains(value); + } +} diff --git a/compute/cloud-client/src/main/java/compute/disks/consistencygroup/AddDiskToConsistencyGroup.java b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/AddDiskToConsistencyGroup.java new file mode 100644 index 00000000000..7c4650fad09 --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/AddDiskToConsistencyGroup.java @@ -0,0 +1,98 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package compute.disks.consistencygroup; + +// [START compute_consistency_group_add_disk] +import com.google.cloud.compute.v1.AddResourcePoliciesDiskRequest; +import com.google.cloud.compute.v1.AddResourcePoliciesRegionDiskRequest; +import com.google.cloud.compute.v1.DisksAddResourcePoliciesRequest; +import com.google.cloud.compute.v1.DisksClient; +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.RegionDisksAddResourcePoliciesRequest; +import com.google.cloud.compute.v1.RegionDisksClient; +import java.io.IOException; +import java.util.Arrays; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class AddDiskToConsistencyGroup { + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project that contains the disk. + String project = "YOUR_PROJECT_ID"; + // Zone or region of the disk. + String location = "us-central1"; + // Name of the disk. + String diskName = "DISK_NAME"; + // Name of the consistency group. + String consistencyGroupName = "CONSISTENCY_GROUP"; + // Region of the consistency group. + String consistencyGroupLocation = "us-central1"; + + addDiskToConsistencyGroup( + project, location, diskName, consistencyGroupName, consistencyGroupLocation); + } + + // Adds a disk to a consistency group. + public static Operation.Status addDiskToConsistencyGroup( + String project, String location, String diskName, + String consistencyGroupName, String consistencyGroupLocation) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + String consistencyGroupUrl = String.format( + "https://www.googleapis.com/compute/v1/projects/%s/regions/%s/resourcePolicies/%s", + project, consistencyGroupLocation, consistencyGroupName); + Operation response; + if (Character.isDigit(location.charAt(location.length() - 1))) { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (RegionDisksClient disksClient = RegionDisksClient.create()) { + AddResourcePoliciesRegionDiskRequest request = + AddResourcePoliciesRegionDiskRequest.newBuilder() + .setDisk(diskName) + .setRegion(location) + .setProject(project) + .setRegionDisksAddResourcePoliciesRequestResource( + RegionDisksAddResourcePoliciesRequest.newBuilder() + .addAllResourcePolicies(Arrays.asList(consistencyGroupUrl)) + .build()) + .build(); + response = disksClient.addResourcePoliciesAsync(request).get(1, TimeUnit.MINUTES); + } + } else { + try (DisksClient disksClient = DisksClient.create()) { + AddResourcePoliciesDiskRequest request = + AddResourcePoliciesDiskRequest.newBuilder() + .setDisk(diskName) + .setZone(location) + .setProject(project) + .setDisksAddResourcePoliciesRequestResource( + DisksAddResourcePoliciesRequest.newBuilder() + .addAllResourcePolicies(Arrays.asList(consistencyGroupUrl)) + .build()) + .build(); + response = disksClient.addResourcePoliciesAsync(request).get(1, TimeUnit.MINUTES); + } + } + if (response.hasError()) { + throw new Error("Error adding disk to consistency group! 
" + response.getError()); + } + return response.getStatus(); + } +} +// [END compute_consistency_group_add_disk] \ No newline at end of file diff --git a/compute/cloud-client/src/main/java/compute/disks/consistencygroup/CreateConsistencyGroup.java b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/CreateConsistencyGroup.java new file mode 100644 index 00000000000..df6c324d8d5 --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/CreateConsistencyGroup.java @@ -0,0 +1,75 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.disks.consistencygroup; + +// [START compute_consistency_group_create] +import com.google.cloud.compute.v1.InsertResourcePolicyRequest; +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.ResourcePoliciesClient; +import com.google.cloud.compute.v1.ResourcePolicy; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class CreateConsistencyGroup { + + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project you want to use. + String project = "YOUR_PROJECT_ID"; + // Name of the region in which you want to create the consistency group. + String region = "us-central1"; + // Name of the consistency group you want to create. + String consistencyGroupName = "YOUR_CONSISTENCY_GROUP_NAME"; + + createConsistencyGroup(project, region, consistencyGroupName); + } + + // Creates a new consistency group resource policy in the specified project and region. + public static Operation.Status createConsistencyGroup( + String project, String region, String consistencyGroupName) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (ResourcePoliciesClient regionResourcePoliciesClient = ResourcePoliciesClient.create()) { + ResourcePolicy resourcePolicy = + ResourcePolicy.newBuilder() + .setName(consistencyGroupName) + .setRegion(region) + .setDiskConsistencyGroupPolicy( + ResourcePolicy.newBuilder().getDiskConsistencyGroupPolicy()) + .build(); + + InsertResourcePolicyRequest request = InsertResourcePolicyRequest.newBuilder() + .setProject(project) + .setRegion(region) + .setResourcePolicyResource(resourcePolicy) + .build(); + + Operation response = + regionResourcePoliciesClient.insertAsync(request).get(1, TimeUnit.MINUTES); + + if (response.hasError()) { + throw new Error("Error creating consistency group! 
" + response.getError()); + } + return response.getStatus(); + } + } +} +// [END compute_consistency_group_create] \ No newline at end of file diff --git a/compute/cloud-client/src/main/java/compute/disks/consistencygroup/DeleteConsistencyGroup.java b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/DeleteConsistencyGroup.java new file mode 100644 index 00000000000..89ab6f756e0 --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/DeleteConsistencyGroup.java @@ -0,0 +1,59 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.disks.consistencygroup; + +// [START compute_consistency_group_delete] +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.ResourcePoliciesClient; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class DeleteConsistencyGroup { + + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project you want to use. + String project = "YOUR_PROJECT_ID"; + // Region in which your consistency group is located. + String region = "us-central1"; + // Name of the consistency group you want to delete. + String consistencyGroupName = "YOUR_CONSISTENCY_GROUP_NAME"; + + deleteConsistencyGroup(project, region, consistencyGroupName); + } + + // Deletes a consistency group resource policy in the specified project and region. + public static Operation.Status deleteConsistencyGroup( + String project, String region, String consistencyGroupName) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (ResourcePoliciesClient resourcePoliciesClient = ResourcePoliciesClient.create()) { + Operation response = resourcePoliciesClient + .deleteAsync(project, region, consistencyGroupName).get(1, TimeUnit.MINUTES); + + if (response.hasError()) { + throw new Error("Error deleting disk! 
" + response.getError()); + } + return response.getStatus(); + } + } +} +// [END compute_consistency_group_delete] \ No newline at end of file diff --git a/compute/cloud-client/src/main/java/compute/disks/consistencygroup/ListRegionalDisksInConsistencyGroup.java b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/ListRegionalDisksInConsistencyGroup.java new file mode 100644 index 00000000000..36fe60cf2ad --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/ListRegionalDisksInConsistencyGroup.java @@ -0,0 +1,74 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.disks.consistencygroup; + +// [START compute_consistency_group_list_disks_regional] +import com.google.cloud.compute.v1.Disk; +import com.google.cloud.compute.v1.ListRegionDisksRequest; +import com.google.cloud.compute.v1.RegionDisksClient; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; + +public class ListRegionalDisksInConsistencyGroup { + public static void main(String[] args) + throws IOException, InterruptedException, ExecutionException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project you want to use. + String project = "YOUR_PROJECT_ID"; + // Name of the consistency group. + String consistencyGroupName = "CONSISTENCY_GROUP_ID"; + // Region of the disk. + String disksLocation = "us-central1"; + // Region of the consistency group. + String consistencyGroupLocation = "us-central1"; + + listRegionalDisksInConsistencyGroup( + project, consistencyGroupName, consistencyGroupLocation, disksLocation); + } + + // Lists disks in a consistency group. + public static List listRegionalDisksInConsistencyGroup(String project, + String consistencyGroupName, String consistencyGroupLocation, String disksLocation) + throws IOException { + String filter = String + .format("https://www.googleapis.com/compute/v1/projects/%s/regions/%s/resourcePolicies/%s", + project, consistencyGroupLocation, consistencyGroupName); + List disksList = new ArrayList<>(); + + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. 
+    try (RegionDisksClient disksClient = RegionDisksClient.create()) {
+      ListRegionDisksRequest request =
+          ListRegionDisksRequest.newBuilder()
+              .setProject(project)
+              .setRegion(disksLocation)
+              .build();
+
+      RegionDisksClient.ListPagedResponse response = disksClient.list(request);
+      for (Disk disk : response.iterateAll()) {
+        if (disk.getResourcePoliciesList().contains(filter)) {
+          disksList.add(disk);
+        }
+      }
+    }
+    System.out.println(disksList.size());
+    return disksList;
+  }
+}
+// [END compute_consistency_group_list_disks_regional]
\ No newline at end of file
diff --git a/compute/cloud-client/src/main/java/compute/disks/consistencygroup/ListZonalDisksInConsistencyGroup.java b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/ListZonalDisksInConsistencyGroup.java
new file mode 100644
index 00000000000..2434802d860
--- /dev/null
+++ b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/ListZonalDisksInConsistencyGroup.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package compute.disks.consistencygroup;
+
+// [START compute_consistency_group_list_disks_zonal]
+import com.google.cloud.compute.v1.Disk;
+import com.google.cloud.compute.v1.DisksClient;
+import com.google.cloud.compute.v1.ListDisksRequest;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+public class ListZonalDisksInConsistencyGroup {
+  public static void main(String[] args)
+      throws IOException, InterruptedException, ExecutionException {
+    // TODO(developer): Replace these variables before running the sample.
+    // Project ID or project number of the Cloud project you want to use.
+    String project = "YOUR_PROJECT_ID";
+    // Name of the consistency group.
+    String consistencyGroupName = "CONSISTENCY_GROUP_ID";
+    // Zone of the disk.
+    String disksLocation = "us-central1-a";
+    // Region of the consistency group.
+    String consistencyGroupLocation = "us-central1";
+
+    listZonalDisksInConsistencyGroup(
+        project, consistencyGroupName, consistencyGroupLocation, disksLocation);
+  }
+
+  // Lists disks in a consistency group.
+  public static List<Disk> listZonalDisksInConsistencyGroup(String project,
+      String consistencyGroupName, String consistencyGroupLocation, String disksLocation)
+      throws IOException {
+    String filter = String
+        .format("https://www.googleapis.com/compute/v1/projects/%s/regions/%s/resourcePolicies/%s",
+            project, consistencyGroupLocation, consistencyGroupName);
+    List<Disk> disksList = new ArrayList<>();
+    // Initialize client that will be used to send requests. This client only needs to be created
+    // once, and can be reused for multiple requests.
+ try (DisksClient disksClient = DisksClient.create()) { + ListDisksRequest request = + ListDisksRequest.newBuilder() + .setProject(project) + .setZone(disksLocation) + .build(); + DisksClient.ListPagedResponse response = disksClient.list(request); + + for (Disk disk : response.iterateAll()) { + if (disk.getResourcePoliciesList().contains(filter)) { + disksList.add(disk); + } + } + } + System.out.println(disksList.size()); + return disksList; + } +} +// [END compute_consistency_group_list_disks_zonal] diff --git a/compute/cloud-client/src/main/java/compute/disks/consistencygroup/RemoveDiskFromConsistencyGroup.java b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/RemoveDiskFromConsistencyGroup.java new file mode 100644 index 00000000000..b791125b0dd --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/RemoveDiskFromConsistencyGroup.java @@ -0,0 +1,100 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.disks.consistencygroup; + +// [START compute_consistency_group_remove_disk] +import com.google.cloud.compute.v1.DisksClient; +import com.google.cloud.compute.v1.DisksRemoveResourcePoliciesRequest; +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.RegionDisksClient; +import com.google.cloud.compute.v1.RegionDisksRemoveResourcePoliciesRequest; +import com.google.cloud.compute.v1.RemoveResourcePoliciesDiskRequest; +import com.google.cloud.compute.v1.RemoveResourcePoliciesRegionDiskRequest; +import java.io.IOException; +import java.util.Arrays; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class RemoveDiskFromConsistencyGroup { + + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project that contains the disk. + String project = "YOUR_PROJECT_ID"; + // Zone or region of the disk. + String location = "us-central1"; + // Name of the disk. + String diskName = "DISK_NAME"; + // Name of the consistency group. + String consistencyGroupName = "CONSISTENCY_GROUP"; + // Region of the consistency group. + String consistencyGroupLocation = "us-central1"; + + removeDiskFromConsistencyGroup( + project, location, diskName, consistencyGroupName, consistencyGroupLocation); + } + + // Removes a disk from a consistency group. 
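+  // Note: the method below infers the disk's scope from the location string. Regions such
+  // as "us-central1" end in a digit, while zones such as "us-central1-a" end in a letter,
+  // which is why it checks Character.isDigit() on the last character before choosing
+  // between RegionDisksClient and DisksClient.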
+ public static Operation.Status removeDiskFromConsistencyGroup( + String project, String location, String diskName, + String consistencyGroupName, String consistencyGroupLocation) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + String consistencyGroupUrl = String.format( + "https://www.googleapis.com/compute/v1/projects/%s/regions/%s/resourcePolicies/%s", + project, consistencyGroupLocation, consistencyGroupName); + Operation response; + if (Character.isDigit(location.charAt(location.length() - 1))) { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (RegionDisksClient disksClient = RegionDisksClient.create()) { + RemoveResourcePoliciesRegionDiskRequest request = + RemoveResourcePoliciesRegionDiskRequest.newBuilder() + .setDisk(diskName) + .setRegion(location) + .setProject(project) + .setRegionDisksRemoveResourcePoliciesRequestResource( + RegionDisksRemoveResourcePoliciesRequest.newBuilder() + .addAllResourcePolicies(Arrays.asList(consistencyGroupUrl)) + .build()) + .build(); + + response = disksClient.removeResourcePoliciesAsync(request).get(1, TimeUnit.MINUTES); + } + } else { + try (DisksClient disksClient = DisksClient.create()) { + RemoveResourcePoliciesDiskRequest request = + RemoveResourcePoliciesDiskRequest.newBuilder() + .setDisk(diskName) + .setZone(location) + .setProject(project) + .setDisksRemoveResourcePoliciesRequestResource( + DisksRemoveResourcePoliciesRequest.newBuilder() + .addAllResourcePolicies(Arrays.asList(consistencyGroupUrl)) + .build()) + .build(); + response = disksClient.removeResourcePoliciesAsync(request).get(1, TimeUnit.MINUTES); + } + } + if (response.hasError()) { + throw new Error("Error removing disk from consistency group! " + response.getError()); + } + return response.getStatus(); + } +} +// [END compute_consistency_group_remove_disk] \ No newline at end of file diff --git a/compute/cloud-client/src/main/java/compute/disks/storagepool/CreateHyperdiskStoragePool.java b/compute/cloud-client/src/main/java/compute/disks/storagepool/CreateHyperdiskStoragePool.java index 61239faa495..30cdde803d0 100644 --- a/compute/cloud-client/src/main/java/compute/disks/storagepool/CreateHyperdiskStoragePool.java +++ b/compute/cloud-client/src/main/java/compute/disks/storagepool/CreateHyperdiskStoragePool.java @@ -15,7 +15,6 @@ package compute.disks.storagepool; // [START compute_hyperdisk_pool_create] - import com.google.cloud.compute.v1.InsertStoragePoolRequest; import com.google.cloud.compute.v1.Operation; import com.google.cloud.compute.v1.StoragePool; @@ -32,12 +31,13 @@ public static void main(String[] args) // Project ID or project number of the Google Cloud project you want to use. String projectId = "YOUR_PROJECT_ID"; // Name of the zone in which you want to create the storagePool. - String zone = "europe-central2-b"; + String zone = "us-central1-a"; // Name of the storagePool you want to create. String storagePoolName = "YOUR_STORAGE_POOL_NAME"; - // The type of disk you want to create. This value uses the following format: - // "projects/%s/zones/%s/storagePoolTypes/hyperdisk-throughput|hyperdisk-balanced" - String storagePoolType = "hyperdisk-balanced"; + // The type of disk you want to create. 
+    // Storage types can be "hyperdisk-throughput" or "hyperdisk-balanced".
+    String storagePoolType = String.format(
+        "projects/%s/zones/%s/storagePoolTypes/hyperdisk-balanced", projectId, zone);
     // Optional: the capacity provisioning type of the storage pool.
     // The allowed values are advanced and standard. If not specified, the value advanced is used.
     String capacityProvisioningType = "advanced";
@@ -48,16 +48,19 @@ public static void main(String[] args)
     long provisionedIops = 3000;
     // the throughput in MBps to provision for the storage pool.
     long provisionedThroughput = 140;
+    // The allowed values are the lowercase strings "advanced" and "standard".
+    // If not specified, "advanced" is used.
+    String performanceProvisioningType = "advanced";
 
     createHyperdiskStoragePool(projectId, zone, storagePoolName, storagePoolType,
-        capacityProvisioningType, provisionedCapacity, provisionedIops, provisionedThroughput);
+        capacityProvisioningType, provisionedCapacity, provisionedIops,
+        provisionedThroughput, performanceProvisioningType);
   }
 
   // Creates a hyperdisk storagePool in a project
   public static StoragePool createHyperdiskStoragePool(String projectId, String zone,
-      String storagePoolName, String storagePoolType,
-      String capacityProvisioningType, long capacity,
-      long iops, long throughput)
+      String storagePoolName, String storagePoolType, String capacityProvisioningType,
+      long capacity, long iops, long throughput, String performanceProvisioningType)
       throws IOException, ExecutionException, InterruptedException, TimeoutException {
     // Initialize client that will be used to send requests. This client only needs to be created
     // once, and can be reused for multiple requests.
@@ -71,6 +74,7 @@ public static StoragePool createHyperdiskStoragePool(String projectId, String zo
           .setPoolProvisionedCapacityGb(capacity)
           .setPoolProvisionedIops(iops)
           .setPoolProvisionedThroughput(throughput)
+          .setPerformanceProvisioningType(performanceProvisioningType)
           .build();
 
       InsertStoragePoolRequest request = InsertStoragePoolRequest.newBuilder()
diff --git a/compute/cloud-client/src/main/java/compute/reservation/ConsumeAnyMatchingReservation.java b/compute/cloud-client/src/main/java/compute/reservation/ConsumeAnyMatchingReservation.java
new file mode 100644
index 00000000000..b8d1ac7f8f9
--- /dev/null
+++ b/compute/cloud-client/src/main/java/compute/reservation/ConsumeAnyMatchingReservation.java
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
 */
+
+package compute.reservation;
+
+// [START compute_consume_any_matching_reservation]
+import static com.google.cloud.compute.v1.ReservationAffinity.ConsumeReservationType.ANY_RESERVATION;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.compute.v1.AttachedDisk;
+import com.google.cloud.compute.v1.AttachedDiskInitializeParams;
+import com.google.cloud.compute.v1.InsertInstanceRequest;
+import com.google.cloud.compute.v1.Instance;
+import com.google.cloud.compute.v1.InstancesClient;
+import com.google.cloud.compute.v1.NetworkInterface;
+import com.google.cloud.compute.v1.Operation;
+import com.google.cloud.compute.v1.ReservationAffinity;
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+public class ConsumeAnyMatchingReservation {
+
+  public static void main(String[] args)
+      throws IOException, ExecutionException, InterruptedException, TimeoutException {
+    // TODO(developer): Replace these variables before running the sample.
+    // Project ID or project number of the Cloud project you want to use.
+    String projectId = "YOUR_PROJECT_ID";
+    // Zone where the VM instance will be created.
+    String zone = "us-central1-a";
+    // Name of the VM instance you want to query.
+    String instanceName = "YOUR_INSTANCE_NAME";
+    // machineType: machine type of the VM being created.
+    // * For a list of machine types, see https://cloud.google.com/compute/docs/machine-types
+    String machineTypeName = "n1-standard-4";
+    // sourceImage: path to the operating system image to mount.
+    // * For details about images you can mount, see https://cloud.google.com/compute/docs/images
+    String sourceImage = "projects/debian-cloud/global/images/family/debian-11";
+    // diskSizeGb: storage size of the boot disk to attach to the instance.
+    long diskSizeGb = 10L;
+    // networkName: network interface to associate with the instance.
+    String networkName = "default";
+    // Minimum CPU platform of the instances.
+    String minCpuPlatform = "Intel Skylake";
+
+    createInstanceAsync(projectId, zone, instanceName, machineTypeName, sourceImage,
+        diskSizeGb, networkName, minCpuPlatform);
+  }
+
+  // Create a virtual machine that targets reservations through the reservationAffinity field.
+  // In this consumption model, existing and new VMs automatically consume a reservation
+  // if their properties match the VM properties specified in the reservation.
+  public static Instance createInstanceAsync(String projectId, String zone,
+      String instanceName, String machineTypeName, String sourceImage,
+      long diskSizeGb, String networkName, String minCpuPlatform)
+      throws IOException, InterruptedException, ExecutionException, TimeoutException {
+    String machineType = String.format("zones/%s/machineTypes/%s", zone, machineTypeName);
+    // Initialize client that will be used to send requests. This client only needs to be created
+    // once, and can be reused for multiple requests.
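+    // ANY_RESERVATION is also Compute Engine's default consumption mode; setting it
+    // explicitly documents that this VM may take capacity from any open reservation
+    // whose properties match the VM.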
+    try (InstancesClient instancesClient = InstancesClient.create()) {
+      AttachedDisk disk =
+          AttachedDisk.newBuilder()
+              .setBoot(true)
+              .setAutoDelete(true)
+              .setType(AttachedDisk.Type.PERSISTENT.toString())
+              .setDeviceName("disk-1")
+              .setInitializeParams(
+                  AttachedDiskInitializeParams.newBuilder()
+                      .setSourceImage(sourceImage)
+                      .setDiskSizeGb(diskSizeGb)
+                      .build())
+              .build();
+
+      NetworkInterface networkInterface = NetworkInterface.newBuilder()
+          .setName(networkName)
+          .build();
+
+      ReservationAffinity reservationAffinity =
+          ReservationAffinity.newBuilder()
+              .setConsumeReservationType(ANY_RESERVATION.toString())
+              .build();
+
+      Instance instanceResource =
+          Instance.newBuilder()
+              .setName(instanceName)
+              .setMachineType(machineType)
+              .addDisks(disk)
+              .addNetworkInterfaces(networkInterface)
+              .setMinCpuPlatform(minCpuPlatform)
+              .setReservationAffinity(reservationAffinity)
+              .build();
+
+      InsertInstanceRequest insertInstanceRequest = InsertInstanceRequest.newBuilder()
+          .setProject(projectId)
+          .setZone(zone)
+          .setInstanceResource(instanceResource)
+          .build();
+
+      OperationFuture<Operation, Operation> operation = instancesClient.insertAsync(
+          insertInstanceRequest);
+
+      Operation response = operation.get(3, TimeUnit.MINUTES);
+
+      if (response.hasError()) {
+        return null;
+      }
+      return instancesClient.get(projectId, zone, instanceName);
+    }
+  }
+}
+// [END compute_consume_any_matching_reservation]
\ No newline at end of file
diff --git a/compute/cloud-client/src/main/java/compute/reservation/ConsumeSingleProjectReservation.java b/compute/cloud-client/src/main/java/compute/reservation/ConsumeSingleProjectReservation.java
new file mode 100644
index 00000000000..8f1118b4d1b
--- /dev/null
+++ b/compute/cloud-client/src/main/java/compute/reservation/ConsumeSingleProjectReservation.java
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package compute.reservation;
+
+// [START compute_consume_single_project_reservation]
+import static com.google.cloud.compute.v1.ReservationAffinity.ConsumeReservationType.SPECIFIC_RESERVATION;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.compute.v1.AttachedDisk;
+import com.google.cloud.compute.v1.AttachedDiskInitializeParams;
+import com.google.cloud.compute.v1.InsertInstanceRequest;
+import com.google.cloud.compute.v1.Instance;
+import com.google.cloud.compute.v1.InstancesClient;
+import com.google.cloud.compute.v1.NetworkInterface;
+import com.google.cloud.compute.v1.Operation;
+import com.google.cloud.compute.v1.ReservationAffinity;
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+public class ConsumeSingleProjectReservation {
+  public static void main(String[] args)
+      throws IOException, ExecutionException, InterruptedException, TimeoutException {
+    // TODO(developer): Replace these variables before running the sample.
+    // Project ID or project number of the Cloud project you want to use.
+    String projectId = "YOUR_PROJECT_ID";
+    // Name of the zone where the reservation is located.
+    String zone = "us-central1-a";
+    // Name of the reservation you want to query.
+    String reservationName = "YOUR_RESERVATION_NAME";
+    // Name of the VM instance you want to query.
+    String instanceName = "YOUR_INSTANCE_NAME";
+    // machineType: machine type of the VM being created.
+    // * For a list of machine types, see https://cloud.google.com/compute/docs/machine-types
+    String machineTypeName = "n1-standard-4";
+    // sourceImage: path to the operating system image to mount.
+    // * For details about images you can mount, see https://cloud.google.com/compute/docs/images
+    String sourceImage = "projects/debian-cloud/global/images/family/debian-11";
+    // diskSizeGb: storage size of the boot disk to attach to the instance.
+    long diskSizeGb = 10L;
+    // networkName: network interface to associate with the instance.
+    String networkName = "default";
+    // Minimum CPU platform of the instances.
+    String minCpuPlatform = "Intel Skylake";
+
+    createInstanceAsync(projectId, zone, instanceName, reservationName, machineTypeName,
+        sourceImage, diskSizeGb, networkName, minCpuPlatform);
+  }
+
+  // Create a virtual machine that targets a specific reservation through the
+  // reservationAffinity field. Ensure that the VM's properties match the
+  // reservation's VM properties.
+  public static Instance createInstanceAsync(String projectId, String zone, String instanceName,
+      String reservationName, String machineTypeName, String sourceImage, long diskSizeGb,
+      String networkName, String minCpuPlatform)
+      throws IOException, InterruptedException, ExecutionException, TimeoutException {
+    String machineType = String.format("zones/%s/machineTypes/%s", zone, machineTypeName);
+    // Initialize client that will be used to send requests. This client only needs to be created
+    // once, and can be reused for multiple requests.
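+    // The affinity key "compute.googleapis.com/reservation-name" used below is the
+    // well-known key for targeting a reservation by name. The reservation itself should be
+    // created with specificReservationRequired = true so that only VMs naming it can
+    // consume its capacity.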
+    try (InstancesClient instancesClient = InstancesClient.create()) {
+      AttachedDisk disk =
+          AttachedDisk.newBuilder()
+              .setBoot(true)
+              .setAutoDelete(true)
+              .setType(AttachedDisk.Type.PERSISTENT.toString())
+              .setDeviceName("disk-1")
+              .setInitializeParams(
+                  AttachedDiskInitializeParams.newBuilder()
+                      .setSourceImage(sourceImage)
+                      .setDiskSizeGb(diskSizeGb)
+                      .build())
+              .build();
+
+      NetworkInterface networkInterface = NetworkInterface.newBuilder()
+          .setName(networkName)
+          .build();
+
+      ReservationAffinity reservationAffinity =
+          ReservationAffinity.newBuilder()
+              .setConsumeReservationType(SPECIFIC_RESERVATION.toString())
+              .setKey("compute.googleapis.com/reservation-name")
+              // Set specific reservation
+              .addValues(reservationName)
+              .build();
+
+      Instance instanceResource =
+          Instance.newBuilder()
+              .setName(instanceName)
+              .setMachineType(machineType)
+              .addDisks(disk)
+              .addNetworkInterfaces(networkInterface)
+              .setMinCpuPlatform(minCpuPlatform)
+              .setReservationAffinity(reservationAffinity)
+              .build();
+
+      InsertInstanceRequest insertInstanceRequest = InsertInstanceRequest.newBuilder()
+          .setProject(projectId)
+          .setZone(zone)
+          .setInstanceResource(instanceResource)
+          .build();
+
+      OperationFuture<Operation, Operation> operation = instancesClient.insertAsync(
+          insertInstanceRequest);
+      Operation response = operation.get(3, TimeUnit.MINUTES);
+
+      if (response.hasError()) {
+        return null;
+      }
+      return instancesClient.get(projectId, zone, instanceName);
+    }
+  }
+}
+// [END compute_consume_single_project_reservation]
diff --git a/compute/cloud-client/src/main/java/compute/reservation/ConsumeSpecificSharedReservation.java b/compute/cloud-client/src/main/java/compute/reservation/ConsumeSpecificSharedReservation.java
new file mode 100644
index 00000000000..acf084798bf
--- /dev/null
+++ b/compute/cloud-client/src/main/java/compute/reservation/ConsumeSpecificSharedReservation.java
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
 */
+
+package compute.reservation;
+
+// [START compute_consume_specific_shared_reservation]
+import static com.google.cloud.compute.v1.ReservationAffinity.ConsumeReservationType.SPECIFIC_RESERVATION;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.compute.v1.AttachedDisk;
+import com.google.cloud.compute.v1.AttachedDiskInitializeParams;
+import com.google.cloud.compute.v1.InsertInstanceRequest;
+import com.google.cloud.compute.v1.Instance;
+import com.google.cloud.compute.v1.InstancesClient;
+import com.google.cloud.compute.v1.NetworkInterface;
+import com.google.cloud.compute.v1.Operation;
+import com.google.cloud.compute.v1.ReservationAffinity;
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+public class ConsumeSpecificSharedReservation {
+  public static void main(String[] args)
+      throws IOException, ExecutionException, InterruptedException, TimeoutException {
+    // TODO(developer): Replace these variables before running the sample.
+    // Project ID or project number of the Cloud project you want to use.
+    String projectId = "YOUR_PROJECT_ID";
+    // Name of the zone where the reservation is located.
+    String zone = "us-central1-a";
+    // Name of the reservation you want to query.
+    String reservationName = "YOUR_RESERVATION_NAME";
+    // Name of the VM instance you want to query.
+    String instanceName = "YOUR_INSTANCE_NAME";
+    // machineType: machine type of the VM being created.
+    // * For a list of machine types, see https://cloud.google.com/compute/docs/machine-types
+    String machineTypeName = "n1-standard-4";
+    // sourceImage: path to the operating system image to mount.
+    // * For details about images you can mount, see https://cloud.google.com/compute/docs/images
+    String sourceImage = "projects/debian-cloud/global/images/family/debian-11";
+    // diskSizeGb: storage size of the boot disk to attach to the instance.
+    long diskSizeGb = 10L;
+    // networkName: network interface to associate with the instance.
+    String networkName = "default";
+    // Minimum CPU platform of the instances.
+    String minCpuPlatform = "Intel Skylake";
+
+    createInstanceAsync(projectId, zone, instanceName, reservationName, machineTypeName,
+        sourceImage, diskSizeGb, networkName, minCpuPlatform);
+  }
+
+  // Create a virtual machine that targets a specific shared reservation through the
+  // reservationAffinity field. Ensure that the VM's properties match the
+  // reservation's VM properties.
+  public static Instance createInstanceAsync(String projectId, String zone, String instanceName,
+      String reservationName, String machineTypeName, String sourceImage, long diskSizeGb,
+      String networkName, String minCpuPlatform)
+      throws IOException, InterruptedException, ExecutionException, TimeoutException {
+    String machineType = String.format("zones/%s/machineTypes/%s", zone, machineTypeName);
+    // To consume this reservation from a consumer project it is shared with, you must also
+    // specify the owner project of the reservation, that is, the full path to the reservation.
+    String reservationPath =
+        String.format("projects/%s/reservations/%s", projectId, reservationName);
+    // Initialize client that will be used to send requests. This client only needs to be created
+    // once, and can be reused for multiple requests.
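+    // Note: projectId here is assumed to be the project that owns the shared reservation;
+    // a consumer project must still build reservationPath from the owner project's ID.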
+    try (InstancesClient instancesClient = InstancesClient.create()) {
+      AttachedDisk disk =
+          AttachedDisk.newBuilder()
+              .setBoot(true)
+              .setAutoDelete(true)
+              .setType(AttachedDisk.Type.PERSISTENT.toString())
+              .setDeviceName("disk-1")
+              .setInitializeParams(
+                  AttachedDiskInitializeParams.newBuilder()
+                      .setSourceImage(sourceImage)
+                      .setDiskSizeGb(diskSizeGb)
+                      .build())
+              .build();
+
+      NetworkInterface networkInterface = NetworkInterface.newBuilder()
+          .setName(networkName)
+          .build();
+
+      ReservationAffinity reservationAffinity =
+          ReservationAffinity.newBuilder()
+              .setConsumeReservationType(SPECIFIC_RESERVATION.toString())
+              .setKey("compute.googleapis.com/reservation-name")
+              // Set specific reservation
+              .addValues(reservationPath)
+              .build();
+
+      Instance instanceResource =
+          Instance.newBuilder()
+              .setName(instanceName)
+              .setMachineType(machineType)
+              .addDisks(disk)
+              .addNetworkInterfaces(networkInterface)
+              .setMinCpuPlatform(minCpuPlatform)
+              .setReservationAffinity(reservationAffinity)
+              .build();
+
+      InsertInstanceRequest insertInstanceRequest = InsertInstanceRequest.newBuilder()
+          .setProject(projectId)
+          .setZone(zone)
+          .setInstanceResource(instanceResource)
+          .build();
+
+      OperationFuture<Operation, Operation> operation = instancesClient.insertAsync(
+          insertInstanceRequest);
+      Operation response = operation.get(3, TimeUnit.MINUTES);
+
+      if (response.hasError()) {
+        return null;
+      }
+      return instancesClient.get(projectId, zone, instanceName);
+    }
+  }
+}
+// [END compute_consume_specific_shared_reservation]
\ No newline at end of file
diff --git a/compute/cloud-client/src/main/java/compute/reservation/CreateInstanceWithoutConsumingReservation.java b/compute/cloud-client/src/main/java/compute/reservation/CreateInstanceWithoutConsumingReservation.java
new file mode 100644
index 00000000000..df278717286
--- /dev/null
+++ b/compute/cloud-client/src/main/java/compute/reservation/CreateInstanceWithoutConsumingReservation.java
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package compute.reservation; + +// [START compute_instance_not_consume_reservation] +import static com.google.cloud.compute.v1.ReservationAffinity.ConsumeReservationType.NO_RESERVATION; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.compute.v1.AttachedDisk; +import com.google.cloud.compute.v1.AttachedDiskInitializeParams; +import com.google.cloud.compute.v1.InsertInstanceRequest; +import com.google.cloud.compute.v1.Instance; +import com.google.cloud.compute.v1.InstancesClient; +import com.google.cloud.compute.v1.NetworkInterface; +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.ReservationAffinity; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class CreateInstanceWithoutConsumingReservation { + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project you want to use. + String projectId = "YOUR_PROJECT_ID"; + // Name of the zone you want to use. + String zone = "us-central1-a"; + // Name of the VM instance you want to query. + String instanceName = "YOUR_INSTANCE_NAME"; + // machineType: machine type of the VM being created. + // * This value uses the format zones/{zone}/machineTypes/{type_name}. + // * For a list of machine types, see https://cloud.google.com/compute/docs/machine-types + String machineTypeName = "n1-standard-1"; + // sourceImage: path to the operating system image to mount. + // * For details about images you can mount, see https://cloud.google.com/compute/docs/images + String sourceImage = "projects/debian-cloud/global/images/family/debian-11"; + // diskSizeGb: storage size of the boot disk to attach to the instance. + long diskSizeGb = 10L; + // networkName: network interface to associate with the instance. + String networkName = "default"; + + createInstanceWithoutConsumingReservationAsync(projectId, zone, instanceName, + machineTypeName, sourceImage, diskSizeGb, networkName); + } + + // Create a virtual machine that explicitly doesn't consume reservations + public static Instance createInstanceWithoutConsumingReservationAsync( + String project, String zone, String instanceName, + String machineTypeName, String sourceImage, long diskSizeGb, String networkName) + throws IOException, InterruptedException, ExecutionException, TimeoutException { + String machineType = String.format("zones/%s/machineTypes/%s", zone, machineTypeName); + + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. 
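+    // NO_RESERVATION makes the VM draw from on-demand capacity only; it will not consume
+    // any reservation, even if a matching one exists in the project.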
+    try (InstancesClient instancesClient = InstancesClient.create()) {
+      AttachedDisk disk =
+          AttachedDisk.newBuilder()
+              .setBoot(true)
+              .setAutoDelete(true)
+              .setType(AttachedDisk.Type.PERSISTENT.toString())
+              .setDeviceName("disk-1")
+              .setInitializeParams(
+                  AttachedDiskInitializeParams.newBuilder()
+                      .setSourceImage(sourceImage)
+                      .setDiskSizeGb(diskSizeGb)
+                      .build())
+              .build();
+
+      NetworkInterface networkInterface = NetworkInterface.newBuilder()
+          .setName(networkName)
+          .build();
+
+      ReservationAffinity reservationAffinity =
+          ReservationAffinity.newBuilder()
+              .setConsumeReservationType(NO_RESERVATION.toString())
+              .build();
+
+      Instance instanceResource =
+          Instance.newBuilder()
+              .setName(instanceName)
+              .setMachineType(machineType)
+              .addDisks(disk)
+              .addNetworkInterfaces(networkInterface)
+              .setReservationAffinity(reservationAffinity)
+              .build();
+
+      InsertInstanceRequest insertInstanceRequest = InsertInstanceRequest.newBuilder()
+          .setProject(project)
+          .setZone(zone)
+          .setInstanceResource(instanceResource)
+          .build();
+
+      OperationFuture<Operation, Operation> operation = instancesClient.insertAsync(
+          insertInstanceRequest);
+
+      // Wait for the operation to complete.
+      Operation response = operation.get(3, TimeUnit.MINUTES);
+
+      if (response.hasError()) {
+        return null;
+      }
+      return instancesClient.get(project, zone, instanceName);
+    }
+  }
+}
+// [END compute_instance_not_consume_reservation]
\ No newline at end of file
diff --git a/compute/cloud-client/src/main/java/compute/reservation/CreateReservation.java b/compute/cloud-client/src/main/java/compute/reservation/CreateReservation.java
index 59f9077d448..c2f79720167 100644
--- a/compute/cloud-client/src/main/java/compute/reservation/CreateReservation.java
+++ b/compute/cloud-client/src/main/java/compute/reservation/CreateReservation.java
@@ -16,6 +16,7 @@
 
 package compute.reservation;
 
+// [START compute_reservation_create]
 import com.google.cloud.compute.v1.AcceleratorConfig;
 import com.google.cloud.compute.v1.AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk;
 import com.google.cloud.compute.v1.AllocationSpecificSKUAllocationReservedInstanceProperties;
@@ -28,7 +29,6 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
-// [START compute_reservation_create]
 public class CreateReservation {
 
   public static void main(String[] args)
@@ -47,29 +47,27 @@ public static void main(String[] args)
   }
 
   // Creates reservation with optional flags
-  public static void createReservation(
+  public static Reservation createReservation(
       String projectId, String reservationName, int numberOfVms, String zone)
       throws IOException, ExecutionException, InterruptedException, TimeoutException {
+    // Create the reservation with optional properties:
+    // Machine type of the instances in the reservation.
+    String machineType = "n1-standard-2";
+    // Number of accelerators to be attached to the instances in the reservation.
+    int numberOfAccelerators = 1;
+    // Accelerator type to be attached to the instances in the reservation.
+    String acceleratorType = "nvidia-tesla-t4";
+    // Minimum CPU platform to be attached to the instances in the reservation.
+    String minCpuPlatform = "Intel Skylake";
+    // Local SSD size in GB to be attached to the instances in the reservation.
+    int localSsdSize = 375;
+    // Local SSD interfaces to be attached to the instances in the reservation.
+ String localSsdInterface1 = "NVME"; + String localSsdInterface2 = "SCSI"; + boolean specificReservationRequired = true; // Initialize client that will be used to send requests. This client only needs to be created // once, and can be reused for multiple requests. try (ReservationsClient reservationsClient = ReservationsClient.create()) { - - // Create the reservation with optional properties: - // Machine type of the instances in the reservation. - String machineType = "n1-standard-2"; - // Number of accelerators to be attached to the instances in the reservation. - int numberOfAccelerators = 1; - // Accelerator type to be attached to the instances in the reservation. - String acceleratorType = "nvidia-tesla-t4"; - // Minimum CPU platform to be attached to the instances in the reservation. - String minCpuPlatform = "Intel Skylake"; - // Local SSD size in GB to be attached to the instances in the reservation. - int localSsdSize = 375; - // Local SSD interfaces to be attached to the instances in the reservation. - String localSsdInterface1 = "NVME"; - String localSsdInterface2 = "SCSI"; - boolean specificReservationRequired = true; - Reservation reservation = Reservation.newBuilder() .setName(reservationName) @@ -105,15 +103,13 @@ public static void createReservation( .build()) .build(); - // Wait for the create reservation operation to complete. Operation response = reservationsClient.insertAsync(projectId, zone, reservation).get(7, TimeUnit.MINUTES); if (response.hasError()) { - System.out.println("Reservation creation failed!" + response); - return; + return null; } - System.out.println("Reservation created. Operation Status: " + response.getStatus()); + return reservationsClient.get(projectId, zone, reservationName); } } } diff --git a/compute/cloud-client/src/main/java/compute/reservation/CreateReservationForInstanceTemplate.java b/compute/cloud-client/src/main/java/compute/reservation/CreateReservationForInstanceTemplate.java index 69edf13ef22..fca7a3ca6d6 100644 --- a/compute/cloud-client/src/main/java/compute/reservation/CreateReservationForInstanceTemplate.java +++ b/compute/cloud-client/src/main/java/compute/reservation/CreateReservationForInstanceTemplate.java @@ -17,7 +17,6 @@ package compute.reservation; // [START compute_reservation_create_template] - import com.google.cloud.compute.v1.AllocationSpecificSKUReservation; import com.google.cloud.compute.v1.Operation; import com.google.cloud.compute.v1.Reservation; @@ -44,7 +43,6 @@ public static void main(String[] args) // to be used for creating the reservation. String instanceTemplateUri = "projects/YOUR_PROJECT_ID/global/instanceTemplates/YOUR_INSTANCE_TEMPLATE_NAME"; - // The URI of the instance template with REGIONAL location // to be used for creating the reservation. For us-central1 region in this case. // String instanceTemplateUri = @@ -55,15 +53,13 @@ public static void main(String[] args) } // Creates a reservation in a project for the instance template. - public static void createReservationForInstanceTemplate( + public static Reservation createReservationForInstanceTemplate( String projectId, String reservationName, String instanceTemplateUri, int numberOfVms, String zone) throws IOException, ExecutionException, InterruptedException, TimeoutException { // Initialize client that will be used to send requests. This client only needs to be created // once, and can be reused for multiple requests. try (ReservationsClient reservationsClient = ReservationsClient.create()) { - - // Create the reservation. 
Reservation reservation = Reservation.newBuilder() .setName(reservationName) @@ -77,15 +73,13 @@ public static void createReservationForInstanceTemplate( .build()) .build(); - // Wait for the create reservation operation to complete. Operation response = reservationsClient.insertAsync(projectId, zone, reservation).get(3, TimeUnit.MINUTES); if (response.hasError()) { - System.out.println("Reservation creation failed!" + response); - return; + return null; } - System.out.println("Reservation created. Operation Status: " + response.getStatus()); + return reservationsClient.get(projectId, zone, reservationName); } } } diff --git a/compute/cloud-client/src/main/java/compute/reservation/CreateReservationFromVm.java b/compute/cloud-client/src/main/java/compute/reservation/CreateReservationFromVm.java new file mode 100644 index 00000000000..0a7c6bab178 --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/reservation/CreateReservationFromVm.java @@ -0,0 +1,131 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.reservation; + +// [START compute_reservation_create_from_vm] +import com.google.cloud.compute.v1.AcceleratorConfig; +import com.google.cloud.compute.v1.AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk; +import com.google.cloud.compute.v1.AllocationSpecificSKUAllocationReservedInstanceProperties; +import com.google.cloud.compute.v1.AllocationSpecificSKUReservation; +import com.google.cloud.compute.v1.AttachedDisk; +import com.google.cloud.compute.v1.InsertReservationRequest; +import com.google.cloud.compute.v1.Instance; +import com.google.cloud.compute.v1.InstancesClient; +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.Reservation; +import com.google.cloud.compute.v1.ReservationsClient; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class CreateReservationFromVm { + + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project you want to use. + String project = "YOUR_PROJECT_ID"; + // The zone of the VM. In this zone the reservation will be created. + String zone = "us-central1-a"; + // The name of the reservation to create. + String reservationName = "YOUR_RESERVATION_NAME"; + // The name of the VM to create the reservation from. + String vmName = "YOUR_VM_NAME"; + + createComputeReservationFromVm(project, zone, reservationName, vmName); + } + + // Creates a compute reservation from an existing VM. 
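+  // The method below mirrors the source VM's shape into the reservation's instance
+  // properties: machine type, minimum CPU platform, guest accelerators, and any attached
+  // disks of 375 GB or more (treated as local SSDs).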
+  public static void createComputeReservationFromVm(
+      String project, String zone, String reservationName, String vmName)
+      throws IOException, ExecutionException, InterruptedException, TimeoutException {
+    // Initialize client that will be used to send requests. This client only needs to be created
+    // once, and can be reused for multiple requests.
+    try (InstancesClient instancesClient = InstancesClient.create();
+        ReservationsClient reservationsClient = ReservationsClient.create()) {
+      Instance existingVm = instancesClient.get(project, zone, vmName);
+
+      // Extract properties from the existing VM
+      List<AcceleratorConfig> guestAccelerators = new ArrayList<>();
+      if (!existingVm.getGuestAcceleratorsList().isEmpty()) {
+        for (AcceleratorConfig acceleratorConfig : existingVm.getGuestAcceleratorsList()) {
+          guestAccelerators.add(
+              AcceleratorConfig.newBuilder()
+                  .setAcceleratorCount(acceleratorConfig.getAcceleratorCount())
+                  .setAcceleratorType(acceleratorConfig.getAcceleratorType()
+                      .substring(acceleratorConfig.getAcceleratorType().lastIndexOf('/') + 1))
+                  .build());
+        }
+      }
+
+      List<AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk> localSsds =
+          new ArrayList<>();
+      if (!existingVm.getDisksList().isEmpty()) {
+        for (AttachedDisk disk : existingVm.getDisksList()) {
+          if (disk.getDiskSizeGb() >= 375) {
+            localSsds.add(
+                AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk.newBuilder()
+                    .setDiskSizeGb(disk.getDiskSizeGb())
+                    .setInterface(disk.getInterface())
+                    .build());
+          }
+        }
+      }
+
+      AllocationSpecificSKUAllocationReservedInstanceProperties instanceProperties =
+          AllocationSpecificSKUAllocationReservedInstanceProperties.newBuilder()
+              .setMachineType(
+                  existingVm.getMachineType()
+                      .substring(existingVm.getMachineType().lastIndexOf('/') + 1))
+              .setMinCpuPlatform(existingVm.getMinCpuPlatform())
+              .addAllLocalSsds(localSsds)
+              .addAllGuestAccelerators(guestAccelerators)
+              .build();
+
+      Reservation reservation =
+          Reservation.newBuilder()
+              .setName(reservationName)
+              .setSpecificReservation(
+                  AllocationSpecificSKUReservation.newBuilder()
+                      // Reserves capacity for three VMs with these properties.
+                      .setCount(3)
+                      .setInstanceProperties(instanceProperties)
+                      .build())
+              .setSpecificReservationRequired(true)
+              .build();
+
+      InsertReservationRequest insertReservationRequest =
+          InsertReservationRequest.newBuilder()
+              .setProject(project)
+              .setZone(zone)
+              .setReservationResource(reservation)
+              .build();
+
+      Operation response = reservationsClient
+          .insertAsync(insertReservationRequest).get(3, TimeUnit.MINUTES);
+
+      if (response.hasError()) {
+        System.out.println("Reservation creation failed!
" + response); + return; + } + System.out.println("Operation completed successfully."); + } + } +} +// [END compute_reservation_create_from_vm] \ No newline at end of file diff --git a/compute/cloud-client/src/main/java/compute/reservation/CreateSharedReservation.java b/compute/cloud-client/src/main/java/compute/reservation/CreateSharedReservation.java index 91eb89f4336..53052ee4d25 100644 --- a/compute/cloud-client/src/main/java/compute/reservation/CreateSharedReservation.java +++ b/compute/cloud-client/src/main/java/compute/reservation/CreateSharedReservation.java @@ -17,7 +17,6 @@ package compute.reservation; // [START compute_reservation_create_shared] - import com.google.cloud.compute.v1.AllocationSpecificSKUReservation; import com.google.cloud.compute.v1.Operation; import com.google.cloud.compute.v1.Reservation; diff --git a/compute/cloud-client/src/main/java/compute/reservation/CreateTemplateWithoutConsumingReservation.java b/compute/cloud-client/src/main/java/compute/reservation/CreateTemplateWithoutConsumingReservation.java new file mode 100644 index 00000000000..2857b3288bd --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/reservation/CreateTemplateWithoutConsumingReservation.java @@ -0,0 +1,110 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.reservation; + +// [START compute_template_not_consume_reservation] +import static com.google.cloud.compute.v1.ReservationAffinity.ConsumeReservationType.NO_RESERVATION; + +import com.google.cloud.compute.v1.AccessConfig; +import com.google.cloud.compute.v1.AttachedDisk; +import com.google.cloud.compute.v1.AttachedDiskInitializeParams; +import com.google.cloud.compute.v1.InsertInstanceTemplateRequest; +import com.google.cloud.compute.v1.InstanceProperties; +import com.google.cloud.compute.v1.InstanceTemplate; +import com.google.cloud.compute.v1.InstanceTemplatesClient; +import com.google.cloud.compute.v1.NetworkInterface; +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.ReservationAffinity; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class CreateTemplateWithoutConsumingReservation { + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project you want to use. + String projectId = "YOUR_PROJECT_ID"; + // Name of the template you want to query. + String templateName = "YOUR_INSTANCE_TEMPLATE_NAME"; + String machineType = "e2-standard-4"; + String sourceImage = "projects/debian-cloud/global/images/family/debian-11"; + + createTemplateWithoutConsumingReservationAsync( + projectId, templateName, machineType, sourceImage); + } + + + // Create a template that explicitly doesn't consume any reservations. 
+ public static InstanceTemplate createTemplateWithoutConsumingReservationAsync( + String projectId, String templateName, String machineType, String sourceImage) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (InstanceTemplatesClient instanceTemplatesClient = InstanceTemplatesClient.create()) { + AttachedDisk attachedDisk = AttachedDisk.newBuilder() + .setInitializeParams(AttachedDiskInitializeParams.newBuilder() + .setSourceImage(sourceImage) + .setDiskType("pd-balanced") + .setDiskSizeGb(250) + .build()) + .setAutoDelete(true) + .setBoot(true) + .build(); + + NetworkInterface networkInterface = NetworkInterface.newBuilder() + .setName("global/networks/default") + .addAccessConfigs(AccessConfig.newBuilder() + .setName("External NAT") + .setType(AccessConfig.Type.ONE_TO_ONE_NAT.toString()) + .setNetworkTier(AccessConfig.NetworkTier.PREMIUM.toString()) + .build()) + .build(); + + ReservationAffinity reservationAffinity = + ReservationAffinity.newBuilder() + .setConsumeReservationType(NO_RESERVATION.toString()) + .build(); + + InstanceProperties instanceProperties = InstanceProperties.newBuilder() + .addDisks(attachedDisk) + .setMachineType(machineType) + .setReservationAffinity(reservationAffinity) + .addNetworkInterfaces(networkInterface) + .build(); + + InsertInstanceTemplateRequest insertInstanceTemplateRequest = InsertInstanceTemplateRequest + .newBuilder() + .setProject(projectId) + .setInstanceTemplateResource(InstanceTemplate.newBuilder() + .setName(templateName) + .setProperties(instanceProperties) + .build()) + .build(); + + Operation response = instanceTemplatesClient.insertAsync(insertInstanceTemplateRequest) + .get(3, TimeUnit.MINUTES); + + if (response.hasError()) { + return null; + } + return instanceTemplatesClient.get(projectId, templateName); + } + } +} +// [END compute_template_not_consume_reservation] diff --git a/compute/cloud-client/src/main/java/compute/reservation/DeleteReservation.java b/compute/cloud-client/src/main/java/compute/reservation/DeleteReservation.java index d539732049c..60671d46feb 100644 --- a/compute/cloud-client/src/main/java/compute/reservation/DeleteReservation.java +++ b/compute/cloud-client/src/main/java/compute/reservation/DeleteReservation.java @@ -17,7 +17,6 @@ package compute.reservation; // [START compute_reservation_delete] - import com.google.cloud.compute.v1.DeleteReservationRequest; import com.google.cloud.compute.v1.Operation; import com.google.cloud.compute.v1.ReservationsClient; diff --git a/compute/cloud-client/src/main/java/compute/reservation/GetReservation.java b/compute/cloud-client/src/main/java/compute/reservation/GetReservation.java index 5e99d11c191..6c74227df4d 100644 --- a/compute/cloud-client/src/main/java/compute/reservation/GetReservation.java +++ b/compute/cloud-client/src/main/java/compute/reservation/GetReservation.java @@ -17,7 +17,6 @@ package compute.reservation; // [START compute_reservation_get] - import com.google.cloud.compute.v1.Reservation; import com.google.cloud.compute.v1.ReservationsClient; import java.io.IOException; diff --git a/compute/cloud-client/src/main/java/compute/reservation/ListReservations.java b/compute/cloud-client/src/main/java/compute/reservation/ListReservations.java index 969f4d25d6b..8c907037a37 100644 --- a/compute/cloud-client/src/main/java/compute/reservation/ListReservations.java +++ 
b/compute/cloud-client/src/main/java/compute/reservation/ListReservations.java @@ -17,7 +17,6 @@ package compute.reservation; // [START compute_reservation_list] - import com.google.cloud.compute.v1.Reservation; import com.google.cloud.compute.v1.ReservationsClient; import java.io.IOException; diff --git a/compute/cloud-client/src/main/java/compute/reservation/UpdateVmsForReservation.java b/compute/cloud-client/src/main/java/compute/reservation/UpdateVmsForReservation.java index 1d37e2c0ade..48fa92b7599 100644 --- a/compute/cloud-client/src/main/java/compute/reservation/UpdateVmsForReservation.java +++ b/compute/cloud-client/src/main/java/compute/reservation/UpdateVmsForReservation.java @@ -17,8 +17,8 @@ package compute.reservation; // [START compute_reservation_vms_update] - import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.Reservation; import com.google.cloud.compute.v1.ReservationsClient; import com.google.cloud.compute.v1.ReservationsResizeRequest; import com.google.cloud.compute.v1.ResizeReservationRequest; @@ -45,7 +45,7 @@ public static void main(String[] args) } // Updates a reservation with new VM capacity. - public static void updateVmsForReservation( + public static Reservation updateVmsForReservation( String projectId, String zone, String reservationName, int numberOfVms) throws IOException, ExecutionException, InterruptedException, TimeoutException { // Initialize client that will be used to send requests. This client only needs to be created @@ -66,10 +66,9 @@ public static void updateVmsForReservation( .get(3, TimeUnit.MINUTES); if (response.hasError()) { - System.out.println("Reservation update failed !!" + response); - return; + return null; } - System.out.println("Reservation updated successfully: " + response.getStatus()); + return reservationsClient.get(projectId, zone, reservationName); } } } diff --git a/compute/cloud-client/src/test/java/compute/disks/ConsistencyGroupIT.java b/compute/cloud-client/src/test/java/compute/disks/ConsistencyGroupIT.java new file mode 100644 index 00000000000..61d4ec81940 --- /dev/null +++ b/compute/cloud-client/src/test/java/compute/disks/ConsistencyGroupIT.java @@ -0,0 +1,195 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package compute.disks;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.mockStatic;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.compute.v1.AddResourcePoliciesRegionDiskRequest;
+import com.google.cloud.compute.v1.DisksClient;
+import com.google.cloud.compute.v1.InsertResourcePolicyRequest;
+import com.google.cloud.compute.v1.ListDisksRequest;
+import com.google.cloud.compute.v1.ListRegionDisksRequest;
+import com.google.cloud.compute.v1.Operation;
+import com.google.cloud.compute.v1.RegionDisksClient;
+import com.google.cloud.compute.v1.RemoveResourcePoliciesRegionDiskRequest;
+import com.google.cloud.compute.v1.ResourcePoliciesClient;
+import compute.disks.consistencygroup.AddDiskToConsistencyGroup;
+import compute.disks.consistencygroup.CreateConsistencyGroup;
+import compute.disks.consistencygroup.DeleteConsistencyGroup;
+import compute.disks.consistencygroup.ListRegionalDisksInConsistencyGroup;
+import compute.disks.consistencygroup.ListZonalDisksInConsistencyGroup;
+import compute.disks.consistencygroup.RemoveDiskFromConsistencyGroup;
+import java.util.concurrent.TimeUnit;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+import org.mockito.MockedStatic;
+
+@RunWith(JUnit4.class)
+@Timeout(value = 2, unit = TimeUnit.MINUTES)
+public class ConsistencyGroupIT {
+  private static final String PROJECT_ID = "project-id";
+  private static final String REGION = "asia-east1";
+  private static final String CONSISTENCY_GROUP_NAME = "consistency-group";
+  private static final String DISK_NAME = "disk-for-consistency";
+
+  @Test
+  public void testCreateConsistencyGroupResourcePolicy() throws Exception {
+    try (MockedStatic<ResourcePoliciesClient> mockedResourcePoliciesClient =
+        mockStatic(ResourcePoliciesClient.class)) {
+      Operation operation = mock(Operation.class);
+      ResourcePoliciesClient mockClient = mock(ResourcePoliciesClient.class);
+      OperationFuture<Operation, Operation> mockFuture = mock(OperationFuture.class);
+
+      mockedResourcePoliciesClient.when(ResourcePoliciesClient::create).thenReturn(mockClient);
+      when(mockClient.insertAsync(any(InsertResourcePolicyRequest.class)))
+          .thenReturn(mockFuture);
+      when(mockFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(operation);
+      when(operation.getStatus()).thenReturn(Operation.Status.DONE);
+
+      Operation.Status status = CreateConsistencyGroup.createConsistencyGroup(
+          PROJECT_ID, REGION, CONSISTENCY_GROUP_NAME);
+
+      verify(mockClient, times(1)).insertAsync(any(InsertResourcePolicyRequest.class));
+      verify(mockFuture, times(1)).get(anyLong(), any(TimeUnit.class));
+      assertEquals(Operation.Status.DONE, status);
+    }
+  }
+
+  @Test
+  public void testAddRegionalDiskToConsistencyGroup() throws Exception {
+    try (MockedStatic<RegionDisksClient> mockedRegionDisksClient =
+        mockStatic(RegionDisksClient.class)) {
+      Operation operation = mock(Operation.class);
+      RegionDisksClient mockClient = mock(RegionDisksClient.class);
+      OperationFuture<Operation, Operation> mockFuture = mock(OperationFuture.class);
+
+      mockedRegionDisksClient.when(RegionDisksClient::create).thenReturn(mockClient);
+      when(mockClient.addResourcePoliciesAsync(any(AddResourcePoliciesRegionDiskRequest.class)))
+          .thenReturn(mockFuture);
+      when(mockFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(operation);
+      when(operation.getStatus()).thenReturn(Operation.Status.DONE);
+
+      Operation.Status status = AddDiskToConsistencyGroup.addDiskToConsistencyGroup(
+          PROJECT_ID, REGION, DISK_NAME, CONSISTENCY_GROUP_NAME, REGION);
+
+      verify(mockClient, times(1))
+          .addResourcePoliciesAsync(any(AddResourcePoliciesRegionDiskRequest.class));
+      verify(mockFuture, times(1)).get(anyLong(), any(TimeUnit.class));
+      assertEquals(Operation.Status.DONE, status);
+    }
+  }
+
+  @Test
+  public void testRemoveDiskFromConsistencyGroup() throws Exception {
+    try (MockedStatic<RegionDisksClient> mockedRegionDisksClient =
+        mockStatic(RegionDisksClient.class)) {
+      Operation operation = mock(Operation.class);
+      RegionDisksClient mockClient = mock(RegionDisksClient.class);
+      OperationFuture<Operation, Operation> mockFuture = mock(OperationFuture.class);
+
+      mockedRegionDisksClient.when(RegionDisksClient::create).thenReturn(mockClient);
+      when(mockClient.removeResourcePoliciesAsync(
+          any(RemoveResourcePoliciesRegionDiskRequest.class))).thenReturn(mockFuture);
+      when(mockFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(operation);
+      when(operation.getStatus()).thenReturn(Operation.Status.DONE);
+
+      Operation.Status status = RemoveDiskFromConsistencyGroup.removeDiskFromConsistencyGroup(
+          PROJECT_ID, REGION, DISK_NAME, CONSISTENCY_GROUP_NAME, REGION);
+
+      verify(mockClient, times(1))
+          .removeResourcePoliciesAsync(any(RemoveResourcePoliciesRegionDiskRequest.class));
+      verify(mockFuture, times(1)).get(anyLong(), any(TimeUnit.class));
+      assertEquals(Operation.Status.DONE, status);
+    }
+  }
+
+  @Test
+  public void testDeleteConsistencyGroup() throws Exception {
+    try (MockedStatic<ResourcePoliciesClient> mockedResourcePoliciesClient =
+        mockStatic(ResourcePoliciesClient.class)) {
+      Operation operation = mock(Operation.class);
+      ResourcePoliciesClient mockClient = mock(ResourcePoliciesClient.class);
+      OperationFuture<Operation, Operation> mockFuture = mock(OperationFuture.class);
+
+      mockedResourcePoliciesClient.when(ResourcePoliciesClient::create).thenReturn(mockClient);
+      when(mockClient.deleteAsync(PROJECT_ID, REGION, CONSISTENCY_GROUP_NAME))
+          .thenReturn(mockFuture);
+      when(mockFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(operation);
+      when(operation.getStatus()).thenReturn(Operation.Status.DONE);
+
+      Operation.Status status = DeleteConsistencyGroup.deleteConsistencyGroup(
+          PROJECT_ID, REGION, CONSISTENCY_GROUP_NAME);
+
+      verify(mockClient, times(1))
+          .deleteAsync(PROJECT_ID, REGION, CONSISTENCY_GROUP_NAME);
+      verify(mockFuture, times(1)).get(anyLong(), any(TimeUnit.class));
+      assertEquals(Operation.Status.DONE, status);
+    }
+  }
+
+  @Test
+  public void testListRegionalDisksInConsistencyGroup() throws Exception {
+    try (MockedStatic<RegionDisksClient> mockedRegionDisksClient =
+        mockStatic(RegionDisksClient.class)) {
+      RegionDisksClient mockClient = mock(RegionDisksClient.class);
+      RegionDisksClient.ListPagedResponse mockResponse =
+          mock(RegionDisksClient.ListPagedResponse.class);
+
+      mockedRegionDisksClient.when(RegionDisksClient::create).thenReturn(mockClient);
+      when(mockClient.list(any(ListRegionDisksRequest.class)))
+          .thenReturn(mockResponse);
+
+      ListRegionalDisksInConsistencyGroup.listRegionalDisksInConsistencyGroup(
+          PROJECT_ID, CONSISTENCY_GROUP_NAME, REGION, REGION);
+
+      verify(mockClient, times(1))
+          .list(any(ListRegionDisksRequest.class));
+      verify(mockResponse, times(1)).iterateAll();
+    }
+  }
+
+  @Test
+  public void testListZonalDisksInConsistencyGroup() throws Exception {
+    try (MockedStatic<DisksClient> mockedDisksClient =
+        mockStatic(DisksClient.class)) {
+      DisksClient mockClient = mock(DisksClient.class);
+      DisksClient.ListPagedResponse mockResponse =
+          mock(DisksClient.ListPagedResponse.class);
+
+      mockedDisksClient.when(DisksClient::create).thenReturn(mockClient);
+      when(mockClient.list(any(ListDisksRequest.class)))
+          .thenReturn(mockResponse);
+
+      ListZonalDisksInConsistencyGroup.listZonalDisksInConsistencyGroup(
+          PROJECT_ID, CONSISTENCY_GROUP_NAME, REGION, REGION);
+
+      verify(mockClient, times(1))
+          .list(any(ListDisksRequest.class));
+      verify(mockResponse, times(1)).iterateAll();
+    }
+  }
+}
\ No newline at end of file
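All of these unit tests follow the same pattern: the client's static create() factory is stubbed with Mockito's mockStatic so that no real Compute Engine client (and no network call) is ever constructed. A minimal self-contained sketch of that pattern, assuming mockito-core 5+ (or mockito-inline) and the google-cloud-compute library on the classpath; the class name is illustrative, not part of the PR:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.mockStatic;

import com.google.cloud.compute.v1.DisksClient;
import org.mockito.MockedStatic;

public class MockStaticFactorySketch {
  public static void main(String[] args) {
    // Route DisksClient.create() to a mock for the lifetime of the try block, so code
    // under test that calls DisksClient.create() never touches the real API.
    try (MockedStatic<DisksClient> mockedStatic = mockStatic(DisksClient.class)) {
      DisksClient mockClient = mock(DisksClient.class);
      mockedStatic.when(DisksClient::create).thenReturn(mockClient);
      // Code under test would run here and receive mockClient from create().
    } // The static stub is deregistered when the try block exits.
  }
}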
diff --git a/compute/cloud-client/src/test/java/compute/disks/CreateHyperdiskIT.java b/compute/cloud-client/src/test/java/compute/disks/CreateHyperdiskIT.java
new file mode 100644
index 00000000000..b54af43baf4
--- /dev/null
+++ b/compute/cloud-client/src/test/java/compute/disks/CreateHyperdiskIT.java
@@ -0,0 +1,79 @@
+/*
+* Copyright 2024 Google LLC
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*    http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package compute.disks;
+
+import static com.google.common.truth.Truth.assertWithMessage;
+
+import com.google.cloud.compute.v1.Disk;
+import java.io.IOException;
+import java.util.UUID;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import org.junit.Assert;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+@RunWith(JUnit4.class)
+@Timeout(value = 3, unit = TimeUnit.MINUTES)
+public class CreateHyperdiskIT {
+  private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT");
+  private static final String ZONE = "us-west1-a";
+  private static final String HYPERDISK_NAME = "test-hyperdisk-enc-" + UUID.randomUUID();
+
+  // Check if the required environment variables are set.
+  public static void requireEnvVar(String envVarName) {
+    assertWithMessage(String.format("Missing environment variable '%s' ", envVarName))
+        .that(System.getenv(envVarName)).isNotEmpty();
+  }
+
+  @BeforeAll
+  public static void setUp()
+      throws IOException, ExecutionException, InterruptedException, TimeoutException {
+    requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS");
+    requireEnvVar("GOOGLE_CLOUD_PROJECT");
+  }
+
+  @AfterAll
+  public static void cleanup()
+      throws IOException, InterruptedException, ExecutionException, TimeoutException {
+    // Delete disk created for testing.
+    DeleteDisk.deleteDisk(PROJECT_ID, ZONE, HYPERDISK_NAME);
+  }
+
+  @Test
+  public void testCreateHyperdisk()
+      throws IOException, ExecutionException, InterruptedException, TimeoutException {
+    String diskType = String.format("zones/%s/diskTypes/hyperdisk-balanced", ZONE);
+
+    Disk hyperdisk = CreateHyperdisk
+        .createHyperdisk(PROJECT_ID, ZONE, HYPERDISK_NAME, diskType,
+            10, 3000, 140);
+
+    Assert.assertNotNull(hyperdisk);
+    Assert.assertEquals(HYPERDISK_NAME, hyperdisk.getName());
+    Assert.assertEquals(3000, hyperdisk.getProvisionedIops());
+    Assert.assertEquals(140, hyperdisk.getProvisionedThroughput());
+    Assert.assertEquals(10, hyperdisk.getSizeGb());
+    Assert.assertTrue(hyperdisk.getType().contains("hyperdisk-balanced"));
+    Assert.assertTrue(hyperdisk.getZone().contains(ZONE));
+  }
+}
\ No newline at end of file
diff --git a/compute/cloud-client/src/test/java/compute/disks/HyperdiskIT.java b/compute/cloud-client/src/test/java/compute/disks/HyperdiskIT.java
new file mode 100644
index 00000000000..d8fac41a64b
--- /dev/null
+++ b/compute/cloud-client/src/test/java/compute/disks/HyperdiskIT.java
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package compute.disks;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.mockStatic;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.compute.v1.Disk;
+import com.google.cloud.compute.v1.DisksClient;
+import com.google.cloud.compute.v1.InsertDiskRequest;
+import com.google.cloud.compute.v1.InsertStoragePoolRequest;
+import com.google.cloud.compute.v1.Operation;
+import com.google.cloud.compute.v1.StoragePool;
+import com.google.cloud.compute.v1.StoragePoolsClient;
+import compute.disks.storagepool.CreateDiskInStoragePool;
+import compute.disks.storagepool.CreateHyperdiskStoragePool;
+import java.util.concurrent.TimeUnit;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+import org.mockito.MockedStatic;
+
+@RunWith(JUnit4.class)
+@Timeout(value = 5, unit = TimeUnit.MINUTES)
+public class HyperdiskIT {
+  private static final String PROJECT_ID = "project-id";
+  private static final String ZONE = "asia-east1-a";
+  private static final String HYPERDISK_IN_POOL_NAME = "hyperdisk";
+  private static final String STORAGE_POOL_NAME = "storage-pool";
+  private static final String PERFORMANCE_PROVISIONING_TYPE = "advanced";
+  private static final String CAPACITY_PROVISIONING_TYPE = "advanced";
+
+  @Test
+  public void testCreateHyperdiskStoragePool() throws Exception {
+    String poolType = String.format(
+        "projects/%s/zones/%s/storagePoolTypes/%s", PROJECT_ID, ZONE, "hyperdisk-balanced");
+    StoragePool storagePool = StoragePool.newBuilder()
+        .setZone(ZONE)
+        .setName(STORAGE_POOL_NAME)
+        .setStoragePoolType(poolType)
+        .setCapacityProvisioningType(CAPACITY_PROVISIONING_TYPE)
+        .setPoolProvisionedCapacityGb(10240)
+        .setPoolProvisionedIops(10000)
+        .setPoolProvisionedThroughput(1024)
+        .setPerformanceProvisioningType(PERFORMANCE_PROVISIONING_TYPE)
+        .build();
+    try (MockedStatic<StoragePoolsClient> mockedStoragePoolsClient =
+        mockStatic(StoragePoolsClient.class)) {
+      StoragePoolsClient mockClient = mock(StoragePoolsClient.class);
+      OperationFuture<Operation, Operation> mockFuture =
+          mock(OperationFuture.class, RETURNS_DEEP_STUBS);
+      Operation operation = mock(Operation.class, RETURNS_DEEP_STUBS);
+
+      mockedStoragePoolsClient.when(StoragePoolsClient::create).thenReturn(mockClient);
+      when(mockClient.insertAsync(any(InsertStoragePoolRequest.class)))
+          .thenReturn(mockFuture);
+      when(mockFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(operation);
+      when(operation.getStatus()).thenReturn(Operation.Status.DONE);
+      when(mockClient.get(PROJECT_ID, ZONE, STORAGE_POOL_NAME)).thenReturn(storagePool);
+
+      StoragePool returnedStoragePool = CreateHyperdiskStoragePool
+          .createHyperdiskStoragePool(PROJECT_ID, ZONE, STORAGE_POOL_NAME, poolType,
+              CAPACITY_PROVISIONING_TYPE, 10240, 10000, 1024,
+              PERFORMANCE_PROVISIONING_TYPE);
+
+      verify(mockClient, times(1)).insertAsync(any(InsertStoragePoolRequest.class));
+      verify(mockFuture, times(1)).get(anyLong(), any(TimeUnit.class));
+      assertEquals(storagePool, returnedStoragePool);
+    }
+  }
+
+  @Test
+  public void testCreateDiskInStoragePool() throws Exception {
+    String diskType = String.format("zones/%s/diskTypes/hyperdisk-balanced", ZONE);
+    Disk expectedHyperdisk = Disk.newBuilder()
+        .setZone(ZONE)
+        .setName(HYPERDISK_IN_POOL_NAME)
+        .setType(diskType)
+        .setSizeGb(10L)
+        .setProvisionedIops(3000L)
+        .setProvisionedThroughput(140L)
+        .build();
+    String storagePoolLink = String.format(
+        "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/storagePools/%s",
+        PROJECT_ID, ZONE, STORAGE_POOL_NAME);
+
+    try (MockedStatic<DisksClient> mockedDisksClient = mockStatic(DisksClient.class)) {
+      DisksClient mockClient = mock(DisksClient.class);
+      OperationFuture<Operation, Operation> mockFuture =
+          mock(OperationFuture.class, RETURNS_DEEP_STUBS);
+      Operation operation = mock(Operation.class, RETURNS_DEEP_STUBS);
+
+      mockedDisksClient.when(DisksClient::create).thenReturn(mockClient);
+      when(mockClient.insertAsync(any(InsertDiskRequest.class))).thenReturn(mockFuture);
+      when(mockFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(operation);
+      when(operation.getStatus()).thenReturn(Operation.Status.DONE);
+      when(mockClient.get(PROJECT_ID, ZONE, HYPERDISK_IN_POOL_NAME)).thenReturn(expectedHyperdisk);
+
+      Disk returnedDisk = CreateDiskInStoragePool
+          .createDiskInStoragePool(PROJECT_ID, ZONE, HYPERDISK_IN_POOL_NAME, storagePoolLink,
+              diskType, 10, 3000, 140);
+
+      verify(mockClient, times(1)).insertAsync(any(InsertDiskRequest.class));
+      verify(mockFuture, times(1)).get(anyLong(), any(TimeUnit.class));
+      assertEquals(expectedHyperdisk, returnedDisk);
+    }
+  }
+}
\ No newline at end of file
diff --git a/compute/cloud-client/src/test/java/compute/disks/HyperdisksIT.java b/compute/cloud-client/src/test/java/compute/disks/HyperdisksIT.java
deleted file mode 100644
index b0d4b0fee91..00000000000
--- a/compute/cloud-client/src/test/java/compute/disks/HyperdisksIT.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
-* Copyright 2024 Google LLC
-*
-*
Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -package compute.disks; - -import static com.google.common.truth.Truth.assertWithMessage; - -import com.google.cloud.compute.v1.Disk; -import com.google.cloud.compute.v1.StoragePool; -import compute.Util; -import compute.disks.storagepool.CreateDiskInStoragePool; -import compute.disks.storagepool.CreateHyperdiskStoragePool; -import java.io.IOException; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import org.junit.Assert; -import org.junit.FixMethodOrder; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.junit.runners.MethodSorters; - -@RunWith(JUnit4.class) -@Timeout(value = 40, unit = TimeUnit.MINUTES) -@FixMethodOrder(MethodSorters.NAME_ASCENDING) -public class HyperdisksIT { - private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); - private static final String ZONE = "us-east1-c"; - private static String HYPERDISK_NAME; - private static String HYPERDISK_IN_POOL_NAME; - private static String STORAGE_POOL_NAME; - - // Check if the required environment variables are set. - public static void requireEnvVar(String envVarName) { - assertWithMessage(String.format("Missing environment variable '%s' ", envVarName)) - .that(System.getenv(envVarName)).isNotEmpty(); - } - - @BeforeAll - public static void setUp() - throws IOException, ExecutionException, InterruptedException, TimeoutException { - requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); - requireEnvVar("GOOGLE_CLOUD_PROJECT"); - HYPERDISK_NAME = "test-hyperdisk-enc-" + UUID.randomUUID(); - HYPERDISK_IN_POOL_NAME = "test-hyperdisk-enc-" + UUID.randomUUID(); - STORAGE_POOL_NAME = "test-storage-pool-enc-" + UUID.randomUUID(); - - Util.cleanUpExistingDisks("test-hyperdisk-enc-", PROJECT_ID, ZONE); - Util.cleanUpExistingStoragePool("test-storage-pool-enc-", PROJECT_ID, ZONE); - } - - @AfterAll - public static void cleanup() - throws IOException, InterruptedException, ExecutionException, TimeoutException { - // Delete all disks created for testing. 
- DeleteDisk.deleteDisk(PROJECT_ID, ZONE, HYPERDISK_NAME); - //DeleteDisk.deleteDisk(PROJECT_ID, ZONE, HYPERDISK_IN_POOL_NAME); - - //Util.deleteStoragePool(PROJECT_ID, ZONE, STORAGE_POOL_NAME); - } - - @Test - public void stage1_CreateHyperdiskTest() - throws IOException, ExecutionException, InterruptedException, TimeoutException { - String diskType = String.format("zones/%s/diskTypes/hyperdisk-balanced", ZONE); - - Disk hyperdisk = CreateHyperdisk - .createHyperdisk(PROJECT_ID, ZONE, HYPERDISK_NAME, diskType, - 10, 3000, 140); - - Assert.assertNotNull(hyperdisk); - Assert.assertEquals(HYPERDISK_NAME, hyperdisk.getName()); - Assert.assertEquals(3000, hyperdisk.getProvisionedIops()); - Assert.assertEquals(140, hyperdisk.getProvisionedThroughput()); - Assert.assertEquals(10, hyperdisk.getSizeGb()); - Assert.assertTrue(hyperdisk.getType().contains("hyperdisk-balanced")); - Assert.assertTrue(hyperdisk.getZone().contains(ZONE)); - } - - @Disabled - @Test - public void stage1_CreateHyperdiskStoragePoolTest() - throws IOException, ExecutionException, InterruptedException, TimeoutException { - String poolType = String.format("projects/%s/zones/%s/storagePoolTypes/hyperdisk-balanced", - PROJECT_ID, ZONE); - StoragePool storagePool = CreateHyperdiskStoragePool - .createHyperdiskStoragePool(PROJECT_ID, ZONE, STORAGE_POOL_NAME, poolType, - "advanced", 10240, 10000, 10240); - - Assert.assertNotNull(storagePool); - Assert.assertEquals(STORAGE_POOL_NAME, storagePool.getName()); - Assert.assertEquals(10000, storagePool.getPoolProvisionedIops()); - Assert.assertEquals(10240, storagePool.getPoolProvisionedThroughput()); - Assert.assertEquals(10240, storagePool.getPoolProvisionedCapacityGb()); - Assert.assertTrue(storagePool.getStoragePoolType().contains("hyperdisk-balanced")); - Assert.assertTrue(storagePool.getCapacityProvisioningType().equalsIgnoreCase("advanced")); - Assert.assertTrue(storagePool.getZone().contains(ZONE)); - } - - @Disabled - @Test - public void stage2_CreateHyperdiskStoragePoolTest() - throws IOException, ExecutionException, InterruptedException, TimeoutException { - String diskType = String.format("zones/%s/diskTypes/hyperdisk-balanced", ZONE); - String storagePoolLink = String - .format("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/storagePools/%s", - PROJECT_ID, ZONE, STORAGE_POOL_NAME); - Disk disk = CreateDiskInStoragePool - .createDiskInStoragePool(PROJECT_ID, ZONE, HYPERDISK_IN_POOL_NAME, storagePoolLink, - diskType, 10, 3000, 140); - - Assert.assertNotNull(disk); - Assert.assertEquals(HYPERDISK_IN_POOL_NAME, disk.getName()); - Assert.assertTrue(disk.getStoragePool().contains(STORAGE_POOL_NAME)); - Assert.assertEquals(3000, disk.getProvisionedIops()); - Assert.assertEquals(140, disk.getProvisionedThroughput()); - Assert.assertEquals(10, disk.getSizeGb()); - Assert.assertTrue(disk.getType().contains("hyperdisk-balanced")); - Assert.assertTrue(disk.getZone().contains(ZONE)); - } -} \ No newline at end of file diff --git a/compute/cloud-client/src/test/java/compute/reservation/ConsumeReservationsIT.java b/compute/cloud-client/src/test/java/compute/reservation/ConsumeReservationsIT.java new file mode 100644 index 00000000000..96c8b22a4e0 --- /dev/null +++ b/compute/cloud-client/src/test/java/compute/reservation/ConsumeReservationsIT.java @@ -0,0 +1,163 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package compute.reservation;
+
+import static com.google.cloud.compute.v1.ReservationAffinity.ConsumeReservationType.ANY_RESERVATION;
+import static com.google.cloud.compute.v1.ReservationAffinity.ConsumeReservationType.SPECIFIC_RESERVATION;
+import static com.google.common.truth.Truth.assertThat;
+import static com.google.common.truth.Truth.assertWithMessage;
+import static org.junit.Assert.assertNotNull;
+
+import com.google.api.gax.rpc.NotFoundException;
+import com.google.cloud.compute.v1.AllocationSpecificSKUAllocationReservedInstanceProperties;
+import com.google.cloud.compute.v1.AllocationSpecificSKUReservation;
+import com.google.cloud.compute.v1.Instance;
+import com.google.cloud.compute.v1.Reservation;
+import com.google.cloud.compute.v1.ReservationsClient;
+import compute.DeleteInstance;
+import java.io.IOException;
+import java.util.UUID;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import org.junit.Assert;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+@RunWith(JUnit4.class)
+@Timeout(value = 6, unit = TimeUnit.MINUTES)
+public class ConsumeReservationsIT {
+  private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT");
+  private static final String ZONE = "us-central1-a";
+  static String templateUUID = UUID.randomUUID().toString();
+  private static final String RESERVATION_NAME = "test-reservation-" + templateUUID;
+  private static final String INSTANCE_FOR_SPR = "test-instance-for-spr-" + templateUUID;
+  private static final String INSTANCE_FOR_ANY_MATCHING = "test-instance-" + templateUUID;
+  private static final String SPECIFIC_SHARED_INSTANCE = "test-instance-shared-" + templateUUID;
+  private static final String MACHINE_TYPE = "n1-standard-4";
+  private static final String SOURCE_IMAGE = "projects/debian-cloud/global/images/family/debian-11";
+  private static final String NETWORK_NAME = "default";
+  private static final long DISK_SIZE_GB = 10L;
+  private static final String MIN_CPU_PLATFORM = "Intel Skylake";
+
+  // Check if the required environment variables are set.
+  public static void requireEnvVar(String envVarName) {
+    assertWithMessage(String.format("Missing environment variable '%s' ", envVarName))
+        .that(System.getenv(envVarName)).isNotEmpty();
+  }
+
+  @BeforeAll
+  public static void setUp()
+      throws IOException, ExecutionException, InterruptedException, TimeoutException {
+    requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS");
+    requireEnvVar("GOOGLE_CLOUD_PROJECT");
+
+    ConsumeReservationsIT.createReservation(
+        PROJECT_ID, RESERVATION_NAME, ZONE);
+  }
+
+  @AfterAll
+  public static void cleanup()
+      throws IOException, ExecutionException, InterruptedException, TimeoutException {
+    // Delete all instances created for testing.
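+    // Instances that consume the reservation are removed before the reservation itself.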
+ DeleteInstance.deleteInstance(PROJECT_ID, ZONE, INSTANCE_FOR_SPR); + DeleteInstance.deleteInstance(PROJECT_ID, ZONE, INSTANCE_FOR_ANY_MATCHING); + DeleteInstance.deleteInstance(PROJECT_ID, ZONE, SPECIFIC_SHARED_INSTANCE); + + // Delete all reservations created for testing. + DeleteReservation.deleteReservation(PROJECT_ID, ZONE, RESERVATION_NAME); + + // Test that reservation is deleted + Assertions.assertThrows( + NotFoundException.class, + () -> GetReservation.getReservation(PROJECT_ID, RESERVATION_NAME, ZONE)); + } + + @Test + public void testConsumeAnyMatchingReservation() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + Instance instance = ConsumeAnyMatchingReservation + .createInstanceAsync(PROJECT_ID, ZONE, INSTANCE_FOR_ANY_MATCHING, + MACHINE_TYPE, SOURCE_IMAGE, DISK_SIZE_GB, NETWORK_NAME, MIN_CPU_PLATFORM); + + assertNotNull(instance); + Assert.assertEquals(ANY_RESERVATION.toString(), + instance.getReservationAffinity().getConsumeReservationType()); + } + + @Test + public void testConsumeSingleProjectReservation() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + Instance instance = ConsumeSingleProjectReservation.createInstanceAsync( + PROJECT_ID, ZONE, INSTANCE_FOR_SPR, RESERVATION_NAME, MACHINE_TYPE, + SOURCE_IMAGE, DISK_SIZE_GB, NETWORK_NAME, MIN_CPU_PLATFORM); + + assertNotNull(instance); + assertThat(instance.getReservationAffinity().getValuesList()) + .contains(RESERVATION_NAME); + Assert.assertEquals(SPECIFIC_RESERVATION.toString(), + instance.getReservationAffinity().getConsumeReservationType()); + } + + @Test + public void testConsumeSpecificSharedReservation() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + Instance instance = ConsumeSpecificSharedReservation.createInstanceAsync( + PROJECT_ID, ZONE, SPECIFIC_SHARED_INSTANCE, RESERVATION_NAME, MACHINE_TYPE, + SOURCE_IMAGE, DISK_SIZE_GB, NETWORK_NAME, MIN_CPU_PLATFORM); + + assertNotNull(instance); + Assert.assertTrue(instance.getReservationAffinity() + .getValuesList().get(0).contains(RESERVATION_NAME)); + Assert.assertEquals(SPECIFIC_RESERVATION.toString(), + instance.getReservationAffinity().getConsumeReservationType()); + } + + // Creates reservation with the given parameters. + public static void createReservation( + String projectId, String reservationName, String zone) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + boolean specificReservationRequired = true; + int numberOfVms = 3; + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. 
+ try (ReservationsClient reservationsClient = ReservationsClient.create()) { + Reservation reservation = + Reservation.newBuilder() + .setName(reservationName) + .setZone(zone) + .setSpecificReservationRequired(specificReservationRequired) + .setSpecificReservation( + AllocationSpecificSKUReservation.newBuilder() + .setCount(numberOfVms) + .setInstanceProperties( + AllocationSpecificSKUAllocationReservedInstanceProperties.newBuilder() + .setMachineType(MACHINE_TYPE) + .setMinCpuPlatform(MIN_CPU_PLATFORM) + .build()) + .build()) + .build(); + + reservationsClient.insertAsync(projectId, zone, reservation).get(3, TimeUnit.MINUTES); + } + } +} \ No newline at end of file diff --git a/compute/cloud-client/src/test/java/compute/reservation/CreateReservationFromVmIT.java b/compute/cloud-client/src/test/java/compute/reservation/CreateReservationFromVmIT.java new file mode 100644 index 00000000000..e98dd20ba22 --- /dev/null +++ b/compute/cloud-client/src/test/java/compute/reservation/CreateReservationFromVmIT.java @@ -0,0 +1,115 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.reservation; + +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.api.gax.rpc.NotFoundException; +import com.google.cloud.compute.v1.Instance; +import com.google.cloud.compute.v1.InstancesClient; +import com.google.cloud.compute.v1.Reservation; +import com.google.cloud.compute.v1.ReservationsClient; +import compute.CreateInstance; +import compute.DeleteInstance; +import compute.Util; +import java.io.IOException; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +@Timeout(value = 3, unit = TimeUnit.MINUTES) +public class CreateReservationFromVmIT { + + private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); + private static final String ZONE = "us-east4-c"; + private static ReservationsClient reservationsClient; + private static InstancesClient instancesClient; + private static String reservationName; + private static String instanceForReservation; + static String javaVersion = System.getProperty("java.version").substring(0, 2); + + // Check if the required environment variables are set. 
+ public static void requireEnvVar(String envVarName) { + assertWithMessage(String.format("Missing environment variable '%s' ", envVarName)) + .that(System.getenv(envVarName)).isNotEmpty(); + } + + @BeforeAll + public static void setUp() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); + requireEnvVar("GOOGLE_CLOUD_PROJECT"); + reservationsClient = ReservationsClient.create(); + instancesClient = InstancesClient.create(); + + reservationName = "test-reservation-from-vm-" + javaVersion + "-" + + UUID.randomUUID().toString().substring(0, 8); + instanceForReservation = "test-instance-for-reserv-" + javaVersion + "-" + + UUID.randomUUID().toString().substring(0, 8); + + // Cleanup existing stale resources. + Util.cleanUpExistingInstances("test-instance-for-reserv-" + javaVersion, PROJECT_ID, ZONE); + Util.cleanUpExistingReservations("test-reservation-from-vm-" + javaVersion, PROJECT_ID, ZONE); + + CreateInstance.createInstance(PROJECT_ID, ZONE, instanceForReservation); + } + + @AfterAll + public static void cleanup() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // Delete resources created for testing. + DeleteInstance.deleteInstance(PROJECT_ID, ZONE, instanceForReservation); + + reservationsClient.close(); + instancesClient.close(); + } + + @Test + public void testCreateComputeReservationFromVm() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + CreateReservationFromVm.createComputeReservationFromVm( + PROJECT_ID, ZONE, reservationName, instanceForReservation); + + Instance instance = instancesClient.get(PROJECT_ID, ZONE, instanceForReservation); + Reservation reservation = + reservationsClient.get(PROJECT_ID, ZONE, reservationName); + + Assertions.assertNotNull(reservation); + assertThat(reservation.getName()).isEqualTo(reservationName); + Assertions.assertEquals(instance.getMinCpuPlatform(), + reservation.getSpecificReservation().getInstanceProperties().getMinCpuPlatform()); + Assertions.assertEquals(instance.getGuestAcceleratorsList(), + reservation.getSpecificReservation().getInstanceProperties().getGuestAcceleratorsList()); + + DeleteReservation.deleteReservation(PROJECT_ID, ZONE, reservationName); + + // Test that reservation is deleted + Assertions.assertThrows( + NotFoundException.class, + () -> GetReservation.getReservation(PROJECT_ID, reservationName, ZONE)); + } +} diff --git a/compute/cloud-client/src/test/java/compute/reservation/CrudOperationsReservationIT.java b/compute/cloud-client/src/test/java/compute/reservation/CrudOperationsReservationIT.java index 9c5ea9710e4..0dd9209ae5f 100644 --- a/compute/cloud-client/src/test/java/compute/reservation/CrudOperationsReservationIT.java +++ b/compute/cloud-client/src/test/java/compute/reservation/CrudOperationsReservationIT.java @@ -22,10 +22,7 @@ import com.google.api.gax.rpc.NotFoundException; import com.google.cloud.compute.v1.Reservation; -import compute.Util; -import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.io.PrintStream; import java.util.List; import java.util.UUID; import java.util.concurrent.ExecutionException; @@ -35,24 +32,17 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.MethodOrderer; -import org.junit.jupiter.api.Order; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestMethodOrder; import 
org.junit.jupiter.api.Timeout; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @RunWith(JUnit4.class) @Timeout(value = 6, unit = TimeUnit.MINUTES) -@TestMethodOrder(MethodOrderer.OrderAnnotation.class) public class CrudOperationsReservationIT { - private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); - private static final String ZONE = "us-central1-b"; - static String javaVersion = System.getProperty("java.version").substring(0, 2); - private static final String RESERVATION_NAME = "test-reservation-" + javaVersion + "-" - + UUID.randomUUID().toString().substring(0, 8); + private static final String ZONE = "us-central1-a"; + private static final String RESERVATION_NAME = "test-reservation-" + UUID.randomUUID(); private static final int NUMBER_OF_VMS = 3; // Check if the required environment variables are set. @@ -67,40 +57,21 @@ public static void setUp() requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); requireEnvVar("GOOGLE_CLOUD_PROJECT"); - // Cleanup existing stale resources. - Util.cleanUpExistingReservations("test-reservation-" + javaVersion, PROJECT_ID, ZONE); + CreateReservation.createReservation(PROJECT_ID, RESERVATION_NAME, NUMBER_OF_VMS, ZONE); } @AfterAll public static void cleanup() throws IOException, ExecutionException, InterruptedException, TimeoutException { - // Delete all reservations created for testing. DeleteReservation.deleteReservation(PROJECT_ID, ZONE, RESERVATION_NAME); - // Test that reservations are deleted + // Test that reservation is deleted Assertions.assertThrows( NotFoundException.class, () -> GetReservation.getReservation(PROJECT_ID, RESERVATION_NAME, ZONE)); } @Test - @Order(1) - public void testCreateReservation() - throws IOException, ExecutionException, InterruptedException, TimeoutException { - final PrintStream out = System.out; - ByteArrayOutputStream stdOut = new ByteArrayOutputStream(); - System.setOut(new PrintStream(stdOut)); - CreateReservation.createReservation( - PROJECT_ID, RESERVATION_NAME, NUMBER_OF_VMS, ZONE); - - assertThat(stdOut.toString()).contains("Reservation created. 
Operation Status: DONE"); - - stdOut.close(); - System.setOut(out); - } - - @Test - @Order(3) public void testGetReservation() throws IOException { Reservation reservation = GetReservation.getReservation( @@ -111,7 +82,6 @@ public void testGetReservation() } @Test - @Order(4) public void testListReservation() throws IOException { List reservations = ListReservations.listReservations(PROJECT_ID, ZONE); @@ -119,4 +89,15 @@ public void testListReservation() throws IOException { assertThat(reservations).isNotNull(); Assert.assertTrue(reservations.get(0).getName().contains("test-")); } + + @Test + public void testUpdateVmsForReservation() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + int newNumberOfVms = 1; + Reservation reservation = UpdateVmsForReservation.updateVmsForReservation( + PROJECT_ID, ZONE, RESERVATION_NAME, newNumberOfVms); + + Assert.assertNotNull(reservation); + Assert.assertEquals(newNumberOfVms, reservation.getSpecificReservation().getCount()); + } } \ No newline at end of file diff --git a/compute/cloud-client/src/test/java/compute/reservation/ReservationIT.java b/compute/cloud-client/src/test/java/compute/reservation/ReservationIT.java index 48e25d2dd7b..22be75ea98a 100644 --- a/compute/cloud-client/src/test/java/compute/reservation/ReservationIT.java +++ b/compute/cloud-client/src/test/java/compute/reservation/ReservationIT.java @@ -18,6 +18,7 @@ import static com.google.common.truth.Truth.assertThat; import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertNotNull; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -35,7 +36,6 @@ import compute.CreateRegionalInstanceTemplate; import compute.DeleteInstanceTemplate; import compute.DeleteRegionalInstanceTemplate; -import compute.Util; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; @@ -45,51 +45,40 @@ import java.util.concurrent.TimeoutException; import org.junit.Assert; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.MethodOrderer; -import org.junit.jupiter.api.Order; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestMethodOrder; import org.junit.jupiter.api.Timeout; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @RunWith(JUnit4.class) @Timeout(value = 6, unit = TimeUnit.MINUTES) -@TestMethodOrder(MethodOrderer.OrderAnnotation.class) public class ReservationIT { - private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); private static final String ZONE = "asia-south1-a"; private static final String REGION = ZONE.substring(0, ZONE.lastIndexOf('-')); - static String javaVersion = System.getProperty("java.version").substring(0, 2); - private static ReservationsClient reservationsClient; - private static final String RESERVATION_NAME_GLOBAL = "test-reservation-global-" + javaVersion - + "-" + UUID.randomUUID().toString().substring(0, 8); - private static final String RESERVATION_NAME_REGIONAL = "test-reservation-regional-" - + javaVersion + "-" + UUID.randomUUID().toString().substring(0, 8); + static String templateUUID = UUID.randomUUID().toString(); + private static final String RESERVATION_NAME_GLOBAL = "test-reservation-global-" + templateUUID; + private static final String 
RESERVATION_NAME_REGIONAL = + "test-reservation-regional-" + templateUUID; private static final String GLOBAL_INSTANCE_TEMPLATE_NAME = - "test-global-inst-temp-" + javaVersion + "-" + UUID.randomUUID().toString().substring(0, 8); - private static final String REGIONAL_INSTANCE_TEMPLATE_NAME = "test-regional-inst-temp-" - + javaVersion + "-" + UUID.randomUUID().toString().substring(0, 8); + "test-global-inst-temp-" + templateUUID; + private static final String REGIONAL_INSTANCE_TEMPLATE_NAME = + "test-regional-inst-temp-" + templateUUID; private static final String GLOBAL_INSTANCE_TEMPLATE_URI = String.format( "projects/%s/global/instanceTemplates/%s", PROJECT_ID, GLOBAL_INSTANCE_TEMPLATE_NAME); private static final String REGIONAL_INSTANCE_TEMPLATE_URI = String.format("projects/%s/regions/%s/instanceTemplates/%s", PROJECT_ID, REGION, REGIONAL_INSTANCE_TEMPLATE_NAME); private static final String SPECIFIC_SHARED_INSTANCE_TEMPLATE_NAME = - "test-shared-inst-temp-" + javaVersion + "-" - + UUID.randomUUID().toString().substring(0, 8); + "test-shared-inst-temp-" + templateUUID; private static final String INSTANCE_TEMPLATE_SHARED_RESERV_URI = String.format("projects/%s/global/instanceTemplates/%s", PROJECT_ID, SPECIFIC_SHARED_INSTANCE_TEMPLATE_NAME); - private static final String RESERVATION_NAME_SHARED = "test-reservation-shared-" + javaVersion - + "-" + UUID.randomUUID().toString().substring(0, 8); + private static final String RESERVATION_NAME_SHARED = "test-reservation-shared-" + templateUUID; private static final int NUMBER_OF_VMS = 3; - private ByteArrayOutputStream stdOut; + private static ByteArrayOutputStream stdOut; // Check if the required environment variables are set. public static void requireEnvVar(String envVarName) { @@ -102,22 +91,9 @@ public static void setUp() throws IOException, ExecutionException, InterruptedException, TimeoutException { requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); requireEnvVar("GOOGLE_CLOUD_PROJECT"); - final PrintStream out = System.out; - ByteArrayOutputStream stdOut = new ByteArrayOutputStream(); + stdOut = new ByteArrayOutputStream(); System.setOut(new PrintStream(stdOut)); - // Cleanup existing stale resources. - Util.cleanUpExistingInstanceTemplates("test-global-inst-temp-" + javaVersion, PROJECT_ID); - Util.cleanUpExistingRegionalInstanceTemplates( - "test-regional-inst-temp-" + javaVersion, PROJECT_ID, ZONE); - Util.cleanUpExistingReservations( - "test-reservation-global-" + javaVersion, PROJECT_ID, ZONE); - Util.cleanUpExistingReservations("test-reservation-regional-" + javaVersion, PROJECT_ID, ZONE); - Util.cleanUpExistingInstanceTemplates("test-shared-inst-temp-" + javaVersion, PROJECT_ID); - - // Initialize the client once for all tests - reservationsClient = ReservationsClient.create(); - // Create instance template with GLOBAL location. CreateInstanceTemplate.createInstanceTemplate(PROJECT_ID, GLOBAL_INSTANCE_TEMPLATE_NAME); assertThat(stdOut.toString()) @@ -129,16 +105,12 @@ public static void setUp() // Create instance template for shares reservation. CreateInstanceTemplate.createInstanceTemplate( PROJECT_ID, SPECIFIC_SHARED_INSTANCE_TEMPLATE_NAME); - - stdOut.close(); - System.setOut(out); } @AfterAll public static void cleanup() throws IOException, ExecutionException, InterruptedException, TimeoutException { final PrintStream out = System.out; - ByteArrayOutputStream stdOut = new ByteArrayOutputStream(); System.setOut(new PrintStream(stdOut)); // Delete instance template with GLOBAL location. 
@@ -173,34 +145,19 @@ public static void cleanup() NotFoundException.class, () -> GetReservation.getReservation(PROJECT_ID, RESERVATION_NAME_REGIONAL, ZONE)); - // Close the client after all tests - reservationsClient.close(); - stdOut.close(); System.setOut(out); } - @BeforeEach - public void beforeEach() { - stdOut = new ByteArrayOutputStream(); - System.setOut(new PrintStream(stdOut)); - } - - @AfterEach - public void afterEach() { - stdOut = null; - System.setOut(null); - } - @Test - @Order(1) public void testCreateReservationWithGlobalInstanceTemplate() throws IOException, ExecutionException, InterruptedException, TimeoutException { - CreateReservationForInstanceTemplate.createReservationForInstanceTemplate( + Reservation reservation = CreateReservationForInstanceTemplate + .createReservationForInstanceTemplate( PROJECT_ID, RESERVATION_NAME_GLOBAL, GLOBAL_INSTANCE_TEMPLATE_URI, NUMBER_OF_VMS, ZONE); - Reservation reservation = reservationsClient.get(PROJECT_ID, ZONE, RESERVATION_NAME_GLOBAL); + assertNotNull(reservation); Assert.assertTrue(reservation.getSpecificReservation() .getSourceInstanceTemplate().contains(GLOBAL_INSTANCE_TEMPLATE_NAME)); Assert.assertEquals(RESERVATION_NAME_GLOBAL, reservation.getName()); @@ -209,30 +166,18 @@ public void testCreateReservationWithGlobalInstanceTemplate() @Test public void testCreateReservationWithRegionInstanceTemplate() throws IOException, ExecutionException, InterruptedException, TimeoutException { - CreateReservationForInstanceTemplate.createReservationForInstanceTemplate( + Reservation reservation = CreateReservationForInstanceTemplate + .createReservationForInstanceTemplate( PROJECT_ID, RESERVATION_NAME_REGIONAL, REGIONAL_INSTANCE_TEMPLATE_URI, NUMBER_OF_VMS, ZONE); - Reservation reservation = reservationsClient.get(PROJECT_ID, ZONE, RESERVATION_NAME_REGIONAL); + assertNotNull(reservation); Assert.assertTrue(reservation.getSpecificReservation() .getSourceInstanceTemplate().contains(REGIONAL_INSTANCE_TEMPLATE_NAME)); Assert.assertTrue(reservation.getZone().contains(ZONE)); Assert.assertEquals(RESERVATION_NAME_REGIONAL, reservation.getName()); } - @Test - @Order(2) - public void testUpdateVmsForReservation() - throws IOException, ExecutionException, InterruptedException, TimeoutException { - int newNumberOfVms = 5; - UpdateVmsForReservation.updateVmsForReservation( - PROJECT_ID, ZONE, RESERVATION_NAME_GLOBAL, newNumberOfVms); - Reservation reservation = GetReservation.getReservation( - PROJECT_ID, RESERVATION_NAME_GLOBAL, ZONE); - - Assert.assertEquals(newNumberOfVms, reservation.getSpecificReservation().getCount()); - } - @Test public void testCreateSharedReservation() throws ExecutionException, InterruptedException, TimeoutException { diff --git a/compute/cloud-client/src/test/java/compute/reservation/WithoutConsumingReservationIT.java b/compute/cloud-client/src/test/java/compute/reservation/WithoutConsumingReservationIT.java new file mode 100644 index 00000000000..763b1e2df5f --- /dev/null +++ b/compute/cloud-client/src/test/java/compute/reservation/WithoutConsumingReservationIT.java @@ -0,0 +1,101 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.reservation; + +import static com.google.cloud.compute.v1.ReservationAffinity.ConsumeReservationType.NO_RESERVATION; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.cloud.compute.v1.Instance; +import com.google.cloud.compute.v1.InstanceTemplate; +import compute.DeleteInstance; +import compute.DeleteInstanceTemplate; +import java.io.IOException; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +@Timeout(value = 3, unit = TimeUnit.MINUTES) +public class WithoutConsumingReservationIT { + private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); + private static final String ZONE = "us-central1-a"; + static String templateUUID = UUID.randomUUID().toString(); + private static final String INSTANCE_NOT_CONSUME_RESERVATION_NAME = + "test-instance-not-consume-" + templateUUID; + private static final String TEMPLATE_NOT_CONSUME_RESERVATION_NAME = + "test-template-not-consume-" + templateUUID; + private static final String MACHINE_TYPE_NAME = "n1-standard-1"; + private static final String SOURCE_IMAGE = "projects/debian-cloud/global/images/family/debian-11"; + private static final String NETWORK_NAME = "default"; + private static final long DISK_SIZE_GB = 10L; + + // Check if the required environment variables are set. + public static void requireEnvVar(String envVarName) { + assertWithMessage(String.format("Missing environment variable '%s' ", envVarName)) + .that(System.getenv(envVarName)).isNotEmpty(); + } + + @BeforeAll + public static void setUp() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); + requireEnvVar("GOOGLE_CLOUD_PROJECT"); + } + + @AfterAll + public static void cleanup() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // Delete the instance created for testing.
+ DeleteInstance.deleteInstance(PROJECT_ID, ZONE, INSTANCE_NOT_CONSUME_RESERVATION_NAME); + DeleteInstanceTemplate.deleteInstanceTemplate( + PROJECT_ID, TEMPLATE_NOT_CONSUME_RESERVATION_NAME); + } + + @Test + public void testCreateInstanceNotConsumeReservation() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + Instance instance = CreateInstanceWithoutConsumingReservation + .createInstanceWithoutConsumingReservationAsync( + PROJECT_ID, ZONE, INSTANCE_NOT_CONSUME_RESERVATION_NAME, MACHINE_TYPE_NAME, + SOURCE_IMAGE, DISK_SIZE_GB, NETWORK_NAME); + + Assertions.assertNotNull(instance); + Assertions.assertEquals(NO_RESERVATION.toString(), + instance.getReservationAffinity().getConsumeReservationType()); + } + + @Test + public void testCreateTemplateNotConsumeReservation() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + InstanceTemplate template = + CreateTemplateWithoutConsumingReservation.createTemplateWithoutConsumingReservationAsync( + PROJECT_ID, TEMPLATE_NOT_CONSUME_RESERVATION_NAME, + MACHINE_TYPE_NAME, SOURCE_IMAGE); + + Assertions.assertNotNull(template); + Assertions.assertEquals(NO_RESERVATION.toString(), + template.getPropertiesOrBuilder().getReservationAffinity().getConsumeReservationType()); + } +} diff --git a/dataflow/snippets/pom.xml b/dataflow/snippets/pom.xml index 76a6d12a149..c642a0b6191 100755 --- a/dataflow/snippets/pom.xml +++ b/dataflow/snippets/pom.xml @@ -163,11 +163,10 @@ <version>${apache_beam.version}</version> - + <dependency> <groupId>org.apache.kafka</groupId> <artifactId>kafka-clients</artifactId> <version>3.8.0</version> - <scope>test</scope> </dependency> diff --git a/dataflow/snippets/src/main/java/com/example/dataflow/KafkaReadTopics.java b/dataflow/snippets/src/main/java/com/example/dataflow/KafkaReadTopics.java new file mode 100644 index 00000000000..a9d12f40fc3 --- /dev/null +++ b/dataflow/snippets/src/main/java/com/example/dataflow/KafkaReadTopics.java @@ -0,0 +1,111 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.dataflow; + +// [START dataflow_kafka_read_multi_topic] +import java.util.List; +import org.apache.beam.sdk.Pipeline; +import org.apache.beam.sdk.PipelineResult; +import org.apache.beam.sdk.io.TextIO; +import org.apache.beam.sdk.io.kafka.KafkaIO; +import org.apache.beam.sdk.options.Description; +import org.apache.beam.sdk.options.PipelineOptionsFactory; +import org.apache.beam.sdk.options.StreamingOptions; +import org.apache.beam.sdk.transforms.Filter; +import org.apache.beam.sdk.transforms.MapElements; +import org.apache.beam.sdk.values.TypeDescriptors; +import org.apache.kafka.common.serialization.LongDeserializer; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.joda.time.Duration; +import org.joda.time.Instant; + +public class KafkaReadTopics { + + // [END dataflow_kafka_read_multi_topic] + public interface Options extends StreamingOptions { + @Description("The Kafka bootstrap server. 
Example: localhost:9092") + String getBootstrapServer(); + + void setBootstrapServer(String value); + + @Description("The first Kafka topic to read from.") + String getTopic1(); + + void setTopic1(String value); + + @Description("The second Kafka topic to read from.") + String getTopic2(); + + void setTopic2(String value); + } + + public static PipelineResult.State main(String[] args) { + // Parse the pipeline options passed into the application. Example: + // --bootstrapServer=$BOOTSTRAP_SERVER --topic1=$KAFKA_TOPIC_1 --topic2=$KAFKA_TOPIC_2 + // For more information, see https://beam.apache.org/documentation/programming-guide/#configuring-pipeline-options + var options = PipelineOptionsFactory.fromArgs(args).withValidation().as(Options.class); + options.setStreaming(true); + + Pipeline pipeline = createPipeline(options); + return pipeline.run().waitUntilFinish(); + } + + // [START dataflow_kafka_read_multi_topic] + public static Pipeline createPipeline(Options options) { + String topic1 = options.getTopic1(); + String topic2 = options.getTopic2(); + + // Build the pipeline. + var pipeline = Pipeline.create(options); + var allTopics = pipeline + .apply(KafkaIO.<Long, String>read() + .withTopics(List.of(topic1, topic2)) + .withBootstrapServers(options.getBootstrapServer()) + .withKeyDeserializer(LongDeserializer.class) + .withValueDeserializer(StringDeserializer.class) + .withMaxReadTime(Duration.standardSeconds(10)) + .withStartReadTime(Instant.EPOCH) + ); + + // Create separate pipeline branches for each topic. + // The first branch filters on topic1. + allTopics + .apply(Filter.by(record -> record.getTopic().equals(topic1))) + .apply(MapElements + .into(TypeDescriptors.strings()) + .via(record -> record.getKV().getValue())) + .apply(TextIO.write() + .to(topic1) + .withSuffix(".txt") + .withNumShards(1) + ); + + // The second branch filters on topic2. + allTopics + .apply(Filter.by(record -> record.getTopic().equals(topic2))) + .apply(MapElements + .into(TypeDescriptors.strings()) + .via(record -> record.getKV().getValue())) + .apply(TextIO.write() + .to(topic2) + .withSuffix(".txt") + .withNumShards(1) + ); + return pipeline; + } +} +// [END dataflow_kafka_read_multi_topic] diff --git a/dataflow/snippets/src/main/java/com/example/dataflow/ReadFromStorage.java b/dataflow/snippets/src/main/java/com/example/dataflow/ReadFromStorage.java new file mode 100644 index 00000000000..4554466205f --- /dev/null +++ b/dataflow/snippets/src/main/java/com/example/dataflow/ReadFromStorage.java @@ -0,0 +1,61 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.dataflow; + +// [START dataflow_read_from_cloud_storage] +import org.apache.beam.sdk.Pipeline; +import org.apache.beam.sdk.PipelineResult; +import org.apache.beam.sdk.io.TextIO; +import org.apache.beam.sdk.options.Description; +import org.apache.beam.sdk.options.PipelineOptions; +import org.apache.beam.sdk.options.PipelineOptionsFactory; +import org.apache.beam.sdk.transforms.MapElements; +import org.apache.beam.sdk.values.TypeDescriptors; + +public class ReadFromStorage { + // [END dataflow_read_from_cloud_storage] + public interface Options extends PipelineOptions { + @Description("The Cloud Storage bucket to read from") + String getBucket(); + + void setBucket(String value); + } + + public static PipelineResult.State main(String[] args) { + var options = PipelineOptionsFactory.fromArgs(args).withValidation().as(Options.class); + Pipeline pipeline = createPipeline(options); + return pipeline.run().waitUntilFinish(); + } + + // [START dataflow_read_from_cloud_storage] + public static Pipeline createPipeline(Options options) { + var pipeline = Pipeline.create(options); + pipeline + // Read from a text file. + .apply(TextIO.read().from( + "gs://" + options.getBucket() + "/*.txt")) + .apply( + MapElements.into(TypeDescriptors.strings()) + .via( + (x -> { + System.out.println(x); + return x; + }))); + return pipeline; + } +} +// [END dataflow_read_from_cloud_storage] diff --git a/dataflow/snippets/src/test/java/com/example/dataflow/ApacheIcebergIT.java b/dataflow/snippets/src/test/java/com/example/dataflow/ApacheIcebergIT.java index 0a6e0149b65..1c7cfcbe213 100644 --- a/dataflow/snippets/src/test/java/com/example/dataflow/ApacheIcebergIT.java +++ b/dataflow/snippets/src/test/java/com/example/dataflow/ApacheIcebergIT.java @@ -53,7 +53,7 @@ public class ApacheIcebergIT { private ByteArrayOutputStream bout; - private PrintStream out; + private final PrintStream originalOut = System.out; private static final String CATALOG_NAME = "local"; private static final String TABLE_NAME = "table1"; @@ -112,8 +112,7 @@ private void writeTableRecord() @Before public void setUp() throws IOException { bout = new ByteArrayOutputStream(); - out = new PrintStream(bout); - System.setOut(out); + System.setOut(new PrintStream(bout)); // Create an Apache Iceberg catalog with a table. 
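// (Editor's note, assumption from the code below: the catalog's warehouse is the temporary local directory created by createTempDirectory.)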
warehouseDirectory = Files.createTempDirectory("test-warehouse"); @@ -131,7 +130,7 @@ public void setUp() throws IOException { @After public void tearDown() throws IOException { Files.deleteIfExists(Paths.get(OUTPUT_FILE_NAME)); - System.setOut(null); + System.setOut(originalOut); } @Test diff --git a/dataflow/snippets/src/test/java/com/example/dataflow/BigQueryWriteIT.java b/dataflow/snippets/src/test/java/com/example/dataflow/BigQueryWriteIT.java index 983d0c3e92e..e785010f961 100644 --- a/dataflow/snippets/src/test/java/com/example/dataflow/BigQueryWriteIT.java +++ b/dataflow/snippets/src/test/java/com/example/dataflow/BigQueryWriteIT.java @@ -47,7 +47,7 @@ public class BigQueryWriteIT { private static final String projectId = System.getenv("GOOGLE_CLOUD_PROJECT"); private ByteArrayOutputStream bout; - private PrintStream out; + private final PrintStream originalOut = System.out; private BigQuery bigquery; private String datasetName; private String tableName; @@ -65,8 +65,7 @@ private void createTable() { @Before public void setUp() throws InterruptedException { bout = new ByteArrayOutputStream(); - out = new PrintStream(bout); - System.setOut(out); + System.setOut(new PrintStream(bout)); bigquery = BigQueryOptions.getDefaultInstance().getService(); @@ -79,7 +78,7 @@ public void setUp() throws InterruptedException { public void tearDown() { bigquery.delete( DatasetId.of(projectId, datasetName), DatasetDeleteOption.deleteContents()); - System.setOut(null); + System.setOut(originalOut); } @Test diff --git a/dataflow/snippets/src/test/java/com/example/dataflow/BiqQueryReadIT.java b/dataflow/snippets/src/test/java/com/example/dataflow/BiqQueryReadIT.java index d4864f72c1e..837c1687726 100644 --- a/dataflow/snippets/src/test/java/com/example/dataflow/BiqQueryReadIT.java +++ b/dataflow/snippets/src/test/java/com/example/dataflow/BiqQueryReadIT.java @@ -45,7 +45,7 @@ public class BiqQueryReadIT { private static final String projectId = System.getenv("GOOGLE_CLOUD_PROJECT"); private ByteArrayOutputStream bout; - private PrintStream out; + private final PrintStream originalOut = System.out; private BigQuery bigquery; private String datasetName; private String tableName; @@ -53,8 +53,7 @@ public class BiqQueryReadIT { @Before public void setUp() throws InterruptedException { bout = new ByteArrayOutputStream(); - out = new PrintStream(bout); - System.setOut(out); + System.setOut(new PrintStream(bout)); bigquery = BigQueryOptions.getDefaultInstance().getService(); @@ -81,7 +80,7 @@ public void setUp() throws InterruptedException { public void tearDown() { bigquery.delete( DatasetId.of(projectId, datasetName), DatasetDeleteOption.deleteContents()); - System.setOut(null); + System.setOut(originalOut); } @Test diff --git a/dataflow/snippets/src/test/java/com/example/dataflow/KafkaReadIT.java b/dataflow/snippets/src/test/java/com/example/dataflow/KafkaReadIT.java index b5b1abba076..2c47dae1105 100644 --- a/dataflow/snippets/src/test/java/com/example/dataflow/KafkaReadIT.java +++ b/dataflow/snippets/src/test/java/com/example/dataflow/KafkaReadIT.java @@ -39,10 +39,13 @@ import org.testcontainers.utility.DockerImageName; public class KafkaReadIT { - private static final String TOPIC_NAME = "topic-" + UUID.randomUUID(); + private static final String[] TOPIC_NAMES = { + "topic-" + UUID.randomUUID(), + "topic-" + UUID.randomUUID() + }; - private static final String OUTPUT_FILE_NAME_PREFIX = UUID.randomUUID().toString(); - private static final String OUTPUT_FILE_NAME = OUTPUT_FILE_NAME_PREFIX + 
"-00000-of-00001.txt"; + // The TextIO connector appends this suffix to the pipeline output file. + private static final String OUTPUT_FILE_SUFFIX = "-00000-of-00001.txt"; private static KafkaContainer kafka; private static String bootstrapServer; @@ -54,26 +57,32 @@ public void setUp() throws ExecutionException, InterruptedException { kafka.start(); bootstrapServer = kafka.getBootstrapServers(); - // Create a topic. + // Create topics. Properties properties = new Properties(); properties.put("bootstrap.servers", bootstrapServer); AdminClient adminClient = AdminClient.create(properties); - var topic = new NewTopic(TOPIC_NAME, 1, (short) 1); - adminClient.createTopics(Arrays.asList(topic)); + for (String topicName : TOPIC_NAMES) { + var topic = new NewTopic(topicName, 1, (short) 1); + adminClient.createTopics(Arrays.asList(topic)); + } - // Send a message to the topic. - properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); + // Send messages to the topics. + properties.put("key.serializer", "org.apache.kafka.common.serialization.LongSerializer"); properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); - KafkaProducer producer = new KafkaProducer<>(properties); - ProducerRecord record = new ProducerRecord<>(TOPIC_NAME, "key-0", "event-0"); - Future future = producer.send(record); - future.get(); + KafkaProducer producer = new KafkaProducer<>(properties); + for (String topicName : TOPIC_NAMES) { + var record = new ProducerRecord<>(topicName, 0L, topicName + "-event-0"); + Future future = producer.send(record); + future.get(); + } } @After public void tearDown() throws IOException { kafka.stop(); - Files.deleteIfExists(Paths.get(OUTPUT_FILE_NAME)); + for (String topicName : TOPIC_NAMES) { + Files.deleteIfExists(Paths.get(topicName + OUTPUT_FILE_SUFFIX)); + } } @Test @@ -81,13 +90,28 @@ public void testApacheKafkaRead() throws IOException { PipelineResult.State state = KafkaRead.main(new String[] { "--runner=DirectRunner", "--bootstrapServer=" + bootstrapServer, - "--topic=" + TOPIC_NAME, - "--outputPath=" + OUTPUT_FILE_NAME_PREFIX + "--topic=" + TOPIC_NAMES[0], + "--outputPath=" + TOPIC_NAMES[0] // Use the topic name as the output file name. }); assertEquals(PipelineResult.State.DONE, state); + verifyOutput(TOPIC_NAMES[0]); + } + + @Test + public void testApacheKafkaReadTopics() throws IOException { + PipelineResult.State state = KafkaReadTopics.main(new String[] { + "--runner=DirectRunner", + "--bootstrapServer=" + bootstrapServer, + "--topic1=" + TOPIC_NAMES[0], + "--topic2=" + TOPIC_NAMES[1] + }); + assertEquals(PipelineResult.State.DONE, state); + verifyOutput(TOPIC_NAMES[0]); + verifyOutput(TOPIC_NAMES[1]); + } - // Verify the pipeline wrote the output. 
- String output = Files.readString(Paths.get(OUTPUT_FILE_NAME)); - assertTrue(output.contains("event-0")); + private void verifyOutput(String topic) throws IOException { + String output = Files.readString(Paths.get(topic + OUTPUT_FILE_SUFFIX)); + assertTrue(output.contains(topic + "-event-0")); } } diff --git a/dataflow/snippets/src/test/java/com/example/dataflow/PubSubWriteIT.java b/dataflow/snippets/src/test/java/com/example/dataflow/PubSubWriteIT.java index 19c4771e91d..fb82ae54543 100644 --- a/dataflow/snippets/src/test/java/com/example/dataflow/PubSubWriteIT.java +++ b/dataflow/snippets/src/test/java/com/example/dataflow/PubSubWriteIT.java @@ -47,7 +47,7 @@ public class PubSubWriteIT { private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); private ByteArrayOutputStream bout; - private PrintStream out; + private final PrintStream originalOut = System.out; private String topicId; private String subscriptionId; TopicAdminClient topicAdminClient; @@ -64,8 +64,7 @@ public void setUp() throws Exception { requireEnvVar("GOOGLE_CLOUD_PROJECT"); bout = new ByteArrayOutputStream(); - out = new PrintStream(bout); - System.setOut(out); + System.setOut(new PrintStream(bout)); topicId = "test_topic_" + UUID.randomUUID().toString().substring(0, 8); subscriptionId = topicId + "-sub"; @@ -84,7 +83,7 @@ public void setUp() throws Exception { public void tearDown() { subscriptionAdminClient.deleteSubscription(SubscriptionName.of(PROJECT_ID, subscriptionId)); topicAdminClient.deleteTopic(TopicName.of(PROJECT_ID, topicId)); - System.setOut(null); + System.setOut(originalOut); } @Test diff --git a/dataflow/snippets/src/test/java/com/example/dataflow/ReadFromStorageIT.java b/dataflow/snippets/src/test/java/com/example/dataflow/ReadFromStorageIT.java new file mode 100644 index 00000000000..d4e656b8264 --- /dev/null +++ b/dataflow/snippets/src/test/java/com/example/dataflow/ReadFromStorageIT.java @@ -0,0 +1,93 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.dataflow; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.testing.RemoteStorageHelper; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.apache.beam.sdk.PipelineResult; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ReadFromStorageIT { + + private static final String projectId = System.getenv("GOOGLE_CLOUD_PROJECT"); + + private ByteArrayOutputStream bout; + private final PrintStream originalOut = System.out; + + String bucketName; + Storage storage; + + private static final String[] lines = {"line 1", "line 2"}; + + @Before + public void setUp() { + // Redirect System.out to capture the pipeline output. + bout = new ByteArrayOutputStream(); + System.setOut(new PrintStream(bout)); + + // Create a Cloud Storage bucket with a text file. + RemoteStorageHelper helper = RemoteStorageHelper.create(); + storage = helper.getOptions().getService(); + bucketName = RemoteStorageHelper.generateBucketName(); + storage.create(BucketInfo.of(bucketName)); + + String objectName = "file1.txt"; + String contents = String.format("%s\n%s\n", lines[0], lines[1]); + + BlobId blobId = BlobId.of(bucketName, objectName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).build(); + byte[] content = contents.getBytes(StandardCharsets.UTF_8); + + storage.create(blobInfo, content); + } + + @After + public void tearDown() throws ExecutionException, InterruptedException { + RemoteStorageHelper.forceDelete(storage, bucketName, 5, TimeUnit.SECONDS); + + System.setOut(originalOut); + bout.reset(); + } + + @Test + public void readFromStorage_shouldReadFile() throws Exception { + + PipelineResult.State state = ReadFromStorage.main( + new String[] {"--runner=DirectRunner", "--bucket=" + bucketName}); + assertEquals(PipelineResult.State.DONE, state); + + String got = bout.toString(); + assertTrue(got.contains(lines[0])); + assertTrue(got.contains(lines[1])); + } +} diff --git a/dataplex/quickstart/pom.xml b/dataplex/quickstart/pom.xml new file mode 100644 index 00000000000..07173434647 --- /dev/null +++ b/dataplex/quickstart/pom.xml @@ -0,0 +1,58 @@ + <?xml version="1.0" encoding="UTF-8"?> + <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + <modelVersion>4.0.0</modelVersion> + + <groupId>dataplex</groupId> + <artifactId>dataplex-quickstart</artifactId> + <packaging>jar</packaging> + <name>Google Dataplex Quickstart</name> + + <parent> + <groupId>com.google.cloud.samples</groupId> + <artifactId>shared-configuration</artifactId> + <version>1.2.2</version> + </parent> + + <properties> + <maven.compiler.source>11</maven.compiler.source> + <maven.compiler.target>11</maven.compiler.target> + <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> + </properties> + + <dependencyManagement> + <dependencies> + <dependency> + <groupId>com.google.cloud</groupId> + <artifactId>libraries-bom</artifactId> + <version>26.49.0</version> + <type>pom</type> + <scope>import</scope> + </dependency> + </dependencies> + </dependencyManagement> + + <dependencies> + <dependency> + <groupId>com.google.cloud</groupId> + <artifactId>google-cloud-dataplex</artifactId> + </dependency> + <dependency> + <groupId>junit</groupId> + <artifactId>junit</artifactId> + <version>4.13.2</version> + <scope>test</scope> + </dependency> + <dependency> + <groupId>com.google.truth</groupId> + <artifactId>truth</artifactId> + <version>1.4.4</version> + <scope>test</scope> + </dependency> + </dependencies> + </project> diff --git a/dataplex/quickstart/src/main/java/dataplex/Quickstart.java b/dataplex/quickstart/src/main/java/dataplex/Quickstart.java new file mode 100644 index 00000000000..177d8c9a3d3 --- /dev/null +++ b/dataplex/quickstart/src/main/java/dataplex/Quickstart.java @@ -0,0 +1,251 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package dataplex; + +// [START dataplex_quickstart] +import com.google.cloud.dataplex.v1.Aspect; +import com.google.cloud.dataplex.v1.AspectType; +import com.google.cloud.dataplex.v1.CatalogServiceClient; +import com.google.cloud.dataplex.v1.Entry; +import com.google.cloud.dataplex.v1.EntryGroup; +import com.google.cloud.dataplex.v1.EntryGroupName; +import com.google.cloud.dataplex.v1.EntryName; +import com.google.cloud.dataplex.v1.EntrySource; +import com.google.cloud.dataplex.v1.EntryType; +import com.google.cloud.dataplex.v1.EntryView; +import com.google.cloud.dataplex.v1.GetEntryRequest; +import com.google.cloud.dataplex.v1.LocationName; +import com.google.cloud.dataplex.v1.SearchEntriesRequest; +import com.google.cloud.dataplex.v1.SearchEntriesResult; +import com.google.protobuf.Struct; +import com.google.protobuf.Value; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; + +public class Quickstart { + + public static void main(String[] args) { + // TODO(developer): Replace these variables before running the sample. + String projectId = "MY_PROJECT_ID"; + // Available locations: https://cloud.google.com/dataplex/docs/locations + String location = "MY_LOCATION"; + // Variables below can be replaced with custom values or defaults can be kept + String aspectTypeId = "dataplex-quickstart-aspect-type"; + String entryTypeId = "dataplex-quickstart-entry-type"; + String entryGroupId = "dataplex-quickstart-entry-group"; + String entryId = "dataplex-quickstart-entry"; + + quickstart(projectId, location, aspectTypeId, entryTypeId, entryGroupId, entryId); + } + + // Method to demonstrate lifecycle of different Dataplex resources and their interactions. + // Method creates Aspect Type, Entry Type, Entry Group and Entry, retrieves Entry + // and cleans up created resources. + public static void quickstart( + String projectId, + String location, + String aspectTypeId, + String entryTypeId, + String entryGroupId, + String entryId) { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (CatalogServiceClient client = CatalogServiceClient.create()) { + // 0) Prepare variables used in following steps + LocationName globalLocationName = LocationName.of(projectId, "global"); + LocationName specificLocationName = LocationName.of(projectId, location); + + // 1) Create Aspect Type that will be attached to Entry Type + AspectType.MetadataTemplate aspectField = + AspectType.MetadataTemplate.newBuilder() + // The name must follow regex ^(([a-zA-Z]{1})([\\w\\-_]{0,62}))$ + // That means name must only contain alphanumeric character or dashes or underscores, + // start with an alphabet, and must be less than 63 characters. 
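+ // For example, "example_field" and "exampleField2" are valid names, while "2field" and "my.field" are not.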
+ .setName("example_field") + // Metadata Template is recursive structure, + // primitive types such as "string" or "integer" indicate leaf node, + // complex types such as "record" or "array" would require nested Metadata Template + .setType("string") + .setIndex(1) + .setAnnotations( + AspectType.MetadataTemplate.Annotations.newBuilder() + .setDescription("example field to be filled during entry creation") + .build()) + .setConstraints( + AspectType.MetadataTemplate.Constraints.newBuilder() + // Specifies if field will be required in Aspect Type. + .setRequired(true) + .build()) + .build(); + AspectType aspectType = + AspectType.newBuilder() + .setDescription("aspect type for dataplex quickstart") + .setMetadataTemplate( + AspectType.MetadataTemplate.newBuilder() + .setName("example_template") + .setType("record") + // Aspect Type fields, that themselves are Metadata Templates + .addAllRecordFields(List.of(aspectField)) + .build()) + .build(); + AspectType createdAspectType = + client + .createAspectTypeAsync( + // Aspect Type is created in "global" location to highlight, that resources from + // "global" region can be attached to Entry created in specific location + globalLocationName, aspectType, aspectTypeId) + .get(); + System.out.println("Step 1: Created aspect type -> " + createdAspectType.getName()); + + // 2) Create Entry Type, of which type Entry will be created + EntryType entryType = + EntryType.newBuilder() + .setDescription("entry type for dataplex quickstart") + .addRequiredAspects( + EntryType.AspectInfo.newBuilder() + // Aspect Type created in step 1 + .setType( + String.format( + "projects/%s/locations/global/aspectTypes/%s", + projectId, aspectTypeId)) + .build()) + .build(); + EntryType createdEntryType = + client + // Entry Type is created in "global" location to highlight, that resources from + // "global" region can be attached to Entry created in specific location + .createEntryTypeAsync(globalLocationName, entryType, entryTypeId) + .get(); + System.out.println("Step 2: Created entry type -> " + createdEntryType.getName()); + + // 3) Create Entry Group in which Entry will be located + EntryGroup entryGroup = + EntryGroup.newBuilder().setDescription("entry group for dataplex quickstart").build(); + EntryGroup createdEntryGroup = + client + // Entry Group is created for specific location + .createEntryGroupAsync(specificLocationName, entryGroup, entryGroupId) + .get(); + System.out.println("Step 3: Created entry group -> " + createdEntryGroup.getName()); + + // 4) Create Entry + // Wait 30 seconds to allow previously created resources to propagate + Thread.sleep(30000); + String aspectKey = String.format("%s.global.%s", projectId, aspectTypeId); + Entry entry = + Entry.newBuilder() + .setEntryType( + // Entry is an instance of Entry Type created in step 2 + String.format( + "projects/%s/locations/global/entryTypes/%s", projectId, entryTypeId)) + .setEntrySource( + EntrySource.newBuilder().setDescription("entry for dataplex quickstart").build()) + .putAllAspects( + Map.of( + // Attach Aspect that is an instance of Aspect Type created in step 1 + aspectKey, + Aspect.newBuilder() + .setAspectType( + String.format( + "projects/%s/locations/global/aspectTypes/%s", + projectId, aspectTypeId)) + .setData( + Struct.newBuilder() + .putFields( + "example_field", + Value.newBuilder() + .setStringValue("example value for the field") + .build()) + .build()) + .build())) + .build(); + Entry createdEntry = + client.createEntry( + // Entry is created in specific location, but it is 
still possible to link it with + // resources (Aspect Type and Entry Type) from "global" location + EntryGroupName.of(projectId, location, entryGroupId), entry, entryId); + System.out.println("Step 4: Created entry -> " + createdEntry.getName()); + + // 5) Retrieve created Entry + GetEntryRequest getEntryRequest = + GetEntryRequest.newBuilder() + .setName(EntryName.of(projectId, location, entryGroupId, entryId).toString()) + .setView(EntryView.FULL) + .build(); + Entry retrievedEntry = client.getEntry(getEntryRequest); + System.out.println("Step 5: Retrieved entry -> " + retrievedEntry.getName()); + retrievedEntry + .getAspectsMap() + .values() + .forEach( + retrievedAspect -> { + System.out.println("Retrieved aspect for entry:"); + System.out.println(" * aspect type -> " + retrievedAspect.getAspectType()); + System.out.println( + " * aspect field value -> " + + retrievedAspect + .getData() + .getFieldsMap() + .get("example_field") + .getStringValue()); + }); + + // 6) Use Search capabilities to find Entry + // Wait 30 seconds to allow resources to propagate to Search + System.out.println("Step 6: Waiting for resources to propagate to Search..."); + Thread.sleep(30000); + SearchEntriesRequest searchEntriesRequest = + SearchEntriesRequest.newBuilder() + .setName(globalLocationName.toString()) + .setQuery("name:dataplex-quickstart-entry") + .build(); + CatalogServiceClient.SearchEntriesPagedResponse searchEntriesResponse = + client.searchEntries(searchEntriesRequest); + List<Entry> entriesFromSearch = + searchEntriesResponse.getPage().getResponse().getResultsList().stream() + .map(SearchEntriesResult::getDataplexEntry) + .collect(Collectors.toList()); + System.out.println("Entries found in Search:"); + // Note that the Entry Group and Entry Type are also represented as Entries in the output + entriesFromSearch.forEach( + entryFromSearch -> System.out.println(" * " + entryFromSearch.getName())); + + // 7) Clean created resources + client + .deleteEntryGroupAsync( + String.format( + "projects/%s/locations/%s/entryGroups/%s", projectId, location, entryGroupId)) + .get(); + client + .deleteEntryTypeAsync( + String.format("projects/%s/locations/global/entryTypes/%s", projectId, entryTypeId)) + .get(); + client + .deleteAspectTypeAsync( + String.format("projects/%s/locations/global/aspectTypes/%s", projectId, aspectTypeId)) + .get(); + System.out.println("Step 7: Successfully cleaned up resources"); + + } catch (IOException | InterruptedException | ExecutionException e) { + System.err.println("Error during quickstart execution: " + e); + } + } +} +// [END dataplex_quickstart] diff --git a/dataplex/quickstart/src/test/java/dataplex/QuickstartIT.java b/dataplex/quickstart/src/test/java/dataplex/QuickstartIT.java new file mode 100644 index 00000000000..62330c98eca --- /dev/null +++ b/dataplex/quickstart/src/test/java/dataplex/QuickstartIT.java @@ -0,0 +1,129 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package dataplex; + +import static com.google.common.truth.Truth.assertThat; +import static junit.framework.TestCase.assertNotNull; + +import com.google.cloud.dataplex.v1.CatalogServiceClient; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.PrintStream; +import java.util.List; +import java.util.UUID; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +public class QuickstartIT { + private static final String ID = UUID.randomUUID().toString().substring(0, 8); + private static final String LOCATION = "us-central1"; + private static final String PROJECT_ID = requireProjectIdEnvVar(); + private static ByteArrayOutputStream bout; + private static PrintStream originalPrintStream; + private static final String ASPECT_TYPE_ID = "quickstart-aspect-type-" + ID; + private static final String ENTRY_TYPE_ID = "quickstart-entry-type-" + ID; + private static final String ENTRY_GROUP_ID = "quickstart-entry-group-" + ID; + private static final String ENTRY_ID = "quickstart-entry-" + ID; + + private static String requireProjectIdEnvVar() { + String value = System.getenv("GOOGLE_CLOUD_PROJECT"); + assertNotNull( + "Environment variable GOOGLE_CLOUD_PROJECT is required to perform these tests.", value); + return value; + } + + private static void forceCleanResources() throws IOException { + try (CatalogServiceClient client = CatalogServiceClient.create()) { + try { + client + .deleteEntryGroupAsync( + String.format( + "projects/%s/locations/%s/entryGroups/%s", + PROJECT_ID, LOCATION, ENTRY_GROUP_ID)) + .get(); + } catch (Exception e) { + // Pass, no resource to delete + } + try { + client + .deleteEntryTypeAsync( + String.format( + "projects/%s/locations/global/entryTypes/%s", PROJECT_ID, ENTRY_TYPE_ID)) + .get(); + } catch (Exception e) { + // Pass, no resource to delete + } + try { + client + .deleteAspectTypeAsync( + String.format( + "projects/%s/locations/global/aspectTypes/%s", PROJECT_ID, ASPECT_TYPE_ID)) + .get(); + } catch (Exception e) { + // Pass, no resource to delete + } + } + } + + @BeforeClass + public static void setUp() { + requireProjectIdEnvVar(); + // Re-direct print stream to capture logging + bout = new ByteArrayOutputStream(); + originalPrintStream = System.out; + System.setOut(new PrintStream(bout)); + } + + @Test + public void testQuickstart() { + List<String> expectedLogs = + List.of( + String.format( + "Step 1: Created aspect type -> projects/%s/locations/global/aspectTypes/%s", + PROJECT_ID, ASPECT_TYPE_ID), + String.format( + "Step 2: Created entry type -> projects/%s/locations/global/entryTypes/%s", + PROJECT_ID, ENTRY_TYPE_ID), + String.format( + "Step 3: Created entry group -> projects/%s/locations/%s/entryGroups/%s", + PROJECT_ID, LOCATION, ENTRY_GROUP_ID), + String.format( + "Step 4: Created entry -> projects/%s/locations/%s/entryGroups/%s/entries/%s", + PROJECT_ID, LOCATION, ENTRY_GROUP_ID, ENTRY_ID), + String.format( + "Step 5: Retrieved entry -> projects/%s/locations/%s/entryGroups/%s/entries/%s", + PROJECT_ID, LOCATION, ENTRY_GROUP_ID, ENTRY_ID), + // Step 6 - result from Search + "Entries found in Search:", + "Step 7: Successfully cleaned up resources"); + + Quickstart.quickstart( + PROJECT_ID, LOCATION, ASPECT_TYPE_ID, ENTRY_TYPE_ID, ENTRY_GROUP_ID, ENTRY_ID); + String output = bout.toString(); + + expectedLogs.forEach(expectedLog -> assertThat(output).contains(expectedLog)); + } + + @AfterClass + public static void tearDown() throws IOException { + forceCleanResources(); + // Restore print statements 
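+ // (System.out was redirected to an in-memory buffer in setUp; restore the original stream here.)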
+ System.setOut(originalPrintStream); + bout.reset(); + } +} diff --git a/dataplex/snippets/src/main/java/dataplex/CreateAspectType.java b/dataplex/snippets/src/main/java/dataplex/CreateAspectType.java index b8fa5ad18da..5ef598a69b5 100644 --- a/dataplex/snippets/src/main/java/dataplex/CreateAspectType.java +++ b/dataplex/snippets/src/main/java/dataplex/CreateAspectType.java @@ -22,7 +22,6 @@ import com.google.cloud.dataplex.v1.LocationName; import java.util.List; -// Sample to create Aspect Type public class CreateAspectType { public static void main(String[] args) throws Exception { @@ -59,33 +58,32 @@ public static void main(String[] args) throws Exception { System.out.println("Successfully created aspect type: " + createdAspectType.getName()); } + // Method to create Aspect Type located in projectId, location and with aspectTypeId and + // aspectFields specifying schema of the Aspect Type public static AspectType createAspectType( String projectId, String location, String aspectTypeId, List<AspectType.MetadataTemplate> aspectFields) throws Exception { - LocationName locationName = LocationName.of(projectId, location); - AspectType aspectType = - AspectType.newBuilder() - .setDescription("description of the aspect type") - .setMetadataTemplate( - AspectType.MetadataTemplate.newBuilder() - // The name must follow regex ^(([a-zA-Z]{1})([\\w\\-_]{0,62}))$ - // That means name must only contain alphanumeric character or dashes or - // underscores, start with an alphabet, and must be less than 63 characters. - .setName("name_of_the_template") - .setType("record") - // Aspect Type fields, that themselves are Metadata Templates - .addAllRecordFields(aspectFields) - .build()) - .build(); - // Initialize client that will be used to send requests. This client only needs to be created - once, and can be reused for multiple requests. After completing all of your requests, call - the "close" method on the client to safely clean up any remaining background resources, - or use "try-with-close" statement to do this automatically. + once, and can be reused for multiple requests. try (CatalogServiceClient client = CatalogServiceClient.create()) { + LocationName locationName = LocationName.of(projectId, location); + AspectType aspectType = + AspectType.newBuilder() + .setDescription("description of the aspect type") + .setMetadataTemplate( + AspectType.MetadataTemplate.newBuilder() + // The name must follow regex ^(([a-zA-Z]{1})([\\w\\-_]{0,62}))$ + // That means name must only contain alphanumeric character or dashes or + // underscores, start with an alphabet, and must be less than 63 characters. + .setName("name_of_the_template") + .setType("record") + // Aspect Type fields, that themselves are Metadata Templates + .addAllRecordFields(aspectFields) + .build()) + .build(); return client.createAspectTypeAsync(locationName, aspectType, aspectTypeId).get(); } } diff --git a/dataplex/snippets/src/main/java/dataplex/CreateEntry.java b/dataplex/snippets/src/main/java/dataplex/CreateEntry.java index 63607563497..b4d1a7a7fbe 100644 --- a/dataplex/snippets/src/main/java/dataplex/CreateEntry.java +++ b/dataplex/snippets/src/main/java/dataplex/CreateEntry.java @@ -31,6 +31,7 @@ public class CreateEntry { public static void main(String[] args) throws Exception { // TODO(developer): Replace these variables before running the sample. 
String projectId = "MY_PROJECT_ID"; + // Available locations: https://cloud.google.com/dataplex/docs/locations String location = "MY_LOCATION"; String entryGroupId = "MY_ENTRY_GROUP_ID"; String entryId = "MY_ENTRY_ID"; diff --git a/dataplex/snippets/src/main/java/dataplex/CreateEntryGroup.java b/dataplex/snippets/src/main/java/dataplex/CreateEntryGroup.java index 20385e52d0d..3df7feeb515 100644 --- a/dataplex/snippets/src/main/java/dataplex/CreateEntryGroup.java +++ b/dataplex/snippets/src/main/java/dataplex/CreateEntryGroup.java @@ -21,7 +21,6 @@ import com.google.cloud.dataplex.v1.EntryGroup; import com.google.cloud.dataplex.v1.LocationName; -// Samples to create Entry Group public class CreateEntryGroup { public static void main(String[] args) throws Exception { @@ -35,17 +34,15 @@ public static void main(String[] args) throws Exception { System.out.println("Successfully created entry group: " + createdEntryGroup.getName()); } + // Method to create Entry Group located in projectId, location and with entryGroupId public static EntryGroup createEntryGroup(String projectId, String location, String entryGroupId) throws Exception { - LocationName locationName = LocationName.of(projectId, location); - EntryGroup entryGroup = - EntryGroup.newBuilder().setDescription("description of the entry group").build(); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. try (CatalogServiceClient client = CatalogServiceClient.create()) { + LocationName locationName = LocationName.of(projectId, location); + EntryGroup entryGroup = + EntryGroup.newBuilder().setDescription("description of the entry group").build(); return client.createEntryGroupAsync(locationName, entryGroup, entryGroupId).get(); } } diff --git a/dataplex/snippets/src/main/java/dataplex/CreateEntryType.java b/dataplex/snippets/src/main/java/dataplex/CreateEntryType.java index 4b35f120c3b..190e35d8f32 100644 --- a/dataplex/snippets/src/main/java/dataplex/CreateEntryType.java +++ b/dataplex/snippets/src/main/java/dataplex/CreateEntryType.java @@ -21,7 +21,6 @@ import com.google.cloud.dataplex.v1.EntryType; import com.google.cloud.dataplex.v1.LocationName; -// Samples to create Entry Type public class CreateEntryType { public static void main(String[] args) throws Exception { @@ -35,27 +34,26 @@ public static void main(String[] args) throws Exception { System.out.println("Successfully created entry type: " + createdEntryType.getName()); } + // Method to create Entry Type located in projectId, location and with entryTypeId public static EntryType createEntryType(String projectId, String location, String entryTypeId) throws Exception { - LocationName locationName = LocationName.of(projectId, location); - EntryType entryType = - EntryType.newBuilder() - .setDescription("description of the entry type") - // Required aspects will need to be attached to every entry created for this entry type. - // You cannot change required aspects for entry type once it is created. - .addRequiredAspects( - EntryType.AspectInfo.newBuilder() - // Example of system aspect type. - // It is also possible to specify custom aspect type. 
- .setType("projects/dataplex-types/locations/global/aspectTypes/schema") - .build()) - .build(); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. try (CatalogServiceClient client = CatalogServiceClient.create()) { + LocationName locationName = LocationName.of(projectId, location); + EntryType entryType = + EntryType.newBuilder() + .setDescription("description of the entry type") + // Required aspects will need to be attached to every entry created for this entry + // type. + // You cannot change required aspects for entry type once it is created. + .addRequiredAspects( + EntryType.AspectInfo.newBuilder() + // Example of system aspect type. + // It is also possible to specify custom aspect type. + .setType("projects/dataplex-types/locations/global/aspectTypes/schema") + .build()) + .build(); return client.createEntryTypeAsync(locationName, entryType, entryTypeId).get(); } } diff --git a/dataplex/snippets/src/main/java/dataplex/DeleteAspectType.java b/dataplex/snippets/src/main/java/dataplex/DeleteAspectType.java index a329d4e6b3d..37be0713bb1 100644 --- a/dataplex/snippets/src/main/java/dataplex/DeleteAspectType.java +++ b/dataplex/snippets/src/main/java/dataplex/DeleteAspectType.java @@ -20,7 +20,6 @@ import com.google.cloud.dataplex.v1.AspectTypeName; import com.google.cloud.dataplex.v1.CatalogServiceClient; -// Sample to delete Aspect Type public class DeleteAspectType { public static void main(String[] args) throws Exception { @@ -34,15 +33,13 @@ public static void main(String[] args) throws Exception { System.out.println("Successfully deleted aspect type"); } + // Method to delete Aspect Type located in projectId, location and with aspectTypeId public static void deleteAspectType(String projectId, String location, String aspectTypeId) throws Exception { - AspectTypeName aspectTypeName = AspectTypeName.of(projectId, location, aspectTypeId); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. try (CatalogServiceClient client = CatalogServiceClient.create()) { + AspectTypeName aspectTypeName = AspectTypeName.of(projectId, location, aspectTypeId); client.deleteAspectTypeAsync(aspectTypeName).get(); } } diff --git a/dataplex/snippets/src/main/java/dataplex/DeleteEntry.java b/dataplex/snippets/src/main/java/dataplex/DeleteEntry.java index 1f9acc8a833..7e8467324e5 100644 --- a/dataplex/snippets/src/main/java/dataplex/DeleteEntry.java +++ b/dataplex/snippets/src/main/java/dataplex/DeleteEntry.java @@ -25,6 +25,7 @@ public class DeleteEntry { public static void main(String[] args) throws Exception { // TODO(developer): Replace these variables before running the sample. 
String projectId = "MY_PROJECT_ID"; + // Available locations: https://cloud.google.com/dataplex/docs/locations String location = "MY_LOCATION"; String entryGroupId = "MY_ENTRY_GROUP_ID"; String entryId = "MY_ENTRY_ID"; diff --git a/dataplex/snippets/src/main/java/dataplex/DeleteEntryGroup.java b/dataplex/snippets/src/main/java/dataplex/DeleteEntryGroup.java index da4445edecd..6a7935b7b18 100644 --- a/dataplex/snippets/src/main/java/dataplex/DeleteEntryGroup.java +++ b/dataplex/snippets/src/main/java/dataplex/DeleteEntryGroup.java @@ -20,7 +20,6 @@ import com.google.cloud.dataplex.v1.CatalogServiceClient; import com.google.cloud.dataplex.v1.EntryGroupName; -// Sample to delete Entry Group public class DeleteEntryGroup { public static void main(String[] args) throws Exception { @@ -34,15 +33,13 @@ public static void main(String[] args) throws Exception { System.out.println("Successfully deleted entry group"); } + // Method to delete Entry Group located in projectId, location and with entryGroupId public static void deleteEntryGroup(String projectId, String location, String entryGroupId) throws Exception { - EntryGroupName entryGroupName = EntryGroupName.of(projectId, location, entryGroupId); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. try (CatalogServiceClient client = CatalogServiceClient.create()) { + EntryGroupName entryGroupName = EntryGroupName.of(projectId, location, entryGroupId); client.deleteEntryGroupAsync(entryGroupName).get(); } } diff --git a/dataplex/snippets/src/main/java/dataplex/DeleteEntryType.java b/dataplex/snippets/src/main/java/dataplex/DeleteEntryType.java index 8662af2d31e..2c2fc66b91d 100644 --- a/dataplex/snippets/src/main/java/dataplex/DeleteEntryType.java +++ b/dataplex/snippets/src/main/java/dataplex/DeleteEntryType.java @@ -20,7 +20,6 @@ import com.google.cloud.dataplex.v1.CatalogServiceClient; import com.google.cloud.dataplex.v1.EntryTypeName; -// Sample to delete Entry Type public class DeleteEntryType { public static void main(String[] args) throws Exception { @@ -34,15 +33,13 @@ public static void main(String[] args) throws Exception { System.out.println("Successfully deleted entry type"); } + // Method to delete Entry Type located in projectId, location and with entryTypeId public static void deleteEntryType(String projectId, String location, String entryTypeId) throws Exception { - EntryTypeName entryTypeName = EntryTypeName.of(projectId, location, entryTypeId); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. 
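+ // The try-with-resources statement below closes the client automatically.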
try (CatalogServiceClient client = CatalogServiceClient.create()) { + EntryTypeName entryTypeName = EntryTypeName.of(projectId, location, entryTypeId); client.deleteEntryTypeAsync(entryTypeName).get(); } } diff --git a/dataplex/snippets/src/main/java/dataplex/GetAspectType.java b/dataplex/snippets/src/main/java/dataplex/GetAspectType.java index 33c14191f88..92e21ea1e4d 100644 --- a/dataplex/snippets/src/main/java/dataplex/GetAspectType.java +++ b/dataplex/snippets/src/main/java/dataplex/GetAspectType.java @@ -22,7 +22,6 @@ import com.google.cloud.dataplex.v1.CatalogServiceClient; import java.io.IOException; -// Sample to get Aspect Type public class GetAspectType { public static void main(String[] args) throws IOException { @@ -36,15 +35,13 @@ public static void main(String[] args) throws IOException { System.out.println("Aspect type retrieved successfully: " + aspectType.getName()); } + // Method to retrieve Aspect Type located in projectId, location and with aspectTypeId public static AspectType getAspectType(String projectId, String location, String aspectTypeId) throws IOException { - AspectTypeName aspectTypeName = AspectTypeName.of(projectId, location, aspectTypeId); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. try (CatalogServiceClient client = CatalogServiceClient.create()) { + AspectTypeName aspectTypeName = AspectTypeName.of(projectId, location, aspectTypeId); return client.getAspectType(aspectTypeName); } } diff --git a/dataplex/snippets/src/main/java/dataplex/GetEntry.java b/dataplex/snippets/src/main/java/dataplex/GetEntry.java index 73773bf93bc..e1580f17a19 100644 --- a/dataplex/snippets/src/main/java/dataplex/GetEntry.java +++ b/dataplex/snippets/src/main/java/dataplex/GetEntry.java @@ -29,6 +29,7 @@ public class GetEntry { public static void main(String[] args) throws IOException { // TODO(developer): Replace these variables before running the sample. String projectId = "MY_PROJECT_ID"; + // Available locations: https://cloud.google.com/dataplex/docs/locations String location = "MY_LOCATION"; String entryGroupId = "MY_ENTRY_GROUP_ID"; String entryId = "MY_ENTRY_ID"; diff --git a/dataplex/snippets/src/main/java/dataplex/GetEntryGroup.java b/dataplex/snippets/src/main/java/dataplex/GetEntryGroup.java index d3073d4615a..eef9d7a9e76 100644 --- a/dataplex/snippets/src/main/java/dataplex/GetEntryGroup.java +++ b/dataplex/snippets/src/main/java/dataplex/GetEntryGroup.java @@ -22,7 +22,6 @@ import com.google.cloud.dataplex.v1.EntryGroupName; import java.io.IOException; -// Sample to get Entry Group public class GetEntryGroup { public static void main(String[] args) throws IOException { @@ -36,15 +35,13 @@ public static void main(String[] args) throws IOException { System.out.println("Entry group retrieved successfully: " + entryGroup.getName()); } + // Method to retrieve Entry Group located in projectId, location and with entryGroupId public static EntryGroup getEntryGroup(String projectId, String location, String entryGroupId) throws IOException { - EntryGroupName entryGroupName = EntryGroupName.of(projectId, location, entryGroupId); - // Initialize client that will be used to send requests. 
This client only needs to be created - once, and can be reused for multiple requests. After completing all of your requests, call - the "close" method on the client to safely clean up any remaining background resources, - or use "try-with-close" statement to do this automatically. + once, and can be reused for multiple requests. try (CatalogServiceClient client = CatalogServiceClient.create()) { + EntryGroupName entryGroupName = EntryGroupName.of(projectId, location, entryGroupId); return client.getEntryGroup(entryGroupName); } } diff --git a/dataplex/snippets/src/main/java/dataplex/GetEntryType.java b/dataplex/snippets/src/main/java/dataplex/GetEntryType.java index 36ed5038709..87cf18ef423 100644 --- a/dataplex/snippets/src/main/java/dataplex/GetEntryType.java +++ b/dataplex/snippets/src/main/java/dataplex/GetEntryType.java @@ -22,7 +22,6 @@ import com.google.cloud.dataplex.v1.EntryTypeName; import java.io.IOException; -// Sample to get Entry Type public class GetEntryType { public static void main(String[] args) throws IOException { @@ -36,15 +35,13 @@ public static void main(String[] args) throws IOException { System.out.println("Entry type retrieved successfully: " + entryType.getName()); } + // Method to retrieve Entry Type located in projectId, location and with entryTypeId public static EntryType getEntryType(String projectId, String location, String entryTypeId) throws IOException { - EntryTypeName entryTypeName = EntryTypeName.of(projectId, location, entryTypeId); - // Initialize client that will be used to send requests. This client only needs to be created - once, and can be reused for multiple requests. After completing all of your requests, call - the "close" method on the client to safely clean up any remaining background resources, - or use "try-with-close" statement to do this automatically. + once, and can be reused for multiple requests. try (CatalogServiceClient client = CatalogServiceClient.create()) { + EntryTypeName entryTypeName = EntryTypeName.of(projectId, location, entryTypeId); return client.getEntryType(entryTypeName); } } diff --git a/dataplex/snippets/src/main/java/dataplex/ListAspectTypes.java b/dataplex/snippets/src/main/java/dataplex/ListAspectTypes.java index 2f6b0181aa4..73b9dbab517 100644 --- a/dataplex/snippets/src/main/java/dataplex/ListAspectTypes.java +++ b/dataplex/snippets/src/main/java/dataplex/ListAspectTypes.java @@ -24,7 +24,6 @@ import java.io.IOException; import java.util.List; -// Sample to list Aspect Types public class ListAspectTypes { public static void main(String[] args) throws IOException { @@ -38,15 +37,13 @@ public static void main(String[] args) throws IOException { aspectType -> System.out.println("Aspect type name: " + aspectType.getName())); } + // Method to list Aspect Types located in projectId and location public static List<AspectType> listAspectTypes(String projectId, String location) throws IOException { - LocationName locationName = LocationName.of(projectId, location); - // Initialize client that will be used to send requests. This client only needs to be created - once, and can be reused for multiple requests. After completing all of your requests, call - the "close" method on the client to safely clean up any remaining background resources, - or use "try-with-close" statement to do this automatically. + once, and can be reused for multiple requests. 
try (CatalogServiceClient client = CatalogServiceClient.create()) { + LocationName locationName = LocationName.of(projectId, location); CatalogServiceClient.ListAspectTypesPagedResponse listAspectTypesResponse = client.listAspectTypes(locationName); // Paging is implicitly handled by .iterateAll(), all results will be returned diff --git a/dataplex/snippets/src/main/java/dataplex/ListEntries.java b/dataplex/snippets/src/main/java/dataplex/ListEntries.java index be54b674563..ec564c12fc3 100644 --- a/dataplex/snippets/src/main/java/dataplex/ListEntries.java +++ b/dataplex/snippets/src/main/java/dataplex/ListEntries.java @@ -30,6 +30,7 @@ public class ListEntries { public static void main(String[] args) throws IOException { // TODO(developer): Replace these variables before running the sample. String projectId = "MY_PROJECT_ID"; + // Available locations: https://cloud.google.com/dataplex/docs/locations String location = "MY_LOCATION"; String entryGroupId = "MY_ENTRY_GROUP_ID"; diff --git a/dataplex/snippets/src/main/java/dataplex/ListEntryGroups.java b/dataplex/snippets/src/main/java/dataplex/ListEntryGroups.java index e66c9043052..b30422f3805 100644 --- a/dataplex/snippets/src/main/java/dataplex/ListEntryGroups.java +++ b/dataplex/snippets/src/main/java/dataplex/ListEntryGroups.java @@ -24,7 +24,6 @@ import java.io.IOException; import java.util.List; -// Sample to list Entry Groups public class ListEntryGroups { public static void main(String[] args) throws IOException { @@ -38,15 +37,13 @@ public static void main(String[] args) throws IOException { entryGroup -> System.out.println("Entry group name: " + entryGroup.getName())); } + // Method to list Entry Groups located in projectId and location public static List<EntryGroup> listEntryGroups(String projectId, String location) throws IOException { - LocationName locationName = LocationName.of(projectId, location); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests.
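+ // LocationName renders as "projects/{projectId}/locations/{location}".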
try (CatalogServiceClient client = CatalogServiceClient.create()) { + LocationName locationName = LocationName.of(projectId, location); CatalogServiceClient.ListEntryGroupsPagedResponse listEntryGroupsResponse = client.listEntryGroups(locationName); // Paging is implicitly handled by .iterateAll(), all results will be returned diff --git a/dataplex/snippets/src/main/java/dataplex/ListEntryTypes.java b/dataplex/snippets/src/main/java/dataplex/ListEntryTypes.java index 30bb5de05c2..35eeefb3ac3 100644 --- a/dataplex/snippets/src/main/java/dataplex/ListEntryTypes.java +++ b/dataplex/snippets/src/main/java/dataplex/ListEntryTypes.java @@ -24,7 +24,6 @@ import java.io.IOException; import java.util.List; -// Sample to list Entry Types public class ListEntryTypes { public static void main(String[] args) throws IOException { @@ -37,15 +36,13 @@ public static void main(String[] args) throws IOException { entryTypes.forEach(entryType -> System.out.println("Entry type name: " + entryType.getName())); } + // Method to list Entry Types located in projectId and location public static List<EntryType> listEntryTypes(String projectId, String location) throws IOException { - LocationName locationName = LocationName.of(projectId, location); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. try (CatalogServiceClient client = CatalogServiceClient.create()) { + LocationName locationName = LocationName.of(projectId, location); CatalogServiceClient.ListEntryTypesPagedResponse listEntryTypesResponse = client.listEntryTypes(locationName); // Paging is implicitly handled by .iterateAll(), all results will be returned diff --git a/dataplex/snippets/src/main/java/dataplex/LookupEntry.java b/dataplex/snippets/src/main/java/dataplex/LookupEntry.java index e4f19c79920..f32774d12d4 100644 --- a/dataplex/snippets/src/main/java/dataplex/LookupEntry.java +++ b/dataplex/snippets/src/main/java/dataplex/LookupEntry.java @@ -29,6 +29,7 @@ public class LookupEntry { public static void main(String[] args) throws IOException { // TODO(developer): Replace these variables before running the sample. String projectId = "MY_PROJECT_ID"; + // Available locations: https://cloud.google.com/dataplex/docs/locations String location = "MY_LOCATION"; String entryGroupId = "MY_ENTRY_GROUP_ID"; String entryId = "MY_ENTRY_ID"; diff --git a/dataplex/snippets/src/main/java/dataplex/SearchEntries.java b/dataplex/snippets/src/main/java/dataplex/SearchEntries.java new file mode 100644 index 00000000000..25706176380 --- /dev/null +++ b/dataplex/snippets/src/main/java/dataplex/SearchEntries.java @@ -0,0 +1,65 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package dataplex; + +// [START dataplex_search_entries] +import com.google.cloud.dataplex.v1.CatalogServiceClient; +import com.google.cloud.dataplex.v1.Entry; +import com.google.cloud.dataplex.v1.SearchEntriesRequest; +import com.google.cloud.dataplex.v1.SearchEntriesResult; +import java.io.IOException; +import java.util.List; +import java.util.stream.Collectors; + +public class SearchEntries { + + public static void main(String[] args) throws IOException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "MY_PROJECT_ID"; + // How to write query for search: https://cloud.google.com/dataplex/docs/search-syntax + String query = "MY_QUERY"; + + List<Entry> entries = searchEntries(projectId, query); + entries.forEach(entry -> System.out.println("Entry name found in search: " + entry.getName())); + } + + // Method to search Entries located in projectId and matching query + public static List<Entry> searchEntries(String projectId, String query) throws IOException { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (CatalogServiceClient client = CatalogServiceClient.create()) { + SearchEntriesRequest searchEntriesRequest = + SearchEntriesRequest.newBuilder() + .setPageSize(100) + // Required field. By default, the search scope is limited to the organization under + // which the project is located + .setName(String.format("projects/%s/locations/global", projectId)) + // Optional field. Further limits the search scope to the specified project + .setScope(String.format("projects/%s", projectId)) + .setQuery(query) + .build(); + + CatalogServiceClient.SearchEntriesPagedResponse searchEntriesResponse = + client.searchEntries(searchEntriesRequest); + // Note that this returns only the first page of results (at most pageSize entries). + return searchEntriesResponse.getPage().getResponse().getResultsList().stream() + // Extract Entries nested inside search results + .map(SearchEntriesResult::getDataplexEntry) + .collect(Collectors.toList()); + } + } +} +// [END dataplex_search_entries] diff --git a/dataplex/snippets/src/main/java/dataplex/UpdateAspectType.java b/dataplex/snippets/src/main/java/dataplex/UpdateAspectType.java index cb0c9b1b4d7..49572df174e 100644 --- a/dataplex/snippets/src/main/java/dataplex/UpdateAspectType.java +++ b/dataplex/snippets/src/main/java/dataplex/UpdateAspectType.java @@ -23,7 +23,6 @@ import com.google.protobuf.FieldMask; import java.util.List; -// Sample to update Aspect Type public class UpdateAspectType { public static void main(String[] args) throws Exception { @@ -60,39 +59,37 @@ public static void main(String[] args) throws Exception { System.out.println("Successfully updated aspect type: " + updatedAspectType.getName()); } + // Method to update Aspect Type located in projectId, location and with aspectTypeId and + // aspectFields specifying schema of the Aspect Type public static AspectType updateAspectType( String projectId, String location, String aspectTypeId, List<AspectType.MetadataTemplate> aspectFields) throws Exception { - AspectType aspectType = - AspectType.newBuilder() - .setName(AspectTypeName.of(projectId, location, aspectTypeId).toString()) - .setDescription("updated description of the aspect type") - .setMetadataTemplate( - AspectType.MetadataTemplate.newBuilder() - // Because Record Fields is an array, it needs to be fully replaced. - // It is because you do not have a way to specify array elements in update mask. - .addAllRecordFields(aspectFields) - .build()) - .build(); - - // Update mask specifies which fields will be updated.
- // If empty mask is given, all modifiable fields from the request will be used for update. - // If update mask is specified as "*" it is treated as full update, - // that means fields not present in the request will be emptied. - FieldMask updateMask = - FieldMask.newBuilder() - .addPaths("description") - .addPaths("metadata_template.record_fields") - .build(); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. try (CatalogServiceClient client = CatalogServiceClient.create()) { + AspectType aspectType = + AspectType.newBuilder() + .setName(AspectTypeName.of(projectId, location, aspectTypeId).toString()) + .setDescription("updated description of the aspect type") + .setMetadataTemplate( + AspectType.MetadataTemplate.newBuilder() + // Because Record Fields is an array, it needs to be fully replaced. + // This is because there is no way to address individual array elements in an + // update mask. + .addAllRecordFields(aspectFields) + .build()) + .build(); + + // Update mask specifies which fields will be updated. + // For more information on update masks, see: https://google.aip.dev/161 + FieldMask updateMask = + FieldMask.newBuilder() + .addPaths("description") + .addPaths("metadata_template.record_fields") + .build(); return client.updateAspectTypeAsync(aspectType, updateMask).get(); } } diff --git a/dataplex/snippets/src/main/java/dataplex/UpdateEntry.java b/dataplex/snippets/src/main/java/dataplex/UpdateEntry.java index 0127c3e4a20..d3cee2cc74f 100644 --- a/dataplex/snippets/src/main/java/dataplex/UpdateEntry.java +++ b/dataplex/snippets/src/main/java/dataplex/UpdateEntry.java @@ -32,6 +32,7 @@ public class UpdateEntry { public static void main(String[] args) throws Exception { // TODO(developer): Replace these variables before running the sample. String projectId = "MY_PROJECT_ID"; + // Available locations: https://cloud.google.com/dataplex/docs/locations String location = "MY_LOCATION"; String entryGroupId = "MY_ENTRY_GROUP_ID"; String entryId = "MY_ENTRY_ID"; diff --git a/dataplex/snippets/src/main/java/dataplex/UpdateEntryGroup.java b/dataplex/snippets/src/main/java/dataplex/UpdateEntryGroup.java index bc0a0c727e6..4bae947e317 100644 --- a/dataplex/snippets/src/main/java/dataplex/UpdateEntryGroup.java +++ b/dataplex/snippets/src/main/java/dataplex/UpdateEntryGroup.java @@ -22,7 +22,6 @@ import com.google.cloud.dataplex.v1.EntryGroupName; import com.google.protobuf.FieldMask; -// Sample to update Entry Group public class UpdateEntryGroup { public static void main(String[] args) throws Exception { @@ -36,25 +35,21 @@ public static void main(String[] args) throws Exception { System.out.println("Successfully updated entry group: " + updatedEntryGroup.getName()); } + // Method to update Entry Group located in projectId, location and with entryGroupId public static EntryGroup updateEntryGroup(String projectId, String location, String entryGroupId) throws Exception { - EntryGroup entryGroup = - EntryGroup.newBuilder() - .setName(EntryGroupName.of(projectId, location, entryGroupId).toString()) - .setDescription("updated description of the entry group") - .build(); - - // Update mask specifies which fields will be updated.
- // If empty mask is given, all modifiable fields from the request will be used for update. - // If update mask is specified as "*" it is treated as full update, - // that means fields not present in the request will be emptied. - FieldMask updateMask = FieldMask.newBuilder().addPaths("description").build(); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. try (CatalogServiceClient client = CatalogServiceClient.create()) { + EntryGroup entryGroup = + EntryGroup.newBuilder() + .setName(EntryGroupName.of(projectId, location, entryGroupId).toString()) + .setDescription("updated description of the entry group") + .build(); + + // Update mask specifies which fields will be updated. + // For more information on update masks, see: https://google.aip.dev/161 + FieldMask updateMask = FieldMask.newBuilder().addPaths("description").build(); return client.updateEntryGroupAsync(entryGroup, updateMask).get(); } } diff --git a/dataplex/snippets/src/main/java/dataplex/UpdateEntryType.java b/dataplex/snippets/src/main/java/dataplex/UpdateEntryType.java index daac40a35f3..d0c3a245077 100644 --- a/dataplex/snippets/src/main/java/dataplex/UpdateEntryType.java +++ b/dataplex/snippets/src/main/java/dataplex/UpdateEntryType.java @@ -22,7 +22,6 @@ import com.google.cloud.dataplex.v1.EntryTypeName; import com.google.protobuf.FieldMask; -// Sample to update Entry Type public class UpdateEntryType { public static void main(String[] args) throws Exception { @@ -36,25 +35,21 @@ public static void main(String[] args) throws Exception { System.out.println("Successfully updated entry type: " + updatedEntryType.getName()); } + // Method to update Entry Type located in projectId, location and with entryTypeId public static EntryType updateEntryType(String projectId, String location, String entryTypeId) throws Exception { - EntryType entryType = - EntryType.newBuilder() - .setName(EntryTypeName.of(projectId, location, entryTypeId).toString()) - .setDescription("updated description of the entry type") - .build(); - - // Update mask specifies which fields will be updated. - // If empty mask is given, all modifiable fields from the request will be used for update. - // If update mask is specified as "*" it is treated as full update, - // that means fields not present in the request will be emptied. - FieldMask updateMask = FieldMask.newBuilder().addPaths("description").build(); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. try (CatalogServiceClient client = CatalogServiceClient.create()) { + EntryType entryType = + EntryType.newBuilder() + .setName(EntryTypeName.of(projectId, location, entryTypeId).toString()) + .setDescription("updated description of the entry type") + .build(); + + // Update mask specifies which fields will be updated. 
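+ // Only the fields listed in the mask are modified; all other fields keep their current values.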
+ // For more information on update masks, see: https://google.aip.dev/161 + FieldMask updateMask = FieldMask.newBuilder().addPaths("description").build(); return client.updateEntryTypeAsync(entryType, updateMask).get(); } } diff --git a/dataplex/snippets/src/test/java/dataplex/AspectTypeIT.java b/dataplex/snippets/src/test/java/dataplex/AspectTypeIT.java index 35f5d4df991..066d43a6b42 100644 --- a/dataplex/snippets/src/test/java/dataplex/AspectTypeIT.java +++ b/dataplex/snippets/src/test/java/dataplex/AspectTypeIT.java @@ -31,7 +31,7 @@ public class AspectTypeIT { private static final String ID = UUID.randomUUID().toString().substring(0, 8); private static final String LOCATION = "us-central1"; - private static final String aspectTypeId = "test-aspect-type" + ID; + private static final String aspectTypeId = "test-aspect-type-" + ID; private static String expectedAspectType; private static final String PROJECT_ID = requireProjectIdEnvVar(); @@ -79,7 +79,8 @@ public void testUpdateAspectType() throws Exception { @Test public void testCreateAspectType() throws Exception { - String aspectTypeIdToCreate = "test-aspect-type" + UUID.randomUUID().toString().substring(0, 8); + String aspectTypeIdToCreate = + "test-aspect-type-" + UUID.randomUUID().toString().substring(0, 8); String expectedAspectTypeToCreate = String.format( "projects/%s/locations/%s/aspectTypes/%s", PROJECT_ID, LOCATION, aspectTypeIdToCreate); @@ -95,7 +96,8 @@ public void testCreateAspectType() throws Exception { @Test public void testDeleteAspectType() throws Exception { - String aspectTypeIdToDelete = "test-aspect-type" + UUID.randomUUID().toString().substring(0, 8); + String aspectTypeIdToDelete = + "test-aspect-type-" + UUID.randomUUID().toString().substring(0, 8); // Create Aspect Type to be deleted CreateAspectType.createAspectType( PROJECT_ID, LOCATION, aspectTypeIdToDelete, new ArrayList<>()); diff --git a/dataplex/snippets/src/test/java/dataplex/EntryGroupIT.java b/dataplex/snippets/src/test/java/dataplex/EntryGroupIT.java index 76456e8edef..8e2608b8c06 100644 --- a/dataplex/snippets/src/test/java/dataplex/EntryGroupIT.java +++ b/dataplex/snippets/src/test/java/dataplex/EntryGroupIT.java @@ -30,7 +30,7 @@ public class EntryGroupIT { private static final String ID = UUID.randomUUID().toString().substring(0, 8); private static final String LOCATION = "us-central1"; - private static final String entryGroupId = "test-entry-group" + ID; + private static final String entryGroupId = "test-entry-group-" + ID; private static String expectedEntryGroup; private static final String PROJECT_ID = requireProjectIdEnvVar(); @@ -77,7 +77,8 @@ public void testUpdateEntryGroup() throws Exception { @Test public void testCreateEntryGroup() throws Exception { - String entryGroupIdToCreate = "test-entry-group" + UUID.randomUUID().toString().substring(0, 8); + String entryGroupIdToCreate = + "test-entry-group-" + UUID.randomUUID().toString().substring(0, 8); String expectedEntryGroupToCreate = String.format( "projects/%s/locations/%s/entryGroups/%s", PROJECT_ID, LOCATION, entryGroupIdToCreate); @@ -92,7 +93,8 @@ public void testCreateEntryGroup() throws Exception { @Test public void testDeleteEntryGroup() throws Exception { - String entryGroupIdToDelete = "test-entry-group" + UUID.randomUUID().toString().substring(0, 8); + String entryGroupIdToDelete = + "test-entry-group-" + UUID.randomUUID().toString().substring(0, 8); // Create Entry Group to be deleted CreateEntryGroup.createEntryGroup(PROJECT_ID, LOCATION, entryGroupIdToDelete); diff 
--git a/dataplex/snippets/src/test/java/dataplex/EntryTypeIT.java b/dataplex/snippets/src/test/java/dataplex/EntryTypeIT.java index e6116d637ff..a410e785c45 100644 --- a/dataplex/snippets/src/test/java/dataplex/EntryTypeIT.java +++ b/dataplex/snippets/src/test/java/dataplex/EntryTypeIT.java @@ -30,7 +30,7 @@ public class EntryTypeIT { private static final String ID = UUID.randomUUID().toString().substring(0, 8); private static final String LOCATION = "us-central1"; - private static final String entryTypeId = "test-entry-type" + ID; + private static final String entryTypeId = "test-entry-type-" + ID; private static String expectedEntryType; private static final String PROJECT_ID = requireProjectIdEnvVar(); @@ -48,6 +48,7 @@ public static void checkRequirements() { } @BeforeClass + // Set-up code that will be executed before all tests public static void setUp() throws Exception { expectedEntryType = String.format("projects/%s/locations/%s/entryTypes/%s", PROJECT_ID, LOCATION, entryTypeId); @@ -75,7 +76,7 @@ public void testUpdateEntryType() throws Exception { @Test public void testCreateEntryType() throws Exception { - String entryTypeIdToCreate = "test-entry-type" + UUID.randomUUID().toString().substring(0, 8); + String entryTypeIdToCreate = "test-entry-type-" + UUID.randomUUID().toString().substring(0, 8); String expectedEntryTypeToCreate = String.format( "projects/%s/locations/%s/entryTypes/%s", PROJECT_ID, LOCATION, entryTypeIdToCreate); @@ -90,8 +91,8 @@ public void testCreateEntryType() throws Exception { @Test public void testDeleteEntryType() throws Exception { - String entryTypeIdToDelete = "test-entry-type" + UUID.randomUUID().toString().substring(0, 8); - // Create Entry Group to be deleted + String entryTypeIdToDelete = "test-entry-type-" + UUID.randomUUID().toString().substring(0, 8); + // Create Entry Type to be deleted CreateEntryType.createEntryType(PROJECT_ID, LOCATION, entryTypeIdToDelete); // No exception means successful call. @@ -99,8 +100,9 @@ public void testDeleteEntryType() throws Exception { } @AfterClass + // Clean-up code that will be executed after all tests public static void tearDown() throws Exception { - // Clean-up Entry Group resource created in setUp() + // Clean-up Entry Type resource created in setUp() DeleteEntryType.deleteEntryType(PROJECT_ID, LOCATION, entryTypeId); } } diff --git a/dataplex/snippets/src/test/java/dataplex/SearchEntriesIT.java b/dataplex/snippets/src/test/java/dataplex/SearchEntriesIT.java new file mode 100644 index 00000000000..2a1d7636dd5 --- /dev/null +++ b/dataplex/snippets/src/test/java/dataplex/SearchEntriesIT.java @@ -0,0 +1,71 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package dataplex; + +import static com.google.common.truth.Truth.assertThat; +import static junit.framework.TestCase.assertNotNull; + +import com.google.cloud.dataplex.v1.Entry; +import java.io.IOException; +import java.util.List; +import java.util.UUID; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +public class SearchEntriesIT { + private static final String ID = UUID.randomUUID().toString().substring(0, 8); + private static final String LOCATION = "us-central1"; + private static final String entryGroupId = "test-entry-group-" + ID; + private static final String entryId = "test-entry-" + ID; + private static final String expectedEntry = + String.format("locations/%s/entryGroups/%s/entries/%s", LOCATION, entryGroupId, entryId); + + private static final String PROJECT_ID = requireProjectIdEnvVar(); + + private static String requireProjectIdEnvVar() { + String value = System.getenv("GOOGLE_CLOUD_PROJECT"); + assertNotNull( + "Environment variable GOOGLE_CLOUD_PROJECT is required to perform these tests.", value); + return value; + } + + @BeforeClass + public static void setUp() throws Exception { + requireProjectIdEnvVar(); + CreateEntryGroup.createEntryGroup(PROJECT_ID, LOCATION, entryGroupId); + CreateEntry.createEntry(PROJECT_ID, LOCATION, entryGroupId, entryId); + // Wait for the newly created resources to propagate to the search index before querying. + Thread.sleep(30000); + } + + @Test + public void testSearchEntries() throws IOException { + String query = "name:test-entry- AND description:description AND aspect:generic"; + List<Entry> entries = SearchEntries.searchEntries(PROJECT_ID, query); + assertThat( + entries.stream() + .map(Entry::getName) + .map(entryName -> entryName.substring(entryName.indexOf("location")))) + .contains(expectedEntry); + } + + @AfterClass + public static void tearDown() throws Exception { + // Entry inside this Entry Group will be deleted automatically + DeleteEntryGroup.deleteEntryGroup(PROJECT_ID, LOCATION, entryGroupId); + } +} diff --git a/document-ai/src/main/java/documentai/v1beta3/ProcessQualityDocument.java b/document-ai/src/main/java/documentai/v1beta3/ProcessQualityDocument.java deleted file mode 100644 index c212777f874..00000000000 --- a/document-ai/src/main/java/documentai/v1beta3/ProcessQualityDocument.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package documentai.v1beta3; - -// [START documentai_process_quality_document] - -import com.google.cloud.documentai.v1beta3.Document; -import com.google.cloud.documentai.v1beta3.DocumentProcessorServiceClient; -import com.google.cloud.documentai.v1beta3.DocumentProcessorServiceSettings; -import com.google.cloud.documentai.v1beta3.ProcessRequest; -import com.google.cloud.documentai.v1beta3.ProcessResponse; -import com.google.cloud.documentai.v1beta3.RawDocument; -import com.google.protobuf.ByteString; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; - -public class ProcessQualityDocument { - public static void processQualityDocument() - throws IOException, InterruptedException, ExecutionException, TimeoutException { - // TODO(developer): Replace these variables before running the sample. - String projectId = "your-project-id"; - String location = "your-project-location"; // Format is "us" or "eu". - String processerId = "your-processor-id"; - String filePath = "path/to/input/file.pdf"; - processQualityDocument(projectId, location, processerId, filePath); - } - - public static void processQualityDocument( - String projectId, String location, String processorId, String filePath) - throws IOException, InterruptedException, ExecutionException, TimeoutException { - // Initialize client that will be used to send requests. This client only needs - // to be created - // once, and can be reused for multiple requests. After completing all of your - // requests, call - // the "close" method on the client to safely clean up any remaining background - // resources. - String endpoint = String.format("%s-documentai.googleapis.com:443", location); - DocumentProcessorServiceSettings settings = - DocumentProcessorServiceSettings.newBuilder().setEndpoint(endpoint).build(); - try (DocumentProcessorServiceClient client = DocumentProcessorServiceClient.create(settings)) { - // The full resource name of the processor, e.g.: - // projects/project-id/locations/location/processor/processor-id - // You must create new processors in the Cloud Console first - String name = - String.format("projects/%s/locations/%s/processors/%s", projectId, location, processorId); - - // Read the file. - byte[] imageFileData = Files.readAllBytes(Paths.get(filePath)); - - // Convert the image data to a Buffer and base64 encode it. - ByteString content = ByteString.copyFrom(imageFileData); - - RawDocument document = - RawDocument.newBuilder().setContent(content).setMimeType("application/pdf").build(); - - // Configure the process request. - ProcessRequest request = - ProcessRequest.newBuilder().setName(name).setRawDocument(document).build(); - - // Recognizes text entities in the PDF document - ProcessResponse result = client.processDocument(request); - Document documentResponse = result.getDocument(); - - System.out.println("Document processing complete."); - - // Read the quality-specific information from the output from the - // Intelligent Document Quality Processor: - // https://cloud.google.com/document-ai/docs/processors-list#processor_doc-quality-processor - // OCR and other data is also present in the quality processor's response. - // Please see the OCR and other samples for how to parse other data in the - // response. 
- List entities = documentResponse.getEntitiesList(); - for (Document.Entity entity : entities) { - float entityConfidence = entity.getConfidence(); - long pageNumber = entity.getPageAnchor().getPageRefs(0).getPage() + 1; - System.out.printf( - "Page %d has a quality score of (%.2f%%):\n", pageNumber, entityConfidence * 100.0); - for (Document.Entity property : entity.getPropertiesList()) { - float propertyConfidence = property.getConfidence(); - String propertyType = property.getType(); - System.out.printf(" * %s score of %.2f%%\n", propertyType, propertyConfidence * 100.0); - } - } - } - } -} -// [END documentai_process_quality_document] diff --git a/document-ai/src/test/java/documentai/v1beta3/ProcessQualityDocumentTest.java b/document-ai/src/test/java/documentai/v1beta3/ProcessQualityDocumentTest.java deleted file mode 100644 index 7379dbf0f30..00000000000 --- a/document-ai/src/test/java/documentai/v1beta3/ProcessQualityDocumentTest.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package documentai.v1beta3; - -import static com.google.common.truth.Truth.assertThat; -import static org.junit.Assert.assertNotNull; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.PrintStream; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -public class ProcessQualityDocumentTest { - private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); - private static final String PROCESSOR_ID = "f80f55e03d4c20ed"; - private static final String FILE_PATH = "resources/document_quality_poor.pdf"; - - private ByteArrayOutputStream bout; - private PrintStream out; - private PrintStream originalPrintStream; - - private static void requireEnvVar(String varName) { - assertNotNull( - String.format("Environment variable '%s' must be set to perform these tests.", varName), - System.getenv(varName)); - } - - @Before - public void checkRequirements() { - requireEnvVar("GOOGLE_CLOUD_PROJECT"); - requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); - } - - @Before - public void setUp() { - bout = new ByteArrayOutputStream(); - out = new PrintStream(bout); - originalPrintStream = System.out; - System.setOut(out); - } - - @Test - public void testProcessQualityDocument() - throws InterruptedException, ExecutionException, IOException, TimeoutException { - // parse the GCS invoice as a form. 
- ProcessQualityDocument.processQualityDocument(PROJECT_ID, "us", PROCESSOR_ID, FILE_PATH); - String got = bout.toString(); - - assertThat(got).contains("Page 1 has a quality score of"); - assertThat(got).contains("defect_blurry score of 9"); - assertThat(got).contains("defect_noisy"); - } - - @After - public void tearDown() { - System.out.flush(); - System.setOut(originalPrintStream); - } -} diff --git a/eventarc/storage-handler/src/main/java/com/example/cloudrun/CloudEventController.java b/eventarc/storage-handler/src/main/java/com/example/cloudrun/CloudEventController.java index 103a232bd7d..dc289f7be0c 100644 --- a/eventarc/storage-handler/src/main/java/com/example/cloudrun/CloudEventController.java +++ b/eventarc/storage-handler/src/main/java/com/example/cloudrun/CloudEventController.java @@ -46,7 +46,12 @@ ResponseEntity handleCloudEvent(@RequestBody CloudEvent cloudEvent) String json = new String(cloudEvent.getData().toBytes()); StorageObjectData.Builder builder = StorageObjectData.newBuilder(); - JsonFormat.parser().merge(json, builder); + + // If you do not ignore unknown fields, then JsonFormat.Parser returns an + // error when encountering a new or unknown field. Note that you might lose + // some event data in the unmarshaling process by ignoring unknown fields. + JsonFormat.Parser parser = JsonFormat.parser().ignoringUnknownFields(); + parser.merge(json, builder); StorageObjectData data = builder.build(); // Convert protobuf timestamp to java Instant @@ -54,7 +59,7 @@ ResponseEntity handleCloudEvent(@RequestBody CloudEvent cloudEvent) Instant updated = Instant.ofEpochSecond(ts.getSeconds(), ts.getNanos()); String msg = String.format( - "Cloud Storage object changed: %s/%s modified at %s\n", + "Cloud Storage object changed: %s/%s modified at %s%n", data.getBucket(), data.getName(), updated); System.out.println(msg); diff --git a/functions/README.md b/functions/README.md index f093b1fa13a..8258e9db6e5 100644 --- a/functions/README.md +++ b/functions/README.md @@ -2,26 +2,25 @@ # Google Cloud Functions Java Samples -[Cloud Functions][functions_docs] is a lightweight, event-based, asynchronous -compute solution that allows you to create small, single-purpose functions that -respond to Cloud events without the need to manage a server or a runtime -environment. +[Cloud Run functions](https://cloud.google.com/functions/docs/concepts/overview) is a lightweight, event-based, asynchronous compute solution that allows you to create small, single-purpose functions that respond to Cloud events without the need to manage a server or a runtime environment. -[functions_docs]: https://cloud.google.com/functions/docs/ +There are two versions of Cloud Run functions: + +* **Cloud Run functions**, formerly known as Cloud Functions (2nd gen), which deploys your function as services on Cloud Run, allowing you to trigger them using Eventarc and Pub/Sub. Cloud Run functions are created using `gcloud functions` or `gcloud run`. Samples for Cloud Run functions can be found in the [`functions/v2`](v2/) folder. +* **Cloud Run functions (1st gen)**, formerly known as Cloud Functions (1st gen), the original version of functions with limited event triggers and configurability. Cloud Run functions (1st gen) are created using `gcloud functions --no-gen2`. Samples for Cloud Run functions (1st generation) can be found in the current `functions/` folder. 
## Samples * [Hello World](helloworld/) -* [Concepts](concepts/) +* [Concepts](v2/concepts/) * [Datastore](v2/datastore/) * [Firebase](firebase/) -* [Cloud Pub/Sub](pubsub/) +* [Cloud Pub/Sub](v2/pubsub/) * [HTTP](http/) * [Logging & Monitoring](logging/) * [Slack](slack/) -* [OCR tutorial](ocr/) -* [ImageMagick](imagemagick/) -* [CI/CD setup](ci_cd/) +* [OCR tutorial](v2/ocr/) +* [ImageMagick](v2/imagemagick/) ## Running Functions Locally The [Java Functions Framework](https://github.com/GoogleCloudPlatform/functions-framework-java) diff --git a/functions/ci_cd/cloudbuild.yaml b/functions/ci_cd/cloudbuild.yaml deleted file mode 100644 index 38b402a8b4d..00000000000 --- a/functions/ci_cd/cloudbuild.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# [START functions_ci_cd_cloud_build] -steps: -- name: 'gcr.io/cloud-builders/mvn' - args: ['clean', 'verify'] - dir: 'function/dir/from/repo/root' -- name: 'gcr.io/cloud-builders/gcloud' - args: ['functions', 'deploy', '[YOUR_DEPLOYED_FUNCTION_NAME]', '[YOUR_FUNCTION_TRIGGER]', '--runtime', 'java11', '--entry-point', '[YOUR_FUNCTION_NAME_IN_CODE]'] - dir: 'function/dir/from/repo/root' -# [END functions_ci_cd_cloud_build] diff --git a/functions/v2/firebase/firestore/src/main/java/functions/FirebaseFirestore.java b/functions/v2/firebase/firestore/src/main/java/functions/FirebaseFirestore.java index dfd0acea59f..e79d77a167e 100644 --- a/functions/v2/firebase/firestore/src/main/java/functions/FirebaseFirestore.java +++ b/functions/v2/firebase/firestore/src/main/java/functions/FirebaseFirestore.java @@ -28,16 +28,17 @@ public class FirebaseFirestore implements CloudEventsFunction { @Override public void accept(CloudEvent event) throws InvalidProtocolBufferException { - DocumentEventData firestorEventData = DocumentEventData.parseFrom(event.getData().toBytes()); + DocumentEventData firestoreEventData = DocumentEventData + .parseFrom(event.getData().toBytes()); logger.info("Function triggered by event on: " + event.getSource()); logger.info("Event type: " + event.getType()); logger.info("Old value:"); - logger.info(firestorEventData.getOldValue().toString()); + logger.info(firestoreEventData.getOldValue().toString()); logger.info("New value:"); - logger.info(firestorEventData.getValue().toString()); + logger.info(firestoreEventData.getValue().toString()); } } diff --git a/functions/v2/hello-gcs/src/main/java/functions/HelloGcs.java b/functions/v2/hello-gcs/src/main/java/functions/HelloGcs.java index 26c49560d1d..a1b227a90bb 100644 --- a/functions/v2/hello-gcs/src/main/java/functions/HelloGcs.java +++ b/functions/v2/hello-gcs/src/main/java/functions/HelloGcs.java @@ -40,7 +40,12 @@ public void accept(CloudEvent event) throws InvalidProtocolBufferException { String cloudEventData = new String(event.getData().toBytes(), StandardCharsets.UTF_8); StorageObjectData.Builder builder = StorageObjectData.newBuilder(); - JsonFormat.parser().merge(cloudEventData, builder); + + // If you do not ignore 
unknown fields, then JsonFormat.Parser returns an + // error when encountering a new or unknown field. Note that you might lose + // some event data in the unmarshaling process by ignoring unknown fields. + JsonFormat.Parser parser = JsonFormat.parser().ignoringUnknownFields(); + parser.merge(cloudEventData, builder); StorageObjectData data = builder.build(); logger.info("Bucket: " + data.getBucket()); diff --git a/functions/v2/imagemagick/src/main/java/functions/ImageMagick.java b/functions/v2/imagemagick/src/main/java/functions/ImageMagick.java index 90a9fefd81d..546207d26cf 100644 --- a/functions/v2/imagemagick/src/main/java/functions/ImageMagick.java +++ b/functions/v2/imagemagick/src/main/java/functions/ImageMagick.java @@ -156,7 +156,12 @@ private static StorageObjectData getEventData(CloudEvent event) // Extract Cloud Event data and convert to StorageObjectData String cloudEventData = new String(event.getData().toBytes(), StandardCharsets.UTF_8); StorageObjectData.Builder builder = StorageObjectData.newBuilder(); - JsonFormat.parser().merge(cloudEventData, builder); + + // If you do not ignore unknown fields, then JsonFormat.Parser returns an + // error when encountering a new or unknown field. Note that you might lose + // some event data in the unmarshaling process by ignoring unknown fields. + JsonFormat.Parser parser = JsonFormat.parser().ignoringUnknownFields(); + parser.merge(cloudEventData, builder); return builder.build(); } // [START functions_imagemagick_setup] diff --git a/functions/v2/ocr/ocr-process-image/src/main/java/functions/OcrProcessImage.java b/functions/v2/ocr/ocr-process-image/src/main/java/functions/OcrProcessImage.java index f41d76b1b66..05437bc3714 100644 --- a/functions/v2/ocr/ocr-process-image/src/main/java/functions/OcrProcessImage.java +++ b/functions/v2/ocr/ocr-process-image/src/main/java/functions/OcrProcessImage.java @@ -71,7 +71,12 @@ public void accept(CloudEvent event) throws InvalidProtocolBufferException { // Unmarshal data from CloudEvent String cloudEventData = new String(event.getData().toBytes(), StandardCharsets.UTF_8); StorageObjectData.Builder builder = StorageObjectData.newBuilder(); - JsonFormat.parser().merge(cloudEventData, builder); + + // If you do not ignore unknown fields, then JsonFormat.Parser returns an + // error when encountering a new or unknown field. Note that you might lose + // some event data in the unmarshaling process by ignoring unknown fields. 
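+ // For example, a hypothetical payload like {"bucket": "b", "name": "o", "futureField": 1} + // would make a strict parser throw InvalidProtocolBufferException, while the lenient + // parser below keeps the known fields and silently drops "futureField".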
+ JsonFormat.Parser parser = JsonFormat.parser().ignoringUnknownFields(); + parser.merge(cloudEventData, builder); StorageObjectData gcsEvent = builder.build(); String bucket = gcsEvent.getBucket(); diff --git a/language/snippets/pom.xml b/language/snippets/pom.xml index 5914ee501c2..561349ff7ce 100644 --- a/language/snippets/pom.xml +++ b/language/snippets/pom.xml @@ -23,8 +23,6 @@ UTF-8 - - @@ -42,8 +40,6 @@ com.google.cloud google-cloud-language - - junit junit diff --git a/managedkafka/examples/pom.xml b/managedkafka/examples/pom.xml index 248b42e2962..7f1343971b3 100644 --- a/managedkafka/examples/pom.xml +++ b/managedkafka/examples/pom.xml @@ -29,7 +29,7 @@ com.google.cloud libraries-bom - 26.40.0 + 26.50.0 pom import @@ -40,13 +40,7 @@ com.google.cloud google-cloud-managedkafka - 0.1.0 - - com.google.api.grpc - proto-google-cloud-managedkafka-v1 - 0.1.0 - junit junit diff --git a/managedkafka/examples/src/main/java/examples/CreateCluster.java b/managedkafka/examples/src/main/java/examples/CreateCluster.java index 478e6ce7f2b..63c22d30c6a 100644 --- a/managedkafka/examples/src/main/java/examples/CreateCluster.java +++ b/managedkafka/examples/src/main/java/examples/CreateCluster.java @@ -17,7 +17,13 @@ package examples; // [START managedkafka_create_cluster] + import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.retrying.RetryingFuture; +import com.google.api.gax.retrying.TimedRetryAlgorithm; import com.google.cloud.managedkafka.v1.AccessConfig; import com.google.cloud.managedkafka.v1.CapacityConfig; import com.google.cloud.managedkafka.v1.Cluster; @@ -25,9 +31,11 @@ import com.google.cloud.managedkafka.v1.GcpConfig; import com.google.cloud.managedkafka.v1.LocationName; import com.google.cloud.managedkafka.v1.ManagedKafkaClient; +import com.google.cloud.managedkafka.v1.ManagedKafkaSettings; import com.google.cloud.managedkafka.v1.NetworkConfig; import com.google.cloud.managedkafka.v1.OperationMetadata; import com.google.cloud.managedkafka.v1.RebalanceConfig; +import java.time.Duration; import java.util.concurrent.ExecutionException; public class CreateCluster { @@ -64,17 +72,47 @@ public static void createCluster( .setRebalanceConfig(rebalanceConfig) .build(); - try (ManagedKafkaClient managedKafkaClient = ManagedKafkaClient.create()) { + // Create the settings to configure the timeout for polling operations + ManagedKafkaSettings.Builder settingsBuilder = ManagedKafkaSettings.newBuilder(); + TimedRetryAlgorithm timedRetryAlgorithm = OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setTotalTimeoutDuration(Duration.ofHours(1L)) + .build()); + settingsBuilder.createClusterOperationSettings() + .setPollingAlgorithm(timedRetryAlgorithm); + + try (ManagedKafkaClient managedKafkaClient = ManagedKafkaClient.create( + settingsBuilder.build())) { + CreateClusterRequest request = CreateClusterRequest.newBuilder() .setParent(LocationName.of(projectId, region).toString()) .setClusterId(clusterId) .setCluster(cluster) .build(); + // The duration of this operation can vary considerably, typically taking between 10-40 // minutes. OperationFuture<Cluster, OperationMetadata> future = managedKafkaClient.createClusterOperationCallable().futureCall(request); + + // Get the initial LRO and print details.
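+ // The initial future completes as soon as the service has created the long-running + // operation (LRO); this happens well before the cluster itself is ready.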
+ OperationSnapshot operation = future.getInitialFuture().get(); + System.out.printf("Cluster creation started. Operation name: %s\nDone: %s\nMetadata: %s\n", + operation.getName(), + operation.isDone(), + future.getMetadata().get().toString()); + + while (!future.isDone()) { + // The pollingFuture gives us the most recent status of the operation + RetryingFuture<OperationSnapshot> pollingFuture = future.getPollingFuture(); + OperationSnapshot currentOp = pollingFuture.getAttemptResult().get(); + System.out.printf("Polling Operation:\nName: %s\nDone: %s\n", + currentOp.getName(), + currentOp.isDone()); + } + + // NOTE: future.get() blocks until the operation is complete (isDone == true) Cluster response = future.get(); System.out.printf("Created cluster: %s\n", response.getName()); } catch (ExecutionException e) { diff --git a/managedkafka/examples/src/main/java/examples/DeleteCluster.java b/managedkafka/examples/src/main/java/examples/DeleteCluster.java index 391a86fcb41..767ef74a718 100644 --- a/managedkafka/examples/src/main/java/examples/DeleteCluster.java +++ b/managedkafka/examples/src/main/java/examples/DeleteCluster.java @@ -17,14 +17,21 @@ package examples; // [START managedkafka_delete_cluster] + import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.retrying.TimedRetryAlgorithm; import com.google.api.gax.rpc.ApiException; import com.google.cloud.managedkafka.v1.ClusterName; import com.google.cloud.managedkafka.v1.DeleteClusterRequest; import com.google.cloud.managedkafka.v1.ManagedKafkaClient; +import com.google.cloud.managedkafka.v1.ManagedKafkaSettings; import com.google.cloud.managedkafka.v1.OperationMetadata; import com.google.protobuf.Empty; import java.io.IOException; +import java.time.Duration; public class DeleteCluster { @@ -38,13 +45,32 @@ public static void main(String[] args) throws Exception { public static void deleteCluster(String projectId, String region, String clusterId) throws Exception { - try (ManagedKafkaClient managedKafkaClient = ManagedKafkaClient.create()) { + + // Create the settings to configure the timeout for polling operations + ManagedKafkaSettings.Builder settingsBuilder = ManagedKafkaSettings.newBuilder(); + TimedRetryAlgorithm timedRetryAlgorithm = OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setTotalTimeoutDuration(Duration.ofHours(1L)) + .build()); + settingsBuilder.deleteClusterOperationSettings() + .setPollingAlgorithm(timedRetryAlgorithm); + + try (ManagedKafkaClient managedKafkaClient = ManagedKafkaClient.create( + settingsBuilder.build())) { DeleteClusterRequest request = DeleteClusterRequest.newBuilder() .setName(ClusterName.of(projectId, region, clusterId).toString()) .build(); OperationFuture<Empty, OperationMetadata> future = managedKafkaClient.deleteClusterOperationCallable().futureCall(request); + + // Get the initial LRO and print details. CreateCluster contains sample code for polling logs. + OperationSnapshot operation = future.getInitialFuture().get(); + System.out.printf("Cluster deletion started.
Operation name: %s\nDone: %s\nMetadata: %s\n", + operation.getName(), + operation.isDone(), + future.getMetadata().get().toString()); + future.get(); System.out.println("Deleted cluster"); } catch (IOException | ApiException e) { diff --git a/managedkafka/examples/src/main/java/examples/UpdateCluster.java b/managedkafka/examples/src/main/java/examples/UpdateCluster.java index d33de2283d9..feeff950ffd 100644 --- a/managedkafka/examples/src/main/java/examples/UpdateCluster.java +++ b/managedkafka/examples/src/main/java/examples/UpdateCluster.java @@ -17,14 +17,21 @@ package examples; // [START managedkafka_update_cluster] + import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.retrying.TimedRetryAlgorithm; import com.google.cloud.managedkafka.v1.CapacityConfig; import com.google.cloud.managedkafka.v1.Cluster; import com.google.cloud.managedkafka.v1.ClusterName; import com.google.cloud.managedkafka.v1.ManagedKafkaClient; +import com.google.cloud.managedkafka.v1.ManagedKafkaSettings; import com.google.cloud.managedkafka.v1.OperationMetadata; import com.google.cloud.managedkafka.v1.UpdateClusterRequest; import com.google.protobuf.FieldMask; +import java.time.Duration; import java.util.concurrent.ExecutionException; public class UpdateCluster { @@ -48,11 +55,29 @@ public static void updateCluster( .build(); FieldMask updateMask = FieldMask.newBuilder().addPaths("capacity_config.memory_bytes").build(); - try (ManagedKafkaClient managedKafkaClient = ManagedKafkaClient.create()) { + // Create the settings to configure the timeout for polling operations + ManagedKafkaSettings.Builder settingsBuilder = ManagedKafkaSettings.newBuilder(); + TimedRetryAlgorithm timedRetryAlgorithm = OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setTotalTimeoutDuration(Duration.ofHours(1L)) + .build()); + settingsBuilder.updateClusterOperationSettings() + .setPollingAlgorithm(timedRetryAlgorithm); + + try (ManagedKafkaClient managedKafkaClient = ManagedKafkaClient.create( + settingsBuilder.build())) { UpdateClusterRequest request = UpdateClusterRequest.newBuilder().setUpdateMask(updateMask).setCluster(cluster).build(); OperationFuture<Cluster, OperationMetadata> future = managedKafkaClient.updateClusterOperationCallable().futureCall(request); + + // Get the initial LRO and print details. CreateCluster contains sample code for polling logs. + OperationSnapshot operation = future.getInitialFuture().get(); + System.out.printf("Cluster update started.
Operation name: %s\nDone: %s\nMetadata: %s\n", + operation.getName(), + operation.isDone(), + future.getMetadata().get().toString()); + Cluster response = future.get(); System.out.printf("Updated cluster: %s\n", response.getName()); } catch (ExecutionException e) { diff --git a/managedkafka/examples/src/test/java/examples/ClustersTest.java b/managedkafka/examples/src/test/java/examples/ClustersTest.java index e24ee8ae837..e5d47e3edbd 100644 --- a/managedkafka/examples/src/test/java/examples/ClustersTest.java +++ b/managedkafka/examples/src/test/java/examples/ClustersTest.java @@ -24,10 +24,21 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import com.google.api.core.ApiFuture; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.retrying.RetryingFuture; +import com.google.api.gax.rpc.OperationCallable; import com.google.cloud.managedkafka.v1.Cluster; import com.google.cloud.managedkafka.v1.ClusterName; +import com.google.cloud.managedkafka.v1.CreateClusterRequest; +import com.google.cloud.managedkafka.v1.DeleteClusterRequest; import com.google.cloud.managedkafka.v1.LocationName; import com.google.cloud.managedkafka.v1.ManagedKafkaClient; +import com.google.cloud.managedkafka.v1.ManagedKafkaSettings; +import com.google.cloud.managedkafka.v1.OperationMetadata; +import com.google.cloud.managedkafka.v1.UpdateClusterRequest; +import com.google.protobuf.Empty; import java.io.ByteArrayOutputStream; import java.io.PrintStream; import java.util.ArrayList; @@ -58,11 +69,54 @@ public void setUp() { @Test public void createClusterTest() throws Exception { ManagedKafkaClient managedKafkaClient = mock(ManagedKafkaClient.class); + OperationCallable operationCallable = + mock(OperationCallable.class); + OperationFuture operationFuture = + mock(OperationFuture.class); + try (MockedStatic mockedStatic = Mockito.mockStatic(ManagedKafkaClient.class)) { - mockedStatic.when(() -> create()).thenReturn(managedKafkaClient); + + // client creation + mockedStatic.when(() -> create(any(ManagedKafkaSettings.class))) + .thenReturn(managedKafkaClient); + + // operation callable when(managedKafkaClient.createClusterOperationCallable()) - .thenReturn(MockOperationFuture.getOperableCallable()); + .thenReturn(operationCallable); + when(operationCallable.futureCall(any(CreateClusterRequest.class))) + .thenReturn(operationFuture); + + // initial future + ApiFuture initialFuture = mock(ApiFuture.class); + when(operationFuture.getInitialFuture()).thenReturn(initialFuture); + + // Metadata + ApiFuture metadataFuture = mock(ApiFuture.class); + OperationMetadata metadata = mock(OperationMetadata.class); + when(operationFuture.getMetadata()).thenReturn(metadataFuture); + when(metadataFuture.get()).thenReturn(metadata); + + // operation snapshot + OperationSnapshot operationSnapshot = mock(OperationSnapshot.class); + when(operationFuture.getInitialFuture().get()).thenReturn(operationSnapshot); + when(operationSnapshot.getName()) + .thenReturn("projects/test-project/locations/test-location/operations/test-operation"); + when(operationSnapshot.isDone()).thenReturn(false, false, true); + + // polling future + RetryingFuture pollingFuture = mock(RetryingFuture.class); + when(operationFuture.getPollingFuture()).thenReturn(pollingFuture); + when(operationFuture.isDone()).thenReturn(false, false, true); + ApiFuture attemptResult = mock(ApiFuture.class); + 
when(pollingFuture.getAttemptResult()).thenReturn(attemptResult); + when(attemptResult.get()).thenReturn(operationSnapshot); + + // Setup final result + Cluster resultCluster = mock(Cluster.class); + when(operationFuture.get()).thenReturn(resultCluster); + when(resultCluster.getName()).thenReturn(clusterName); + String subnet = "test-subnet"; int cpu = 3; long memory = 3221225472L; @@ -71,6 +125,10 @@ public void createClusterTest() throws Exception { assertThat(output).contains("Created cluster"); assertThat(output).contains(clusterName); verify(managedKafkaClient, times(1)).createClusterOperationCallable(); + verify(operationCallable, times(1)).futureCall(any(CreateClusterRequest.class)); + verify(operationFuture, times(2)).getPollingFuture(); // Verify 2 polling attempts + verify(pollingFuture, times(2)).getAttemptResult(); // Verify 2 attempt results + verify(operationSnapshot, times(3)).isDone(); // 2 polls + 1 initial check } } @@ -122,32 +180,104 @@ public void listClustersTest() throws Exception { @Test public void updateClusterTest() throws Exception { ManagedKafkaClient managedKafkaClient = mock(ManagedKafkaClient.class); + OperationCallable operationCallable = + mock(OperationCallable.class); + OperationFuture operationFuture = + mock(OperationFuture.class); + try (MockedStatic mockedStatic = Mockito.mockStatic(ManagedKafkaClient.class)) { - mockedStatic.when(() -> create()).thenReturn(managedKafkaClient); + + // client creation + mockedStatic.when(() -> create(any(ManagedKafkaSettings.class))) + .thenReturn(managedKafkaClient); + + // operation callable when(managedKafkaClient.updateClusterOperationCallable()) - .thenReturn(MockOperationFuture.getOperableCallable()); + .thenReturn(operationCallable); + when(operationCallable.futureCall(any(UpdateClusterRequest.class))) + .thenReturn(operationFuture); + + // initial future + ApiFuture initialFuture = mock(ApiFuture.class); + when(operationFuture.getInitialFuture()).thenReturn(initialFuture); + + // Metadata + ApiFuture metadataFuture = mock(ApiFuture.class); + OperationMetadata metadata = mock(OperationMetadata.class); + when(operationFuture.getMetadata()).thenReturn(metadataFuture); + when(metadataFuture.get()).thenReturn(metadata); + + // operation snapshot + OperationSnapshot operationSnapshot = mock(OperationSnapshot.class); + when(operationFuture.getInitialFuture().get()).thenReturn(operationSnapshot); + when(operationSnapshot.getName()) + .thenReturn("projects/test-project/locations/test-location/operations/test-operation"); + when(operationSnapshot.isDone()).thenReturn(false, false, true); + + // Setup final result + Cluster resultCluster = mock(Cluster.class); + when(operationFuture.get()).thenReturn(resultCluster); + when(resultCluster.getName()).thenReturn(clusterName); + long updatedMemory = 4221225472L; UpdateCluster.updateCluster(projectId, region, projectId, updatedMemory); String output = bout.toString(); assertThat(output).contains("Updated cluster"); assertThat(output).contains(clusterName); verify(managedKafkaClient, times(1)).updateClusterOperationCallable(); + verify(operationCallable, times(1)).futureCall(any(UpdateClusterRequest.class)); } } @Test public void deleteClusterTest() throws Exception { ManagedKafkaClient managedKafkaClient = mock(ManagedKafkaClient.class); + OperationCallable operationCallable = + mock(OperationCallable.class); + OperationFuture operationFuture = + mock(OperationFuture.class); try (MockedStatic mockedStatic = Mockito.mockStatic(ManagedKafkaClient.class)) { - mockedStatic.when(() 
diff --git a/opencensus/pom.xml b/opencensus/pom.xml
deleted file mode 100644
index dcdb6115781..00000000000
--- a/opencensus/pom.xml
+++ /dev/null
@@ -1,93 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0">
-  <modelVersion>4.0.0</modelVersion>
-  <packaging>jar</packaging>
-  <groupId>com.example.opencensus</groupId>
-  <artifactId>opencensus-samples</artifactId>
-  <version>1.0</version>
-
-  <parent>
-    <groupId>com.google.cloud.samples</groupId>
-    <artifactId>shared-configuration</artifactId>
-    <version>1.2.0</version>
-  </parent>
-
-  <properties>
-    <maven.compiler.source>1.8</maven.compiler.source>
-    <maven.compiler.target>1.8</maven.compiler.target>
-    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-    <opencensus.version>0.31.1</opencensus.version>
-  </properties>
-
-  <dependencyManagement>
-    <dependencies>
-      <dependency>
-        <groupId>com.google.cloud</groupId>
-        <artifactId>libraries-bom</artifactId>
-        <version>26.32.0</version>
-        <type>pom</type>
-        <scope>import</scope>
-      </dependency>
-    </dependencies>
-  </dependencyManagement>
-
-  <dependencies>
-    <dependency>
-      <groupId>io.opencensus</groupId>
-      <artifactId>opencensus-api</artifactId>
-      <version>${opencensus.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>io.opencensus</groupId>
-      <artifactId>opencensus-exporter-stats-stackdriver</artifactId>
-      <version>${opencensus.version}</version>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
-        <version>3.1.1</version>
-        <executions>
-          <execution>
-            <goals>
-              <goal>java</goal>
-            </goals>
-          </execution>
-        </executions>
-        <configuration>
-          <mainClass>com.example.opencensus.Quickstart</mainClass>
-          <cleanupDaemonThreads>false</cleanupDaemonThreads>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/opencensus/src/main/java/com/example/opencensus/Quickstart.java b/opencensus/src/main/java/com/example/opencensus/Quickstart.java
deleted file mode 100644
index c304bf056dd..00000000000
--- a/opencensus/src/main/java/com/example/opencensus/Quickstart.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright 2018 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package com.example.opencensus; - -// [START monitoring_opencensus_metrics_quickstart] - -import com.google.common.collect.Lists; -import io.opencensus.exporter.stats.stackdriver.StackdriverStatsExporter; -import io.opencensus.stats.Aggregation; -import io.opencensus.stats.BucketBoundaries; -import io.opencensus.stats.Measure.MeasureLong; -import io.opencensus.stats.Stats; -import io.opencensus.stats.StatsRecorder; -import io.opencensus.stats.View; -import io.opencensus.stats.View.Name; -import io.opencensus.stats.ViewManager; -import java.io.IOException; -import java.util.Collections; -import java.util.Random; -import java.util.concurrent.TimeUnit; - -public class Quickstart { - private static final int EXPORT_INTERVAL = 70; - private static final MeasureLong LATENCY_MS = - MeasureLong.create("task_latency", "The task latency in milliseconds", "ms"); - // Latency in buckets: - // [>=0ms, >=100ms, >=200ms, >=400ms, >=1s, >=2s, >=4s] - private static final BucketBoundaries LATENCY_BOUNDARIES = - BucketBoundaries.create(Lists.newArrayList(0d, 100d, 200d, 400d, 1000d, 2000d, 4000d)); - private static final StatsRecorder STATS_RECORDER = Stats.getStatsRecorder(); - - public static void main(String[] args) throws IOException, InterruptedException { - // Register the view. It is imperative that this step exists, - // otherwise recorded metrics will be dropped and never exported. - View view = - View.create( - Name.create("task_latency_distribution"), - "The distribution of the task latencies.", - LATENCY_MS, - Aggregation.Distribution.create(LATENCY_BOUNDARIES), - Collections.emptyList()); - - ViewManager viewManager = Stats.getViewManager(); - viewManager.registerView(view); - - // [START setup_exporter] - // Enable OpenCensus exporters to export metrics to Stackdriver Monitoring. - // Exporters use Application Default Credentials to authenticate. - // See https://developers.google.com/identity/protocols/application-default-credentials - // for more details. - StackdriverStatsExporter.createAndRegister(); - // [END setup_exporter] - - // Record 100 fake latency values between 0 and 5 seconds. - Random rand = new Random(); - for (int i = 0; i < 100; i++) { - long ms = (long) (TimeUnit.MILLISECONDS.convert(5, TimeUnit.SECONDS) * rand.nextDouble()); - System.out.println(String.format("Latency %d: %d", i, ms)); - STATS_RECORDER.newMeasureMap().put(LATENCY_MS, ms).record(); - } - - // The default export interval is 60 seconds. The thread with the StackdriverStatsExporter must - // live for at least the interval past any metrics that must be collected, or some risk being - // lost if they are recorded after the last export. 
-
-    System.out.println(
-        String.format(
-            "Sleeping %d seconds before shutdown to ensure all records are flushed.",
-            EXPORT_INTERVAL));
-    Thread.sleep(TimeUnit.MILLISECONDS.convert(EXPORT_INTERVAL, TimeUnit.SECONDS));
-  }
-}
-// [END monitoring_opencensus_metrics_quickstart]
diff --git a/routeoptimization/snippets/src/main/java/com/example/OptimizeTours.java b/routeoptimization/snippets/src/main/java/com/example/OptimizeTours.java
index aacca58c3ab..cff70e3fbe4 100644
--- a/routeoptimization/snippets/src/main/java/com/example/OptimizeTours.java
+++ b/routeoptimization/snippets/src/main/java/com/example/OptimizeTours.java
@@ -22,14 +22,17 @@ package com.example;
 
 // [START routeoptimization_v1_OptimizeTours_sync]
+
 import com.google.maps.routeoptimization.v1.OptimizeToursRequest;
 import com.google.maps.routeoptimization.v1.OptimizeToursResponse;
 import com.google.maps.routeoptimization.v1.RouteOptimizationClient;
+import com.google.maps.routeoptimization.v1.RouteOptimizationSettings;
 import com.google.maps.routeoptimization.v1.Shipment;
 import com.google.maps.routeoptimization.v1.Shipment.VisitRequest;
 import com.google.maps.routeoptimization.v1.ShipmentModel;
 import com.google.maps.routeoptimization.v1.Vehicle;
 import com.google.type.LatLng;
+import java.time.Duration;
 
 public class OptimizeTours {
   // [END routeoptimization_v1_OptimizeTours_sync]
@@ -41,7 +44,16 @@ public static void main(String[] args) throws Exception {
 
   // [START routeoptimization_v1_OptimizeTours_sync]
   public static OptimizeToursResponse optimizeTours(String projectId) throws Exception {
-    RouteOptimizationClient client = RouteOptimizationClient.create();
+    // Optional: without a short keep-alive interval, method calls that run for
+    // tens of minutes may be interrupted.
+    RouteOptimizationSettings clientSettings = RouteOptimizationSettings
+        .newBuilder()
+        .setTransportChannelProvider(RouteOptimizationSettings
+            .defaultGrpcTransportProviderBuilder()
+            .setKeepAliveTimeDuration(Duration.ofSeconds(30))
+            .build()).build();
+
+    RouteOptimizationClient client = RouteOptimizationClient.create(clientSettings);
     OptimizeToursRequest request =
         OptimizeToursRequest.newBuilder()
             .setParent("projects/" + projectId)
diff --git a/security-command-center/snippets/pom.xml b/security-command-center/snippets/pom.xml
index dfc9f9bbe36..0c12cf541cd 100644
--- a/security-command-center/snippets/pom.xml
+++ b/security-command-center/snippets/pom.xml
@@ -45,6 +45,18 @@
       <version>2.45.0</version>
     </dependency>
+    <dependency>
+      <groupId>com.google.cloud</groupId>
+      <artifactId>google-cloud-securitycentermanagement</artifactId>
+      <version>0.20.0</version>
+    </dependency>
+
+    <dependency>
+      <groupId>com.google.api.grpc</groupId>
+      <artifactId>proto-google-cloud-securitycentermanagement-v1</artifactId>
+      <version>0.20.0</version>
+    </dependency>
     <dependency>
       <groupId>com.google.cloud</groupId>
       <artifactId>google-cloud-pubsub</artifactId>
@@ -80,4 +92,13 @@
       <scope>test</scope>
     </dependency>
   </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <version>3.2.5</version>
+      </plugin>
+    </plugins>
+  </build>
diff --git a/security-command-center/snippets/src/main/java/management/api/CreateSecurityHealthAnalyticsCustomModule.java b/security-command-center/snippets/src/main/java/management/api/CreateSecurityHealthAnalyticsCustomModule.java
new file mode 100644
index 00000000000..11c5ae45fa4
--- /dev/null
+++ b/security-command-center/snippets/src/main/java/management/api/CreateSecurityHealthAnalyticsCustomModule.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package management.api; + +// [START securitycenter_create_security_health_analytics_custom_module] +import com.google.cloud.securitycentermanagement.v1.CreateSecurityHealthAnalyticsCustomModuleRequest; +import com.google.cloud.securitycentermanagement.v1.CustomConfig; +import com.google.cloud.securitycentermanagement.v1.CustomConfig.ResourceSelector; +import com.google.cloud.securitycentermanagement.v1.CustomConfig.Severity; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import com.google.cloud.securitycentermanagement.v1.SecurityHealthAnalyticsCustomModule; +import com.google.cloud.securitycentermanagement.v1.SecurityHealthAnalyticsCustomModule.EnablementState; +import com.google.type.Expr; +import java.io.IOException; + +public class CreateSecurityHealthAnalyticsCustomModule { + + public static void main(String[] args) throws IOException { + // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules/create + // TODO: Developer should replace project_id with a real project ID before running this code + String projectId = "project_id"; + + String customModuleDisplayName = "custom_module_display_name"; + + createSecurityHealthAnalyticsCustomModule(projectId, customModuleDisplayName); + } + + public static SecurityHealthAnalyticsCustomModule createSecurityHealthAnalyticsCustomModule( + String projectId, String customModuleDisplayName) throws IOException { + + // Initialize client that will be used to send requests. This client only needs + // to be created + // once, and can be reused for multiple requests. 
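+    // The try-with-resources block below closes the client automatically when the
+    // method returns, releasing its background resources.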
+    try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) {
+
+      String name =
+          String.format(
+              "projects/%s/locations/global/securityHealthAnalyticsCustomModules/%s",
+              projectId, "custom_module");
+
+      // Define the CEL expression here; this one scans for keys that have not been rotated in
+      // the last 30 days. Change it according to your requirements.
+      Expr expr =
+          Expr.newBuilder()
+              .setExpression(
+                  "has(resource.rotationPeriod) && (resource.rotationPeriod > "
+                      + "duration('2592000s'))")
+              .build();
+
+      // Define the resource selector.
+      ResourceSelector resourceSelector =
+          ResourceSelector.newBuilder()
+              .addResourceTypes("cloudkms.googleapis.com/CryptoKey")
+              .build();
+
+      // Define the custom module configuration; update the severity, description, and
+      // recommendation below.
+      CustomConfig customConfig =
+          CustomConfig.newBuilder()
+              .setPredicate(expr)
+              .setResourceSelector(resourceSelector)
+              .setSeverity(Severity.MEDIUM)
+              .setDescription("add your description here")
+              .setRecommendation("add your recommendation here")
+              .build();
+
+      // Define the security health analytics custom module configuration; update the
+      // EnablementState below.
+      SecurityHealthAnalyticsCustomModule securityHealthAnalyticsCustomModule =
+          SecurityHealthAnalyticsCustomModule.newBuilder()
+              .setName(name)
+              .setDisplayName(customModuleDisplayName)
+              .setEnablementState(EnablementState.ENABLED)
+              .setCustomConfig(customConfig)
+              .build();
+
+      CreateSecurityHealthAnalyticsCustomModuleRequest request =
+          CreateSecurityHealthAnalyticsCustomModuleRequest.newBuilder()
+              .setParent(String.format("projects/%s/locations/global", projectId))
+              .setSecurityHealthAnalyticsCustomModule(securityHealthAnalyticsCustomModule)
+              .build();
+
+      SecurityHealthAnalyticsCustomModule response =
+          client.createSecurityHealthAnalyticsCustomModule(request);
+
+      return response;
+    }
+  }
+}
+// [END securitycenter_create_security_health_analytics_custom_module]
diff --git a/security-command-center/snippets/src/main/java/management/api/DeleteSecurityHealthAnalyticsCustomModule.java b/security-command-center/snippets/src/main/java/management/api/DeleteSecurityHealthAnalyticsCustomModule.java
new file mode 100644
index 00000000000..61d51cc3262
--- /dev/null
+++ b/security-command-center/snippets/src/main/java/management/api/DeleteSecurityHealthAnalyticsCustomModule.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package management.api; + +// [START securitycenter_delete_security_health_analytics_custom_module] +import com.google.cloud.securitycentermanagement.v1.DeleteSecurityHealthAnalyticsCustomModuleRequest; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import java.io.IOException; + +public class DeleteSecurityHealthAnalyticsCustomModule { + + public static void main(String[] args) throws IOException { + // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules/delete + // TODO: Developer should replace project_id with a real project ID before running this code + String projectId = "project_id"; + + String customModuleId = "custom_module_id"; + + deleteSecurityHealthAnalyticsCustomModule(projectId, customModuleId); + } + + public static boolean deleteSecurityHealthAnalyticsCustomModule( + String projectId, String customModuleId) throws IOException { + + // Initialize client that will be used to send requests. This client only needs + // to be created + // once, and can be reused for multiple requests. + try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { + + String name = + String.format( + "projects/%s/locations/global/securityHealthAnalyticsCustomModules/%s", + projectId, customModuleId); + + DeleteSecurityHealthAnalyticsCustomModuleRequest request = + DeleteSecurityHealthAnalyticsCustomModuleRequest.newBuilder().setName(name).build(); + + client.deleteSecurityHealthAnalyticsCustomModule(request); + + return true; + } + } +} +// [END securitycenter_delete_security_health_analytics_custom_module] diff --git a/security-command-center/snippets/src/main/java/management/api/GetEffectiveSecurityHealthAnalyticsCustomModule.java b/security-command-center/snippets/src/main/java/management/api/GetEffectiveSecurityHealthAnalyticsCustomModule.java new file mode 100644 index 00000000000..8fde10c20f8 --- /dev/null +++ b/security-command-center/snippets/src/main/java/management/api/GetEffectiveSecurityHealthAnalyticsCustomModule.java @@ -0,0 +1,61 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package management.api; + +// [START securitycenter_get_effective_security_health_analytics_custom_module] +import com.google.cloud.securitycentermanagement.v1.EffectiveSecurityHealthAnalyticsCustomModule; +import com.google.cloud.securitycentermanagement.v1.GetEffectiveSecurityHealthAnalyticsCustomModuleRequest; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import java.io.IOException; + +public class GetEffectiveSecurityHealthAnalyticsCustomModule { + + public static void main(String[] args) throws IOException { + // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.effectiveSecurityHealthAnalyticsCustomModules/get + // TODO: Developer should replace project_id with a real project ID before running this code + String projectId = "project_id"; + + String customModuleId = "custom_module_id"; + + getEffectiveSecurityHealthAnalyticsCustomModule(projectId, customModuleId); + } + + public static EffectiveSecurityHealthAnalyticsCustomModule + getEffectiveSecurityHealthAnalyticsCustomModule(String projectId, String customModuleId) + throws IOException { + + // Initialize client that will be used to send requests. This client only needs + // to be created + // once, and can be reused for multiple requests. + try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { + + String name = + String.format( + "projects/%s/locations/global/effectiveSecurityHealthAnalyticsCustomModules/%s", + projectId, customModuleId); + + GetEffectiveSecurityHealthAnalyticsCustomModuleRequest request = + GetEffectiveSecurityHealthAnalyticsCustomModuleRequest.newBuilder().setName(name).build(); + + EffectiveSecurityHealthAnalyticsCustomModule response = + client.getEffectiveSecurityHealthAnalyticsCustomModule(request); + + return response; + } + } +} +// [END securitycenter_get_effective_security_health_analytics_custom_module] diff --git a/security-command-center/snippets/src/main/java/management/api/GetSecurityHealthAnalyticsCustomModule.java b/security-command-center/snippets/src/main/java/management/api/GetSecurityHealthAnalyticsCustomModule.java new file mode 100644 index 00000000000..8e149656aea --- /dev/null +++ b/security-command-center/snippets/src/main/java/management/api/GetSecurityHealthAnalyticsCustomModule.java @@ -0,0 +1,60 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package management.api; + +// [START securitycenter_get_security_health_analytics_custom_module] +import com.google.cloud.securitycentermanagement.v1.GetSecurityHealthAnalyticsCustomModuleRequest; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import com.google.cloud.securitycentermanagement.v1.SecurityHealthAnalyticsCustomModule; +import java.io.IOException; + +public class GetSecurityHealthAnalyticsCustomModule { + + public static void main(String[] args) throws IOException { + // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules/get + // TODO: Developer should replace project_id with a real project ID before running this code + String projectId = "project_id"; + + String customModuleId = "custom_module_id"; + + getSecurityHealthAnalyticsCustomModule(projectId, customModuleId); + } + + public static SecurityHealthAnalyticsCustomModule getSecurityHealthAnalyticsCustomModule( + String projectId, String customModuleId) throws IOException { + + // Initialize client that will be used to send requests. This client only needs + // to be created + // once, and can be reused for multiple requests. + try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { + + String name = + String.format( + "projects/%s/locations/global/securityHealthAnalyticsCustomModules/%s", + projectId, customModuleId); + + GetSecurityHealthAnalyticsCustomModuleRequest request = + GetSecurityHealthAnalyticsCustomModuleRequest.newBuilder().setName(name).build(); + + SecurityHealthAnalyticsCustomModule response = + client.getSecurityHealthAnalyticsCustomModule(request); + + return response; + } + } +} +// [END securitycenter_get_security_health_analytics_custom_module] diff --git a/security-command-center/snippets/src/main/java/management/api/ListDescendantSecurityHealthAnalyticsCustomModules.java b/security-command-center/snippets/src/main/java/management/api/ListDescendantSecurityHealthAnalyticsCustomModules.java new file mode 100644 index 00000000000..ae39a37deb5 --- /dev/null +++ b/security-command-center/snippets/src/main/java/management/api/ListDescendantSecurityHealthAnalyticsCustomModules.java @@ -0,0 +1,55 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package management.api; + +// [START securitycenter_list_descendant_security_health_analytics_custom_module] +import com.google.cloud.securitycentermanagement.v1.ListDescendantSecurityHealthAnalyticsCustomModulesRequest; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient.ListDescendantSecurityHealthAnalyticsCustomModulesPagedResponse; +import java.io.IOException; + +public class ListDescendantSecurityHealthAnalyticsCustomModules { + + public static void main(String[] args) throws IOException { + // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules/listDescendant + // TODO: Developer should replace project_id with a real project ID before running this code + String projectId = "project_id"; + + listDescendantSecurityHealthAnalyticsCustomModules(projectId); + } + + public static ListDescendantSecurityHealthAnalyticsCustomModulesPagedResponse + listDescendantSecurityHealthAnalyticsCustomModules(String projectId) throws IOException { + + // Initialize client that will be used to send requests. This client only needs + // to be created + // once, and can be reused for multiple requests. + try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { + + ListDescendantSecurityHealthAnalyticsCustomModulesRequest request = + ListDescendantSecurityHealthAnalyticsCustomModulesRequest.newBuilder() + .setParent(String.format("projects/%s/locations/global", projectId)) + .build(); + + ListDescendantSecurityHealthAnalyticsCustomModulesPagedResponse response = + client.listDescendantSecurityHealthAnalyticsCustomModules(request); + + return response; + } + } +} +// [END securitycenter_list_descendant_security_health_analytics_custom_module] diff --git a/security-command-center/snippets/src/main/java/management/api/ListEffectiveSecurityHealthAnalyticsCustomModules.java b/security-command-center/snippets/src/main/java/management/api/ListEffectiveSecurityHealthAnalyticsCustomModules.java new file mode 100644 index 00000000000..8e4da2917d9 --- /dev/null +++ b/security-command-center/snippets/src/main/java/management/api/ListEffectiveSecurityHealthAnalyticsCustomModules.java @@ -0,0 +1,55 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package management.api; + +// [START securitycenter_list_effective_security_health_analytics_custom_module] +import com.google.cloud.securitycentermanagement.v1.ListEffectiveSecurityHealthAnalyticsCustomModulesRequest; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient.ListEffectiveSecurityHealthAnalyticsCustomModulesPagedResponse; +import java.io.IOException; + +public class ListEffectiveSecurityHealthAnalyticsCustomModules { + + public static void main(String[] args) throws IOException { + // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.effectiveSecurityHealthAnalyticsCustomModules/list + // TODO: Developer should replace project_id with a real project ID before running this code + String projectId = "project_id"; + + listEffectiveSecurityHealthAnalyticsCustomModules(projectId); + } + + public static ListEffectiveSecurityHealthAnalyticsCustomModulesPagedResponse + listEffectiveSecurityHealthAnalyticsCustomModules(String projectId) throws IOException { + + // Initialize client that will be used to send requests. This client only needs + // to be created + // once, and can be reused for multiple requests. + try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { + + ListEffectiveSecurityHealthAnalyticsCustomModulesRequest request = + ListEffectiveSecurityHealthAnalyticsCustomModulesRequest.newBuilder() + .setParent(String.format("projects/%s/locations/global", projectId)) + .build(); + + ListEffectiveSecurityHealthAnalyticsCustomModulesPagedResponse response = + client.listEffectiveSecurityHealthAnalyticsCustomModules(request); + + return response; + } + } +} +// [END securitycenter_list_effective_security_health_analytics_custom_module] diff --git a/security-command-center/snippets/src/main/java/management/api/ListSecurityHealthAnalyticsCustomModules.java b/security-command-center/snippets/src/main/java/management/api/ListSecurityHealthAnalyticsCustomModules.java new file mode 100644 index 00000000000..f3d994f9c60 --- /dev/null +++ b/security-command-center/snippets/src/main/java/management/api/ListSecurityHealthAnalyticsCustomModules.java @@ -0,0 +1,54 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package management.api; + +// [START securitycenter_list_security_health_analytics_custom_module] +import com.google.cloud.securitycentermanagement.v1.ListSecurityHealthAnalyticsCustomModulesRequest; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient.ListSecurityHealthAnalyticsCustomModulesPagedResponse; +import java.io.IOException; + +public class ListSecurityHealthAnalyticsCustomModules { + + public static void main(String[] args) throws IOException { + // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules/list + // TODO: Developer should replace project_id with a real project ID before running this code + String projectId = "project_id"; + + listSecurityHealthAnalyticsCustomModules(projectId); + } + + public static ListSecurityHealthAnalyticsCustomModulesPagedResponse + listSecurityHealthAnalyticsCustomModules(String projectId) throws IOException { + // Initialize client that will be used to send requests. This client only needs + // to be created + // once, and can be reused for multiple requests. + try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { + + ListSecurityHealthAnalyticsCustomModulesRequest request = + ListSecurityHealthAnalyticsCustomModulesRequest.newBuilder() + .setParent(String.format("projects/%s/locations/global", projectId)) + .build(); + + ListSecurityHealthAnalyticsCustomModulesPagedResponse response = + client.listSecurityHealthAnalyticsCustomModules(request); + + return response; + } + } +} +// [END securitycenter_list_security_health_analytics_custom_module] diff --git a/security-command-center/snippets/src/main/java/management/api/SimulateSecurityHealthAnalyticsCustomModule.java b/security-command-center/snippets/src/main/java/management/api/SimulateSecurityHealthAnalyticsCustomModule.java new file mode 100644 index 00000000000..c9b2a79c42d --- /dev/null +++ b/security-command-center/snippets/src/main/java/management/api/SimulateSecurityHealthAnalyticsCustomModule.java @@ -0,0 +1,118 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package management.api; + +// [START securitycenter_simulate_security_health_analytics_custom_module] +import com.google.cloud.securitycentermanagement.v1.CustomConfig; +import com.google.cloud.securitycentermanagement.v1.CustomConfig.ResourceSelector; +import com.google.cloud.securitycentermanagement.v1.CustomConfig.Severity; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import com.google.cloud.securitycentermanagement.v1.SimulateSecurityHealthAnalyticsCustomModuleRequest; +import com.google.cloud.securitycentermanagement.v1.SimulateSecurityHealthAnalyticsCustomModuleRequest.SimulatedResource; +import com.google.cloud.securitycentermanagement.v1.SimulateSecurityHealthAnalyticsCustomModuleResponse; +import com.google.iam.v1.Binding; +import com.google.iam.v1.Policy; +import com.google.protobuf.Struct; +import com.google.protobuf.Value; +import com.google.type.Expr; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class SimulateSecurityHealthAnalyticsCustomModule { + + public static void main(String[] args) throws IOException { + // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules/simulate + // TODO: Developer should replace project_id with a real project ID before running this code + String projectId = "project_id"; + + simulateSecurityHealthAnalyticsCustomModule(projectId); + } + + public static SimulateSecurityHealthAnalyticsCustomModuleResponse + simulateSecurityHealthAnalyticsCustomModule(String projectId) throws IOException { + + // Initialize client that will be used to send requests. This client only needs + // to be created + // once, and can be reused for multiple requests. 
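+    // Note: simulation only evaluates the custom configuration against the simulated
+    // resource defined below; it does not create or modify any custom module.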
+    try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) {
+
+      // Define the CEL expression here; this one scans for keys that have not been rotated in
+      // the last 30 days. Change it according to your requirements.
+      Expr expr =
+          Expr.newBuilder()
+              .setExpression(
+                  "has(resource.rotationPeriod) && (resource.rotationPeriod > "
+                      + "duration('2592000s'))")
+              .build();
+
+      // Define the resource selector.
+      ResourceSelector resourceSelector =
+          ResourceSelector.newBuilder()
+              .addResourceTypes("cloudkms.googleapis.com/CryptoKey")
+              .build();
+
+      // Define the custom module configuration; update the severity, description, and
+      // recommendation below.
+      CustomConfig customConfig =
+          CustomConfig.newBuilder()
+              .setPredicate(expr)
+              .setResourceSelector(resourceSelector)
+              .setSeverity(Severity.MEDIUM)
+              .setDescription("add your description here")
+              .setRecommendation("add your recommendation here")
+              .build();
+
+      // Define the simulated resource data.
+      Map<String, Value> resourceData = new HashMap<>();
+      resourceData.put("resourceId", Value.newBuilder().setStringValue("test-resource-id").build());
+      resourceData.put("name", Value.newBuilder().setStringValue("test-resource-name").build());
+      Struct resourceDataStruct = Struct.newBuilder().putAllFields(resourceData).build();
+
+      // Define the policy.
+      Policy policy =
+          Policy.newBuilder()
+              .addBindings(
+                  Binding.newBuilder()
+                      .setRole("roles/owner")
+                      .addMembers("user:test-user@gmail.com")
+                      .build())
+              .build();
+
+      // Replace with the correct resource type.
+      SimulatedResource simulatedResource =
+          SimulatedResource.newBuilder()
+              .setResourceType("cloudkms.googleapis.com/CryptoKey")
+              .setResourceData(resourceDataStruct)
+              .setIamPolicyData(policy)
+              .build();
+
+      SimulateSecurityHealthAnalyticsCustomModuleRequest request =
+          SimulateSecurityHealthAnalyticsCustomModuleRequest.newBuilder()
+              .setParent(String.format("projects/%s/locations/global", projectId))
+              .setCustomConfig(customConfig)
+              .setResource(simulatedResource)
+              .build();
+
+      SimulateSecurityHealthAnalyticsCustomModuleResponse response =
+          client.simulateSecurityHealthAnalyticsCustomModule(request);
+
+      return response;
+    }
+  }
+}
+// [END securitycenter_simulate_security_health_analytics_custom_module]
diff --git a/security-command-center/snippets/src/main/java/management/api/UpdateSecurityHealthAnalyticsCustomModule.java b/security-command-center/snippets/src/main/java/management/api/UpdateSecurityHealthAnalyticsCustomModule.java
new file mode 100644
index 00000000000..1a92299f896
--- /dev/null
+++ b/security-command-center/snippets/src/main/java/management/api/UpdateSecurityHealthAnalyticsCustomModule.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package management.api; + +// [START securitycenter_update_security_health_analytics_custom_module] +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import com.google.cloud.securitycentermanagement.v1.SecurityHealthAnalyticsCustomModule; +import com.google.cloud.securitycentermanagement.v1.SecurityHealthAnalyticsCustomModule.EnablementState; +import com.google.cloud.securitycentermanagement.v1.UpdateSecurityHealthAnalyticsCustomModuleRequest; +import com.google.protobuf.FieldMask; +import java.io.IOException; + +public class UpdateSecurityHealthAnalyticsCustomModule { + + public static void main(String[] args) throws IOException { + // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules/patch + // TODO: Developer should replace project_id with a real project ID before running this code + String projectId = "project_id"; + + String customModuleId = "custom_module_id"; + + updateSecurityHealthAnalyticsCustomModule(projectId, customModuleId); + } + + public static SecurityHealthAnalyticsCustomModule updateSecurityHealthAnalyticsCustomModule( + String projectId, String customModuleId) throws IOException { + + // Initialize client that will be used to send requests. This client only needs + // to be created + // once, and can be reused for multiple requests. + try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { + + String name = + String.format( + "projects/%s/locations/global/securityHealthAnalyticsCustomModules/%s", + projectId, customModuleId); + + // Define the security health analytics custom module configuration, update the + // EnablementState accordingly. + SecurityHealthAnalyticsCustomModule securityHealthAnalyticsCustomModule = + SecurityHealthAnalyticsCustomModule.newBuilder() + .setName(name) + .setEnablementState(EnablementState.DISABLED) + .build(); + + // Set the field mask to specify which properties should be updated. + FieldMask fieldMask = FieldMask.newBuilder().addPaths("enablement_state").build(); + + UpdateSecurityHealthAnalyticsCustomModuleRequest request = + UpdateSecurityHealthAnalyticsCustomModuleRequest.newBuilder() + .setSecurityHealthAnalyticsCustomModule(securityHealthAnalyticsCustomModule) + .setUpdateMask(fieldMask) + .build(); + + SecurityHealthAnalyticsCustomModule response = + client.updateSecurityHealthAnalyticsCustomModule(request); + + return response; + } + } +} +// [END securitycenter_update_security_health_analytics_custom_module] diff --git a/security-command-center/snippets/src/test/java/management/api/SecurityHealthAnalyticsCustomModuleTest.java b/security-command-center/snippets/src/test/java/management/api/SecurityHealthAnalyticsCustomModuleTest.java new file mode 100644 index 00000000000..51040b2b012 --- /dev/null +++ b/security-command-center/snippets/src/test/java/management/api/SecurityHealthAnalyticsCustomModuleTest.java @@ -0,0 +1,231 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package management.api; + +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.securitycentermanagement.v1.EffectiveSecurityHealthAnalyticsCustomModule; +import com.google.cloud.securitycentermanagement.v1.ListSecurityHealthAnalyticsCustomModulesRequest; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient.ListDescendantSecurityHealthAnalyticsCustomModulesPagedResponse; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient.ListEffectiveSecurityHealthAnalyticsCustomModulesPagedResponse; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient.ListSecurityHealthAnalyticsCustomModulesPagedResponse; +import com.google.cloud.securitycentermanagement.v1.SecurityHealthAnalyticsCustomModule; +import com.google.cloud.securitycentermanagement.v1.SecurityHealthAnalyticsCustomModule.EnablementState; +import com.google.cloud.securitycentermanagement.v1.SimulateSecurityHealthAnalyticsCustomModuleResponse; +import com.google.cloud.testing.junit4.MultipleAttemptsRule; +import com.google.common.base.Strings; +import java.io.IOException; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.StreamSupport; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class SecurityHealthAnalyticsCustomModuleTest { + // TODO(Developer): Replace the below variable + private static final String PROJECT_ID = System.getenv("SCC_PROJECT_ID"); + private static final String CUSTOM_MODULE_DISPLAY_NAME = "java_sample_custom_module_test"; + private static final int MAX_ATTEMPT_COUNT = 3; + private static final int INITIAL_BACKOFF_MILLIS = 120000; // 2 minutes + + @Rule + public final MultipleAttemptsRule multipleAttemptsRule = + new MultipleAttemptsRule(MAX_ATTEMPT_COUNT, INITIAL_BACKOFF_MILLIS); + + // Check if the required environment variables are set. 
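+  // A missing variable fails the run immediately, before any test executes.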
+  public static void requireEnvVar(String envVarName) {
+    assertWithMessage(String.format("Missing environment variable '%s' ", envVarName))
+        .that(System.getenv(envVarName))
+        .isNotEmpty();
+  }
+
+  @BeforeClass
+  public static void setUp() throws InterruptedException {
+    requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS");
+    requireEnvVar("SCC_PROJECT_ID");
+  }
+
+  @AfterClass
+  public static void cleanUp() throws IOException {
+    // Perform cleanup after running tests
+    cleanupExistingCustomModules();
+  }
+
+  // cleanupExistingCustomModules cleans up all existing custom modules created by these tests
+  private static void cleanupExistingCustomModules() throws IOException {
+    try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) {
+      ListSecurityHealthAnalyticsCustomModulesRequest request =
+          ListSecurityHealthAnalyticsCustomModulesRequest.newBuilder()
+              .setParent(String.format("projects/%s/locations/global", PROJECT_ID))
+              .build();
+      ListSecurityHealthAnalyticsCustomModulesPagedResponse response =
+          client.listSecurityHealthAnalyticsCustomModules(request);
+      // Iterate over the response and delete, one by one, each custom module whose
+      // display name starts with java_sample_custom_module
+      for (SecurityHealthAnalyticsCustomModule module : response.iterateAll()) {
+        if (module.getDisplayName().startsWith("java_sample_custom_module")) {
+          String customModuleId = extractCustomModuleId(module.getName());
+          deleteCustomModule(PROJECT_ID, customModuleId);
+        }
+      }
+    }
+  }
+
+  // extractCustomModuleId extracts the custom module ID from the module's full resource name.
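+  // For example, "projects/my-proj/locations/global/securityHealthAnalyticsCustomModules/1234567"
+  // yields "1234567".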
+  private static String extractCustomModuleId(String customModuleFullName) {
+    if (!Strings.isNullOrEmpty(customModuleFullName)) {
+      Pattern pattern = Pattern.compile(".*/([^/]+)$");
+      Matcher matcher = pattern.matcher(customModuleFullName);
+      if (matcher.find()) {
+        return matcher.group(1);
+      }
+    }
+    return "";
+  }
+
+  // createCustomModule creates a custom module for the tests to operate on
+  private static SecurityHealthAnalyticsCustomModule createCustomModule(
+      String projectId, String customModuleDisplayName) throws IOException {
+    if (!Strings.isNullOrEmpty(projectId) && !Strings.isNullOrEmpty(customModuleDisplayName)) {
+      SecurityHealthAnalyticsCustomModule response =
+          CreateSecurityHealthAnalyticsCustomModule.createSecurityHealthAnalyticsCustomModule(
+              projectId, customModuleDisplayName);
+      return response;
+    }
+    return null;
+  }
+
+  // deleteCustomModule deletes the custom module with the given ID
+  private static void deleteCustomModule(String projectId, String customModuleId)
+      throws IOException {
+    if (!Strings.isNullOrEmpty(projectId) && !Strings.isNullOrEmpty(customModuleId)) {
+      DeleteSecurityHealthAnalyticsCustomModule.deleteSecurityHealthAnalyticsCustomModule(
+          projectId, customModuleId);
+    }
+  }
+
+  @Test
+  public void testCreateSecurityHealthAnalyticsCustomModule() throws IOException {
+    SecurityHealthAnalyticsCustomModule response =
+        CreateSecurityHealthAnalyticsCustomModule.createSecurityHealthAnalyticsCustomModule(
+            PROJECT_ID, CUSTOM_MODULE_DISPLAY_NAME);
+
+    assertNotNull(response);
+    assertThat(response.getDisplayName()).isEqualTo(CUSTOM_MODULE_DISPLAY_NAME);
+  }
+
+  @Test
+  public void testDeleteSecurityHealthAnalyticsCustomModule() throws IOException {
+    SecurityHealthAnalyticsCustomModule response =
+        createCustomModule(PROJECT_ID, CUSTOM_MODULE_DISPLAY_NAME);
+    String customModuleId = extractCustomModuleId(response.getName());
+    assertTrue(
+        DeleteSecurityHealthAnalyticsCustomModule.deleteSecurityHealthAnalyticsCustomModule(
+            PROJECT_ID, customModuleId));
+  }
+
+  @Test
+  public void testListSecurityHealthAnalyticsCustomModules() throws IOException {
+    createCustomModule(PROJECT_ID, CUSTOM_MODULE_DISPLAY_NAME);
+    ListSecurityHealthAnalyticsCustomModulesPagedResponse response =
+        ListSecurityHealthAnalyticsCustomModules.listSecurityHealthAnalyticsCustomModules(
+            PROJECT_ID);
+    assertTrue(
+        StreamSupport.stream(response.iterateAll().spliterator(), false)
+            .anyMatch(module -> CUSTOM_MODULE_DISPLAY_NAME.equals(module.getDisplayName())));
+  }
+
+  @Test
+  public void testGetSecurityHealthAnalyticsCustomModule() throws IOException {
+    SecurityHealthAnalyticsCustomModule createCustomModuleResponse =
+        createCustomModule(PROJECT_ID, CUSTOM_MODULE_DISPLAY_NAME);
+    String customModuleId = extractCustomModuleId(createCustomModuleResponse.getName());
+    SecurityHealthAnalyticsCustomModule getCustomModuleResponse =
+        GetSecurityHealthAnalyticsCustomModule.getSecurityHealthAnalyticsCustomModule(
+            PROJECT_ID, customModuleId);
+
+    assertThat(getCustomModuleResponse.getDisplayName()).isEqualTo(CUSTOM_MODULE_DISPLAY_NAME);
+    assertThat(extractCustomModuleId(getCustomModuleResponse.getName())).isEqualTo(customModuleId);
+  }
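+
+  // The update sample patches only the enablement_state field (selected through a
+  // FieldMask), so the assertion below checks that single field.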
+  @Test
+  public void testUpdateSecurityHealthAnalyticsCustomModule() throws IOException {
+    SecurityHealthAnalyticsCustomModule createCustomModuleResponse =
+        createCustomModule(PROJECT_ID, CUSTOM_MODULE_DISPLAY_NAME);
+    String customModuleId = extractCustomModuleId(createCustomModuleResponse.getName());
+    SecurityHealthAnalyticsCustomModule response =
+        UpdateSecurityHealthAnalyticsCustomModule.updateSecurityHealthAnalyticsCustomModule(
+            PROJECT_ID, customModuleId);
+    assertNotNull(response);
+    assertThat(response.getEnablementState()).isEqualTo(EnablementState.DISABLED);
+  }
+
+  @Test
+  public void testGetEffectiveSecurityHealthAnalyticsCustomModule() throws IOException {
+    SecurityHealthAnalyticsCustomModule createCustomModuleResponse =
+        createCustomModule(PROJECT_ID, CUSTOM_MODULE_DISPLAY_NAME);
+    String customModuleId = extractCustomModuleId(createCustomModuleResponse.getName());
+    EffectiveSecurityHealthAnalyticsCustomModule getEffectiveCustomModuleResponse =
+        GetEffectiveSecurityHealthAnalyticsCustomModule
+            .getEffectiveSecurityHealthAnalyticsCustomModule(PROJECT_ID, customModuleId);
+
+    assertThat(getEffectiveCustomModuleResponse.getDisplayName())
+        .isEqualTo(CUSTOM_MODULE_DISPLAY_NAME);
+    assertThat(extractCustomModuleId(getEffectiveCustomModuleResponse.getName()))
+        .isEqualTo(customModuleId);
+  }
+
+  @Test
+  public void testListEffectiveSecurityHealthAnalyticsCustomModules() throws IOException {
+    createCustomModule(PROJECT_ID, CUSTOM_MODULE_DISPLAY_NAME);
+    ListEffectiveSecurityHealthAnalyticsCustomModulesPagedResponse response =
+        ListEffectiveSecurityHealthAnalyticsCustomModules
+            .listEffectiveSecurityHealthAnalyticsCustomModules(PROJECT_ID);
+    assertTrue(
+        StreamSupport.stream(response.iterateAll().spliterator(), false)
+            .anyMatch(module -> CUSTOM_MODULE_DISPLAY_NAME.equals(module.getDisplayName())));
+  }
+
+  @Test
+  public void testListDescendantSecurityHealthAnalyticsCustomModules() throws IOException {
+    createCustomModule(PROJECT_ID, CUSTOM_MODULE_DISPLAY_NAME);
+    ListDescendantSecurityHealthAnalyticsCustomModulesPagedResponse response =
+        ListDescendantSecurityHealthAnalyticsCustomModules
+            .listDescendantSecurityHealthAnalyticsCustomModules(PROJECT_ID);
+    assertTrue(
+        StreamSupport.stream(response.iterateAll().spliterator(), false)
+            .anyMatch(module -> CUSTOM_MODULE_DISPLAY_NAME.equals(module.getDisplayName())));
+  }
+
+  @Test
+  public void testSimulateSecurityHealthAnalyticsCustomModule() throws IOException {
+    SimulateSecurityHealthAnalyticsCustomModuleResponse response =
+        SimulateSecurityHealthAnalyticsCustomModule.simulateSecurityHealthAnalyticsCustomModule(
+            PROJECT_ID);
+    assertNotNull(response);
+    // The simulated resource does not violate the example config, so the result is the
+    // no_violation case of the SimulatedResult oneof.
+    assertThat(response.getResult().hasNoViolation()).isTrue();
+  }
+}
diff --git a/security-command-center/snippets/src/test/java/vtwo/IamIT.java b/security-command-center/snippets/src/test/java/vtwo/IamIT.java
index a116cf6ee0f..8e95bf38a11 100644
--- a/security-command-center/snippets/src/test/java/vtwo/IamIT.java
+++ b/security-command-center/snippets/src/test/java/vtwo/IamIT.java
@@ -37,7 +37,7 @@ public class IamIT {
   private static final String ORGANIZATION_ID = System.getenv("SCC_PROJECT_ORG_ID");
-  private static final String USER_EMAIL = "someuser@domain.com";
+  private static final String USER_EMAIL = "example@domain.com";
   private static final String USER_PERMISSION = "securitycenter.findings.update";
   private static final String USER_ROLE = "roles/securitycenter.findingsEditor";
   private static Source SOURCE;
diff --git a/spanner/opentelemetry/pom.xml b/spanner/opentelemetry/pom.xml
index 46bd1872dbc..534c9ae3ca7 100644
--- a/spanner/opentelemetry/pom.xml
+++ b/spanner/opentelemetry/pom.xml
@@ -25,6 +25,7 @@
     1.2.0
+
@@ -45,7 +46,6 @@
-
     com.google.cloud
     google-cloud-spanner
@@ -66,7 +66,7 @@
     io.opentelemetry
     opentelemetry-exporter-otlp
-
+
diff --git a/spanner/opentelemetry/src/main/java/com/example/spanner/OpenTelemetryUsage.java b/spanner/opentelemetry/src/main/java/com/example/spanner/OpenTelemetryUsage.java
index 43a9eb500d6..4648142d7bc 100644
--- a/spanner/opentelemetry/src/main/java/com/example/spanner/OpenTelemetryUsage.java
+++ b/spanner/opentelemetry/src/main/java/com/example/spanner/OpenTelemetryUsage.java
@@ -77,14 +77,16 @@ public static void main(String[] args) {
         .build();
 
     Spanner spanner = options.getService();
-    // [END spanner_opentelemetry_usage]
 
     DatabaseClient dbClient = spanner
         .getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId));
 
     captureGfeMetric(dbClient);
     captureQueryStatsMetric(openTelemetry, dbClient);
-    sdkMeterProvider.forceFlush();
-    sdkTracerProvider.forceFlush();
+
+    // Close the providers to free up resources and export any remaining data.
+    sdkMeterProvider.close();
+    sdkTracerProvider.close();
+    // [END spanner_opentelemetry_usage]
 }
diff --git a/speech/src/main/java/com/example/speech/SpeechAdaptation.java b/speech/src/main/java/com/example/speech/SpeechAdaptation.java
deleted file mode 100644
index 4c51672d134..00000000000
--- a/speech/src/main/java/com/example/speech/SpeechAdaptation.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright 2020 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package com.example.speech; - -// [START speech_adaptation_beta] -import com.google.cloud.speech.v1p1beta1.RecognitionAudio; -import com.google.cloud.speech.v1p1beta1.RecognitionConfig; -import com.google.cloud.speech.v1p1beta1.RecognizeRequest; -import com.google.cloud.speech.v1p1beta1.RecognizeResponse; -import com.google.cloud.speech.v1p1beta1.SpeechClient; -import com.google.cloud.speech.v1p1beta1.SpeechContext; -import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative; -import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult; -import java.io.IOException; - -public class SpeechAdaptation { - - public void speechAdaptation() throws IOException { - String uriPath = "gs://cloud-samples-data/speech/brooklyn_bridge.mp3"; - speechAdaptation(uriPath); - } - - public static void speechAdaptation(String uriPath) throws IOException { - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources. - try (SpeechClient speechClient = SpeechClient.create()) { - - // Provides "hints" to the speech recognizer to favor specific words and phrases in the - // results. - // https://cloud.google.com/speech-to-text/docs/reference/rpc/google.cloud.speech.v1p1beta1#google.cloud.speech.v1p1beta1.SpeechContext - SpeechContext speechContext = - SpeechContext.newBuilder().addPhrases("Brooklyn Bridge").setBoost(20.0F).build(); - // Configure recognition config to match your audio file. - RecognitionConfig config = - RecognitionConfig.newBuilder() - .setEncoding(RecognitionConfig.AudioEncoding.MP3) - .setSampleRateHertz(44100) - .setLanguageCode("en-US") - .addSpeechContexts(speechContext) - .build(); - // Set the path to your audio file - RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(uriPath).build(); - - // Make the request - RecognizeRequest request = - RecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build(); - - // Display the results - RecognizeResponse response = speechClient.recognize(request); - for (SpeechRecognitionResult result : response.getResultsList()) { - // First alternative is the most probable result - SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0); - System.out.printf("Transcript: %s\n", alternative.getTranscript()); - } - } - } -} -// [END speech_adaptation_beta] diff --git a/speech/src/test/java/com/example/speech/SpeechAdaptationTest.java b/speech/src/test/java/com/example/speech/SpeechAdaptationTest.java deleted file mode 100644 index a31b3637d5d..00000000000 --- a/speech/src/test/java/com/example/speech/SpeechAdaptationTest.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.example.speech; - -import static com.google.common.truth.Truth.assertThat; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.PrintStream; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -@RunWith(JUnit4.class) -@SuppressWarnings("checkstyle:abbreviationaswordinname") -public class SpeechAdaptationTest { - private static final String AUDIO_FILE = "gs://cloud-samples-data/speech/brooklyn_bridge.mp3"; - private ByteArrayOutputStream bout; - private PrintStream out; - - @Before - public void setUp() { - bout = new ByteArrayOutputStream(); - out = new PrintStream(bout); - System.setOut(out); - } - - @After - public void tearDown() { - System.setOut(null); - } - - @Test - public void testTranscribeContextClasses() throws IOException { - SpeechAdaptation.speechAdaptation(AUDIO_FILE); - String got = bout.toString(); - assertThat(got).contains("Transcript:"); - } -} diff --git a/texttospeech/beta/src/main/java/com/example/texttospeech/SynthesizeFile.java b/texttospeech/beta/src/main/java/com/example/texttospeech/SynthesizeFile.java deleted file mode 100644 index bea8c47748d..00000000000 --- a/texttospeech/beta/src/main/java/com/example/texttospeech/SynthesizeFile.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Copyright 2018 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.example.texttospeech; - -// Imports the Google Cloud client library -import com.google.cloud.texttospeech.v1beta1.AudioConfig; -import com.google.cloud.texttospeech.v1beta1.AudioEncoding; -import com.google.cloud.texttospeech.v1beta1.SsmlVoiceGender; -import com.google.cloud.texttospeech.v1beta1.SynthesisInput; -import com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse; -import com.google.cloud.texttospeech.v1beta1.TextToSpeechClient; -import com.google.cloud.texttospeech.v1beta1.VoiceSelectionParams; -import com.google.protobuf.ByteString; -import java.io.FileOutputStream; -import java.io.OutputStream; -import java.nio.file.Files; -import java.nio.file.Paths; -import net.sourceforge.argparse4j.ArgumentParsers; -import net.sourceforge.argparse4j.inf.ArgumentParser; -import net.sourceforge.argparse4j.inf.ArgumentParserException; -import net.sourceforge.argparse4j.inf.MutuallyExclusiveGroup; -import net.sourceforge.argparse4j.inf.Namespace; - -/** - * Google Cloud TextToSpeech API sample application. Example usage: mvn package exec:java - * -Dexec.mainClass='com.example.texttospeech.SynthesizeFile' -Dexec.args='--text - * resources/hello.txt' - */ -public class SynthesizeFile { - - // [START tts_synthesize_text_file] - /** - * Demonstrates using the Text to Speech client to synthesize a text file or ssml file. - * - * @param textFile the text file to be synthesized. (e.g., hello.txt) - * @throws Exception on TextToSpeechClient Errors. 
- */ - public static void synthesizeTextFile(String textFile) throws Exception { - // Instantiates a client - try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) { - // Read the file's contents - String contents = new String(Files.readAllBytes(Paths.get(textFile))); - // Set the text input to be synthesized - SynthesisInput input = SynthesisInput.newBuilder().setText(contents).build(); - - // Build the voice request - VoiceSelectionParams voice = - VoiceSelectionParams.newBuilder() - .setLanguageCode("en-US") // languageCode = "en_us" - .setSsmlGender(SsmlVoiceGender.FEMALE) // ssmlVoiceGender = SsmlVoiceGender.FEMALE - .build(); - - // Select the type of audio file you want returned - AudioConfig audioConfig = - AudioConfig.newBuilder() - .setAudioEncoding(AudioEncoding.MP3) // MP3 audio. - .build(); - - // Perform the text-to-speech request - SynthesizeSpeechResponse response = - textToSpeechClient.synthesizeSpeech(input, voice, audioConfig); - - // Get the audio contents from the response - ByteString audioContents = response.getAudioContent(); - - // Write the response to the output file. - try (OutputStream out = new FileOutputStream("output.mp3")) { - out.write(audioContents.toByteArray()); - System.out.println("Audio content written to file \"output.mp3\""); - } - } - } - // [END tts_synthesize_text_file] - - // [START tts_synthesize_ssml_file] - /** - * Demonstrates using the Text to Speech client to synthesize a text file or ssml file. - * - * @param ssmlFile the ssml document to be synthesized. (e.g., hello.ssml) - * @throws Exception on TextToSpeechClient Errors. - */ - public static void synthesizeSsmlFile(String ssmlFile) throws Exception { - // Instantiates a client - try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) { - // Read the file's contents - String contents = new String(Files.readAllBytes(Paths.get(ssmlFile))); - // Set the ssml input to be synthesized - SynthesisInput input = SynthesisInput.newBuilder().setSsml(contents).build(); - - // Build the voice request - VoiceSelectionParams voice = - VoiceSelectionParams.newBuilder() - .setLanguageCode("en-US") // languageCode = "en_us" - .setSsmlGender(SsmlVoiceGender.FEMALE) // ssmlVoiceGender = SsmlVoiceGender.FEMALE - .build(); - - // Select the type of audio file you want returned - AudioConfig audioConfig = - AudioConfig.newBuilder() - .setAudioEncoding(AudioEncoding.MP3) // MP3 audio. - .build(); - - // Perform the text-to-speech request - SynthesizeSpeechResponse response = - textToSpeechClient.synthesizeSpeech(input, voice, audioConfig); - - // Get the audio contents from the response - ByteString audioContents = response.getAudioContent(); - - // Write the response to the output file. - try (OutputStream out = new FileOutputStream("output.mp3")) { - out.write(audioContents.toByteArray()); - System.out.println("Audio content written to file \"output.mp3\""); - } - } - } - // [END tts_synthesize_ssml_file] - - public static void main(String... 
args) throws Exception { - ArgumentParser parser = - ArgumentParsers.newFor("SynthesizeFile") - .build() - .defaultHelp(true) - .description("Synthesize a text file or ssml file."); - MutuallyExclusiveGroup group = parser.addMutuallyExclusiveGroup().required(true); - group.addArgument("--text").help("The text file from which to synthesize speech."); - group.addArgument("--ssml").help("The ssml file from which to synthesize speech."); - - try { - Namespace namespace = parser.parseArgs(args); - - if (namespace.get("text") != null) { - synthesizeTextFile(namespace.getString("text")); - } else { - synthesizeSsmlFile(namespace.getString("ssml")); - } - } catch (ArgumentParserException e) { - parser.handleError(e); - } - } -} diff --git a/texttospeech/beta/src/test/java/com/example/texttospeech/SynthesizeFileIT.java b/texttospeech/beta/src/test/java/com/example/texttospeech/SynthesizeFileIT.java deleted file mode 100644 index eed608b1c18..00000000000 --- a/texttospeech/beta/src/test/java/com/example/texttospeech/SynthesizeFileIT.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2018 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.example.texttospeech; - -import static com.google.common.truth.Truth.assertThat; - -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.PrintStream; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -/** Tests for SynthesizeFile sample. 
*/
-@RunWith(JUnit4.class)
-@SuppressWarnings("checkstyle:abbreviationaswordinname")
-public class SynthesizeFileIT {
-
-  private static String OUTPUT = "output.mp3";
-  private static String TEXT_FILE = "resources/hello.txt";
-  private static String SSML_FILE = "resources/hello.ssml";
-
-  private ByteArrayOutputStream bout;
-  private PrintStream out;
-  private File outputFile;
-
-  @Before
-  public void setUp() {
-    bout = new ByteArrayOutputStream();
-    out = new PrintStream(bout);
-    System.setOut(out);
-  }
-
-  @After
-  public void tearDown() {
-    outputFile.delete();
-  }
-
-  @Test
-  public void testSynthesizeText() throws Exception {
-    // Act
-    SynthesizeFile.synthesizeTextFile(TEXT_FILE);
-
-    // Assert
-    outputFile = new File(OUTPUT);
-    assertThat(outputFile.isFile()).isTrue();
-    String got = bout.toString();
-    assertThat(got).contains("Audio content written to file \"output.mp3\"");
-  }
-
-  @Test
-  public void testSynthesizeSsml() throws Exception {
-    // Act
-    SynthesizeFile.synthesizeSsmlFile(SSML_FILE);
-
-    // Assert
-    outputFile = new File(OUTPUT);
-    assertThat(outputFile.isFile()).isTrue();
-    String got = bout.toString();
-    assertThat(got).contains("Audio content written to file \"output.mp3\"");
-  }
-}
diff --git a/tpu/pom.xml b/tpu/pom.xml
new file mode 100644
index 00000000000..601db56977d
--- /dev/null
+++ b/tpu/pom.xml
@@ -0,0 +1,101 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>com.example.tpu</groupId>
+  <artifactId>gce-diregapic-samples</artifactId>
+  <version>1.0-SNAPSHOT</version>
+
+  <parent>
+    <artifactId>shared-configuration</artifactId>
+    <groupId>com.google.cloud.samples</groupId>
+    <version>1.2.0</version>
+  </parent>
+
+  <properties>
+    <maven.compiler.source>11</maven.compiler.source>
+    <maven.compiler.target>11</maven.compiler.target>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>com.google.cloud</groupId>
+      <artifactId>google-cloud-tpu</artifactId>
+      <version>2.52.0</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.api</groupId>
+      <artifactId>gax</artifactId>
+    </dependency>
+    <dependency>
+      <artifactId>google-cloud-storage</artifactId>
+      <groupId>com.google.cloud</groupId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <artifactId>truth</artifactId>
+      <groupId>com.google.truth</groupId>
+      <scope>test</scope>
+      <version>1.4.0</version>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+      <version>4.13.2</version>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-engine</artifactId>
+      <version>5.10.2</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-core</artifactId>
+      <version>5.13.0</version>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <dependencyManagement>
+    <dependencies>
+      <dependency>
+        <artifactId>libraries-bom</artifactId>
+        <groupId>com.google.cloud</groupId>
+        <type>import</type>
+        <scope>pom</scope>
+        <version>26.40.0</version>
+      </dependency>
+    </dependencies>
+  </dependencyManagement>
+</project>
\ No newline at end of file
diff --git a/tpu/src/main/java/tpu/CreateQueuedResourceWithNetwork.java b/tpu/src/main/java/tpu/CreateQueuedResourceWithNetwork.java
new file mode 100644
index 00000000000..9598603ad34
--- /dev/null
+++ b/tpu/src/main/java/tpu/CreateQueuedResourceWithNetwork.java
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tpu;
+
+//[START tpu_queued_resources_network]
+import com.google.api.gax.retrying.RetrySettings;
+import com.google.cloud.tpu.v2alpha1.CreateQueuedResourceRequest;
+import com.google.cloud.tpu.v2alpha1.NetworkConfig;
+import com.google.cloud.tpu.v2alpha1.Node;
+import com.google.cloud.tpu.v2alpha1.QueuedResource;
+import com.google.cloud.tpu.v2alpha1.TpuClient;
+import com.google.cloud.tpu.v2alpha1.TpuSettings;
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+import org.threeten.bp.Duration;
+
+public class CreateQueuedResourceWithNetwork {
+  public static void main(String[] args)
+      throws IOException, ExecutionException, InterruptedException {
+    // TODO(developer): Replace these variables before running the sample.
+    // Project ID or project number of the Google Cloud project in which you want to create a node.
+    String projectId = "YOUR_PROJECT_ID";
+    // The zone in which to create the TPU.
+    // For more information about supported TPU types for specific zones,
+    // see https://cloud.google.com/tpu/docs/regions-zones
+    String zone = "europe-west4-a";
+    // The name for your TPU.
+    String nodeName = "YOUR_TPU_NAME";
+    // The accelerator type that specifies the version and size of the Cloud TPU you want to create.
+    // For more information about supported accelerator types for each TPU version,
+    // see https://cloud.google.com/tpu/docs/system-architecture-tpu-vm#versions.
+    String tpuType = "v2-8";
+    // Software version that specifies the version of the TPU runtime to install.
+    // For more information see https://cloud.google.com/tpu/docs/runtimes
+    String tpuSoftwareVersion = "tpu-vm-tf-2.14.1";
+    // The name for your Queued Resource.
+    String queuedResourceId = "QUEUED_RESOURCE_ID";
+    // The name of the network you want the node to connect to.
+    // The network should be assigned to your project.
+    String networkName = "YOUR_COMPUTE_TPU_NETWORK";
+
+    createQueuedResourceWithNetwork(projectId, zone, queuedResourceId, nodeName,
+        tpuType, tpuSoftwareVersion, networkName);
+  }
+
+  // Creates a Queued Resource with a network configuration.
+  public static QueuedResource createQueuedResourceWithNetwork(
+      String projectId, String zone, String queuedResourceId, String nodeName,
+      String tpuType, String tpuSoftwareVersion, String networkName)
+      throws IOException, ExecutionException, InterruptedException {
+    // With these settings the client library handles the Operation's polling mechanism
+    // and prevents CancellationException errors.
+    TpuSettings.Builder clientSettings =
+        TpuSettings.newBuilder();
+    clientSettings
+        .createQueuedResourceSettings()
+        .setRetrySettings(
+            RetrySettings.newBuilder()
+                .setInitialRetryDelay(Duration.ofMillis(5000L))
+                .setRetryDelayMultiplier(2.0)
+                .setInitialRpcTimeout(Duration.ZERO)
+                .setRpcTimeoutMultiplier(1.0)
+                .setMaxRetryDelay(Duration.ofMillis(45000L))
+                .setTotalTimeout(Duration.ofHours(24L))
+                .build());
+    // Initialize client that will be used to send requests. This client only needs to be created
+    // once, and can be reused for multiple requests.
+    try (TpuClient tpuClient = TpuClient.create(clientSettings.build())) {
+      String parent = String.format("projects/%s/locations/%s", projectId, zone);
+      String region = zone.substring(0, zone.length() - 2);
+
+      // Specify the network and subnetwork that you want to connect your TPU to.
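+      // Both fields expect full resource names. This sample reuses the network name for
+      // the subnetwork, which matches the subnetworks created by auto-mode VPC networks.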
+      NetworkConfig networkConfig =
+          NetworkConfig.newBuilder()
+              .setEnableExternalIps(true)
+              .setNetwork(String.format("projects/%s/global/networks/%s", projectId, networkName))
+              .setSubnetwork(
+                  String.format(
+                      "projects/%s/regions/%s/subnetworks/%s", projectId, region, networkName))
+              .build();
+
+      // Create a node
+      Node node =
+          Node.newBuilder()
+              .setName(nodeName)
+              .setAcceleratorType(tpuType)
+              .setRuntimeVersion(tpuSoftwareVersion)
+              .setNetworkConfig(networkConfig)
+              .setQueuedResource(
+                  String.format(
+                      "projects/%s/locations/%s/queuedResources/%s",
+                      projectId, zone, queuedResourceId))
+              .build();
+
+      // Create a queued resource
+      QueuedResource queuedResource =
+          QueuedResource.newBuilder()
+              .setName(queuedResourceId)
+              .setTpu(
+                  QueuedResource.Tpu.newBuilder()
+                      .addNodeSpec(
+                          QueuedResource.Tpu.NodeSpec.newBuilder()
+                              .setParent(parent)
+                              .setNode(node)
+                              .setNodeId(nodeName)
+                              .build())
+                      .build())
+              .build();
+
+      CreateQueuedResourceRequest request =
+          CreateQueuedResourceRequest.newBuilder()
+              .setParent(parent)
+              .setQueuedResource(queuedResource)
+              .setQueuedResourceId(queuedResourceId)
+              .build();
+
+      // You can wait until the TPU Node is READY,
+      // and check its status using getTpuVm() from the "tpu_vm_get" sample.
+
+      return tpuClient.createQueuedResourceAsync(request).get();
+    }
+  }
+}
+//[END tpu_queued_resources_network]
diff --git a/tpu/src/main/java/tpu/CreateQueuedResourceWithStartupScript.java b/tpu/src/main/java/tpu/CreateQueuedResourceWithStartupScript.java
new file mode 100644
index 00000000000..c070a627388
--- /dev/null
+++ b/tpu/src/main/java/tpu/CreateQueuedResourceWithStartupScript.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tpu;
+
+//[START tpu_queued_resources_startup_script]
+import com.google.cloud.tpu.v2alpha1.CreateQueuedResourceRequest;
+import com.google.cloud.tpu.v2alpha1.Node;
+import com.google.cloud.tpu.v2alpha1.QueuedResource;
+import com.google.cloud.tpu.v2alpha1.TpuClient;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+public class CreateQueuedResourceWithStartupScript {
+  public static void main(String[] args)
+      throws IOException, ExecutionException, InterruptedException {
+    // TODO(developer): Replace these variables before running the sample.
+    // Project ID or project number of the Google Cloud project in which you want to create a node.
+    String projectId = "YOUR_PROJECT_ID";
+    // The zone in which to create the TPU.
+    // For more information about supported TPU types for specific zones,
+    // see https://cloud.google.com/tpu/docs/regions-zones
+    String zone = "us-central1-a";
+    // The name for your TPU.
+    String nodeName = "YOUR_TPU_NAME";
+    // The accelerator type that specifies the version and size of the Cloud TPU you want to create.
+    // For more information about supported accelerator types for each TPU version,
+    // see https://cloud.google.com/tpu/docs/system-architecture-tpu-vm#versions.
+    String tpuType = "v2-8";
+    // Software version that specifies the version of the TPU runtime to install.
+    // For more information see https://cloud.google.com/tpu/docs/runtimes
+    String tpuSoftwareVersion = "tpu-vm-tf-2.14.1";
+    // The name for your Queued Resource.
+    String queuedResourceId = "QUEUED_RESOURCE_ID";
+
+    createQueuedResource(projectId, zone, queuedResourceId, nodeName,
+        tpuType, tpuSoftwareVersion);
+  }
+
+  // Creates a Queued Resource with a startup script.
+  public static QueuedResource createQueuedResource(
+      String projectId, String zone, String queuedResourceId,
+      String nodeName, String tpuType, String tpuSoftwareVersion)
+      throws IOException, ExecutionException, InterruptedException {
+    String parent = String.format("projects/%s/locations/%s", projectId, zone);
+    String startupScriptContent = "#!/bin/bash\necho \"Hello from the startup script!\"";
+    // Add the startup script to the node metadata.
+    Map<String, String> metadata = new HashMap<>();
+    metadata.put("startup-script", startupScriptContent);
+    String queuedResourceForTpu = String.format("projects/%s/locations/%s/queuedResources/%s",
+        projectId, zone, queuedResourceId);
+    // Initialize client that will be used to send requests. This client only needs to be created
+    // once, and can be reused for multiple requests.
+    try (TpuClient tpuClient = TpuClient.create()) {
+      Node node =
+          Node.newBuilder()
+              .setName(nodeName)
+              .setAcceleratorType(tpuType)
+              .setRuntimeVersion(tpuSoftwareVersion)
+              .setQueuedResource(queuedResourceForTpu)
+              .putAllMetadata(metadata)
+              .build();
+
+      QueuedResource queuedResource =
+          QueuedResource.newBuilder()
+              .setName(queuedResourceId)
+              .setTpu(
+                  QueuedResource.Tpu.newBuilder()
+                      .addNodeSpec(
+                          QueuedResource.Tpu.NodeSpec.newBuilder()
+                              .setParent(parent)
+                              .setNode(node)
+                              .setNodeId(nodeName)
+                              .build())
+                      .build())
+              .build();
+
+      CreateQueuedResourceRequest request =
+          CreateQueuedResourceRequest.newBuilder()
+              .setParent(parent)
+              .setQueuedResourceId(queuedResourceId)
+              .setQueuedResource(queuedResource)
+              .build();
+      // You can wait until the TPU Node is READY,
+      // and check its status using getTpuVm() from the "tpu_vm_get" sample.
+
+      return tpuClient.createQueuedResourceAsync(request).get();
+    }
+  }
+}
+// [END tpu_queued_resources_startup_script]
\ No newline at end of file
diff --git a/tpu/src/main/java/tpu/CreateTpuVm.java b/tpu/src/main/java/tpu/CreateTpuVm.java
new file mode 100644
index 00000000000..667b7aa5012
--- /dev/null
+++ b/tpu/src/main/java/tpu/CreateTpuVm.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tpu;
+
+//[START tpu_vm_create]
+import com.google.api.gax.longrunning.OperationTimedPollAlgorithm;
+import com.google.api.gax.retrying.RetrySettings;
+import com.google.cloud.tpu.v2.CreateNodeRequest;
+import com.google.cloud.tpu.v2.Node;
+import com.google.cloud.tpu.v2.TpuClient;
+import com.google.cloud.tpu.v2.TpuSettings;
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+import org.threeten.bp.Duration;
+
+public class CreateTpuVm {
+
+  public static void main(String[] args)
+      throws IOException, ExecutionException, InterruptedException {
+    // TODO(developer): Replace these variables before running the sample.
+    // Project ID or project number of the Google Cloud project in which you want to create a node.
+    String projectId = "YOUR_PROJECT_ID";
+    // The zone in which to create the TPU.
+    // For more information about supported TPU types for specific zones,
+    // see https://cloud.google.com/tpu/docs/regions-zones
+    String zone = "europe-west4-a";
+    // The name for your TPU.
+    String nodeName = "YOUR_TPU_NAME";
+    // The accelerator type that specifies the version and size of the Cloud TPU you want to create.
+    // For more information about supported accelerator types for each TPU version,
+    // see https://cloud.google.com/tpu/docs/system-architecture-tpu-vm#versions.
+    String tpuType = "v2-8";
+    // Software version that specifies the version of the TPU runtime to install.
+    // For more information see https://cloud.google.com/tpu/docs/runtimes
+    String tpuSoftwareVersion = "tpu-vm-tf-2.14.1";
+
+    createTpuVm(projectId, zone, nodeName, tpuType, tpuSoftwareVersion);
+  }
+
+  // Creates a TPU VM with the specified name, zone, accelerator type, and version.
+  public static Node createTpuVm(
+      String projectId, String zone, String nodeName, String tpuType, String tpuSoftwareVersion)
+      throws IOException, ExecutionException, InterruptedException {
+    // With these settings the client library handles the Operation's polling mechanism
+    // and prevents CancellationException errors.
+    TpuSettings.Builder clientSettings =
+        TpuSettings.newBuilder();
+    clientSettings
+        .createNodeOperationSettings()
+        .setPollingAlgorithm(
+            OperationTimedPollAlgorithm.create(
+                RetrySettings.newBuilder()
+                    .setInitialRetryDelay(Duration.ofMillis(5000L))
+                    .setRetryDelayMultiplier(1.5)
+                    .setMaxRetryDelay(Duration.ofMillis(45000L))
+                    .setInitialRpcTimeout(Duration.ZERO)
+                    .setRpcTimeoutMultiplier(1.0)
+                    .setMaxRpcTimeout(Duration.ZERO)
+                    .setTotalTimeout(Duration.ofHours(24L))
+                    .build()));
+
+    // Initialize client that will be used to send requests. This client only needs to be created
+    // once, and can be reused for multiple requests.
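+    // Pass the customized settings to the client; every createNode call made with this
+    // client uses the polling algorithm configured above.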
+    try (TpuClient tpuClient = TpuClient.create(clientSettings.build())) {
+      String parent = String.format("projects/%s/locations/%s", projectId, zone);
+
+      Node tpuVm = Node.newBuilder()
+          .setName(nodeName)
+          .setAcceleratorType(tpuType)
+          .setRuntimeVersion(tpuSoftwareVersion)
+          .build();
+
+      CreateNodeRequest request = CreateNodeRequest.newBuilder()
+          .setParent(parent)
+          .setNodeId(nodeName)
+          .setNode(tpuVm)
+          .build();
+
+      return tpuClient.createNodeAsync(request).get();
+    }
+  }
+}
+//[END tpu_vm_create]
diff --git a/tpu/src/main/java/tpu/CreateTpuWithTopologyFlag.java b/tpu/src/main/java/tpu/CreateTpuWithTopologyFlag.java
new file mode 100644
index 00000000000..86e7e28a007
--- /dev/null
+++ b/tpu/src/main/java/tpu/CreateTpuWithTopologyFlag.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tpu;
+
+//[START tpu_vm_create_topology]
+import com.google.cloud.tpu.v2.AcceleratorConfig;
+import com.google.cloud.tpu.v2.AcceleratorConfig.Type;
+import com.google.cloud.tpu.v2.CreateNodeRequest;
+import com.google.cloud.tpu.v2.Node;
+import com.google.cloud.tpu.v2.TpuClient;
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+
+public class CreateTpuWithTopologyFlag {
+
+  public static void main(String[] args)
+      throws IOException, ExecutionException, InterruptedException {
+    // TODO(developer): Replace these variables before running the sample.
+    // Project ID or project number of the Google Cloud project in which you want to create a node.
+    String projectId = "YOUR_PROJECT_ID";
+    // The zone in which to create the TPU.
+    // For more information about supported TPU types for specific zones,
+    // see https://cloud.google.com/tpu/docs/regions-zones
+    String zone = "europe-west4-a";
+    // The name for your TPU.
+    String nodeName = "YOUR_TPU_NAME";
+    // The version of the Cloud TPU you want to create.
+    // Available options: TYPE_UNSPECIFIED = 0, V2 = 2, V3 = 4, V4 = 7
+    Type tpuVersion = AcceleratorConfig.Type.V2;
+    // Software version that specifies the version of the TPU runtime to install.
+    // For more information, see https://cloud.google.com/tpu/docs/runtimes
+    String tpuSoftwareVersion = "tpu-vm-tf-2.17.0-pod-pjrt";
+    // The physical topology of your TPU slice.
+    // For more information about topology for each TPU version,
+    // see https://cloud.google.com/tpu/docs/system-architecture-tpu-vm#versions.
+    String topology = "2x2";
+
+    createTpuWithTopologyFlag(projectId, zone, nodeName, tpuVersion, tpuSoftwareVersion, topology);
+  }
+
+  // Creates a TPU VM with the specified name, zone, version and topology.
+  public static Node createTpuWithTopologyFlag(String projectId, String zone, String nodeName,
+      Type tpuVersion, String tpuSoftwareVersion, String topology)
+      throws IOException, ExecutionException, InterruptedException {
+    // Initialize client that will be used to send requests. This client only needs to be created
+    // once, and can be reused for multiple requests.
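+    // This sample keeps the default client settings, so the create operation is polled
+    // with the library's default algorithm.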
+    try (TpuClient tpuClient = TpuClient.create()) {
+      String parent = String.format("projects/%s/locations/%s", projectId, zone);
+      Node tpuVm =
+          Node.newBuilder()
+              .setName(nodeName)
+              .setAcceleratorConfig(AcceleratorConfig.newBuilder()
+                  .setType(tpuVersion)
+                  .setTopology(topology)
+                  .build())
+              .setRuntimeVersion(tpuSoftwareVersion)
+              .build();
+
+      CreateNodeRequest request =
+          CreateNodeRequest.newBuilder()
+              .setParent(parent)
+              .setNodeId(nodeName)
+              .setNode(tpuVm)
+              .build();
+
+      return tpuClient.createNodeAsync(request).get();
+    }
+  }
+}
+//[END tpu_vm_create_topology]
\ No newline at end of file
diff --git a/tpu/src/main/java/tpu/DeleteForceQueuedResource.java b/tpu/src/main/java/tpu/DeleteForceQueuedResource.java
new file mode 100644
index 00000000000..f619889001c
--- /dev/null
+++ b/tpu/src/main/java/tpu/DeleteForceQueuedResource.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tpu;
+
+//[START tpu_queued_resources_delete_force]
+import com.google.api.gax.retrying.RetrySettings;
+import com.google.cloud.tpu.v2alpha1.DeleteQueuedResourceRequest;
+import com.google.cloud.tpu.v2alpha1.TpuClient;
+import com.google.cloud.tpu.v2alpha1.TpuSettings;
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+import org.threeten.bp.Duration;
+
+public class DeleteForceQueuedResource {
+  public static void main(String[] args)
+      throws IOException, ExecutionException, InterruptedException {
+    // TODO(developer): Replace these variables before running the sample.
+    // Project ID or project number of the Google Cloud project.
+    String projectId = "YOUR_PROJECT_ID";
+    // The zone in which the TPU was created.
+    String zone = "us-central1-f";
+    // The name for your Queued Resource.
+    String queuedResourceId = "QUEUED_RESOURCE_ID";
+
+    deleteForceQueuedResource(projectId, zone, queuedResourceId);
+  }
+
+  // Deletes a Queued Resource asynchronously with the --force flag.
+  public static void deleteForceQueuedResource(
+      String projectId, String zone, String queuedResourceId)
+      throws ExecutionException, InterruptedException, IOException {
+    String name = String.format("projects/%s/locations/%s/queuedResources/%s",
+        projectId, zone, queuedResourceId);
+    // With these settings the client library handles the Operation's polling mechanism
+    // and prevents CancellationException errors.
+    TpuSettings.Builder clientSettings =
+        TpuSettings.newBuilder();
+    clientSettings
+        .deleteQueuedResourceSettings()
+        .setRetrySettings(
+            RetrySettings.newBuilder()
+                .setInitialRetryDelay(Duration.ofMillis(5000L))
+                .setRetryDelayMultiplier(2.0)
+                .setInitialRpcTimeout(Duration.ZERO)
+                .setRpcTimeoutMultiplier(1.0)
+                .setMaxRetryDelay(Duration.ofMillis(45000L))
+                .setTotalTimeout(Duration.ofHours(24L))
+                .build());
+
+    // Initialize client that will be used to send requests. This client only needs to be created
+    // once, and can be reused for multiple requests.
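+    // The client built from these settings applies the retry configuration above to
+    // each deleteQueuedResource call.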
+    try (TpuClient tpuClient = TpuClient.create(clientSettings.build())) {
+      DeleteQueuedResourceRequest request =
+          DeleteQueuedResourceRequest.newBuilder().setName(name).setForce(true).build();
+      // Note: until the client library is updated, the delete operation itself completes
+      // successfully, but the returned future may fail with an UnknownException caused by
+      // an IllegalStateException.
+      tpuClient.deleteQueuedResourceAsync(request).get();
+
+      System.out.printf("Deleted Queued Resource: %s\n", name);
+    }
+  }
+}
+//[END tpu_queued_resources_delete_force]
diff --git a/tpu/src/main/java/tpu/DeleteTpuVm.java b/tpu/src/main/java/tpu/DeleteTpuVm.java
new file mode 100644
index 00000000000..a76b1d5487c
--- /dev/null
+++ b/tpu/src/main/java/tpu/DeleteTpuVm.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tpu;
+
+//[START tpu_vm_delete]
+import com.google.api.gax.longrunning.OperationTimedPollAlgorithm;
+import com.google.api.gax.retrying.RetrySettings;
+import com.google.cloud.tpu.v2.DeleteNodeRequest;
+import com.google.cloud.tpu.v2.NodeName;
+import com.google.cloud.tpu.v2.TpuClient;
+import com.google.cloud.tpu.v2.TpuSettings;
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+import org.threeten.bp.Duration;
+
+public class DeleteTpuVm {
+
+  public static void main(String[] args)
+      throws IOException, ExecutionException, InterruptedException {
+    // TODO(developer): Replace these variables before running the sample.
+    // Project ID or project number of the Google Cloud project the node belongs to.
+    String projectId = "YOUR_PROJECT_ID";
+    // The zone where the TPU is located.
+    // For more information about supported TPU types for specific zones,
+    // see https://cloud.google.com/tpu/docs/regions-zones
+    String zone = "europe-west4-a";
+    // The name for your TPU.
+    String nodeName = "YOUR_TPU_NAME";
+
+    deleteTpuVm(projectId, zone, nodeName);
+  }
+
+  // Deletes a TPU VM with the specified name in the given project and zone.
+  public static void deleteTpuVm(String projectId, String zone, String nodeName)
+      throws IOException, ExecutionException, InterruptedException {
+    // With these settings the client library handles the Operation's polling mechanism
+    // and prevents CancellationException errors.
+    TpuSettings.Builder clientSettings =
+        TpuSettings.newBuilder();
+    clientSettings
+        .deleteNodeOperationSettings()
+        .setPollingAlgorithm(
+            OperationTimedPollAlgorithm.create(
+                RetrySettings.newBuilder()
+                    .setInitialRetryDelay(Duration.ofMillis(5000L))
+                    .setRetryDelayMultiplier(1.5)
+                    .setMaxRetryDelay(Duration.ofMillis(45000L))
+                    .setInitialRpcTimeout(Duration.ZERO)
+                    .setRpcTimeoutMultiplier(1.0)
+                    .setMaxRpcTimeout(Duration.ZERO)
+                    .setTotalTimeout(Duration.ofHours(24L))
+                    .build()));
+
+    // Initialize client that will be used to send requests. This client only needs to be created
+    // once, and can be reused for multiple requests.
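+    // Build the client with the customized polling algorithm so the long-running delete
+    // operation is not cancelled before it completes.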
+ try (TpuClient tpuClient = TpuClient.create(clientSettings.build())) { + String name = NodeName.of(projectId, zone, nodeName).toString(); + + DeleteNodeRequest request = DeleteNodeRequest.newBuilder().setName(name).build(); + + tpuClient.deleteNodeAsync(request).get(); + System.out.println("TPU VM deleted"); + } + } +} +//[END tpu_vm_delete] \ No newline at end of file diff --git a/tpu/src/main/java/tpu/GetQueuedResource.java b/tpu/src/main/java/tpu/GetQueuedResource.java new file mode 100644 index 00000000000..a17c2b41f79 --- /dev/null +++ b/tpu/src/main/java/tpu/GetQueuedResource.java @@ -0,0 +1,53 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tpu; + +//[START tpu_queued_resources_get] +import com.google.cloud.tpu.v2alpha1.GetQueuedResourceRequest; +import com.google.cloud.tpu.v2alpha1.QueuedResource; +import com.google.cloud.tpu.v2alpha1.TpuClient; +import java.io.IOException; + +public class GetQueuedResource { + public static void main(String[] args) throws IOException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Google Cloud project. + String projectId = "YOUR_PROJECT_ID"; + // The zone in which the TPU was created. + String zone = "europe-west4-a"; + // The name for your Queued Resource. + String queuedResourceId = "QUEUED_RESOURCE_ID"; + + getQueuedResource(projectId, zone, queuedResourceId); + } + + // Get a Queued Resource. + public static QueuedResource getQueuedResource( + String projectId, String zone, String queuedResourceId) throws IOException { + String name = String.format("projects/%s/locations/%s/queuedResources/%s", + projectId, zone, queuedResourceId); + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (TpuClient tpuClient = TpuClient.create()) { + GetQueuedResourceRequest request = + GetQueuedResourceRequest.newBuilder().setName(name).build(); + + return tpuClient.getQueuedResource(request); + } + } +} +//[END tpu_queued_resources_get] diff --git a/tpu/src/main/java/tpu/GetTpuVm.java b/tpu/src/main/java/tpu/GetTpuVm.java new file mode 100644 index 00000000000..6dc40f4150e --- /dev/null +++ b/tpu/src/main/java/tpu/GetTpuVm.java @@ -0,0 +1,56 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package tpu;
+
+//[START tpu_vm_get]
+import com.google.cloud.tpu.v2.GetNodeRequest;
+import com.google.cloud.tpu.v2.Node;
+import com.google.cloud.tpu.v2.NodeName;
+import com.google.cloud.tpu.v2.TpuClient;
+import java.io.IOException;
+
+public class GetTpuVm {
+
+  public static void main(String[] args) throws IOException {
+    // TODO(developer): Replace these variables before running the sample.
+    // Project ID or project number of the Google Cloud project you want to use.
+    String projectId = "YOUR_PROJECT_ID";
+    // The zone where the TPU is located.
+    // For more information about supported TPU types for specific zones,
+    // see https://cloud.google.com/tpu/docs/regions-zones
+    String zone = "europe-west4-a";
+    // The name for your TPU.
+    String nodeName = "YOUR_TPU_NAME";
+
+    getTpuVm(projectId, zone, nodeName);
+  }
+
+  // Describes a TPU VM with the specified name in the given project and zone.
+  public static Node getTpuVm(String projectId, String zone, String nodeName)
+      throws IOException {
+    // Initialize client that will be used to send requests. This client only needs to be created
+    // once, and can be reused for multiple requests.
+    try (TpuClient tpuClient = TpuClient.create()) {
+      String name = NodeName.of(projectId, zone, nodeName).toString();
+
+      GetNodeRequest request = GetNodeRequest.newBuilder().setName(name).build();
+
+      return tpuClient.getNode(request);
+    }
+  }
+}
+//[END tpu_vm_get]
diff --git a/tpu/src/main/java/tpu/ListTpuVms.java b/tpu/src/main/java/tpu/ListTpuVms.java
new file mode 100644
index 00000000000..b9d834b758e
--- /dev/null
+++ b/tpu/src/main/java/tpu/ListTpuVms.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tpu;
+
+//[START tpu_vm_list]
+import com.google.cloud.tpu.v2.ListNodesRequest;
+import com.google.cloud.tpu.v2.TpuClient;
+import java.io.IOException;
+
+public class ListTpuVms {
+
+  public static void main(String[] args) throws IOException {
+    // TODO(developer): Replace these variables before running the sample.
+    // Project ID or project number of the Google Cloud project you want to use.
+    String projectId = "YOUR_PROJECT_ID";
+    // The zone where the TPUs are located.
+    // For more information about supported TPU types for specific zones,
+    // see https://cloud.google.com/tpu/docs/regions-zones
+    String zone = "us-central1-f";
+
+    listTpuVms(projectId, zone);
+  }
+
+  // Lists TPU VMs in the specified zone.
+  public static TpuClient.ListNodesPage listTpuVms(String projectId, String zone)
+      throws IOException {
+    // Initialize client that will be used to send requests. This client only needs to be created
+    // once, and can be reused for multiple requests.
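+    // listNodes() returns a paged response; this sample returns only the first page.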
+ try (TpuClient tpuClient = TpuClient.create()) { + String parent = String.format("projects/%s/locations/%s", projectId, zone); + + ListNodesRequest request = ListNodesRequest.newBuilder().setParent(parent).build(); + + return tpuClient.listNodes(request).getPage(); + } + } +} +//[END tpu_vm_list] diff --git a/tpu/src/main/java/tpu/StartTpuVm.java b/tpu/src/main/java/tpu/StartTpuVm.java new file mode 100644 index 00000000000..16546a78bf5 --- /dev/null +++ b/tpu/src/main/java/tpu/StartTpuVm.java @@ -0,0 +1,58 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tpu; + +//[START tpu_vm_start] +import com.google.cloud.tpu.v2.Node; +import com.google.cloud.tpu.v2.NodeName; +import com.google.cloud.tpu.v2.StartNodeRequest; +import com.google.cloud.tpu.v2.TpuClient; +import java.io.IOException; +import java.util.concurrent.ExecutionException; + +public class StartTpuVm { + + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Google Cloud project you want to use. + String projectId = "YOUR_PROJECT_ID"; + // The zone where the TPU is located. + // For more information about supported TPU types for specific zones, + // see https://cloud.google.com/tpu/docs/regions-zones + String zone = "us-central1-f"; + // The name for your TPU. + String nodeName = "YOUR_TPU_NAME"; + + startTpuVm(projectId, zone, nodeName); + } + + // Starts a TPU VM with the specified name in the given project and zone. + public static Node startTpuVm(String projectId, String zone, String nodeName) + throws IOException, ExecutionException, InterruptedException { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (TpuClient tpuClient = TpuClient.create()) { + String name = NodeName.of(projectId, zone, nodeName).toString(); + + StartNodeRequest request = StartNodeRequest.newBuilder().setName(name).build(); + + return tpuClient.startNodeAsync(request).get(); + } + } +} +//[END tpu_vm_start] \ No newline at end of file diff --git a/tpu/src/main/java/tpu/StopTpuVm.java b/tpu/src/main/java/tpu/StopTpuVm.java new file mode 100644 index 00000000000..ccaf668e889 --- /dev/null +++ b/tpu/src/main/java/tpu/StopTpuVm.java @@ -0,0 +1,59 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tpu; + +//[START tpu_vm_stop] +import com.google.cloud.tpu.v2.Node; +import com.google.cloud.tpu.v2.NodeName; +import com.google.cloud.tpu.v2.StopNodeRequest; +import com.google.cloud.tpu.v2.TpuClient; +import java.io.IOException; +import java.util.concurrent.ExecutionException; + +public class StopTpuVm { + + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Google Cloud project you want to use. + String projectId = "YOUR_PROJECT_ID"; + // The zone where the TPU is located. + // For more information about supported TPU types for specific zones, + // see https://cloud.google.com/tpu/docs/regions-zones + String zone = "us-central1-f"; + // The name for your TPU. + String nodeName = "YOUR_TPU_NAME"; + + stopTpuVm(projectId, zone, nodeName); + } + + // Stops a TPU VM with the specified name in the given project and zone. + public static Node stopTpuVm(String projectId, String zone, String nodeName) + throws IOException, ExecutionException, InterruptedException { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (TpuClient tpuClient = TpuClient.create()) { + String name = NodeName.of(projectId, zone, nodeName).toString(); + + StopNodeRequest request = StopNodeRequest.newBuilder().setName(name).build(); + + return tpuClient.stopNodeAsync(request).get(); + } + } +} +//[END tpu_vm_stop] + diff --git a/tpu/src/test/java/tpu/QueuedResourceIT.java b/tpu/src/test/java/tpu/QueuedResourceIT.java new file mode 100644 index 00000000000..15dd2e768cc --- /dev/null +++ b/tpu/src/test/java/tpu/QueuedResourceIT.java @@ -0,0 +1,139 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package tpu;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.mockStatic;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.tpu.v2alpha1.CreateQueuedResourceRequest;
+import com.google.cloud.tpu.v2alpha1.DeleteQueuedResourceRequest;
+import com.google.cloud.tpu.v2alpha1.GetQueuedResourceRequest;
+import com.google.cloud.tpu.v2alpha1.QueuedResource;
+import com.google.cloud.tpu.v2alpha1.TpuClient;
+import com.google.cloud.tpu.v2alpha1.TpuSettings;
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+import org.mockito.MockedStatic;
+
+@RunWith(JUnit4.class)
+@Timeout(value = 10)
+public class QueuedResourceIT {
+  private static final String PROJECT_ID = "project-id";
+  private static final String ZONE = "europe-west4-a";
+  private static final String NODE_NAME = "test-tpu";
+  private static final String TPU_TYPE = "v2-8";
+  private static final String TPU_SOFTWARE_VERSION = "tpu-vm-tf-2.14.1";
+  private static final String QUEUED_RESOURCE_NAME = "queued-resource";
+  private static final String NETWORK_NAME = "default";
+
+  @Test
+  public void testCreateQueuedResourceWithSpecifiedNetwork() throws Exception {
+    try (MockedStatic<TpuClient> mockedTpuClient = mockStatic(TpuClient.class)) {
+      QueuedResource mockQueuedResource = mock(QueuedResource.class);
+      TpuClient mockTpuClient = mock(TpuClient.class);
+      OperationFuture mockFuture = mock(OperationFuture.class);
+
+      mockedTpuClient.when(() -> TpuClient.create(any(TpuSettings.class)))
+          .thenReturn(mockTpuClient);
+      when(mockTpuClient.createQueuedResourceAsync(any(CreateQueuedResourceRequest.class)))
+          .thenReturn(mockFuture);
+      when(mockFuture.get()).thenReturn(mockQueuedResource);
+
+      QueuedResource returnedQueuedResource =
+          CreateQueuedResourceWithNetwork.createQueuedResourceWithNetwork(
+              PROJECT_ID, ZONE, QUEUED_RESOURCE_NAME, NODE_NAME,
+              TPU_TYPE, TPU_SOFTWARE_VERSION, NETWORK_NAME);
+
+      verify(mockTpuClient, times(1))
+          .createQueuedResourceAsync(any(CreateQueuedResourceRequest.class));
+      verify(mockFuture, times(1)).get();
+      assertEquals(returnedQueuedResource, mockQueuedResource);
+    }
+  }
+
+  @Test
+  public void testGetQueuedResource() throws IOException {
+    try (MockedStatic<TpuClient> mockedTpuClient = mockStatic(TpuClient.class)) {
+      TpuClient mockClient = mock(TpuClient.class);
+      QueuedResource mockQueuedResource = mock(QueuedResource.class);
+
+      mockedTpuClient.when(TpuClient::create).thenReturn(mockClient);
+      when(mockClient.getQueuedResource(any(GetQueuedResourceRequest.class)))
+          .thenReturn(mockQueuedResource);
+
+      QueuedResource returnedQueuedResource =
+          GetQueuedResource.getQueuedResource(PROJECT_ID, ZONE, NODE_NAME);
+
+      verify(mockClient, times(1))
+          .getQueuedResource(any(GetQueuedResourceRequest.class));
+      assertEquals(returnedQueuedResource, mockQueuedResource);
+    }
+  }
+
+  @Test
+  public void testDeleteForceQueuedResource()
+      throws IOException, InterruptedException, ExecutionException {
+    try (MockedStatic<TpuClient> mockedTpuClient = mockStatic(TpuClient.class)) {
+      TpuClient mockTpuClient = mock(TpuClient.class);
+      OperationFuture mockFuture = mock(OperationFuture.class);
+
+      mockedTpuClient.when(() -> TpuClient.create(any(TpuSettings.class)))
+          .thenReturn(mockTpuClient);
+      when(mockTpuClient.deleteQueuedResourceAsync(any(DeleteQueuedResourceRequest.class)))
+          .thenReturn(mockFuture);
+
+      DeleteForceQueuedResource.deleteForceQueuedResource(PROJECT_ID, ZONE, QUEUED_RESOURCE_NAME);
+
+      verify(mockTpuClient, times(1))
+          .deleteQueuedResourceAsync(any(DeleteQueuedResourceRequest.class));
+    }
+  }
+
+  @Test
+  public void testCreateQueuedResourceWithStartupScript() throws Exception {
+    try (MockedStatic<TpuClient> mockedTpuClient = mockStatic(TpuClient.class)) {
+      QueuedResource mockQueuedResource = mock(QueuedResource.class);
+      TpuClient mockTpuClient = mock(TpuClient.class);
+      OperationFuture mockFuture = mock(OperationFuture.class);
+
+      mockedTpuClient.when(TpuClient::create).thenReturn(mockTpuClient);
+      when(mockTpuClient.createQueuedResourceAsync(any(CreateQueuedResourceRequest.class)))
+          .thenReturn(mockFuture);
+      when(mockFuture.get()).thenReturn(mockQueuedResource);
+
+      QueuedResource returnedQueuedResource =
+          CreateQueuedResourceWithStartupScript.createQueuedResource(
+              PROJECT_ID, ZONE, QUEUED_RESOURCE_NAME, NODE_NAME,
+              TPU_TYPE, TPU_SOFTWARE_VERSION);
+
+      verify(mockTpuClient, times(1))
+          .createQueuedResourceAsync(any(CreateQueuedResourceRequest.class));
+      verify(mockFuture, times(1)).get();
+      assertEquals(returnedQueuedResource, mockQueuedResource);
+    }
+  }
+}
\ No newline at end of file
diff --git a/tpu/src/test/java/tpu/TpuVmIT.java b/tpu/src/test/java/tpu/TpuVmIT.java
new file mode 100644
index 00000000000..e40e220fec8
--- /dev/null
+++ b/tpu/src/test/java/tpu/TpuVmIT.java
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tpu;
+
+import static com.google.common.truth.Truth.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.mockStatic;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.tpu.v2.AcceleratorConfig;
+import com.google.cloud.tpu.v2.CreateNodeRequest;
+import com.google.cloud.tpu.v2.DeleteNodeRequest;
+import com.google.cloud.tpu.v2.GetNodeRequest;
+import com.google.cloud.tpu.v2.ListNodesRequest;
+import com.google.cloud.tpu.v2.Node;
+import com.google.cloud.tpu.v2.StartNodeRequest;
+import com.google.cloud.tpu.v2.StopNodeRequest;
+import com.google.cloud.tpu.v2.TpuClient;
+import com.google.cloud.tpu.v2.TpuSettings;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+import org.mockito.MockedStatic;
+
+@RunWith(JUnit4.class)
+@Timeout(value = 10)
+public class TpuVmIT {
+  private static final String PROJECT_ID = "project-id";
+  private static final String ZONE = "asia-east1-c";
+  private static final String NODE_NAME = "test-tpu";
+  private static final String TPU_TYPE = "v2-8";
+  private static final AcceleratorConfig.Type ACCELERATOR_TYPE = AcceleratorConfig.Type.V2;
+  private static final String TPU_SOFTWARE_VERSION = "tpu-vm-tf-2.14.1";
+  private static final String TOPOLOGY = "2x2";
+
+  @Test
+  public void testCreateTpuVm() throws Exception {
+    try (MockedStatic<TpuClient> mockedTpuClient = mockStatic(TpuClient.class)) {
+      Node mockNode = mock(Node.class);
+      TpuClient mockTpuClient = mock(TpuClient.class);
+      OperationFuture mockFuture = mock(OperationFuture.class);
+
+      mockedTpuClient.when(() -> TpuClient.create(any(TpuSettings.class)))
+          .thenReturn(mockTpuClient);
+      when(mockTpuClient.createNodeAsync(any(CreateNodeRequest.class)))
+          .thenReturn(mockFuture);
+      when(mockFuture.get()).thenReturn(mockNode);
+
+      Node returnedNode = CreateTpuVm.createTpuVm(
+          PROJECT_ID, ZONE, NODE_NAME,
+          TPU_TYPE, TPU_SOFTWARE_VERSION);
+
+      verify(mockTpuClient, times(1))
+          .createNodeAsync(any(CreateNodeRequest.class));
+      verify(mockFuture, times(1)).get();
+      assertEquals(returnedNode, mockNode);
+    }
+  }
+
+  @Test
+  public void testGetTpuVm() throws IOException {
+    try (MockedStatic<TpuClient> mockedTpuClient = mockStatic(TpuClient.class)) {
+      Node mockNode = mock(Node.class);
+      TpuClient mockClient = mock(TpuClient.class);
+
+      mockedTpuClient.when(TpuClient::create).thenReturn(mockClient);
+      when(mockClient.getNode(any(GetNodeRequest.class))).thenReturn(mockNode);
+
+      Node returnedNode = GetTpuVm.getTpuVm(PROJECT_ID, ZONE, NODE_NAME);
+
+      verify(mockClient, times(1))
+          .getNode(any(GetNodeRequest.class));
+      assertThat(returnedNode).isEqualTo(mockNode);
+    }
+  }
+
+  @Test
+  public void testDeleteTpuVm() throws IOException, ExecutionException, InterruptedException {
+    ByteArrayOutputStream bout = new ByteArrayOutputStream();
+    System.setOut(new PrintStream(bout));
+    try (MockedStatic<TpuClient> mockedTpuClient = mockStatic(TpuClient.class)) {
+      TpuClient mockTpuClient = mock(TpuClient.class);
+      OperationFuture mockFuture = mock(OperationFuture.class);
+
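+      // Route the static factory to the mocked client and stub the async delete call.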
+      mockedTpuClient.when(() -> TpuClient.create(any(TpuSettings.class)))
+          .thenReturn(mockTpuClient);
+      when(mockTpuClient.deleteNodeAsync(any(DeleteNodeRequest.class)))
+          .thenReturn(mockFuture);
+
+      DeleteTpuVm.deleteTpuVm(PROJECT_ID, ZONE, NODE_NAME);
+      String output = bout.toString();
+
+      assertThat(output).contains("TPU VM deleted");
+      verify(mockTpuClient, times(1)).deleteNodeAsync(any(DeleteNodeRequest.class));
+
+      bout.close();
+    }
+  }
+
+  @Test
+  public void testCreateTpuVmWithTopologyFlag()
+      throws IOException, ExecutionException, InterruptedException {
+    try (MockedStatic<TpuClient> mockedTpuClient = mockStatic(TpuClient.class)) {
+      Node mockNode = mock(Node.class);
+      TpuClient mockTpuClient = mock(TpuClient.class);
+      OperationFuture mockFuture = mock(OperationFuture.class);
+
+      mockedTpuClient.when(TpuClient::create).thenReturn(mockTpuClient);
+      when(mockTpuClient.createNodeAsync(any(CreateNodeRequest.class)))
+          .thenReturn(mockFuture);
+      when(mockFuture.get()).thenReturn(mockNode);
+      Node returnedNode = CreateTpuWithTopologyFlag.createTpuWithTopologyFlag(
+          PROJECT_ID, ZONE, NODE_NAME, ACCELERATOR_TYPE,
+          TPU_SOFTWARE_VERSION, TOPOLOGY);
+
+      verify(mockTpuClient, times(1))
+          .createNodeAsync(any(CreateNodeRequest.class));
+      verify(mockFuture, times(1)).get();
+      assertEquals(returnedNode, mockNode);
+    }
+  }
+
+  @Test
+  public void testListTpuVm() throws IOException {
+    try (MockedStatic<TpuClient> mockedTpuClient = mockStatic(TpuClient.class)) {
+      Node mockNode1 = mock(Node.class);
+      Node mockNode2 = mock(Node.class);
+      List<Node> mockListNodes = Arrays.asList(mockNode1, mockNode2);
+      TpuClient mockTpuClient = mock(TpuClient.class);
+      TpuClient.ListNodesPagedResponse mockListNodesResponse =
+          mock(TpuClient.ListNodesPagedResponse.class);
+      TpuClient.ListNodesPage mockListNodesPage = mock(TpuClient.ListNodesPage.class);
+
+      mockedTpuClient.when(TpuClient::create).thenReturn(mockTpuClient);
+      when(mockTpuClient.listNodes(any(ListNodesRequest.class))).thenReturn(mockListNodesResponse);
+      when(mockListNodesResponse.getPage()).thenReturn(mockListNodesPage);
+      when(mockListNodesPage.getValues()).thenReturn(mockListNodes);
+
+      TpuClient.ListNodesPage returnedListNodes = ListTpuVms.listTpuVms(PROJECT_ID, ZONE);
+
+      assertThat(returnedListNodes.getValues()).isEqualTo(mockListNodes);
+      verify(mockTpuClient, times(1)).listNodes(any(ListNodesRequest.class));
+    }
+  }
+
+  @Test
+  public void testStartTpuVm() throws IOException, ExecutionException, InterruptedException {
+    try (MockedStatic<TpuClient> mockedTpuClient = mockStatic(TpuClient.class)) {
+      TpuClient mockClient = mock(TpuClient.class);
+      Node mockNode = mock(Node.class);
+      OperationFuture mockFuture = mock(OperationFuture.class);
+
+      mockedTpuClient.when(TpuClient::create).thenReturn(mockClient);
+      when(mockClient.startNodeAsync(any(StartNodeRequest.class)))
+          .thenReturn(mockFuture);
+      when(mockFuture.get()).thenReturn(mockNode);
+
+      Node returnedNode = StartTpuVm.startTpuVm(PROJECT_ID, ZONE, NODE_NAME);
+
+      verify(mockClient, times(1))
+          .startNodeAsync(any(StartNodeRequest.class));
+      verify(mockFuture, times(1)).get();
+      assertEquals(returnedNode, mockNode);
+    }
+  }
+
+  @Test
+  public void testStopTpuVm() throws IOException, ExecutionException, InterruptedException {
+    try (MockedStatic<TpuClient> mockedTpuClient = mockStatic(TpuClient.class)) {
+      TpuClient mockClient = mock(TpuClient.class);
+      Node mockNode = mock(Node.class);
+      OperationFuture mockFuture = mock(OperationFuture.class);
+
+      mockedTpuClient.when(TpuClient::create).thenReturn(mockClient);
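+      // Stub the async stop call so it completes immediately with the mocked node.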
+      when(mockClient.stopNodeAsync(any(StopNodeRequest.class)))
+          .thenReturn(mockFuture);
+      when(mockFuture.get()).thenReturn(mockNode);
+
+      Node returnedNode = StopTpuVm.stopTpuVm(PROJECT_ID, ZONE, NODE_NAME);
+
+      verify(mockClient, times(1))
+          .stopNodeAsync(any(StopNodeRequest.class));
+      verify(mockFuture, times(1)).get();
+      assertEquals(returnedNode, mockNode);
+    }
+  }
+}
\ No newline at end of file
diff --git a/trace/pom.xml b/trace/pom.xml
deleted file mode 100644
index e8320cf319d..00000000000
--- a/trace/pom.xml
+++ /dev/null
@@ -1,86 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <packaging>jar</packaging>
-  <groupId>com.example.trace</groupId>
-  <artifactId>trace-samples</artifactId>
-  <version>1.0</version>
-
-  <parent>
-    <groupId>com.google.cloud.samples</groupId>
-    <artifactId>shared-configuration</artifactId>
-    <version>1.2.0</version>
-  </parent>
-
-  <properties>
-    <maven.compiler.source>1.8</maven.compiler.source>
-    <maven.compiler.target>1.8</maven.compiler.target>
-    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-  </properties>
-
-  <dependencyManagement>
-    <dependencies>
-      <dependency>
-        <groupId>com.google.cloud</groupId>
-        <artifactId>libraries-bom</artifactId>
-        <version>26.32.0</version>
-        <type>pom</type>
-        <scope>import</scope>
-      </dependency>
-    </dependencies>
-  </dependencyManagement>
-
-  <dependencies>
-    <dependency>
-      <groupId>io.opencensus</groupId>
-      <artifactId>opencensus-exporter-trace-stackdriver</artifactId>
-      <version>0.31.1</version>
-      <exclusions>
-        <exclusion>
-          <groupId>io.grpc</groupId>
-          <artifactId>grpc-api</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>com.google.cloud</groupId>
-      <artifactId>google-cloud-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.google.api</groupId>
-      <artifactId>gax-grpc</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <version>4.13.2</version>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/trace/src/main/java/com/example/trace/TraceSample.java b/trace/src/main/java/com/example/trace/TraceSample.java
deleted file mode 100644
index f706c59278d..00000000000
--- a/trace/src/main/java/com/example/trace/TraceSample.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright 2018 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.example.trace;
-
-import com.google.auth.oauth2.AccessToken;
-import com.google.auth.oauth2.GoogleCredentials;
-import io.opencensus.common.Scope;
-import io.opencensus.exporter.trace.stackdriver.StackdriverTraceConfiguration;
-import io.opencensus.exporter.trace.stackdriver.StackdriverTraceExporter;
-import io.opencensus.trace.Tracer;
-import io.opencensus.trace.Tracing;
-import io.opencensus.trace.samplers.Samplers;
-import java.io.IOException;
-import java.time.Instant;
-import java.util.Date;
-
-public class TraceSample {
-
-  // [START trace_setup_java_custom_span]
-  private static final Tracer tracer = Tracing.getTracer();
-
-  public static void doWork() {
-    // Create a child Span of the current Span.
-    try (Scope ss = tracer.spanBuilder("MyChildWorkSpan").startScopedSpan()) {
-      doInitialWork();
-      tracer.getCurrentSpan().addAnnotation("Finished initial work");
-      doFinalWork();
-    }
-  }
-
-  private static void doInitialWork() {
-    // ...
-    tracer.getCurrentSpan().addAnnotation("Doing initial work");
-    // ...
-  }
-
-  private static void doFinalWork() {
-    // ...
-    tracer.getCurrentSpan().addAnnotation("Hello world!");
-    // ...
-  }
-  // [END trace_setup_java_custom_span]
-
-  // [START trace_setup_java_full_sampling]
-  public static void doWorkFullSampled() {
-    try (Scope ss =
-        tracer
-            .spanBuilder("MyChildWorkSpan")
-            .setSampler(Samplers.alwaysSample())
-            .startScopedSpan()) {
-      doInitialWork();
-      tracer.getCurrentSpan().addAnnotation("Finished initial work");
-      doFinalWork();
-    }
-  }
-  // [END trace_setup_java_full_sampling]
-
-  // [START trace_setup_java_create_and_register]
-  public static void createAndRegister() throws IOException {
-    StackdriverTraceExporter.createAndRegister(StackdriverTraceConfiguration.builder().build());
-  }
-  // [END trace_setup_java_create_and_register]
-
-  // [START trace_setup_java_create_and_register_with_token]
-  public static void createAndRegisterWithToken(String accessToken) throws IOException {
-    Date expirationTime = Date.from(Instant.now().plusSeconds(60));
-
-    GoogleCredentials credentials =
-        GoogleCredentials.create(new AccessToken(accessToken, expirationTime));
-    StackdriverTraceExporter.createAndRegister(
-        StackdriverTraceConfiguration.builder()
-            .setProjectId("MyStackdriverProjectId")
-            .setCredentials(credentials)
-            .build());
-  }
-  // [END trace_setup_java_create_and_register_with_token]
-
-  // [START trace_setup_java_register_exporter]
-  public static void createAndRegisterGoogleCloudPlatform(String projectId) throws IOException {
-    StackdriverTraceExporter.createAndRegister(
-        StackdriverTraceConfiguration.builder().setProjectId(projectId).build());
-  }
-  // [END trace_setup_java_register_exporter]
-}
diff --git a/trace/src/test/java/com/example/trace/TraceSampleIT.java b/trace/src/test/java/com/example/trace/TraceSampleIT.java
deleted file mode 100644
index b4a09bca316..00000000000
--- a/trace/src/test/java/com/example/trace/TraceSampleIT.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2018 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.example.trace;
-
-import com.google.common.base.Strings;
-import io.opencensus.exporter.trace.stackdriver.StackdriverTraceExporter;
-import java.io.IOException;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
-
-/** Tests for stackdriver tracing sample. */
-@RunWith(JUnit4.class)
-@SuppressWarnings("checkstyle:abbreviationaswordinname")
-public class TraceSampleIT {
-  private static final String CLOUD_PROJECT_KEY = "GOOGLE_CLOUD_PROJECT";
-
-  @BeforeClass
-  public static void setup() {
-    Assert.assertFalse(Strings.isNullOrEmpty(System.getenv(CLOUD_PROJECT_KEY)));
-  }
-
-  @After
-  public void tearDown() {
-    StackdriverTraceExporter.unregister();
-  }
-
-  @Test
-  public void testCreateAndRegister() throws IOException {
-    TraceSample.createAndRegister();
-    TraceSample.doWork();
-  }
-
-  @Test
-  public void testCreateAndRegisterFullSampled() throws IOException {
-    TraceSample.createAndRegister();
-    TraceSample.doWorkFullSampled();
-  }
-
-  @Test
-  public void testCreateAndRegisterGoogleCloudPlatform() throws IOException {
-    TraceSample.createAndRegisterGoogleCloudPlatform(System.getenv(CLOUD_PROJECT_KEY));
-    TraceSample.doWork();
-  }
-}